// ===========================================================================
// File: iOS/G3MApp/G3MApp/G3MCanvas2DDemoScene.hpp
// Repo: AeroGlass/g3m @ a21a9e70a6205f1f37046ae85dafc6e3bfaeb917
// License: BSD-2-Clause
// ===========================================================================
//
// G3MCanvas2DDemoScene.hpp
// G3MApp
//
// Created by Diego Gomez Deck on 2/12/15.
// Copyright (c) 2015 Igo Software SL. All rights reserved.
//
#ifndef __G3MApp__G3MCanvas2DDemoScene__
#define __G3MApp__G3MCanvas2DDemoScene__
#include "G3MDemoScene.hpp"
class G3MCanvas2DDemoScene : public G3MDemoScene {
protected:
void rawActivate(const G3MContext* context);
void rawSelectOption(const std::string& option,
int optionIndex) {
// do nothing
}
public:
G3MCanvas2DDemoScene(G3MDemoModel* model) :
G3MDemoScene(model, "Canvas 2D", "", -1)
{
}
};
#endif
// ===========================================================================
// File: plugins/protein_calls/src/IntSelectionCall.cpp
// Repo: azuki-monster/megamol @ f5d75ae5630f9a71a7fbf81624bfd4f6b253c655
// License: BSD-3-Clause
// ===========================================================================
#include "stdafx.h"
#include "protein_calls/IntSelectionCall.h"
using namespace megamol;
using namespace megamol::protein_calls;
/*
* IntSelectionCall::CallForGetSelection
*/
const unsigned int IntSelectionCall::CallForGetSelection = 0;
/*
* IntSelectionCall::CallForSetSelection
*/
const unsigned int IntSelectionCall::CallForSetSelection = 1;
/*
* IntSelectionCall:IntSelectionCall
*/
IntSelectionCall::IntSelectionCall(void) : selection(NULL) {
}
/*
* IntSelectionCall::~IntSelectionCall
*/
IntSelectionCall::~IntSelectionCall(void) {
selection = NULL;
}
// ===========================================================================
// File: src/Interface/hiopInterface.hpp
// Repo: pelesh/hiop @ 26bf95fc380dfee6d251d6c870cf1b4c76841828
// License: BSD-3-Clause
// ===========================================================================
// Copyright (c) 2017, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory (LLNL).
// Written by Cosmin G. Petra, [email protected].
// LLNL-CODE-742473. All rights reserved.
//
// This file is part of HiOp. For details, see https://github.com/LLNL/hiop. HiOp
// is released under the BSD 3-clause license (https://opensource.org/licenses/BSD-3-Clause).
// Please also read “Additional BSD Notice” below.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
// i. Redistributions of source code must retain the above copyright notice, this list
// of conditions and the disclaimer below.
// ii. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the disclaimer (as noted below) in the documentation and/or
// other materials provided with the distribution.
// iii. Neither the name of the LLNS/LLNL nor the names of its contributors may be used to
// endorse or promote products derived from this software without specific prior written
// permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
// AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Additional BSD Notice
// 1. This notice is required to be provided under our contract with the U.S. Department
// of Energy (DOE). This work was produced at Lawrence Livermore National Laboratory under
// Contract No. DE-AC52-07NA27344 with the DOE.
// 2. Neither the United States Government nor Lawrence Livermore National Security, LLC
// nor any of their employees, makes any warranty, express or implied, or assumes any
// liability or responsibility for the accuracy, completeness, or usefulness of any
// information, apparatus, product, or process disclosed, or represents that its use would
// not infringe privately-owned rights.
// 3. Also, reference herein to any specific commercial products, process, or services by
// trade name, trademark, manufacturer or otherwise does not necessarily constitute or
// imply its endorsement, recommendation, or favoring by the United States Government or
// Lawrence Livermore National Security, LLC. The views and opinions of authors expressed
// herein do not necessarily state or reflect those of the United States Government or
// Lawrence Livermore National Security, LLC, and shall not be used for advertising or
// product endorsement purposes.
#ifndef HIOP_INTERFACE_BASE
#define HIOP_INTERFACE_BASE
#include "hiop_defs.hpp"
#ifdef HIOP_USE_MPI
#include "mpi.h"
#else
#ifndef MPI_Comm
#define MPI_Comm int
#endif
#ifndef MPI_COMM_WORLD
#define MPI_COMM_WORLD 0
#endif
#endif
namespace hiop
{
/** Solver status codes. */
enum hiopSolveStatus {
//(partial) success
Solve_Success=0,
Solve_Success_RelTol=1,
Solve_Acceptable_Level=2,
Infeasible_Problem=5,
Iterates_Diverging=6,
Feasible_Not_Optimal = 7,
//solver stopped based on user-defined criteria that are not related to optimality
Max_Iter_Exceeded=10,
Max_CpuTime_Exceeded=11,
User_Stopped=12,
//NLP algorithm/solver reports issues in solving the problem and stops without being certain
  //that it solved the problem to optimality or that the problem is infeasible.
//Feasible_Point_Found,
NlpAlgorithm_failure=-1,
Diverging_Iterates=-2,
Search_Dir_Too_Small=-3,
Steplength_Too_Small=-4,
Err_Step_Computation=-5,
//errors related to user-provided data (e.g., inconsistent problem specification, 'nans' in the
//function/sensitivity evaluations, invalid options)
Invalid_Problem_Definition=-11,
Invalid_Parallelization=-12,
Invalid_UserOption=-13,
Invalid_Number=-14,
Error_In_User_Function=-15,
//ungraceful errors and returns
Exception_Unrecoverable=-100,
Memory_Alloc_Problem=-101,
SolverInternal_Error=-199,
//unknown NLP solver errors or return codes
UnknownNLPSolveStatus=-1000,
SolveInitializationError=-1001,
//intermediary statuses for the solver
NlpSolve_IncompleteInit=-10001,
NlpSolve_SolveNotCalled=-10002,
NlpSolve_Pending=-10003
};
/** The base interface class */
class hiopInterfaceBase
{
  /** Base class for the solver's interface that makes no assumptions about how the
* matrices are stored. The vectors are dense and distributed row-wise.
* The data distribution is decided by the calling code (that implements
* this interface) and specified to the optimization via 'get_vecdistrib_info'
*
* Two possible implementations are for sparse NLPs and NLPs with small
* number of global constraints.
*
*
*/
public:
enum NonlinearityType{ hiopLinear=0, hiopQuadratic, hiopNonlinear};
public:
hiopInterfaceBase() {};
virtual ~hiopInterfaceBase() {};
/** problem dimensions: n number of variables, m number of constraints */
virtual bool get_prob_sizes(long long& n, long long& m)=0;
/** bounds on the variables
* (xlow<=-1e20 means no lower bound, xupp>=1e20 means no upper bound) */
virtual bool get_vars_info(const long long& n, double *xlow, double* xupp, NonlinearityType* type)=0;
/** bounds on the constraints
* (clow<=-1e20 means no lower bound, cupp>=1e20 means no upper bound) */
virtual bool get_cons_info(const long long& m, double* clow, double* cupp, NonlinearityType* type)=0;
/** Objective function evaluation
* When MPI enabled, each rank returns the obj. value. Also, x points to the local entries and
* the function is responsible for knowing the local buffer size.
*/
virtual bool eval_f(const long long& n, const double* x, bool new_x, double& obj_value)=0;
/** Gradient of objective.
* When MPI enabled, each rank works only with local buffers x and gradf.
*/
virtual bool eval_grad_f(const long long& n, const double* x, bool new_x, double* gradf)=0;
/** Evaluates a subset of the constraints cons(x) (where clow<=cons(x)<=cupp). The subset is of size
* 'num_cons' and is described by indexes in the 'idx_cons' array. The method will be called at each
* iteration separately for the equality constraints subset and for the inequality constraints subset.
* This is done for performance considerations, to avoid auxiliary/temporary storage and copying.
*
* Parameters:
* - n, m: the global number of variables and constraints
   * - num_cons: the number of constraints / the size of the subset to be evaluated
* - idx_cons: indexes in {1,2,...,m} of the constraints to be evaluated
* - x: the point where the constraints are to be evaluated
* - new_x: whether x has been changed from the previous call to f, grad_f, or Jac
* - cons: array of size num_cons containing the value of the constraints indicated by idx_cons
*
* When MPI enabled, every rank populates 'cons' since the constraints are not distributed.
*/
virtual bool eval_cons(const long long& n, const long long& m,
const long long& num_cons, const long long* idx_cons,
const double* x, bool new_x,
double* cons)=0;
/** Evaluates the constraints cons(x), both equalities and inequalities in one call.
*
* Parameters:
* - n, m: the global number of variables and constraints
* - x: the point where the constraints are to be evaluated
* - new_x: whether x has been changed from the previous call to f, grad_f, or Jac
   * - cons: array of size m containing the values of all the constraints
*
* When MPI enabled, every rank populates 'cons' since the constraints are not distributed.
*
* HiOp will first call 'eval_cons' above (twice). If the implementer/user wants the functionality
   * of the one-call 'eval_cons' below, they should return false from 'eval_cons' above; then HiOp will
* call the method below.
*
*/
virtual bool eval_cons(const long long& n, const long long& m,
const double* x, bool new_x,
double* cons) { return false; }
/** pass the communicator, defaults to MPI_COMM_WORLD (dummy for non-MPI builds) */
virtual bool get_MPI_comm(MPI_Comm& comm_out) { comm_out=MPI_COMM_WORLD; return true;}
/** column partitioning specification for distributed memory vectors
* Process P owns cols[P], cols[P]+1, ..., cols[P+1]-1, P={0,1,...,NumRanks}.
* Example: for a vector x of 6 elements on 3 ranks, the col partitioning is cols=[0,2,4,6].
* The caller manages memory associated with 'cols', array of size NumRanks+1
*/
virtual bool get_vecdistrib_info(long long global_n, long long* cols) {
return false; //defaults to serial
}
  /* Method providing a primal starting point. This point is subject to internal adjustments in HiOp.
   * The method returns true (and populates x0) or returns false, in which case HiOp will set
   * x0 to all zeros (still subject to internal adjustment).
*
* TODO: provide API for a full, primal-dual restart.
*/
virtual bool get_starting_point(const long long&n, double* x0) { return false; }
/** callback for the optimal solution.
* Note that:
* i. x, z_L, z_U contain only the array slice that is local to the calling process
* ii. g, lambda are replicated across all processes, which means they can be used as-is, without reducing them.
* iii. all other scalar quantities are replicated across all processes, which means they can be used as-is,
* without reducing them.
*/
virtual void solution_callback(hiopSolveStatus status,
int n, const double* x,
const double* z_L,
const double* z_U,
int m, const double* g,
const double* lambda,
double obj_value) { };
/** Callback for the iteration: at the end of each iteration. This is NOT called during the line-searches.
* Note: all the notes for @solution_callback apply.
*/
virtual bool iterate_callback(int iter, double obj_value,
int n, const double* x,
const double* z_L,
const double* z_U,
int m, const double* g,
const double* lambda,
double inf_pr, double inf_du,
double mu,
double alpha_du, double alpha_pr,
int ls_trials) {return true;}
private:
hiopInterfaceBase(const hiopInterfaceBase& ) {};
void operator=(const hiopInterfaceBase&) {};
};
/** Specialized interface for NLPs with 'global' but few constraints.
*/
class hiopInterfaceDenseConstraints : public hiopInterfaceBase
{
public:
hiopInterfaceDenseConstraints() {};
virtual ~hiopInterfaceDenseConstraints() {};
/** Evaluates the Jacobian of the subset of constraints indicated by idx_cons and of size num_cons.
* Example: Assuming idx_cons[k]=i, which means that the gradient of the (i+1)th constraint is
* to be evaluated, one needs to do Jac[k][0]=d/dx_0 con_i(x), Jac[k][1]=d/dx_1 con_i(x), ...
* When MPI enabled, each rank computes only the local columns of the Jacobian, that is the partials
* with respect to local variables.
*
* Parameters: see eval_cons
*/
virtual bool eval_Jac_cons(const long long& n, const long long& m,
const long long& num_cons, const long long* idx_cons,
const double* x, bool new_x,
double** Jac) = 0;
/** Evaluates the Jacobian of equality and inequality constraints in one call.
*
* The main difference from the above 'eval_Jac_cons' is that the implementer/user of this
* method does not have to split the constraints into equalities and inequalities; instead,
* HiOp does this internally.
*
* TODO: build an example (new one-call Nlp formulation derived from ex2) to illustrate this
   * feature and to test HiOp's internal implementation of eq.-ineq. splitting.
*/
virtual bool eval_Jac_cons(const long long& n, const long long& m,
const double* x, bool new_x,
double** Jac) { return false; }
};
/** Specialized interface for NLPs having mixed DENSE and sparse (MDS) blocks in the
* Jacobian and Hessian.
*
* More specifically, this interface is for specifying optimization problem in x
* split as (xs,xd), the rule of thumb being that xs have sparse derivatives and
* xd have dense derivatives
*
* min f(x) s.t. g(x) <= or = 0, lb<=x<=ub
*
* such that
* - Jacobian w.r.t. xs and LagrHessian w.r.t. (xs,xs) are sparse
* - Jacobian w.r.t. xd and LagrHessian w.r.t. (xd,xd) are dense
* - LagrHessian w.r.t (xs,xd) is zero (later this assumption will be relaxed)
*
* Notes
 * 1) HiOp expects the sparse variables first and then the dense variables. In many cases,
 * the implementer has to (inconveniently) keep a map between their internal variable
 * indexes and the indexes HiOp uses.
* 2) this interface is 'local' in the sense that data is not assumed to be
* distributed across MPI ranks ('get_vecdistrib_info' should return 'false')
*
*/
class hiopInterfaceMDS : public hiopInterfaceBase {
public:
hiopInterfaceMDS() {};
virtual ~hiopInterfaceMDS() {};
virtual bool get_sparse_dense_blocks_info(int& nx_sparse, int& nx_dense,
int& nnz_sparse_Jaceq, int& nnz_sparse_Jacineq,
int& nnz_sparse_Hess_Lagr_SS,
int& nnz_sparse_Hess_Lagr_SD) = 0;
/** Evaluates the Jacobian of constraints split in the sparse (triplet format) and
* dense matrices (rows storage)
*
* This method is called twice per Jacobian evaluation, once for equalities and once for
* inequalities (see 'eval_cons' for more information). It is advantageous to provide
* this method when the underlying NLP's constraints come naturally split in equalities
   * and inequalities. When it is not convenient to do so, use 'eval_Jac_cons' below.
*
* Parameters:
* - first six: see eval_cons (in parent class)
* - nnzJacS, iJacS, jJacS, MJacS: number of nonzeros, (i,j) indexes, and values of
* the sparse Jacobian
* - JacD: dense Jacobian as a contiguous array storing the matrix by rows; array is
* "primed" to support double indexing JacD[i][j]
*
* Notes for implementer of this method:
* 1) 'JacD' parameter will be always non-null
* 2) When 'iJacS' and 'jJacS' are non-null, the implementer should provide the (i,j)
* indexes.
* 3) When 'MJacS' is non-null, the implementer should provide the values corresponding to
* entries specified by 'iJacS' and 'jJacS'
* 4) 'iJacS' and 'jJacS' are both either non-null or null during a call.
* 5) Both 'iJacS'/'jJacS' and 'MJacS' can be non-null during the same call or only one of them
* non-null; but they will not be both null.
*
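   * Illustrative sketch (hypothetical values) of the triplet convention used
   * for the sparse block: nonzeros at positions (0,0) and (1,2) would be
   * reported as iJacS = {0, 1}, jJacS = {0, 2}, MJacS = {v00, v12}.
   *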
*/
virtual bool eval_Jac_cons(const long long& n, const long long& m,
const long long& num_cons, const long long* idx_cons,
const double* x, bool new_x,
const long long& nsparse, const long long& ndense,
const int& nnzJacS, int* iJacS, int* jJacS, double* MJacS,
double** JacD) = 0;
/** Evaluates the Jacobian of equality and inequality constraints in one call. This Jacobian is
* mixed dense-sparse (MDS), which means is structurally split in the sparse (triplet format) and
* dense matrices (rows storage)
*
* The main difference from the above 'eval_Jac_cons' is that the implementer/user of this
* method does not have to split the constraints into equalities and inequalities; instead,
* HiOp does this internally.
*
* Parameters:
* - first four: number of variables, number of constraints, (primal) variables at which the
* Jacobian should be evaluated, and boolean flag indicating whether the variables 'x' have
   * changed since a previous call to any of the function and derivative evaluations.
* - nsparse and ndense: number of sparse and dense variables, respectively; must add
* up to 'n'
* - nnzJacS, iJacS, jJacS, MJacS: number of nonzeros, (i,j) indexes, and values of
* the sparse Jacobian block; indexes are within the sparse Jacobian block (not within
* the entire Jacobian)
* - JacD: dense Jacobian block as a contiguous array storing the matrix by rows; array is
* "primed" to support double indexing JacD[i][j]
*
* Notes for implementer of this method:
* 1) 'JacD' parameter will be always non-null
* 2) When 'iJacS' and 'jJacS' are non-null, the implementer should provide the (i,j)
* indexes.
* 3) When 'MJacS' is non-null, the implementer should provide the values corresponding to
* entries specified by 'iJacS' and 'jJacS'
* 4) 'iJacS' and 'jJacS' are both either non-null or null during a call.
* 5) Both 'iJacS'/'jJacS' and 'MJacS' can be non-null during the same call or only one of them
* non-null; but they will not be both null.
*
* HiOp will call this method whenever the implementer/user returns false from the 'eval_Jac_cons'
* (which is called for equalities and inequalities separately) above.
*/
virtual bool eval_Jac_cons(const long long& n, const long long& m,
const double* x, bool new_x,
const long long& nsparse, const long long& ndense,
const int& nnzJacS, int* iJacS, int* jJacS, double* MJacS,
double** JacD){ return false; }
/** Evaluates the Hessian of the Lagrangian function in 3 structural blocks
* - HSS is the Hessian w.r.t.(xs,xs)
* - HDD is the Hessian w.r.t.(xd,xd)
* - HSD is the Hessian w.r.t (xs,xd)
*
* Note: HSD is for now assumed to be zero. The implementer should return nnzHSD=0
* during the first call to 'eval_Hess_Lagr'. On subsequent calls, HiOp will pass the
* triplet arrays for HSD set to NULL and the implementer (obviously) should not use them.
*
* Notes
   * 1)-5) from 'eval_Jac_cons' apply to the xxxHSS and HDD arrays
   * 6) The order of multipliers is: lambda=[lambda_eq, lambda_ineq]
*/
virtual bool eval_Hess_Lagr(const long long& n, const long long& m,
const double* x, bool new_x, const double& obj_factor,
const double* lambda, bool new_lambda,
const long long& nsparse, const long long& ndense,
const int& nnzHSS, int* iHSS, int* jHSS, double* MHSS,
double** HDD,
int& nnzHSD, int* iHSD, int* jHSD, double* MHSD) = 0;
};
} //end of namespace
#endif
// ===========================================================================
// File: ex2/matmultran/src/main.cpp
// Repo: akowalew/rim-lab @ 4ffc992c543e1ed7fcaa7c88a0fcd94d09daa829
// License: MIT
// ===========================================================================
#include <cmath>
#include <cstdio>
#include <ctime>
#include <chrono>
#include <thread>
#include "matmultran.hpp"
void alloc_mem(int m, int n, float **A_ptr, float **C_ptr, float **D_ptr)
{
*A_ptr = (float *) malloc(m * n * sizeof(float));
*C_ptr = (float *) malloc(m * m * sizeof(float));
*D_ptr = (float *) malloc(m * m * sizeof(float));
}
void free_mem(float *A, float *C, float *D)
{
free(A);
free(C);
free(D);
}
void read_data(int *m_ptr, int *n_ptr, float **A_ptr, float **C_ptr, float **D_ptr)
{
FILE *f = fopen("matmultran.dat", "rb");
fread(m_ptr, sizeof(int), 1, f);
fread(n_ptr, sizeof(int), 1, f);
alloc_mem(*m_ptr, *n_ptr, A_ptr, C_ptr, D_ptr);
fread(*A_ptr, sizeof(float), *m_ptr * *n_ptr, f);
fread(*D_ptr, sizeof(float), *m_ptr * *m_ptr, f);
fclose(f);
}
void matcmp(float *C, float *D, int m, int n)
{
int k;
float d, e = -1.0f;
for (k = 0; k < m * n; k++)
{
if ((d = fabsf(C[k] - D[k])) > e)
{
e = d;
}
}
printf("max. abs. err. = %.1e\n", e);
}
#ifdef _WIN32
#define WINDOWS_LEAN_AND_MEAN
#include <windows.h>
typedef LARGE_INTEGER app_timer_t;
#define timer(t_ptr) QueryPerformanceCounter(t_ptr)
void elapsed_time(app_timer_t start, app_timer_t stop, double flop)
{
double etime;
LARGE_INTEGER clk_freq;
QueryPerformanceFrequency(&clk_freq);
etime = (stop.QuadPart - start.QuadPart) / (double)clk_freq.QuadPart;
printf("CPU (total!) time = %.3f ms (%6.3f GFLOP/s)\n", etime * 1e3, 1e-9 * flop / etime);
}
#else
using app_timer_t = std::chrono::time_point<std::chrono::steady_clock>;
#define timer(t_ptr) *t_ptr = std::chrono::steady_clock::now()
void elapsed_time(app_timer_t start, app_timer_t stop, double flop)
{
const auto diff = stop - start;
const auto diff_ms = std::chrono::duration_cast<std::chrono::milliseconds>(diff);
const auto diff_ms_count = diff_ms.count();
printf("CPU (total!) time = %ldms (%6.3f GFLOP/s)\n", diff_ms_count, flop/diff_ms_count);
}
#endif
int main(int argc, char *argv[])
{
app_timer_t start, stop;
int m, n;
float *A, *C, *D;
read_data(&m, &n, &A, &C, &D);
timer(&start);
matmultran(C, A, m, n);
timer(&stop);
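	// The flop count below is 2*m*m*n: the m-by-m result has m*m entries,
	// each an n-term dot product costing one multiply and one add per term.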
	elapsed_time(start, stop, 2.0 * m * m * n); // 2.0 avoids int overflow for large sizes
matcmp(C, D, m, m);
free_mem(A, C, D);
return 0;
}
// ===========================================================================
// File: src/day12.cpp
// Repo: beached/aoc_2017 @ 553d42e50b81384ad93aae6e0aec624ca7c8bf58
// License: MIT
// ===========================================================================
// The MIT License (MIT)
//
// Copyright (c) 2017 Darrell Wright
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files( the "Software" ), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <map>
#include <numeric>
#include <set>
#include <string>
#include <vector>
#include <daw/daw_string.h>
#include <daw/daw_string_view.h>
#include "day12.h"
#include "str_splitter.h"
namespace daw {
namespace aoc_2017 {
namespace day12 {
using node_name_t = std::string;
using node_t = std::set<node_name_t>;
using graph_t = std::map<node_name_t, node_t>;
namespace {
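				// Input lines look like "2 <-> 0, 3, 4": a node name, the "<->"
				// separator, then a comma-separated list of neighbouring nodes.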
std::pair<node_name_t, node_t> parse_line( daw::string_view line ) {
std::pair<node_name_t, node_t> result{};
result.first = line.pop_front( " " ).to_string( );
line.pop_front( "> " );
while( !line.empty( ) ) {
result.second.insert( line.pop_front( ", " ).to_string( ) );
}
return result;
}
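				// Flood fill: walk the bidirectional adjacency map from 'cur_node'
				// with an explicit stack and collect every reachable node.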
std::set<node_name_t> get_group( graph_t const &nodes, node_name_t cur_node ) {
std::set<node_name_t> visited{};
std::vector<node_name_t> to_visit{};
to_visit.push_back( cur_node );
while( !to_visit.empty( ) ) {
auto tmp = to_visit.back( );
to_visit.pop_back( );
visited.insert( tmp );
for( auto const &node : nodes.find( tmp )->second ) {
if( visited.count( node ) == 0 ) {
to_visit.push_back( node );
}
}
}
return visited;
}
graph_t parse_input( std::vector<std::string> lines ) {
graph_t graph{};
for( auto const &line : lines ) {
auto tmp = parse_line( line );
graph[tmp.first].insert( tmp.second.begin( ), tmp.second.end( ) );
for( auto const &node : tmp.second ) {
graph[node].insert( tmp.first );
}
}
return graph;
}
} // namespace
intmax_t count_connections_to_zero( std::vector<std::string> lines ) {
auto graph = parse_input( lines );
return static_cast<intmax_t>( get_group( graph, "0" ).size( ) );
}
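			// Two nodes in the same component yield identical reachable sets, so
			// inserting each node's group into a std::set dedupes the components;
			// the set's size is therefore the number of groups.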
intmax_t num_groups( std::vector<std::string> lines ) {
auto graph = parse_input( lines );
std::set<node_t> groups{};
for( auto const &node : graph ) {
groups.insert( get_group( graph, node.first ) );
}
return static_cast<intmax_t>( groups.size( ) );
}
} // namespace day12
} // namespace aoc_2017
} // namespace daw
// ===========================================================================
// File: main.cpp
// Repo: abitrolly/watchman @ 658e9ec680fe7691e1fe1d136180b282511a301a
// License: Apache-2.0
// ===========================================================================
/* Copyright 2012-present Facebook, Inc.
* Licensed under the Apache License, Version 2.0 */
#include "watchman.h"
#include "ChildProcess.h"
#include "Logging.h"
#include "ThreadPool.h"
#ifndef _WIN32
#include <poll.h>
#endif
#include <folly/Exception.h>
#include <folly/ScopeGuard.h>
#include <folly/Singleton.h>
using watchman::ChildProcess;
using watchman::FileDescriptor;
using Options = ChildProcess::Options;
using namespace watchman;
static int show_help = 0;
static int show_version = 0;
static enum w_pdu_type server_pdu = is_bser;
static enum w_pdu_type output_pdu = is_json_pretty;
static uint32_t server_capabilities = 0;
static uint32_t output_capabilities = 0;
static char* server_encoding = NULL;
static char* output_encoding = NULL;
static char* test_state_dir = NULL;
static char* pid_file = NULL;
static char** daemon_argv = NULL;
static int persistent = 0;
static int foreground = 0;
static int no_pretty = 0;
static int no_spawn = 0;
static int no_local = 0;
static int no_site_spawner = 0;
#ifndef _WIN32
static int inetd_style = 0;
static struct sockaddr_un un;
#endif
static int json_input_arg = 0;
#ifdef __APPLE__
#include <mach-o/dyld.h>
#endif
static const char* compute_user_name(void);
static void compute_file_name(
char** strp,
const char* user,
const char* suffix,
const char* what);
static bool lock_pidfile(void) {
// We defer computing this path until we're in the server context because
// eager evaluation can trigger integration test failures unless all clients
// are aware of both the pidfile and the sockpath being used in the tests.
compute_file_name(&pid_file, compute_user_name(), "pid", "pidfile");
#if !defined(USE_GIMLI) && !defined(_WIN32)
struct flock lock;
pid_t mypid;
mypid = getpid();
memset(&lock, 0, sizeof(lock));
lock.l_type = F_WRLCK;
lock.l_start = 0;
lock.l_whence = SEEK_SET;
lock.l_len = 0;
FileDescriptor fd(open(pid_file, O_RDWR | O_CREAT, 0644));
if (!fd) {
w_log(
W_LOG_ERR,
"Failed to open pidfile %s for write: %s\n",
pid_file,
strerror(errno));
return false;
}
// Ensure that no children inherit the locked pidfile descriptor
fd.setCloExec();
if (fcntl(fd.fd(), F_SETLK, &lock) != 0) {
char pidstr[32];
int len;
    len = read(fd.fd(), pidstr, sizeof(pidstr) - 1);
    if (len < 0) {
      // If we can't read the owner's pid, report an empty string rather
      // than indexing pidstr with a negative length.
      len = 0;
    }
    pidstr[len] = '\0';
w_log(
W_LOG_ERR,
"Failed to lock pidfile %s: process %s owns it: %s\n",
pid_file,
pidstr,
strerror(errno));
return false;
}
// Replace contents of the pidfile with our pid string
if (ftruncate(fd.fd(), 0)) {
w_log(
W_LOG_ERR,
"Failed to truncate pidfile %s: %s\n",
pid_file,
strerror(errno));
return false;
}
auto pidString = watchman::to<std::string>(mypid);
ignore_result(write(fd.fd(), pidString.data(), pidString.size()));
fsync(fd.fd());
/* We are intentionally not closing the fd and intentionally not storing
* a reference to it anywhere: the intention is that it remain locked
* for the rest of the lifetime of our process.
* close(fd); // NOPE!
*/
fd.release();
return true;
#else
// One does not simply, and without risk of races, write a pidfile
// on win32. Instead we're using a named mutex in the global namespace.
// This gives us a very simple way to exclusively claim ownership of
// the lock for this user. To make things a little more complicated,
// since we scope our locks based on the state dir location and require
// this to work for our integration tests, we need to create a unique
// name per state dir. This is made even more interesting because
// we are forbidden from using windows directory separator characters
// in the name, so we cannot simply concatenate the state dir path
// with a watchman specific prefix. Instead we iterate the path
// and rewrite any backslashes with forward slashes and use that
// for the name.
// Using a mutex for this does make it more awkward to discover
// the process id of the exclusive owner, but that's not critically
// important; it is possible to connect to the instance and issue
// a get-pid command if that is needed.
// We use the global namespace so that we ensure that we have one
// watchman process per user per state dir location. If we didn't
  // use the Global namespace we'd end up using a local namespace scoped
// to the user session and that might cause confusion/insanity if
// they are doing something elaborate like being logged in via
// ssh in multiple sessions and expecting to share state.
std::string name("Global\\Watchman-");
const auto* it = pid_file;
while (*it != 0) {
if (*it == '\\') {
// We're not allowed to use backslash in the name, so normalize
// to forward slashes.
name.append("/");
} else {
name.append(it, 1);
}
++it;
}
auto mutex = CreateMutexA(nullptr, true, name.c_str());
if (!mutex) {
watchman::log(
watchman::ERR,
"Failed to create mutex named: ",
name,
": ",
GetLastError(),
"\n");
return false;
}
if (GetLastError() == ERROR_ALREADY_EXISTS) {
watchman::log(
watchman::ERR,
"Failed to acquire mutex named: ",
name,
"; watchman is already running for this context\n");
return false;
}
/* We are intentionally not closing the mutex and intentionally not storing
* a reference to it anywhere: the intention is that it remain locked
* for the rest of the lifetime of our process.
* CloseHandle(mutex); // NOPE!
*/
return true;
#endif
}
#ifndef _WIN32
// Returns the current process priority aka `nice` level.
// Since `-1` is a valid nice level, in order to detect an
// error we clear errno first and then test whether it is
// non-zero after we have retrieved the nice value.
static int get_nice_value() {
errno = 0;
auto value = nice(0);
folly::checkPosixError(errno, "failed to get `nice` value");
return value;
}
static void check_nice_value() {
if (get_nice_value() > cfg_get_int("min_acceptable_nice_value", 0)) {
watchman::log(
watchman::FATAL,
"Watchman is running at a lower than normal priority. Since that "
"results in poor performance that is otherwise very difficult to "
"trace, diagnose and debug, Watchman is refusing to start.\n");
}
}
#endif
static void run_service(void) {
int fd;
bool res;
#ifndef _WIN32
// Before we redirect stdin/stdout to the log files, move any inetd-provided
// socket to a different descriptor number.
if (inetd_style) {
w_listener_prep_inetd();
}
if (isatty(0)) {
// This case can happen when a user is running watchman using
// the `--foreground` switch.
// Check and raise this error before we detach from the terminal
check_nice_value();
}
#endif
// redirect std{in,out,err}
fd = ::open("/dev/null", O_RDONLY);
if (fd != -1) {
ignore_result(::dup2(fd, STDIN_FILENO));
::close(fd);
}
fd = open(log_name, O_WRONLY | O_APPEND | O_CREAT, 0600);
if (fd != -1) {
ignore_result(::dup2(fd, STDOUT_FILENO));
ignore_result(::dup2(fd, STDERR_FILENO));
::close(fd);
}
#ifndef _WIN32
// If we weren't attached to a tty, check this now that we've opened
// the log files so that we can log the problem there.
check_nice_value();
#endif
if (!lock_pidfile()) {
return;
}
#ifndef _WIN32
/* we are the child, let's set things up */
ignore_result(chdir("/"));
#endif
w_set_thread_name("listener");
{
char hostname[256];
gethostname(hostname, sizeof(hostname));
hostname[sizeof(hostname) - 1] = '\0';
w_log(
W_LOG_ERR,
"Watchman %s %s starting up on %s\n",
PACKAGE_VERSION,
#ifdef WATCHMAN_BUILD_INFO
WATCHMAN_BUILD_INFO,
#else
"<no build info set>",
#endif
hostname);
}
#ifndef _WIN32
// Block SIGCHLD by default; we only want it to be delivered
// to the reaper thread and only when it is ready to reap.
// This MUST happen before we spawn any threads so that they
// can pick up our default blocked signal mask.
{
sigset_t sigset;
sigemptyset(&sigset);
sigaddset(&sigset, SIGCHLD);
sigprocmask(SIG_BLOCK, &sigset, NULL);
}
#endif
watchman::getThreadPool().start(
cfg_get_int("thread_pool_worker_threads", 16),
cfg_get_int("thread_pool_max_items", 1024 * 1024));
ClockSpec::init();
w_state_load();
res = w_start_listener(sock_name);
w_root_free_watched_roots();
cfg_shutdown();
watchman::log(watchman::ERR, "Exiting from service with res=", res, "\n");
if (res) {
exit(0);
}
exit(1);
}
#ifndef _WIN32
// close any random descriptors that we may have inherited,
// leaving only the main stdio descriptors open, if we execute a
// child process.
static void close_random_fds(void) {
struct rlimit limit;
long open_max = 0;
int max_fd;
// Deduce the upper bound for number of descriptors
limit.rlim_cur = 0;
#ifdef RLIMIT_NOFILE
if (getrlimit(RLIMIT_NOFILE, &limit) != 0) {
limit.rlim_cur = 0;
}
#elif defined(RLIM_OFILE)
if (getrlimit(RLIMIT_OFILE, &limit) != 0) {
limit.rlim_cur = 0;
}
#endif
#ifdef _SC_OPEN_MAX
open_max = sysconf(_SC_OPEN_MAX);
#endif
if (open_max <= 0) {
open_max = 36; /* POSIX_OPEN_MAX (20) + some padding */
}
if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur > INT_MAX) {
// "no limit", which seems unlikely
limit.rlim_cur = INT_MAX;
}
// Take the larger of the two values we compute
if (limit.rlim_cur > (rlim_t)open_max) {
open_max = limit.rlim_cur;
}
for (max_fd = open_max; max_fd > STDERR_FILENO; --max_fd) {
close(max_fd);
}
}
#endif
#if !defined(USE_GIMLI) && !defined(_WIN32)
static void daemonize(void) {
// Make sure we're not about to inherit an undesirable nice value
check_nice_value();
close_random_fds();
// the double-fork-and-setsid trick establishes a
// child process that runs in its own process group
// with its own session and that won't get killed
// off when your shell exits (for example).
if (fork()) {
// The parent of the first fork is the client
// process that is being run by the user, and
// we want to allow that to continue.
return;
}
setsid();
if (fork()) {
// The parent of the second fork has served its
// purpose, so we simply exit here, otherwise
// we'll duplicate the effort of either the
// client or the server depending on if we
// return or not.
_exit(0);
}
// we are the child, let's set things up
run_service();
}
#endif
#ifdef _WIN32
static void spawn_win32(void) {
char module_name[WATCHMAN_NAME_MAX];
GetModuleFileName(NULL, module_name, sizeof(module_name));
Options opts;
opts.setFlags(POSIX_SPAWN_SETPGROUP);
opts.open(STDIN_FILENO, "/dev/null", O_RDONLY, 0666);
opts.open(STDOUT_FILENO, log_name, O_WRONLY | O_CREAT | O_APPEND, 0600);
opts.dup2(STDOUT_FILENO, STDERR_FILENO);
std::vector<w_string_piece> args{module_name, "--foreground"};
for (size_t i = 0; daemon_argv[i]; i++) {
args.push_back(daemon_argv[i]);
}
ChildProcess proc(args, std::move(opts));
proc.disown();
}
#endif
#ifdef USE_GIMLI
static void spawn_via_gimli(void) {
std::vector<w_string_piece> args{
GIMLI_MONITOR_PATH,
#ifdef WATCHMAN_STATE_DIR
"--trace-dir=" WATCHMAN_STATE_DIR "/traces",
#endif
"--pidfile",
pid_file,
"watchman",
"--foreground",
};
for (size_t i = 0; daemon_argv[i]; i++) {
args.push_back(daemon_argv[i]);
}
close_random_fds();
Options opts;
opts.open(STDIN_FILENO, "/dev/null", O_RDONLY, 0666);
opts.open(STDOUT_FILENO, log_name, O_WRONLY | O_CREAT | O_APPEND, 0600);
opts.dup2(STDOUT_FILENO, STDERR_FILENO);
ChildProcess proc(args, std::move(opts));
proc.disown();
}
#endif
#ifndef _WIN32
// Spawn watchman via a site-specific spawn helper program.
// We'll pass along any daemon-appropriate arguments that
// we noticed during argument parsing.
static void spawn_site_specific(const char* spawner) {
std::vector<w_string_piece> args{
spawner,
};
for (size_t i = 0; daemon_argv[i]; i++) {
args.push_back(daemon_argv[i]);
}
close_random_fds();
// Note that we're not setting up the output to go to the log files
// here. This is intentional; we'd like any failures in the spawner
  // to bubble up to the user, since having things silently fail and get
// logged to the server log doesn't provide any obvious cues to the
// user about what went wrong. Watchman will open and redirect output
// to its log files when it ultimately is launched and enters the
// run_service() function above.
// However, we do need to make sure that any output from both stdout
// and stderr goes to stderr of the end user.
Options opts;
opts.open(STDIN_FILENO, "/dev/null", O_RDONLY, 0666);
opts.dup2(STDERR_FILENO, STDOUT_FILENO);
opts.dup2(STDERR_FILENO, STDERR_FILENO);
try {
ChildProcess proc(args, std::move(opts));
auto res = proc.wait();
if (WIFEXITED(res) && WEXITSTATUS(res) == 0) {
return;
}
if (WIFEXITED(res)) {
log(FATAL, spawner, ": exited with status ", WEXITSTATUS(res), "\n");
} else if (WIFSIGNALED(res)) {
log(FATAL, spawner, ": signaled with ", WTERMSIG(res), "\n");
}
log(FATAL, spawner, ": failed to start, exit status ", res, "\n");
} catch (const std::exception& exc) {
log(FATAL,
"Failed to spawn watchman via `",
spawner,
"': ",
exc.what(),
"\n");
}
}
#endif
#ifdef __APPLE__
static void spawn_via_launchd(void) {
char watchman_path[WATCHMAN_NAME_MAX];
uint32_t size = sizeof(watchman_path);
char plist_path[WATCHMAN_NAME_MAX];
FILE* fp;
struct passwd* pw;
uid_t uid;
close_random_fds();
if (_NSGetExecutablePath(watchman_path, &size) == -1) {
log(FATAL, "_NSGetExecutablePath: path too long; size ", size, "\n");
}
uid = getuid();
pw = getpwuid(uid);
if (!pw) {
log(FATAL,
"getpwuid(",
uid,
") failed: ",
strerror(errno),
". I don't know who you are\n");
}
snprintf(
plist_path, sizeof(plist_path), "%s/Library/LaunchAgents", pw->pw_dir);
// Best effort attempt to ensure that the agents dir exists. We'll detect
// and report the failure in the fopen call below.
mkdir(plist_path, 0755);
snprintf(
plist_path,
sizeof(plist_path),
"%s/Library/LaunchAgents/com.github.facebook.watchman.plist",
pw->pw_dir);
if (access(plist_path, R_OK) == 0) {
// Unload any that may already exist, as it is likely wrong
ChildProcess unload_proc(
{"/bin/launchctl", "unload", "-F", plist_path}, Options());
unload_proc.wait();
// Forcibly remove the plist. In some cases it may have some attributes
// set that prevent launchd from loading it. This can happen where
// the system was re-imaged or restored from a backup
unlink(plist_path);
}
fp = fopen(plist_path, "w");
if (!fp) {
log(FATAL,
"Failed to open ",
plist_path,
" for write: ",
strerror(errno),
"\n");
}
compute_file_name(&pid_file, compute_user_name(), "pid", "pidfile");
auto plist_content = watchman::to<std::string>(
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
"<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" "
"\"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n"
"<plist version=\"1.0\">\n"
"<dict>\n"
" <key>Label</key>\n"
" <string>com.github.facebook.watchman</string>\n"
" <key>Disabled</key>\n"
" <false/>\n"
" <key>ProgramArguments</key>\n"
" <array>\n"
" <string>",
watchman_path,
"</string>\n"
" <string>--foreground</string>\n"
" <string>--logfile=",
log_name,
"</string>\n"
" <string>--log-level=",
log_level,
"</string>\n"
" <string>--sockname=",
sock_name,
"</string>\n"
" <string>--statefile=",
watchman_state_file,
"</string>\n"
" <string>--pidfile=",
pid_file,
"</string>\n"
" </array>\n"
" <key>KeepAlive</key>\n"
" <dict>\n"
" <key>Crashed</key>\n"
" <true/>\n"
" </dict>\n"
" <key>RunAtLoad</key>\n"
" <true/>\n"
" <key>EnvironmentVariables</key>\n"
" <dict>\n"
" <key>PATH</key>\n"
" <string><![CDATA[",
getenv("PATH"),
"]]></string>\n"
" </dict>\n"
" <key>ProcessType</key>\n"
" <string>Interactive</string>\n"
" <key>Nice</key>\n"
" <integer>-5</integer>\n"
"</dict>\n"
"</plist>\n");
fwrite(plist_content.data(), 1, plist_content.size(), fp);
fclose(fp);
// Don't rely on umask, ensure we have the correct perms
chmod(plist_path, 0644);
ChildProcess load_proc(
{"/bin/launchctl", "load", "-F", plist_path}, Options());
auto res = load_proc.wait();
if (WIFEXITED(res) && WEXITSTATUS(res) == 0) {
return;
}
// Most likely cause is "headless" operation with no GUI context
if (WIFEXITED(res)) {
w_log(W_LOG_ERR, "launchctl: exited with status %d\n", WEXITSTATUS(res));
} else if (WIFSIGNALED(res)) {
w_log(W_LOG_ERR, "launchctl: signaled with %d\n", WTERMSIG(res));
}
w_log(W_LOG_ERR, "Falling back to daemonize\n");
daemonize();
}
#endif
static void parse_encoding(const char* enc, enum w_pdu_type* pdu) {
if (!enc) {
return;
}
if (!strcmp(enc, "json")) {
*pdu = is_json_compact;
return;
}
if (!strcmp(enc, "bser")) {
*pdu = is_bser;
return;
}
if (!strcmp(enc, "bser-v2")) {
*pdu = is_bser_v2;
return;
}
w_log(
W_LOG_ERR,
"Invalid encoding '%s', use one of json, bser or bser-v2\n",
enc);
exit(EX_USAGE);
}
static const char* get_env_with_fallback(
const char* name1,
const char* name2,
const char* fallback) {
const char* val;
val = getenv(name1);
if (!val || *val == 0) {
val = getenv(name2);
}
if (!val || *val == 0) {
val = fallback;
}
return val;
}
static void compute_file_name(
char** strp,
const char* user,
const char* suffix,
const char* what) {
char* str = NULL;
str = *strp;
if (!str) {
/* We'll put our various artifacts in a user specific dir
* within the state dir location */
char* state_dir = NULL;
const char* state_parent = test_state_dir ? test_state_dir :
#ifdef WATCHMAN_STATE_DIR
WATCHMAN_STATE_DIR
#else
watchman_tmp_dir
#endif
;
ignore_result(asprintf(&state_dir, "%s/%s-state", state_parent, user));
if (!state_dir) {
log(FATAL, "out of memory computing ", what, "\n");
}
if (mkdir(state_dir, 0700) == 0 || errno == EEXIST) {
#ifndef _WIN32
// verify ownership
struct stat st;
int dir_fd;
int ret = 0;
uid_t euid = geteuid();
// TODO: also allow a gid to be specified here
const char* sock_group_name = cfg_get_string("sock_group", nullptr);
// S_ISGID is set so that files inside this directory inherit the group
// name
mode_t dir_perms =
cfg_get_perms(
"sock_access", false /* write bits */, true /* execute bits */) |
S_ISGID;
auto dirp =
w_dir_open(state_dir, false /* don't need strict symlink rules */);
dir_fd = dirp->getFd();
if (dir_fd == -1) {
w_log(W_LOG_ERR, "dirfd(%s): %s\n", state_dir, strerror(errno));
goto bail;
}
if (fstat(dir_fd, &st) != 0) {
w_log(W_LOG_ERR, "fstat(%s): %s\n", state_dir, strerror(errno));
ret = 1;
goto bail;
}
if (euid != st.st_uid) {
w_log(
W_LOG_ERR,
"the owner of %s is uid %d and doesn't match your euid %d\n",
state_dir,
st.st_uid,
euid);
ret = 1;
goto bail;
}
if (st.st_mode & 0022) {
w_log(
W_LOG_ERR,
"the permissions on %s allow others to write to it. "
"Verify that you own the contents and then fix its "
"permissions by running `chmod 0700 %s`\n",
state_dir,
state_dir);
ret = 1;
goto bail;
}
if (sock_group_name) {
const struct group* sock_group = w_get_group(sock_group_name);
if (!sock_group) {
ret = 1;
goto bail;
}
if (fchown(dir_fd, -1, sock_group->gr_gid) == -1) {
w_log(
W_LOG_ERR,
"setting up group '%s' failed: %s\n",
sock_group_name,
strerror(errno));
ret = 1;
goto bail;
}
}
// Depending on group and world accessibility, change permissions on the
// directory. We can't leave the directory open and set permissions on the
// socket because not all POSIX systems respect permissions on UNIX domain
// sockets, but all POSIX systems respect permissions on the containing
// directory.
w_log(W_LOG_DBG, "Setting permissions on state dir to 0%o\n", dir_perms);
if (fchmod(dir_fd, dir_perms) == -1) {
w_log(
W_LOG_ERR,
"fchmod(%s, %#o): %s\n",
state_dir,
dir_perms,
strerror(errno));
ret = 1;
goto bail;
}
bail:
if (ret) {
exit(ret);
}
#endif
} else {
w_log(
W_LOG_ERR,
"while computing %s: failed to create %s: %s\n",
what,
state_dir,
strerror(errno));
exit(1);
}
ignore_result(asprintf(&str, "%s/%s", state_dir, suffix));
if (!str) {
log(FATAL, "out of memory computing ", what, "\n");
}
free(state_dir);
}
#ifndef _WIN32
if (str[0] != '/') {
log(FATAL, "invalid ", what, ": ", str, "\n");
}
#endif
*strp = str;
}
static const char* compute_user_name(void) {
const char* user = get_env_with_fallback("USER", "LOGNAME", NULL);
#ifdef _WIN32
static char user_buf[256];
#endif
if (!user) {
#ifdef _WIN32
DWORD size = sizeof(user_buf);
if (GetUserName(user_buf, &size)) {
user_buf[size] = 0;
user = user_buf;
} else {
log(FATAL,
"GetUserName failed: ",
win32_strerror(GetLastError()),
". I don't know who you are\n");
}
#else
uid_t uid = getuid();
struct passwd* pw;
pw = getpwuid(uid);
if (!pw) {
log(FATAL,
"getpwuid(",
uid,
") failed: ",
strerror(errno),
". I don't know who you are\n");
}
user = pw->pw_name;
#endif
if (!user) {
log(FATAL, "watchman requires that you set $USER in your env\n");
}
}
return user;
}
static void setup_sock_name(void) {
const char* user = compute_user_name();
watchman_tmp_dir = get_env_with_fallback("TMPDIR", "TMP", "/tmp");
#ifdef _WIN32
if (!sock_name) {
asprintf(&sock_name, "\\\\.\\pipe\\watchman-%s", user);
}
#else
compute_file_name(&sock_name, user, "sock", "sockname");
#endif
compute_file_name(&watchman_state_file, user, "state", "statefile");
compute_file_name(&log_name, user, "log", "logname");
#ifdef USE_GIMLI
compute_file_name(&pid_file, user, "pid", "pidfile");
#endif
#ifndef _WIN32
if (strlen(sock_name) >= sizeof(un.sun_path) - 1) {
log(FATAL, sock_name, ": path is too long\n");
}
un.sun_family = PF_LOCAL;
memcpy(un.sun_path, sock_name, strlen(sock_name) + 1);
#endif
}
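// A connect failure tells us whether to spawn the service: ECONNREFUSED
// (the socket file exists but nothing is listening) and ENOENT (no socket
// file at all) both mean that no server is currently running.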
static bool should_start(int err) {
if (err == ECONNREFUSED) {
return true;
}
if (err == ENOENT) {
return true;
}
return false;
}
static bool try_command(json_t* cmd, int timeout) {
w_jbuffer_t buffer;
w_jbuffer_t output_pdu_buffer;
int err;
auto client = w_stm_connect(sock_name, timeout * 1000);
if (!client) {
return false;
}
if (!cmd) {
return true;
}
// Send command
if (!buffer.pduEncodeToStream(
server_pdu, server_capabilities, cmd, client.get())) {
err = errno;
w_log(W_LOG_ERR, "error sending PDU to server\n");
errno = err;
return false;
}
buffer.clear();
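  // Relay the server's response PDU(s) to stdout, transcoding to the
  // requested output encoding; persistent commands keep relaying until
  // the connection drops.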
do {
if (!buffer.passThru(
output_pdu,
output_capabilities,
&output_pdu_buffer,
client.get())) {
return false;
}
} while (persistent);
return true;
}
static struct watchman_getopt opts[] = {
{"help", 'h', "Show this help", OPT_NONE, &show_help, NULL, NOT_DAEMON},
#ifndef _WIN32
{"inetd",
0,
"Spawning from an inetd style supervisor",
OPT_NONE,
&inetd_style,
NULL,
IS_DAEMON},
#endif
{"no-site-spawner",
'S',
"Don't use the site or system spawner",
OPT_NONE,
&no_site_spawner,
NULL,
IS_DAEMON},
{"version",
'v',
"Show version number",
OPT_NONE,
&show_version,
NULL,
NOT_DAEMON},
{"sockname",
'U',
"Specify alternate sockname",
REQ_STRING,
&sock_name,
"PATH",
IS_DAEMON},
{"logfile",
'o',
"Specify path to logfile",
REQ_STRING,
&log_name,
"PATH",
IS_DAEMON},
{"log-level",
0,
"set the log level (0 = off, default is 1, verbose = 2)",
REQ_INT,
&log_level,
NULL,
IS_DAEMON},
#ifdef USE_GIMLI
{"pidfile",
0,
"Specify path to gimli monitor pidfile",
REQ_STRING,
&pid_file,
"PATH",
NOT_DAEMON},
#else
{"pidfile",
0,
"Specify path to pidfile",
REQ_STRING,
&pid_file,
"PATH",
IS_DAEMON},
#endif
{"persistent",
'p',
"Persist and wait for further responses",
OPT_NONE,
&persistent,
NULL,
NOT_DAEMON},
{"no-save-state",
'n',
"Don't save state between invocations",
OPT_NONE,
&dont_save_state,
NULL,
IS_DAEMON},
{"statefile",
0,
"Specify path to file to hold watch and trigger state",
REQ_STRING,
&watchman_state_file,
"PATH",
IS_DAEMON},
{"json-command",
'j',
"Instead of parsing CLI arguments, take a single "
"json object from stdin",
OPT_NONE,
&json_input_arg,
NULL,
NOT_DAEMON},
{"output-encoding",
0,
"CLI output encoding. json (default) or bser",
REQ_STRING,
&output_encoding,
NULL,
NOT_DAEMON},
{"server-encoding",
0,
"CLI<->server encoding. bser (default) or json",
REQ_STRING,
&server_encoding,
NULL,
NOT_DAEMON},
{"foreground",
'f',
"Run the service in the foreground",
OPT_NONE,
&foreground,
NULL,
NOT_DAEMON},
{"no-pretty",
0,
"Don't pretty print JSON",
OPT_NONE,
&no_pretty,
NULL,
NOT_DAEMON},
{"no-spawn",
0,
"Don't try to start the service if it is not available",
OPT_NONE,
&no_spawn,
NULL,
NOT_DAEMON},
{"no-local",
0,
"When no-spawn is enabled, don't try to handle request"
" in client mode if service is unavailable",
OPT_NONE,
&no_local,
NULL,
NOT_DAEMON},
// test-state-dir is for testing only and should not be used in production:
// instead, use the compile-time WATCHMAN_STATE_DIR option
{"test-state-dir", 0, NULL, REQ_STRING, &test_state_dir, "DIR", NOT_DAEMON},
{0, 0, 0, OPT_NONE, 0, 0, 0}};
static void parse_cmdline(int* argcp, char*** argvp) {
cfg_load_global_config_file();
w_getopt(opts, argcp, argvp, &daemon_argv);
if (show_help) {
usage(opts, stdout);
}
if (show_version) {
printf("%s\n", PACKAGE_VERSION);
exit(0);
}
watchman::getLog().setStdErrLoggingLevel(
static_cast<enum watchman::LogLevel>(log_level));
setup_sock_name();
parse_encoding(server_encoding, &server_pdu);
parse_encoding(output_encoding, &output_pdu);
if (!output_encoding) {
output_pdu = no_pretty ? is_json_compact : is_json_pretty;
}
// Prevent integration tests that call the watchman cli from
// accidentally spawning a server.
if (getenv("WATCHMAN_NO_SPAWN")) {
no_spawn = true;
}
}
static json_ref build_command(int argc, char** argv) {
int i;
// Read blob from stdin
if (json_input_arg) {
auto err = json_error_t();
w_jbuffer_t buf;
auto cmd = buf.decodeNext(w_stm_stdin(), &err);
if (buf.pdu_type == is_bser) {
// If they used bser for the input, select bser for output
// unless they explicitly requested something else
if (!server_encoding) {
server_pdu = is_bser;
}
if (!output_encoding) {
output_pdu = is_bser;
}
} else if (buf.pdu_type == is_bser_v2) {
// If they used bser v2 for the input, select bser v2 for output
// unless they explicitly requested something else
if (!server_encoding) {
server_pdu = is_bser_v2;
}
if (!output_encoding) {
output_pdu = is_bser_v2;
}
}
if (!cmd) {
fprintf(
stderr,
"failed to parse command from stdin: "
"line %d, column %d, position %d: %s\n",
err.line,
err.column,
err.position,
err.text);
exit(1);
}
return cmd;
}
// Special case: no arguments means that we just want
// to verify that the service is up, starting it if
// needed
if (argc == 0) {
return nullptr;
}
auto cmd = json_array();
for (i = 0; i < argc; i++) {
json_array_append_new(cmd, typed_string_to_json(argv[i], W_STRING_UNICODE));
}
return cmd;
}
static void spawn_watchman(void) {
#ifndef _WIN32
if (no_site_spawner) {
    // The astute reader will notice that we're calling daemonize() here
// and not the various other platform spawning functions in the block
// further below in this function. This is deliberate: we want
// to do the most simple background running possible when the
// no_site_spawner flag is used. In the future we plan to
// migrate the platform spawning functions to use the site_spawn
// functionality.
daemonize();
return;
}
// If we have a site-specific spawning requirement, then we'll
// invoke that spawner rather than using any of the built-in
// spawning functionality.
const char* site_spawn = cfg_get_string("spawn_watchman_service", nullptr);
if (site_spawn) {
spawn_site_specific(site_spawn);
return;
}
#endif
#ifdef USE_GIMLI
spawn_via_gimli();
#elif defined(__APPLE__)
spawn_via_launchd();
#elif defined(_WIN32)
spawn_win32();
#else
daemonize();
#endif
}
int main(int argc, char** argv) {
bool ran;
// Since we don't fully integrate with folly, but may pull
// in dependencies that do, we need to perform a little bit
// of bootstrapping. We don't want to run the full folly
// init today because it will interfere with our own signal
// handling. In the future we will integrate this properly.
folly::SingletonVault::singleton()->registrationComplete();
SCOPE_EXIT {
folly::SingletonVault::singleton()->destroyInstances();
};
parse_cmdline(&argc, &argv);
if (foreground) {
run_service();
return 0;
}
w_set_thread_name("cli");
auto cmd = build_command(argc, argv);
preprocess_command(cmd, output_pdu, output_capabilities);
ran = try_command(cmd, 0);
if (!ran && should_start(errno)) {
if (no_spawn) {
if (!no_local) {
ran = try_client_mode_command(cmd, !no_pretty);
}
} else {
spawn_watchman();
// Some site spawner scripts will asynchronously launch the service.
// When that happens we may encounter ECONNREFUSED. We need to
// tolerate this, so we add some retries.
int attempts = 10;
std::chrono::milliseconds interval(10);
while (true) {
ran = try_command(cmd, 10);
if (!ran && should_start(errno) && attempts-- > 0) {
/* sleep override */ std::this_thread::sleep_for(interval);
interval *= 2;
continue;
}
// Success or terminal failure
break;
}
}
}
if (ran) {
return 0;
}
if (!no_spawn) {
w_log(
W_LOG_ERR,
"unable to talk to your watchman on %s! (%s)\n",
sock_name,
strerror(errno));
#ifdef __APPLE__
if (getenv("TMUX")) {
w_log(
W_LOG_ERR,
"\n"
"You may be hitting a tmux related session issue.\n"
"An immediate workaround is to run:\n"
"\n"
" watchman version\n"
"\n"
"just once, from *outside* your tmux session, to allow the launchd\n"
"registration to be setup. Once done, you can continue to access\n"
"watchman from inside your tmux sessions as usual.\n"
"\n"
"Longer term, you may wish to install this tool:\n"
"\n"
" https://github.com/ChrisJohnsen/tmux-MacOSX-pasteboard\n"
"\n"
"and configure tmux to use `reattach-to-user-namespace`\n"
"when it launches your shell.\n");
}
#endif
}
return 1;
}
/* vim:ts=2:sw=2:et:
*/
| 26.27116 | 80 | 0.614373 | abitrolly |
4ff03862e9080e0d503f1a21891f33cd504b5ef4 | 3,303 | hpp | C++ | cppcache/src/FixedPartitionAttributesImpl.hpp | austxcodemonkey/geode-native | a816ac99cbbac557629686cb2542fdc74d464338 | [
"Apache-2.0"
] | 1 | 2018-09-08T05:05:22.000Z | 2018-09-08T05:05:22.000Z | cppcache/src/FixedPartitionAttributesImpl.hpp | austxcodemonkey/geode-native | a816ac99cbbac557629686cb2542fdc74d464338 | [
"Apache-2.0"
] | 1 | 2021-02-23T12:27:00.000Z | 2021-02-23T12:27:00.000Z | cppcache/src/FixedPartitionAttributesImpl.hpp | isabella232/geode-native | 0d9a99d5e0632de62df17921950cf3f6640efb33 | [
"Apache-2.0"
] | null | null | null | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef GEODE_FIXEDPARTITIONATTRIBUTESIMPL_H_
#define GEODE_FIXEDPARTITIONATTRIBUTESIMPL_H_
#include <geode/CacheableBuiltins.hpp>
#include <geode/CacheableString.hpp>
#include <geode/DataInput.hpp>
#include <geode/DataOutput.hpp>
#include <geode/Serializable.hpp>
namespace apache {
namespace geode {
namespace client {
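// Value object describing one fixed partition of a partitioned region: its
// name, whether this member holds the primary copy, the number of buckets
// it spans, and the id of the first bucket in that range.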
class FixedPartitionAttributesImpl : public internal::DataSerializableInternal {
private:
std::string m_partitionName;
bool m_isPrimary;
int m_numBuckets;
int m_startingBucketId;
public:
FixedPartitionAttributesImpl()
: Serializable(),
m_partitionName(nullptr),
m_isPrimary(false),
m_numBuckets(1),
m_startingBucketId(-1) {}
const std::string& getPartitionName() { return m_partitionName; }
int getNumBuckets() const { return m_numBuckets; }
int isPrimary() const { return m_isPrimary; }
void toData(DataOutput& output) const override {
output.writeString(m_partitionName);
output.writeBoolean(m_isPrimary);
output.writeInt(m_numBuckets);
output.writeInt(m_startingBucketId);
}
void fromData(DataInput& input) override {
m_partitionName = input.readString();
m_isPrimary = input.readBoolean();
m_numBuckets = input.readInt32();
m_startingBucketId = input.readInt32();
}
size_t objectSize() const override {
return sizeof(int) + sizeof(int) + sizeof(bool) +
(m_partitionName.length() *
sizeof(decltype(m_partitionName)::value_type));
}
FixedPartitionAttributesImpl& operator=(
const FixedPartitionAttributesImpl& rhs) {
if (this == &rhs) return *this;
this->m_partitionName = rhs.m_partitionName;
this->m_isPrimary = rhs.m_isPrimary;
this->m_numBuckets = rhs.m_numBuckets;
this->m_startingBucketId = rhs.m_startingBucketId;
return *this;
}
FixedPartitionAttributesImpl(const FixedPartitionAttributesImpl& rhs) {
this->m_partitionName = rhs.m_partitionName;
this->m_isPrimary = rhs.m_isPrimary;
this->m_numBuckets = rhs.m_numBuckets;
this->m_startingBucketId = rhs.m_startingBucketId;
}
int getStartingBucketID() const { return m_startingBucketId; }
int getLastBucketID() const { return m_startingBucketId + m_numBuckets - 1; }
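  // e.g. m_startingBucketId = 5, m_numBuckets = 3 -> buckets 5, 6 and 7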
bool hasBucket(int bucketId) {
return getStartingBucketID() <= bucketId && bucketId <= getLastBucketID();
}
};
} // namespace client
} // namespace geode
} // namespace apache
#endif // GEODE_FIXEDPARTITIONATTRIBUTESIMPL_H_
| 31.457143 | 80 | 0.735695 | austxcodemonkey |
4ff77fb96ce84be8fc4866cf29b0a17cd6a21fb0 | 5,312 | cpp | C++ | OOP/OOP-Homework-2/BigInteger.cpp | Rossoner40/NBU-Classwork-and-Homework | 823e5eab2da616ae6d965da9c0a22fa0212d7887 | [
"MIT"
] | null | null | null | OOP/OOP-Homework-2/BigInteger.cpp | Rossoner40/NBU-Classwork-and-Homework | 823e5eab2da616ae6d965da9c0a22fa0212d7887 | [
"MIT"
] | null | null | null | OOP/OOP-Homework-2/BigInteger.cpp | Rossoner40/NBU-Classwork-and-Homework | 823e5eab2da616ae6d965da9c0a22fa0212d7887 | [
"MIT"
] | null | null | null | #include "BigInteger.h"
BigInteger::BigInteger():n(0), val(NULL), pos(false){
}
BigInteger::BigInteger(std::string s){
	n = s.length();
	pos = (s[0]=='-')?false:true;
	if(!pos) n--; // the leading '-' is not a digit
	val = new int[n];
	for (int i = 0; i < n; i++)
	{
		// for negative numbers the digits start one position later
		val[i] = pos ? s[i]-'0' : s[i+1]-'0';
	}
}
BigInteger::BigInteger(const BigInteger & r){
n = r.n;
val = new int[n];
pos = r.pos;
for (int i = 0; i < n; i++)
{
val[i] = r.val[i];
}
}
BigInteger::~BigInteger(){
if(n!=0) delete [] val;
}
BigInteger& BigInteger::operator=(const BigInteger & r){
if(this!=&r){
if(n!=0) delete [] val;
n = r.n;
val = new int[n];
pos = r.pos;
for (int i = 0; i < n; i++)
{
val[i] = r.val[i];
}
}
return *this;
}
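// Digit-wise schoolbook arithmetic on reversed base-10 digit arrays, e.g.
// 478 + 95: reversed 8,7,4 and 5,9 -> column sums 13,17,5 -> digits 3,7,5
// after carrying -> result 573.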
BigInteger BigInteger::operator+(const BigInteger & r) const{
if(pos == r.pos){
int * t = reverse();
int * k = r.reverse();
int length = (n>r.n)? n:r.n;
int add_val = 0, curr1, curr2;
int * ans = new int[length+1];
for (int i = 0; i < length; i++)
{
curr1 = (i<n)?t[i]:0;
curr2 = (i<r.n)?k[i]:0;
ans[i] = (curr1+curr2+add_val)%10;
add_val = (curr1+curr2+add_val)/10;
}
if(add_val) ans[length] = add_val;
else ans[length] = 0;
BigInteger answer;
answer.pos = pos;
int cnt;
if(add_val){
cnt = length+1;
}
else cnt = length;
answer.n = cnt;
answer.val = new int[cnt];
for(int i=cnt-1, j=0; i>=0; i--,j++){
answer.val[j] = ans[i];
}
		delete [] ans;
		delete [] t;
		delete [] k;
		return answer;
}
else{
int sub_val=0;
BigInteger max = abs_max(r)?*this:r;
BigInteger min = !abs_max(r)?*this:r;
int * t = max.reverse();
int * k = min.reverse();
int length = max.n, curr1, curr2;
int * ans = new int[length];
for(int i=0; i<length; i++){
			curr1 = (i<max.n)?t[i]:0;
			curr1 -= sub_val;
			curr2 = (i<min.n)?k[i]:0;
if(curr1 < curr2){
ans[i] = curr1+10-curr2;
sub_val = 1;
}
else{
ans[i] = curr1-curr2;
sub_val = 0;
}
}
BigInteger answer;
answer.pos = max.pos;
int cnt=0;
bool flag = true;
for(int i=length-1; i>=0; i--){
if(!(ans[i] == 0 && flag)){
cnt++;
flag = false;
}
}
answer.n = cnt;
answer.val = new int[cnt];
for(int i=cnt-1, j=0; i>=0; i--,j++){
answer.val[j] = ans[i];
}
		delete [] ans;
		delete [] t;
		delete [] k;
		return answer;
}
	return BigInteger(); // not reached: both branches above return
}
BigInteger BigInteger::operator-(const BigInteger & r) const{
if(pos == r.pos){
int sub_val=0;
BigInteger max = abs_max(r)?*this:r;
BigInteger min = !abs_max(r)?*this:r;
int * t = max.reverse();
int * k = min.reverse();
int length = max.n, curr1, curr2;
int * ans = new int[length];
for(int i=0; i<length; i++){
			curr1 = (i<max.n)?t[i]:0;
			curr1 -= sub_val;
			curr2 = (i<min.n)?k[i]:0;
if(curr1 < curr2){
ans[i] = curr1+10-curr2;
sub_val = 1;
}
else{
ans[i] = curr1-curr2;
sub_val = 0;
}
}
BigInteger answer;
answer.pos = max.pos;
int cnt=0;
bool flag = true;
for(int i=length-1; i>=0; i--){
if(!(ans[i] == 0 && flag)){
cnt++;
flag = false;
}
}
answer.n = cnt;
answer.val = new int[cnt];
for(int i=cnt-1, j=0; i>=0; i--,j++){
answer.val[j] = ans[i];
}
		delete [] ans;
		delete [] t;
		delete [] k;
		return answer;
}
else{
int * t = reverse();
int * k = r.reverse();
int length = (n>r.n)? n:r.n;
int add_val = 0, curr1, curr2;
int * ans = new int[length+1];
for (int i = 0; i < length; i++)
{
curr1 = (i<n)?t[i]:0;
curr2 = (i<r.n)?k[i]:0;
ans[i] = (curr1+curr2+add_val)%10;
add_val = (curr1+curr2+add_val)/10;
}
if(add_val) ans[length] = add_val;
else ans[length] = 0;
BigInteger answer;
answer.pos = pos;
int cnt;
if(add_val){
cnt = length+1;
}
else cnt = length;
answer.n = cnt;
answer.val = new int[cnt];
for(int i=cnt-1, j=0; i>=0; i--,j++){
answer.val[j] = ans[i];
}
		delete [] ans;
		delete [] t;
		delete [] k;
		return answer;
}
	return BigInteger(); // not reached: both branches above return
}
bool BigInteger::operator >(const BigInteger & r)const{
	if(pos != r.pos){
		return pos;
	}
	// same sign: compare magnitudes; for negative numbers the
	// larger magnitude is the smaller value
	if(n != r.n) return (n > r.n) == pos;
	for (int i = 0; i < n; i++)
	{
		// the first differing digit (most significant first) decides
		if(val[i] != r.val[i]) return (val[i] > r.val[i]) == pos;
	}
	return false;
}
bool BigInteger::operator <(const BigInteger & r)const{
	// strict ordering: equal values are neither < nor >
	return r > *this;
}
bool BigInteger::abs_max(const BigInteger & r)const{
	if(n>r.n) return true;
	else if(n<r.n) return false;
	for (int i = 0; i < n; i++)
	{
		// the first differing digit (most significant first) decides
		if(val[i] != r.val[i]) return val[i] > r.val[i];
	}
	return false;
}
int BigInteger::getN()const{
return n;
}
void BigInteger::setN(int length){
n = length;
}
int * BigInteger::reverse() const{
int * r = new int[n];
int ind=0;
for(int i=n-1; i>=0; i--){
r[ind] = val[i];
ind++;
}
return r;
}
std::ostream& operator << (std::ostream& out, const BigInteger & r){
if(!r.pos) out << "-";
for (int i = 0; i < r.n; i++)
{
out<<r.val[i];
}
return out;
} | 20.274809 | 68 | 0.492282 | Rossoner40 |
4ff94fda49ac35645dadad0f002bc31ff25dc5d3 | 9,075 | cc | C++ | src/solver.cc | CHEN-Lin/OpenMoor | f463f586487b9023e7f3678c9d851000558b14d7 | [
"Apache-2.0"
] | 7 | 2019-02-10T07:03:45.000Z | 2022-03-04T16:09:38.000Z | src/solver.cc | CHEN-Lin/OpenMoor | f463f586487b9023e7f3678c9d851000558b14d7 | [
"Apache-2.0"
] | null | null | null | src/solver.cc | CHEN-Lin/OpenMoor | f463f586487b9023e7f3678c9d851000558b14d7 | [
"Apache-2.0"
] | 4 | 2021-01-25T23:33:11.000Z | 2022-03-27T13:22:56.000Z | // This file is part of OpenMOOR, an Open-source simulation program for MOORing
// systems in offshore renewable energy applications.
//
// Created by Lin Chen on Sep 15, 2017.
//
// Copyright 2018 Lin Chen <[email protected]> & Biswajit Basu <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
////////////////////////////////////////////////////////////////////////////////
#include "solver.h"
namespace moor {
////////////////////////////////////////////////////////////////////////////////
/// When basic parameters are obatined from the input data. Initialize the other
/// parameters.
////////////////////////////////////////////////////////////////////////////////
void Solver::initialize(const int n, const int n_b)
{
n_nodal_state = n;
n_bound_constraint = n_b;
n_iteration = 0;
relaxation_factor = initial_relaxation_factor;
alpha_k = lambda_infinity/(lambda_infinity - 1.0);
alpha_m = (3*lambda_infinity+1)/2/(lambda_infinity - 1.0);
gamma = 0.5 - alpha_m + alpha_k;
// Initialization constants for use.
alpha_k1 = 1-alpha_k;
alpha_k_square = pow(alpha_k, 2.0);
alpha_k1_square = pow(alpha_k1, 2.0);
alpha_k_cross = alpha_k * alpha_k1;
alpha_m1 = 1 - alpha_m;
alpha_m_square = pow(alpha_m, 2.0);
alpha_m1_square = pow(alpha_m1, 2.0);
alpha_m_cross = alpha_m * alpha_m1;
gamma1 = 1 - gamma;
}
////////////////////////////////////////////////////////////////////////////////
/// Solve the augmented matrix for state increment, by three steps:
/// - Reduction;
/// - Gauss-Jordan elimination;
/// - Back substitution.
////////////////////////////////////////////////////////////////////////////////
int Solver::solve(std::vector< Eigen::MatrixXd >& aug_mat)
{
int fail = 0, n_node = aug_mat.size() - 1;
fail = gauss_jordan_eliminate(aug_mat[0],
n_bound_constraint,
n_nodal_state-1,
n_nodal_state);
int k = 1;
for (k=1; k<n_node; k++)
{
reduce(aug_mat, k);
fail = gauss_jordan_eliminate(aug_mat[k], 0,
n_nodal_state-1,
n_bound_constraint);
}
reduce(aug_mat,n_node);
fail = gauss_jordan_eliminate(aug_mat[n_node],
0, (n_nodal_state-n_bound_constraint)-1,
n_bound_constraint);
back_substitute(aug_mat);
// Sort the last column of the augmented matrix.
for (k=0; k<n_node; k++)
{
aug_mat[k].block(0,2*n_nodal_state,n_bound_constraint,1)
= aug_mat[k].block(n_nodal_state-n_bound_constraint,2*n_nodal_state,n_bound_constraint,1);
aug_mat[k].block(n_bound_constraint,2*n_nodal_state,n_nodal_state-n_bound_constraint,1)
= aug_mat[k+1].block(0,2*n_nodal_state,n_nodal_state-n_bound_constraint,1);
}
return fail;
}
////////////////////////////////////////////////////////////////////////////////
/// Adjust the relaxation factor according to the error change trend.
////////////////////////////////////////////////////////////////////////////////
void Solver::adjust_relaxation(double present_error, double prev_error)
{
if (present_error > prev_error || prev_error == 0.0)
relaxation_factor = relaxation_factor / decrement_factor;
else
relaxation_factor = relaxation_factor * increment_factor;
relaxation_factor = (relaxation_factor < initial_relaxation_factor ?
relaxation_factor : initial_relaxation_factor);
relaxation_factor = relaxation_factor > 1E-5 ? relaxation_factor : 1E-5;
}
////////////////////////////////////////////////////////////////////////////////
/// Diagonalize the square block of augmented matrix by Gauss Jordan Elimination
/// using pivoting.
/// <pre>
/// 0 0 0 X X X X X X X B 0 0 0 1 0 0 0 0 S S C
/// 0 0 0 X X X X X X X B 0 0 0 0 1 0 0 0 S S C
/// 0 0 0 X X X X X X X B => 0 0 0 0 0 1 0 0 S S C
/// 0 0 0 X X X X X X X B 0 0 0 0 0 0 1 0 S S C
/// 0 0 0 X X X X X X X B 0 0 0 0 0 0 0 1 S S C
/// </pre>
/// The rows are swapped to form a block diagonal matrix.
////////////////////////////////////////////////////////////////////////////////
int Solver::gauss_jordan_eliminate(Eigen::MatrixXd& aug_mat,
int i_start, int i_end,
int j_start)
{
int n_dim = i_end - i_start + 1;
int i_pivot, j_pivot; double pivot;
Eigen::RowVectorXd temp_row;
for (int k=0; k<n_dim; k++)
{
pivot = aug_mat.block(i_start+k,j_start+k,n_dim-k,1).array().
abs().maxCoeff(&i_pivot,&j_pivot);
if (pivot <= 1E-20)
return 1; // Singularity. // assert(pivot > 1E-20);
// Swap rows.
if (i_pivot!=0)
{
temp_row = aug_mat.row(i_start+k+i_pivot);
aug_mat.row(i_start+k+i_pivot) = aug_mat.row(i_start+k);
aug_mat.row(i_start+k) = temp_row;
}
aug_mat.row(i_start+k) = aug_mat.row(i_start+k) *
(1.0/aug_mat(i_start+k,j_start+k));
aug_mat(i_start+k,j_start+k) = 1.0;
// Elimination.
for (int i=k+1; i<n_dim; i++)
{
aug_mat.row(i_start+i) -= aug_mat.row(i_start+k) *
aug_mat(i_start+i,j_start+k);
}
// Set zeros.
if (n_dim > 1)
aug_mat.block(i_start+k+1,j_start+k,n_dim-k-1,1).setZero();
}
// Back substitution.
for (int j=n_dim-1; j>0; j--)
{
for (int i=j-1; i>=0; i--)
{
aug_mat.row(i_start+i) -= aug_mat.row(i_start+j) *
aug_mat(i_start+i,j_start+j);
}
aug_mat.block(i_start,j_start+j,j,1).setZero();
}
return 0;
}
////////////////////////////////////////////////////////////////////////////////
/// Reduce columns jz1 .. jz2-1 of the s matrix, using previous results as
/// stored in the c matrix. Only colums jm1 .. jm2-1 and jmf are affected by
/// prior results.
/// <pre>
/// X X X X X X X X X X B 0 0 0 S S S S S S S C
/// X X X X X X X X X X B 0 0 0 S S S S S S S C
/// X X X X X X X X X X B => 0 0 0 S S S S S S S C
/// X X X X X X X X X X B 0 0 0 S S S S S S S C
/// X X X X X X X X X X B 0 0 0 S S S S S S S C
/// </pre>
////////////////////////////////////////////////////////////////////////////////
void Solver::reduce(std::vector< Eigen::MatrixXd >& s,int i)
{
// Alter the columns of the coefficient matrix.
s[i].block(0,n_bound_constraint,n_nodal_state,n_nodal_state-n_bound_constraint)
-= (s[i].block(0,0,n_nodal_state,n_bound_constraint)
* s[i-1].block(n_nodal_state-n_bound_constraint,
n_nodal_state+n_bound_constraint,
n_bound_constraint,n_nodal_state-n_bound_constraint));
// Alter the b column.
s[i].col(2*n_nodal_state)
-= (s[i].block(0,0,n_nodal_state,n_bound_constraint)
* s[i-1].block(n_nodal_state-n_bound_constraint,
2*n_nodal_state,n_bound_constraint,1));
// For testing.
s[i].block(0,0,n_nodal_state,n_bound_constraint)
-= (s[i].block(0,0,n_nodal_state,n_bound_constraint)
* s[i-1].block(n_nodal_state-n_bound_constraint,
n_nodal_state,n_bound_constraint,n_bound_constraint));
}
////////////////////////////////////////////////////////////////////////////////
/// Back substitute to dealing with the following structure
/// <pre>
/// 1 X X V B
/// 1 X X V B
/// 1 X X V B
/// 1 X X V B
/// 1 X X V B
/// 1 X X V B
/// 1 V B
/// 1 V B
/// </pre>
/// Note: Values of B after back substitution are the solution.
////////////////////////////////////////////////////////////////////////////////
void Solver::back_substitute(std::vector< Eigen::MatrixXd >& s)
{
int n = s.size();
for (int i=n-2; i>=0; i--)
{
s[i].col(2*n_nodal_state)
-= (s[i].block(0,n_nodal_state+n_bound_constraint,
n_nodal_state, n_nodal_state-n_bound_constraint) *
s[i+1].block(0,2*n_nodal_state,n_nodal_state-n_bound_constraint,1));
}
}
} // End of namespace moor.
| 37.970711 | 98 | 0.513388 | CHEN-Lin |
4ffa16bfcf8e2aa794f27ed2543621bbad9bd3f5 | 2,989 | cpp | C++ | tests/test_logmanager.cpp | DeanoC/simple_logmanager | 406317ae0f0a3b2b187a505ba6694f1902f63124 | [
"Apache-2.0"
] | null | null | null | tests/test_logmanager.cpp | DeanoC/simple_logmanager | 406317ae0f0a3b2b187a505ba6694f1902f63124 | [
"Apache-2.0"
] | null | null | null | tests/test_logmanager.cpp | DeanoC/simple_logmanager | 406317ae0f0a3b2b187a505ba6694f1902f63124 | [
"Apache-2.0"
] | null | null | null | #include "al2o3_platform/platform.h"
#include "al2o3_catch2/catch2.hpp"
#include "utils_simple_logmanager/logmanager.h"
#include "al2o3_os/file.h"
#include "al2o3_os/filesystem.h"
TEST_CASE("Alloc/Free", "[SimpleLogManager]") {
auto slm = SimpleLogManager_Alloc();
REQUIRE(slm);
SimpleLogManager_Free(slm);
}
TEST_CASE("Quiet settings", "[SimpleLogManager]") {
auto slm = SimpleLogManager_Alloc();
REQUIRE(slm);
// defaults
REQUIRE(!SimpleLogManager_IsFailedAssertQuiet(slm));
REQUIRE(!SimpleLogManager_IsInfoQuiet(slm));
REQUIRE(!SimpleLogManager_IsDebugMsgQuiet(slm));
REQUIRE(!SimpleLogManager_IsErrorQuiet(slm));
REQUIRE(!SimpleLogManager_IsWarningQuiet(slm));
REQUIRE(SimpleLogManager_IsInfoFileLineQuiet(slm));
REQUIRE(!SimpleLogManager_IsWarningFileLineQuiet(slm));
REQUIRE(!SimpleLogManager_IsErrorFileLineQuiet(slm));
SimpleLogManager_SetInfoFileLineQuiet(slm, true);
REQUIRE(SimpleLogManager_IsInfoFileLineQuiet(slm));
SimpleLogManager_SetWarningFileLineQuiet(slm, true);
REQUIRE(SimpleLogManager_IsWarningFileLineQuiet(slm));
SimpleLogManager_SetErrorFileLineQuiet(slm, true);
REQUIRE(SimpleLogManager_IsErrorFileLineQuiet(slm));
SimpleLogManager_SetInfoFileLineQuiet(slm, false);
REQUIRE(!SimpleLogManager_IsInfoFileLineQuiet(slm));
SimpleLogManager_SetWarningFileLineQuiet(slm, false);
REQUIRE(!SimpleLogManager_IsWarningFileLineQuiet(slm));
SimpleLogManager_SetErrorFileLineQuiet(slm, false);
REQUIRE(!SimpleLogManager_IsErrorFileLineQuiet(slm));
SimpleLogManager_SetInfoQuiet(slm, true);
SimpleLogManager_SetDebugMsgQuiet(slm, true);
SimpleLogManager_SetErrorQuiet(slm, true);
SimpleLogManager_SetWarningQuiet(slm, true);
REQUIRE(SimpleLogManager_IsInfoQuiet(slm));
REQUIRE(SimpleLogManager_IsDebugMsgQuiet(slm));
REQUIRE(SimpleLogManager_IsErrorQuiet(slm));
REQUIRE(SimpleLogManager_IsWarningQuiet(slm));
SimpleLogManager_Free(slm);
}
TEST_CASE("Default log file OK", "[SimpleLogManager]") {
char filePath[2048];
char const logFilename[] = "log.log";
Os_GetCurrentDir(filePath, sizeof(filePath));
ASSERT( strlen(filePath) + sizeof(logFilename) < sizeof(filePath));
strcat(filePath, logFilename);
// delete any old log first
if( Os_FileExists(filePath) ) {
Os_FileDelete(filePath);
}
auto slm = SimpleLogManager_Alloc();
REQUIRE(slm);
LOGINFO("test default");
REQUIRE(Os_FileExists(filePath));
SimpleLogManager_Free(slm);
}
TEST_CASE("Custom log file OK", "[SimpleLogManager]") {
char filePath[2048];
char const logFilename[] = "custom_test.log";
Os_GetCurrentDir(filePath, sizeof(filePath));
ASSERT( strlen(filePath) + sizeof(logFilename) < sizeof(filePath));
strcat(filePath, logFilename);
// delete any old log first
if( Os_FileExists(filePath) ) {
Os_FileDelete(filePath);
}
auto slm = SimpleLogManager_Alloc();
REQUIRE(slm);
LOGINFO("test default");
SimpleLogManager_UseFileForLog(slm, filePath);
LOGINFO("test custom");
REQUIRE(Os_FileExists(filePath));
SimpleLogManager_Free(slm);
} | 31.135417 | 68 | 0.79458 | DeanoC |
4ffb7b971e0ea1a7dae67d8a598fab3e9e8e4e7f | 6,357 | cpp | C++ | src/Emulators/nestopiaue/common/nstini.cpp | slajerek/RetroDebugger | e761e4f9efd103a05e65ef283423b142fa4324c7 | [
"Apache-2.0",
"MIT"
] | 34 | 2021-05-29T07:04:17.000Z | 2022-03-10T20:16:03.000Z | src/Emulators/nestopiaue/common/nstini.cpp | slajerek/RetroDebugger | e761e4f9efd103a05e65ef283423b142fa4324c7 | [
"Apache-2.0",
"MIT"
] | 6 | 2021-12-25T13:05:21.000Z | 2022-01-19T17:35:17.000Z | src/Emulators/nestopiaue/common/nstini.cpp | slajerek/RetroDebugger | e761e4f9efd103a05e65ef283423b142fa4324c7 | [
"Apache-2.0",
"MIT"
] | 6 | 2021-12-24T18:37:41.000Z | 2022-02-06T23:06:02.000Z | /* inih -- simple .INI file parser
Copyright (c) 2009, Brush Technology
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Brush Technology nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY BRUSH TECHNOLOGY ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL BRUSH TECHNOLOGY BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <ctype.h>
#include <string.h>
#include "nstini.h"
#if !INI_USE_STACK
#include <stdlib.h>
#endif
#define MAX_SECTION 50
#define MAX_NAME 50
/* Strip whitespace chars off end of given string, in place. Return s. */
static char* rstrip(char* s)
{
char* p = s + strlen(s);
while (p > s && isspace((unsigned char)(*--p)))
*p = '\0';
return s;
}
/* Return pointer to first non-whitespace char in given string. */
static char* lskip(const char* s)
{
while (*s && isspace((unsigned char)(*s)))
s++;
return (char*)s;
}
/* Return pointer to first char c or ';' comment in given string, or pointer to
null at end of string if neither found. ';' must be prefixed by a whitespace
character to register as a comment. */
static char* find_char_or_comment(const char* s, char c)
{
int was_whitespace = 0;
while (*s && *s != c && !(was_whitespace && *s == ';')) {
was_whitespace = isspace((unsigned char)(*s));
s++;
}
return (char*)s;
}
/* Version of strncpy that ensures dest (size bytes) is null-terminated. */
static char* strncpy0(char* dest, const char* src, size_t size)
{
strncpy(dest, src, size);
dest[size - 1] = '\0';
return dest;
}
/* See documentation in header file. */
int ini_parse_file(FILE* file,
int (*handler)(void*, const char*, const char*,
const char*),
void* user)
{
/* Uses a fair bit of stack (use heap instead if you need to) */
#if INI_USE_STACK
char line[INI_MAX_LINE];
#else
char* line;
#endif
char section[MAX_SECTION] = "";
char prev_name[MAX_NAME] = "";
char* start;
char* end;
char* name;
char* value;
int lineno = 0;
int error = 0;
#if !INI_USE_STACK
line = (char*)malloc(INI_MAX_LINE);
if (!line) {
return -2;
}
#endif
/* Scan through file line by line */
while (fgets(line, INI_MAX_LINE, file) != NULL) {
lineno++;
start = line;
#if INI_ALLOW_BOM
if (lineno == 1 && (unsigned char)start[0] == 0xEF &&
(unsigned char)start[1] == 0xBB &&
(unsigned char)start[2] == 0xBF) {
start += 3;
}
#endif
start = lskip(rstrip(start));
if (*start == ';' || *start == '#') {
/* Per Python ConfigParser, allow '#' comments at start of line */
}
#if INI_ALLOW_MULTILINE
else if (*prev_name && *start && start > line) {
        /* Non-blank line with leading whitespace, treat as continuation
of previous name's value (as per Python ConfigParser). */
if (!handler(user, section, prev_name, start) && !error)
error = lineno;
}
#endif
else if (*start == '[') {
/* A "[section]" line */
end = find_char_or_comment(start + 1, ']');
if (*end == ']') {
*end = '\0';
strncpy0(section, start + 1, sizeof(section));
*prev_name = '\0';
}
else if (!error) {
/* No ']' found on section line */
error = lineno;
}
}
else if (*start && *start != ';') {
/* Not a comment, must be a name[=:]value pair */
end = find_char_or_comment(start, '=');
if (*end != '=') {
end = find_char_or_comment(start, ':');
}
if (*end == '=' || *end == ':') {
*end = '\0';
name = rstrip(start);
value = lskip(end + 1);
end = find_char_or_comment(value, '\0');
if (*end == ';')
*end = '\0';
rstrip(value);
/* Valid name[=:]value pair found, call handler */
strncpy0(prev_name, name, sizeof(prev_name));
if (!handler(user, section, name, value) && !error)
error = lineno;
}
else if (!error) {
/* No '=' or ':' found on name[=:]value line */
error = lineno;
}
}
}
#if !INI_USE_STACK
free(line);
#endif
return error;
}
/* See documentation in header file. */
int ini_parse(const char* filename,
int (*handler)(void*, const char*, const char*, const char*),
void* user)
{
FILE* file;
int error;
file = fopen(filename, "r");
if (!file)
return -1;
error = ini_parse_file(file, handler, user);
fclose(file);
return error;
}
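/* Minimal usage sketch (handler and file name are hypothetical):

     static int dump(void* user, const char* section,
                     const char* name, const char* value)
     {
         printf("[%s] %s=%s\n", section, name, value);
         return 1;  // nonzero = handled; returning 0 records this line as an error
     }

     int err = ini_parse("config.ini", dump, NULL);
     // err: 0 on success, -1 if the file cannot be opened, otherwise the
     // first line number that failed to parse.
*/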
| 32.269036 | 80 | 0.55608 | slajerek |
4ffbdf4569166734a20b6794b81d291056b33c2a | 2,232 | cpp | C++ | 35-minimum-spanning-tree/minimumtree.cpp | wlep/cp-course | 9e52788e8f6a76752149b74d06d0272e16c3b528 | [
"MIT"
] | null | null | null | 35-minimum-spanning-tree/minimumtree.cpp | wlep/cp-course | 9e52788e8f6a76752149b74d06d0272e16c3b528 | [
"MIT"
] | null | null | null | 35-minimum-spanning-tree/minimumtree.cpp | wlep/cp-course | 9e52788e8f6a76752149b74d06d0272e16c3b528 | [
"MIT"
] | null | null | null | #include <iostream>
#include <vector>
#include <algorithm>
using namespace std;
template<typename T> class UnionFind {
public:
struct Node {
T data;
int rank;
int forestIndex;
Node* parent;
Node(T _data, int _forestIndex) {
data = _data;
rank = 0;
forestIndex = _forestIndex;
parent = this;
}
};
UnionFind() {
}
Node* MakeSet(T data) {
Node* newNode = new Node(data, forest.size());
forest.push_back(newNode);
return newNode;
}
// Union by Rank, if equal y becomes root
void Union(Node* x, Node* y) {
Node* rootX = Find(x);
Node* rootY = Find(y);
if (rootX == rootY) {
return;
}
if (rootX->rank > rootY->rank) {
rootY->parent = rootX;
return;
}
rootX->parent = rootY;
if (rootX->rank == rootY->rank)
rootY->rank++;
}
// Find with Path Compression
Node* Find(Node* x) {
if (x->parent != x)
x->parent = Find(x->parent);
return x->parent;
}
vector<Node*> Forest() {
return forest;
}
private:
vector<Node*> forest;
};
struct Edge {
int i;
int j;
int w;
Edge(int _i, int _j, int _w) {
i = _i;
j = _j;
w = _w;
}
};
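// Kruskal's MST: sort all edges by weight, then take an edge iff its
// endpoints lie in different union-find components; the accepted edge
// weights sum to the cost of a minimum spanning tree.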
int main() {
std::ios_base::sync_with_stdio(false);
int n, m;
cin >> n >> m;
vector<Edge*> edges;
for (int e = 0; e < m; e++) {
int i, j, w;
cin >> i >> j >> w;
edges.push_back(new Edge(i,j,w));
}
sort(edges.begin(), edges.end(), [](Edge* e1, Edge* e2) {return e1->w < e2->w;});
UnionFind<int> uf;
vector<UnionFind<int>::Node*> nodes;
for (int i = 0; i < n; i++) {
nodes.push_back(uf.MakeSet(i));
}
uint64_t sum = 0;
for (int i = 0; i < m; i++) {
Edge* e = edges[i];
UnionFind<int>::Node* u = nodes[e->i-1];
UnionFind<int>::Node* v = nodes[e->j-1];
if (uf.Find(u)->data != uf.Find(v)->data) {
uf.Union(u,v);
sum += e->w;
}
}
cout << sum << endl;
return 0;
} | 19.578947 | 85 | 0.46595 | wlep |
4fffb2cad6e15713572254da23a904b649a21e5b | 2,393 | cpp | C++ | inference-engine/tests/functional/plugin/gpu/shared_tests_instances/multi/gpu_remote_blob_tests.cpp | ElenaGvozdeva/openvino | 084aa4e5916fa2ed3e353dcd45d081ab11d9c75a | [
"Apache-2.0"
] | null | null | null | inference-engine/tests/functional/plugin/gpu/shared_tests_instances/multi/gpu_remote_blob_tests.cpp | ElenaGvozdeva/openvino | 084aa4e5916fa2ed3e353dcd45d081ab11d9c75a | [
"Apache-2.0"
] | 23 | 2021-03-12T07:34:43.000Z | 2022-02-21T13:06:03.000Z | inference-engine/tests/functional/plugin/gpu/shared_tests_instances/multi/gpu_remote_blob_tests.cpp | ElenaGvozdeva/openvino | 084aa4e5916fa2ed3e353dcd45d081ab11d9c75a | [
"Apache-2.0"
] | 1 | 2020-07-22T15:53:40.000Z | 2020-07-22T15:53:40.000Z | // Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <string>
#include <vector>
#include "gpu/gpu_config.hpp"
#include "multi/multi_remote_blob_tests.hpp"
#include "common_test_utils/test_constants.hpp"
const std::vector<DevicesNamesAndSupportPair> device_names_and_support_for_remote_blobs {
{{GPU}, true}, // GPU via MULTI,
#ifdef ENABLE_MKL_DNN
{{GPU, CPU}, true}, // GPU+CPU
{{CPU, GPU}, true}, // CPU+GPU
#endif
};
INSTANTIATE_TEST_CASE_P(smoke_RemoteBlobMultiGPU, MultiDevice_SupportTest,
::testing::ValuesIn(device_names_and_support_for_remote_blobs), MultiDevice_SupportTest::getTestCaseName);
TEST_P(MultiDevice_Test, cannotInferRemoteBlobIfNotInitializedForDevice) {
InferenceEngine::CNNNetwork net(fn_ptr);
auto ie = PluginCache::get().ie();
// load a network to the GPU to make sure we have a remote context
auto exec_net = ie->LoadNetwork(net, GPU);
auto ctx = exec_net.GetContext();
const InferenceEngine::ConstInputsDataMap inputInfo = exec_net.GetInputsInfo();
auto& first_input_name = inputInfo.begin()->first;
auto& first_input = inputInfo.begin()->second;
auto rblob = InferenceEngine::make_shared_blob(first_input->getTensorDesc(), ctx);
rblob->allocate();
InferenceEngine::ExecutableNetwork exec_net_multi;
try {
exec_net_multi = ie->LoadNetwork(net, device_names);
} catch(...) {
// device is unavailable (e.g. for the "second GPU" test) or other (e.g. env) issues not related to the test
return;
}
InferenceEngine::InferRequest req = exec_net_multi.CreateInferRequest();
ASSERT_TRUE(req);
ASSERT_NO_THROW(req.SetBlob(first_input_name, rblob));
ASSERT_NO_THROW(req.StartAsync());
ASSERT_THROW(req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY), InferenceEngine::Exception);
}
const std::vector<DevicesNames> device_names_and_support_for_remote_blobs2 {
#ifdef ENABLE_MKL_DNN
{CPU}, // stand-alone CPU via MULTI (no GPU), no OCL context
#endif
{"GPU.1"}, // another GPU (the test will test its presence), different OCL contexts
};
INSTANTIATE_TEST_CASE_P(smoke_RemoteBlobMultiInitializedWithoutGPU, MultiDevice_Test,
::testing::ValuesIn(device_names_and_support_for_remote_blobs2), MultiDevice_Test::getTestCaseName);
| 41.258621 | 130 | 0.728374 | ElenaGvozdeva |
8b01a9285a5ca03ea5b64552a3b85ac5e45dc213 | 1,494 | hh | C++ | dune/ax1/acme1MD/configurations/bigmac/bigmac_solution_con.hh | pederpansen/dune-ax1 | 152153824d95755a55bdd4fba80686863e928196 | [
"BSD-3-Clause"
] | null | null | null | dune/ax1/acme1MD/configurations/bigmac/bigmac_solution_con.hh | pederpansen/dune-ax1 | 152153824d95755a55bdd4fba80686863e928196 | [
"BSD-3-Clause"
] | null | null | null | dune/ax1/acme1MD/configurations/bigmac/bigmac_solution_con.hh | pederpansen/dune-ax1 | 152153824d95755a55bdd4fba80686863e928196 | [
"BSD-3-Clause"
] | null | null | null | /*
* bigmac_solution_con.hh
*
* Created on: Jan 17, 2012
* Author: jpods
*/
#ifndef DUNE_AX1_BIGMAC_SOLUTION_CON_HH
#define DUNE_AX1_BIGMAC_SOLUTION_CON_HH
#include <dune/pdelab/common/function.hh>
#include <dune/ax1/common/constants.hh>
#include <dune/ax1/acme1MD/common/acme1MD_parametertree.hh>
template<typename GV, typename RF, int dim>
class BigmacCon :
public Dune::PDELab::AnalyticGridFunctionBase<
Dune::PDELab::AnalyticGridFunctionTraits<GV,RF,dim>,
BigmacCon<GV,RF,dim> >
{
public:
typedef Dune::PDELab::AnalyticGridFunctionTraits<GV,RF,dim> Traits;
typedef Dune::PDELab::AnalyticGridFunctionBase<Traits, BigmacCon<GV,RF,dim> > BaseT;
typedef typename Traits::DomainType DomainType;
typedef typename Traits::RangeType RangeType;
BigmacCon(const GV& gv_, const Acme1MDParameters& params_)
: BaseT(gv_),
gv(gv_),
params(params_),
time(0.0)
{}
inline void evaluateGlobal(const DomainType & x, RangeType & y) const
{
    // traveling-wave analytic solution: y = -2 A^2 sech^2( A (x - v*time) + B )
double A = 1.0;
double B = 0.0;
double v = 1.0;
y[0] = - 2.0 * A * A * ( 1.0 - std::pow(tanh( A * ( x - v * time ) + B ),2) );
}
inline const GV& getGridView () const
{
return gv;
}
// set time for subsequent evaluation
virtual void setTime (double t)
{
time = t;
}
private:
const GV& gv;
const Acme1MDParameters& params;
protected:
RF time;
};
#endif /* DUNE_AX1_BIGMAC_SOLUTION_CON_HH */
| 22.636364 | 88 | 0.65328 | pederpansen |
8b022a18324bcac46b891a0dd9b0b2c361d698d7 | 40,091 | cpp | C++ | extras/jbcoin-libpp/extras/jbcoind/src/jbcoin/shamap/impl/SHAMap.cpp | trongnmchainos/validator-keys-tool | cae131d6ab46051c0f47509b79b6efc47a70eec0 | [
"BSL-1.0"
] | 2 | 2020-03-03T12:46:29.000Z | 2020-11-14T09:52:14.000Z | extras/jbcoin-libpp/extras/jbcoind/src/jbcoin/shamap/impl/SHAMap.cpp | trongnmchainos/validator-keys-tool | cae131d6ab46051c0f47509b79b6efc47a70eec0 | [
"BSL-1.0"
] | null | null | null | extras/jbcoin-libpp/extras/jbcoind/src/jbcoin/shamap/impl/SHAMap.cpp | trongnmchainos/validator-keys-tool | cae131d6ab46051c0f47509b79b6efc47a70eec0 | [
"BSL-1.0"
] | 1 | 2020-03-03T12:46:30.000Z | 2020-03-03T12:46:30.000Z | //------------------------------------------------------------------------------
/*
This file is part of jbcoind: https://github.com/jbcoin/jbcoind
Copyright (c) 2012, 2013 Jbcoin Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <BeastConfig.h>
#include <jbcoin/basics/contract.h>
#include <jbcoin/shamap/SHAMap.h>
namespace jbcoin {
SHAMap::SHAMap (
SHAMapType t,
Family& f,
version v)
: f_ (f)
, journal_(f.journal())
, seq_ (1)
, state_ (SHAMapState::Modifying)
, type_ (t)
{
if (v == version{2})
root_ = std::make_shared<SHAMapInnerNodeV2>(seq_, 0);
else
root_ = std::make_shared<SHAMapInnerNode>(seq_);
}
SHAMap::SHAMap (
SHAMapType t,
uint256 const& hash,
Family& f,
version v)
: f_ (f)
, journal_(f.journal())
, seq_ (1)
, state_ (SHAMapState::Synching)
, type_ (t)
{
if (v == version{2})
root_ = std::make_shared<SHAMapInnerNodeV2>(seq_, 0);
else
root_ = std::make_shared<SHAMapInnerNode>(seq_);
}
SHAMap::~SHAMap ()
{
state_ = SHAMapState::Invalid;
}
std::shared_ptr<SHAMap>
SHAMap::snapShot (bool isMutable) const
{
auto ret = std::make_shared<SHAMap> (type_, f_, get_version());
SHAMap& newMap = *ret;
if (!isMutable)
newMap.state_ = SHAMapState::Immutable;
newMap.seq_ = seq_ + 1;
newMap.root_ = root_;
newMap.backed_ = backed_;
if ((state_ != SHAMapState::Immutable) || !isMutable)
{
// If either map may change, they cannot share nodes
newMap.unshare ();
}
return ret;
}
std::shared_ptr<SHAMap>
SHAMap::make_v2() const
{
assert(!is_v2());
auto ret = std::make_shared<SHAMap>(type_, f_, version{2});
ret->seq_ = seq_ + 1;
SharedPtrNodeStack stack;
for (auto leaf = peekFirstItem(stack); leaf != nullptr;
leaf = peekNextItem(leaf->peekItem()->key(), stack))
{
auto node_type = leaf->getType();
ret->addGiveItem(leaf->peekItem(),
node_type != SHAMapTreeNode::tnACCOUNT_STATE,
node_type == SHAMapTreeNode::tnTRANSACTION_MD);
}
NodeObjectType t;
switch (type_)
{
case SHAMapType::TRANSACTION:
t = hotTRANSACTION_NODE;
break;
case SHAMapType::STATE:
t = hotACCOUNT_NODE;
break;
default:
t = hotUNKNOWN;
break;
}
ret->flushDirty(t, ret->seq_);
ret->unshare();
return ret;
}
std::shared_ptr<SHAMap>
SHAMap::make_v1() const
{
assert(is_v2());
auto ret = std::make_shared<SHAMap>(type_, f_, version{1});
ret->seq_ = seq_ + 1;
SharedPtrNodeStack stack;
for (auto leaf = peekFirstItem(stack); leaf != nullptr;
leaf = peekNextItem(leaf->peekItem()->key(), stack))
{
auto node_type = leaf->getType();
ret->addGiveItem(leaf->peekItem(),
node_type != SHAMapTreeNode::tnACCOUNT_STATE,
node_type == SHAMapTreeNode::tnTRANSACTION_MD);
}
NodeObjectType t;
switch (type_)
{
case SHAMapType::TRANSACTION:
t = hotTRANSACTION_NODE;
break;
case SHAMapType::STATE:
t = hotACCOUNT_NODE;
break;
default:
t = hotUNKNOWN;
break;
}
ret->flushDirty(t, ret->seq_);
ret->unshare();
return ret;
}
void
SHAMap::dirtyUp (SharedPtrNodeStack& stack,
uint256 const& target, std::shared_ptr<SHAMapAbstractNode> child)
{
    // walk the tree up through the inner nodes to the root_
// update hashes and links
// stack is a path of inner nodes up to, but not including, child
// child can be an inner node or a leaf
assert ((state_ != SHAMapState::Synching) && (state_ != SHAMapState::Immutable));
assert (child && (child->getSeq() == seq_));
while (!stack.empty ())
{
auto node = std::dynamic_pointer_cast<SHAMapInnerNode>(stack.top ().first);
SHAMapNodeID nodeID = stack.top ().second;
stack.pop ();
assert (node != nullptr);
int branch = nodeID.selectBranch (target);
assert (branch >= 0);
node = unshareNode(std::move(node), nodeID);
node->setChild (branch, child);
child = std::move (node);
}
}
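// Descend from the root towards the leaf that would hold `id`, optionally
// recording the path of visited nodes in `stack`. Returns the leaf reached
// (its key may differ from `id`), or nullptr if the walk hits an empty
// branch (or, for v2 trees, a prefix mismatch).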
SHAMapTreeNode*
SHAMap::walkTowardsKey(uint256 const& id, SharedPtrNodeStack* stack) const
{
assert(stack == nullptr || stack->empty());
auto inNode = root_;
SHAMapNodeID nodeID;
auto const isv2 = is_v2();
while (inNode->isInner())
{
if (stack != nullptr)
stack->push({inNode, nodeID});
if (isv2)
{
auto n = std::static_pointer_cast<SHAMapInnerNodeV2>(inNode);
if (!n->has_common_prefix(id))
return nullptr;
}
auto const inner = std::static_pointer_cast<SHAMapInnerNode>(inNode);
auto const branch = nodeID.selectBranch (id);
if (inner->isEmptyBranch (branch))
return nullptr;
inNode = descendThrow (inner, branch);
if (isv2)
{
if (inNode->isInner())
{
auto n = std::dynamic_pointer_cast<SHAMapInnerNodeV2>(inNode);
if (n == nullptr)
{
assert (false);
return nullptr;
}
nodeID = SHAMapNodeID{n->depth(), n->common()};
}
else
{
nodeID = SHAMapNodeID{64, inNode->key()};
}
}
else
{
nodeID = nodeID.getChildNodeID (branch);
}
}
if (stack != nullptr)
stack->push({inNode, nodeID});
return static_cast<SHAMapTreeNode*>(inNode.get());
}
SHAMapTreeNode*
SHAMap::findKey(uint256 const& id) const
{
SHAMapTreeNode* leaf = walkTowardsKey(id);
if (leaf && leaf->peekItem()->key() != id)
leaf = nullptr;
return leaf;
}
std::shared_ptr<SHAMapAbstractNode>
SHAMap::fetchNodeFromDB (SHAMapHash const& hash) const
{
std::shared_ptr<SHAMapAbstractNode> node;
if (backed_)
{
std::shared_ptr<NodeObject> obj = f_.db().fetch (hash.as_uint256());
if (obj)
{
try
{
node = SHAMapAbstractNode::make(makeSlice(obj->getData()),
0, snfPREFIX, hash, true, f_.journal());
if (node && node->isInner())
{
bool isv2 = std::dynamic_pointer_cast<SHAMapInnerNodeV2>(node) != nullptr;
if (isv2 != is_v2())
{
auto root = std::dynamic_pointer_cast<SHAMapInnerNode>(root_);
assert(root);
assert(root->isEmpty());
if (isv2)
{
auto temp = make_v2();
swap(temp->root_, const_cast<std::shared_ptr<SHAMapAbstractNode>&>(root_));
}
else
{
auto temp = make_v1();
swap(temp->root_, const_cast<std::shared_ptr<SHAMapAbstractNode>&>(root_));
}
}
}
if (node)
canonicalize (hash, node);
}
catch (std::exception const&)
{
JLOG(journal_.warn()) <<
"Invalid DB node " << hash;
return std::shared_ptr<SHAMapTreeNode> ();
}
}
else if (ledgerSeq_ != 0)
{
f_.missing_node(ledgerSeq_);
const_cast<std::uint32_t&>(ledgerSeq_) = 0;
}
}
return node;
}
// See if a sync filter has a node
std::shared_ptr<SHAMapAbstractNode>
SHAMap::checkFilter(SHAMapHash const& hash,
SHAMapSyncFilter* filter) const
{
std::shared_ptr<SHAMapAbstractNode> node;
if (auto nodeData = filter->getNode (hash))
{
node = SHAMapAbstractNode::make(
makeSlice(*nodeData), 0, snfPREFIX, hash, true, f_.journal ());
if (node)
{
filter->gotNode (true, hash,
std::move(*nodeData), node->getType ());
if (backed_)
canonicalize (hash, node);
}
}
return node;
}
// Get a node without throwing
// Used on maps where missing nodes are expected
std::shared_ptr<SHAMapAbstractNode> SHAMap::fetchNodeNT(
SHAMapHash const& hash,
SHAMapSyncFilter* filter) const
{
std::shared_ptr<SHAMapAbstractNode> node = getCache (hash);
if (node)
return node;
if (backed_)
{
node = fetchNodeFromDB (hash);
if (node)
{
canonicalize (hash, node);
return node;
}
}
if (filter)
node = checkFilter (hash, filter);
return node;
}
std::shared_ptr<SHAMapAbstractNode> SHAMap::fetchNodeNT (SHAMapHash const& hash) const
{
auto node = getCache (hash);
if (!node && backed_)
node = fetchNodeFromDB (hash);
return node;
}
// Throw if the node is missing
std::shared_ptr<SHAMapAbstractNode> SHAMap::fetchNode (SHAMapHash const& hash) const
{
auto node = fetchNodeNT (hash);
if (!node)
Throw<SHAMapMissingNode> (type_, hash);
return node;
}
SHAMapAbstractNode* SHAMap::descendThrow (SHAMapInnerNode* parent, int branch) const
{
SHAMapAbstractNode* ret = descend (parent, branch);
if (! ret && ! parent->isEmptyBranch (branch))
Throw<SHAMapMissingNode> (type_, parent->getChildHash (branch));
return ret;
}
std::shared_ptr<SHAMapAbstractNode>
SHAMap::descendThrow (std::shared_ptr<SHAMapInnerNode> const& parent, int branch) const
{
std::shared_ptr<SHAMapAbstractNode> ret = descend (parent, branch);
if (! ret && ! parent->isEmptyBranch (branch))
Throw<SHAMapMissingNode> (type_, parent->getChildHash (branch));
return ret;
}
SHAMapAbstractNode* SHAMap::descend (SHAMapInnerNode* parent, int branch) const
{
SHAMapAbstractNode* ret = parent->getChildPointer (branch);
if (ret || !backed_)
return ret;
std::shared_ptr<SHAMapAbstractNode> node = fetchNodeNT (parent->getChildHash (branch));
if (!node || isInconsistentNode(node))
return nullptr;
node = parent->canonicalizeChild (branch, std::move(node));
return node.get ();
}
std::shared_ptr<SHAMapAbstractNode>
SHAMap::descend (std::shared_ptr<SHAMapInnerNode> const& parent, int branch) const
{
std::shared_ptr<SHAMapAbstractNode> node = parent->getChild (branch);
if (node || !backed_)
return node;
node = fetchNode (parent->getChildHash (branch));
if (!node || isInconsistentNode(node))
return nullptr;
node = parent->canonicalizeChild (branch, std::move(node));
return node;
}
// Gets the node that would be hooked to this branch,
// but doesn't hook it up.
std::shared_ptr<SHAMapAbstractNode>
SHAMap::descendNoStore (std::shared_ptr<SHAMapInnerNode> const& parent, int branch) const
{
std::shared_ptr<SHAMapAbstractNode> ret = parent->getChild (branch);
if (!ret && backed_)
ret = fetchNode (parent->getChildHash (branch));
return ret;
}
std::pair <SHAMapAbstractNode*, SHAMapNodeID>
SHAMap::descend (SHAMapInnerNode * parent, SHAMapNodeID const& parentID,
int branch, SHAMapSyncFilter * filter) const
{
assert (parent->isInner ());
assert ((branch >= 0) && (branch < 16));
assert (!parent->isEmptyBranch (branch));
SHAMapAbstractNode* child = parent->getChildPointer (branch);
auto const& childHash = parent->getChildHash (branch);
if (!child)
{
std::shared_ptr<SHAMapAbstractNode> childNode = fetchNodeNT (childHash, filter);
if (childNode)
{
childNode = parent->canonicalizeChild (branch, std::move(childNode));
child = childNode.get ();
}
if (child && isInconsistentNode(childNode))
child = nullptr;
}
if (child && is_v2())
{
if (child->isInner())
{
auto n = static_cast<SHAMapInnerNodeV2*>(child);
return std::make_pair(child, SHAMapNodeID{n->depth(), n->key()});
}
return std::make_pair(child, SHAMapNodeID{64, child->key()});
}
return std::make_pair (child, parentID.getChildNodeID (branch));
}
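// Non-blocking variant of descend: if the child must be fetched from the
// backing store and the fetch has not completed yet, set `pending` and
// return nullptr rather than waiting.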
SHAMapAbstractNode*
SHAMap::descendAsync (SHAMapInnerNode* parent, int branch,
SHAMapSyncFilter * filter, bool & pending) const
{
pending = false;
SHAMapAbstractNode* ret = parent->getChildPointer (branch);
if (ret)
return ret;
auto const& hash = parent->getChildHash (branch);
std::shared_ptr<SHAMapAbstractNode> ptr = getCache (hash);
if (!ptr)
{
if (filter)
ptr = checkFilter (hash, filter);
if (!ptr && backed_)
{
std::shared_ptr<NodeObject> obj;
if (! f_.db().asyncFetch (hash.as_uint256(), obj))
{
pending = true;
return nullptr;
}
if (!obj)
return nullptr;
ptr = SHAMapAbstractNode::make(makeSlice(obj->getData()), 0, snfPREFIX,
hash, true, f_.journal());
if (ptr && backed_)
canonicalize (hash, ptr);
}
}
if (ptr && isInconsistentNode(ptr))
ptr = nullptr;
if (ptr)
ptr = parent->canonicalizeChild (branch, std::move(ptr));
return ptr.get ();
}
template <class Node>
std::shared_ptr<Node>
SHAMap::unshareNode (std::shared_ptr<Node> node, SHAMapNodeID const& nodeID)
{
// make sure the node is suitable for the intended operation (copy on write)
assert (node->isValid ());
assert (node->getSeq () <= seq_);
if (node->getSeq () != seq_)
{
// have a CoW
assert (state_ != SHAMapState::Immutable);
node = std::static_pointer_cast<Node>(node->clone(seq_));
assert (node->isValid ());
if (nodeID.isRoot ())
root_ = node;
}
return node;
}
SHAMapTreeNode*
SHAMap::firstBelow(std::shared_ptr<SHAMapAbstractNode> node,
SharedPtrNodeStack& stack, int branch) const
{
// Return the first item at or below this node
if (node->isLeaf())
{
auto n = std::static_pointer_cast<SHAMapTreeNode>(node);
stack.push({node, {64, n->peekItem()->key()}});
return n.get();
}
auto inner = std::static_pointer_cast<SHAMapInnerNode>(node);
if (stack.empty())
stack.push({inner, SHAMapNodeID{}});
else
{
if (is_v2())
{
auto inner2 = std::dynamic_pointer_cast<SHAMapInnerNodeV2>(inner);
assert(inner2 != nullptr);
stack.push({inner2, {inner2->depth(), inner2->common()}});
}
else
{
stack.push({inner, stack.top().second.getChildNodeID(branch)});
}
}
for (int i = 0; i < 16;)
{
if (!inner->isEmptyBranch(i))
{
node = descendThrow(inner, i);
assert(!stack.empty());
if (node->isLeaf())
{
auto n = std::static_pointer_cast<SHAMapTreeNode>(node);
stack.push({n, {64, n->peekItem()->key()}});
return n.get();
}
inner = std::static_pointer_cast<SHAMapInnerNode>(node);
if (is_v2())
{
auto inner2 = std::static_pointer_cast<SHAMapInnerNodeV2>(inner);
stack.push({inner2, {inner2->depth(), inner2->common()}});
}
else
{
stack.push({inner, stack.top().second.getChildNodeID(branch)});
}
i = 0; // scan all 16 branches of this new node
}
else
++i; // scan next branch
}
return nullptr;
}
static const std::shared_ptr<SHAMapItem const> no_item;
std::shared_ptr<SHAMapItem const> const&
SHAMap::onlyBelow (SHAMapAbstractNode* node) const
{
// If there is only one item below this node, return it
while (!node->isLeaf ())
{
SHAMapAbstractNode* nextNode = nullptr;
auto inner = static_cast<SHAMapInnerNode*>(node);
for (int i = 0; i < 16; ++i)
{
if (!inner->isEmptyBranch (i))
{
if (nextNode)
return no_item;
nextNode = descendThrow (inner, i);
}
}
if (!nextNode)
{
assert (false);
return no_item;
}
node = nextNode;
}
// An inner node must have at least one leaf
// below it, unless it's the root_
auto leaf = static_cast<SHAMapTreeNode*>(node);
assert (leaf->hasItem () || (leaf == root_.get ()));
return leaf->peekItem ();
}
static std::shared_ptr<
SHAMapItem const> const nullConstSHAMapItem;
SHAMapTreeNode const*
SHAMap::peekFirstItem(SharedPtrNodeStack& stack) const
{
assert(stack.empty());
SHAMapTreeNode* node = firstBelow(root_, stack);
if (!node)
{
while (!stack.empty())
stack.pop();
return nullptr;
}
return node;
}
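// Return the leaf that follows the item with key `id` in tree order; `stack`
// must hold the path to that item's leaf. Returns nullptr when `id` belongs
// to the last item in the map.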
SHAMapTreeNode const*
SHAMap::peekNextItem(uint256 const& id, SharedPtrNodeStack& stack) const
{
assert(!stack.empty());
assert(stack.top().first->isLeaf());
stack.pop();
while (!stack.empty())
{
auto node = stack.top().first;
auto nodeID = stack.top().second;
assert(!node->isLeaf());
auto inner = std::static_pointer_cast<SHAMapInnerNode>(node);
for (auto i = nodeID.selectBranch(id) + 1; i < 16; ++i)
{
if (!inner->isEmptyBranch(i))
{
node = descendThrow(inner, i);
auto leaf = firstBelow(node, stack, i);
if (!leaf)
Throw<SHAMapMissingNode> (type_, id);
assert(leaf->isLeaf());
return leaf;
}
}
stack.pop();
}
// must be last item
return nullptr;
}
std::shared_ptr<SHAMapItem const> const&
SHAMap::peekItem (uint256 const& id) const
{
SHAMapTreeNode* leaf = findKey(id);
if (!leaf)
return no_item;
return leaf->peekItem ();
}
std::shared_ptr<SHAMapItem const> const&
SHAMap::peekItem (uint256 const& id, SHAMapTreeNode::TNType& type) const
{
SHAMapTreeNode* leaf = findKey(id);
if (!leaf)
return no_item;
type = leaf->getType ();
return leaf->peekItem ();
}
std::shared_ptr<SHAMapItem const> const&
SHAMap::peekItem (uint256 const& id, SHAMapHash& hash) const
{
SHAMapTreeNode* leaf = findKey(id);
if (!leaf)
return no_item;
hash = leaf->getNodeHash ();
return leaf->peekItem ();
}
SHAMap::const_iterator
SHAMap::upper_bound(uint256 const& id) const
{
// Get a const_iterator to the next item in the tree after a given item
// item need not be in tree
SharedPtrNodeStack stack;
walkTowardsKey(id, &stack);
std::shared_ptr<SHAMapAbstractNode> node;
SHAMapNodeID nodeID;
auto const isv2 = is_v2();
while (!stack.empty())
{
std::tie(node, nodeID) = stack.top();
if (node->isLeaf())
{
auto leaf = static_cast<SHAMapTreeNode*>(node.get());
if (leaf->peekItem()->key() > id)
return const_iterator(this, leaf->peekItem().get(), std::move(stack));
}
else
{
auto inner = std::static_pointer_cast<SHAMapInnerNode>(node);
int branch;
if (isv2)
{
auto n = std::static_pointer_cast<SHAMapInnerNodeV2>(inner);
if (n->has_common_prefix(id))
branch = nodeID.selectBranch(id) + 1;
else if (id < n->common())
branch = 0;
else
branch = 16;
}
else
{
branch = nodeID.selectBranch(id) + 1;
}
for (; branch < 16; ++branch)
{
if (!inner->isEmptyBranch(branch))
{
node = descendThrow(inner, branch);
auto leaf = firstBelow(node, stack, branch);
if (!leaf)
Throw<SHAMapMissingNode> (type_, id);
return const_iterator(this, leaf->peekItem().get(),
std::move(stack));
}
}
}
stack.pop();
}
return end();
}
bool SHAMap::hasItem (uint256 const& id) const
{
// does the tree have an item with this ID
SHAMapTreeNode* leaf = findKey(id);
return (leaf != nullptr);
}
bool SHAMap::delItem (uint256 const& id)
{
// delete the item with this ID
assert (state_ != SHAMapState::Immutable);
SharedPtrNodeStack stack;
walkTowardsKey(id, &stack);
if (stack.empty ())
Throw<SHAMapMissingNode> (type_, id);
auto leaf = std::dynamic_pointer_cast<SHAMapTreeNode>(stack.top ().first);
stack.pop ();
if (!leaf || (leaf->peekItem ()->key() != id))
return false;
SHAMapTreeNode::TNType type = leaf->getType ();
// What gets attached to the end of the chain
// (For now, nothing, since we deleted the leaf)
std::shared_ptr<SHAMapAbstractNode> prevNode;
while (!stack.empty ())
{
auto node = std::static_pointer_cast<SHAMapInnerNode>(stack.top().first);
SHAMapNodeID nodeID = stack.top().second;
stack.pop();
node = unshareNode(std::move(node), nodeID);
node->setChild(nodeID.selectBranch(id), prevNode);
if (!nodeID.isRoot ())
{
// we may have made this a node with 1 or 0 children
// And, if so, we need to remove this branch
int bc = node->getBranchCount();
if (is_v2())
{
assert(bc != 0);
if (bc == 1)
{
for (int i = 0; i < 16; ++i)
{
if (!node->isEmptyBranch (i))
{
prevNode = descendThrow(node, i);
break;
}
}
}
else // bc >= 2
{
// This node is now the end of the branch
prevNode = std::move(node);
}
}
else
{
if (bc == 0)
{
// no children below this branch
prevNode.reset ();
}
else if (bc == 1)
{
// If there's only one item, pull up on the thread
auto item = onlyBelow (node.get ());
if (item)
{
for (int i = 0; i < 16; ++i)
{
if (!node->isEmptyBranch (i))
{
node->setChild (i, nullptr);
break;
}
}
prevNode = std::make_shared<SHAMapTreeNode>(item, type, node->getSeq());
}
else
{
prevNode = std::move (node);
}
}
else
{
// This node is now the end of the branch
prevNode = std::move (node);
}
}
}
}
return true;
}
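// Keep only the first `depth` nibbles of `key`, zeroing the rest; e.g.
// depth 3 maps 0xABCDE... to 0xABC00... Used to form the common prefix
// of a v2 inner node.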
static
uint256
prefix(unsigned depth, uint256 const& key)
{
uint256 r{};
auto x = r.begin();
auto y = key.begin();
for (auto i = 0; i < depth/2; ++i, ++x, ++y)
*x = *y;
if (depth & 1)
*x = *y & 0xF0;
return r;
}
bool
SHAMap::addGiveItem (std::shared_ptr<SHAMapItem const> const& item,
bool isTransaction, bool hasMeta)
{
// add the specified item, does not update
uint256 tag = item->key();
SHAMapTreeNode::TNType type = !isTransaction ? SHAMapTreeNode::tnACCOUNT_STATE :
(hasMeta ? SHAMapTreeNode::tnTRANSACTION_MD : SHAMapTreeNode::tnTRANSACTION_NM);
assert (state_ != SHAMapState::Immutable);
SharedPtrNodeStack stack;
walkTowardsKey(tag, &stack);
if (stack.empty ())
Throw<SHAMapMissingNode> (type_, tag);
auto node = stack.top ().first;
auto nodeID = stack.top ().second;
stack.pop ();
if (node->isLeaf())
{
auto leaf = std::static_pointer_cast<SHAMapTreeNode>(node);
if (leaf->peekItem()->key() == tag)
return false;
}
node = unshareNode(std::move(node), nodeID);
if (is_v2())
{
if (node->isInner())
{
auto inner = std::static_pointer_cast<SHAMapInnerNodeV2>(node);
if (inner->has_common_prefix(tag))
{
int branch = nodeID.selectBranch(tag);
assert(inner->isEmptyBranch(branch));
auto newNode = std::make_shared<SHAMapTreeNode>(item, type, seq_);
inner->setChild(branch, newNode);
}
else
{
assert(!stack.empty());
auto parent = unshareNode(
std::static_pointer_cast<SHAMapInnerNodeV2>(stack.top().first),
stack.top().second);
stack.top().first = parent;
auto parent_depth = parent->depth();
auto depth = inner->get_common_prefix(tag);
auto new_inner = std::make_shared<SHAMapInnerNodeV2>(seq_);
nodeID = SHAMapNodeID{depth, prefix(depth, inner->common())};
new_inner->setChild(nodeID.selectBranch(inner->common()), inner);
nodeID = SHAMapNodeID{depth, prefix(depth, tag)};
new_inner->setChild(nodeID.selectBranch(tag),
std::make_shared<SHAMapTreeNode>(item, type, seq_));
new_inner->set_common(depth, prefix(depth, tag));
nodeID = SHAMapNodeID{parent_depth, prefix(parent_depth, tag)};
parent->setChild(nodeID.selectBranch(tag), new_inner);
node = new_inner;
}
}
else
{
auto leaf = std::static_pointer_cast<SHAMapTreeNode>(node);
auto inner = std::make_shared<SHAMapInnerNodeV2>(seq_);
inner->setChildren(leaf, std::make_shared<SHAMapTreeNode>(item, type, seq_));
assert(!stack.empty());
auto parent = unshareNode(
std::static_pointer_cast<SHAMapInnerNodeV2>(stack.top().first),
stack.top().second);
stack.top().first = parent;
node = inner;
}
}
else // !is_v2()
{
if (node->isInner ())
{
// easy case, we end on an inner node
auto inner = std::static_pointer_cast<SHAMapInnerNode>(node);
int branch = nodeID.selectBranch (tag);
assert (inner->isEmptyBranch (branch));
auto newNode = std::make_shared<SHAMapTreeNode> (item, type, seq_);
inner->setChild (branch, newNode);
}
else
{
// this is a leaf node that has to be made an inner node holding two items
auto leaf = std::static_pointer_cast<SHAMapTreeNode>(node);
std::shared_ptr<SHAMapItem const> otherItem = leaf->peekItem ();
assert (otherItem && (tag != otherItem->key()));
node = std::make_shared<SHAMapInnerNode>(node->getSeq());
int b1, b2;
while ((b1 = nodeID.selectBranch (tag)) ==
(b2 = nodeID.selectBranch (otherItem->key())))
{
stack.push ({node, nodeID});
// we need a new inner node, since both go on same branch at this level
nodeID = nodeID.getChildNodeID (b1);
node = std::make_shared<SHAMapInnerNode> (seq_);
}
// we can add the two leaf nodes here
assert (node->isInner ());
std::shared_ptr<SHAMapTreeNode> newNode =
std::make_shared<SHAMapTreeNode> (item, type, seq_);
assert (newNode->isValid () && newNode->isLeaf ());
auto inner = std::static_pointer_cast<SHAMapInnerNode>(node);
inner->setChild (b1, newNode);
newNode = std::make_shared<SHAMapTreeNode> (otherItem, type, seq_);
assert (newNode->isValid () && newNode->isLeaf ());
inner->setChild (b2, newNode);
}
}
dirtyUp (stack, tag, node);
return true;
}
bool
SHAMap::addItem(SHAMapItem&& i, bool isTransaction, bool hasMetaData)
{
return addGiveItem(std::make_shared<SHAMapItem const>(std::move(i)),
isTransaction, hasMetaData);
}
SHAMapHash
SHAMap::getHash () const
{
auto hash = root_->getNodeHash();
if (hash.isZero())
{
const_cast<SHAMap&>(*this).unshare();
hash = root_->getNodeHash();
}
return hash;
}
bool
SHAMap::updateGiveItem (std::shared_ptr<SHAMapItem const> const& item,
bool isTransaction, bool hasMeta)
{
// can't change the tag but can change the hash
uint256 tag = item->key();
assert (state_ != SHAMapState::Immutable);
SharedPtrNodeStack stack;
walkTowardsKey(tag, &stack);
if (stack.empty ())
Throw<SHAMapMissingNode> (type_, tag);
auto node = std::dynamic_pointer_cast<SHAMapTreeNode>(stack.top().first);
auto nodeID = stack.top ().second;
stack.pop ();
if (!node || (node->peekItem ()->key() != tag))
{
assert (false);
return false;
}
node = unshareNode(std::move(node), nodeID);
if (!node->setItem (item, !isTransaction ? SHAMapTreeNode::tnACCOUNT_STATE :
(hasMeta ? SHAMapTreeNode::tnTRANSACTION_MD : SHAMapTreeNode::tnTRANSACTION_NM)))
{
JLOG(journal_.trace()) <<
"SHAMap setItem, no change";
return true;
}
dirtyUp (stack, tag, node);
return true;
}
bool SHAMap::fetchRoot (SHAMapHash const& hash, SHAMapSyncFilter* filter)
{
if (hash == root_->getNodeHash ())
return true;
if (auto stream = journal_.trace())
{
if (type_ == SHAMapType::TRANSACTION)
{
stream
<< "Fetch root TXN node " << hash;
}
else if (type_ == SHAMapType::STATE)
{
stream <<
"Fetch root STATE node " << hash;
}
else
{
stream <<
"Fetch root SHAMap node " << hash;
}
}
auto newRoot = fetchNodeNT (hash, filter);
if (newRoot)
{
root_ = newRoot;
assert (root_->getNodeHash () == hash);
return true;
}
return false;
}
// Replace a node with a shareable node.
//
// This code handles two cases:
//
// 1) An unshared, unshareable node needs to be made shareable
// so immutable SHAMap's can have references to it.
//
// 2) An unshareable node is shared. This happens when you make
// a mutable snapshot of a mutable SHAMap.
std::shared_ptr<SHAMapAbstractNode>
SHAMap::writeNode (
NodeObjectType t, std::uint32_t seq, std::shared_ptr<SHAMapAbstractNode> node) const
{
// Node is ours, so we can just make it shareable
assert (node->getSeq() == seq_);
assert (backed_);
node->setSeq (0);
canonicalize (node->getNodeHash(), node);
Serializer s;
node->addRaw (s, snfPREFIX);
f_.db().store (t,
std::move (s.modData ()), node->getNodeHash ().as_uint256());
return node;
}
// We can't modify an inner node someone else might have a
// pointer to because flushing modifies inner nodes -- it
// makes them point to canonical/shared nodes.
template <class Node>
std::shared_ptr<Node>
SHAMap::preFlushNode (std::shared_ptr<Node> node) const
{
// A shared node should never need to be flushed
// because that would imply someone modified it
assert (node->getSeq() != 0);
if (node->getSeq() != seq_)
{
// Node is not uniquely ours, so unshare it before
// possibly modifying it
node = std::static_pointer_cast<Node>(node->clone(seq_));
}
return node;
}
int SHAMap::unshare ()
{
// Don't share nodes wth parent map
return walkSubTree (false, hotUNKNOWN, 0);
}
/** Convert all modified nodes to shared nodes */
// If requested, write them to the node store
int SHAMap::flushDirty (NodeObjectType t, std::uint32_t seq)
{
return walkSubTree (true, t, seq);
}
int
SHAMap::walkSubTree (bool doWrite, NodeObjectType t, std::uint32_t seq)
{
int flushed = 0;
Serializer s;
if (!root_ || (root_->getSeq() == 0))
return flushed;
if (root_->isLeaf())
{ // special case -- root_ is leaf
root_ = preFlushNode (std::move(root_));
root_->updateHash();
if (doWrite && backed_)
root_ = writeNode(t, seq, std::move(root_));
else
root_->setSeq (0);
return 1;
}
auto node = std::static_pointer_cast<SHAMapInnerNode>(root_);
if (node->isEmpty ())
{ // replace empty root with a new empty root
if (is_v2())
root_ = std::make_shared<SHAMapInnerNodeV2>(0, 0);
else
root_ = std::make_shared<SHAMapInnerNode>(0);
return 1;
}
// Stack of {parent,index,child} pointers representing
// inner nodes we are in the process of flushing
using StackEntry = std::pair <std::shared_ptr<SHAMapInnerNode>, int>;
std::stack <StackEntry, std::vector<StackEntry>> stack;
node = preFlushNode(std::move(node));
int pos = 0;
// We can't flush an inner node until we flush its children
while (1)
{
while (pos < 16)
{
if (node->isEmptyBranch (pos))
{
++pos;
}
else
{
// No need to do I/O. If the node isn't linked,
// it can't need to be flushed
int branch = pos;
auto child = node->getChild(pos++);
if (child && (child->getSeq() != 0))
{
// This is a node that needs to be flushed
child = preFlushNode(std::move(child));
if (child->isInner ())
{
// save our place and work on this node
stack.emplace (std::move (node), branch);
node = std::static_pointer_cast<SHAMapInnerNode>(std::move(child));
pos = 0;
}
else
{
// flush this leaf
++flushed;
assert (node->getSeq() == seq_);
child->updateHash();
if (doWrite && backed_)
child = writeNode(t, seq, std::move(child));
else
child->setSeq (0);
node->shareChild (branch, child);
}
}
}
}
// update the hash of this inner node
node->updateHashDeep();
// This inner node can now be shared
if (doWrite && backed_)
node = std::static_pointer_cast<SHAMapInnerNode>(writeNode(t, seq,
std::move(node)));
else
node->setSeq (0);
++flushed;
if (stack.empty ())
break;
auto parent = std::move (stack.top().first);
pos = stack.top().second;
stack.pop();
// Hook this inner node to its parent
assert (parent->getSeq() == seq_);
parent->shareChild (pos, node);
// Continue with parent's next child, if any
node = std::move (parent);
++pos;
}
// Last inner node is the new root_
root_ = std::move (node);
return flushed;
}
void SHAMap::dump (bool hash) const
{
int leafCount = 0;
JLOG(journal_.info()) << " MAP Contains";
std::stack <std::pair <SHAMapAbstractNode*, SHAMapNodeID> > stack;
stack.push ({root_.get (), SHAMapNodeID ()});
do
{
auto node = stack.top().first;
auto nodeID = stack.top().second;
stack.pop();
JLOG(journal_.info()) << node->getString (nodeID);
if (hash)
{
JLOG(journal_.info()) << "Hash: " << node->getNodeHash();
}
if (node->isInner ())
{
auto inner = static_cast<SHAMapInnerNode*>(node);
for (int i = 0; i < 16; ++i)
{
if (!inner->isEmptyBranch (i))
{
auto child = inner->getChildPointer (i);
if (child)
{
assert (child->getNodeHash() == inner->getChildHash (i));
stack.push ({child, nodeID.getChildNodeID (i)});
}
}
}
}
else
++leafCount;
}
while (!stack.empty ());
JLOG(journal_.info()) << leafCount << " resident leaves";
}
std::shared_ptr<SHAMapAbstractNode> SHAMap::getCache (SHAMapHash const& hash) const
{
auto ret = f_.treecache().fetch (hash.as_uint256());
assert (!ret || !ret->getSeq());
return ret;
}
void
SHAMap::canonicalize(SHAMapHash const& hash, std::shared_ptr<SHAMapAbstractNode>& node) const
{
assert (backed_);
assert (node->getSeq() == 0);
assert (node->getNodeHash() == hash);
f_.treecache().canonicalize (hash.as_uint256(), node);
}
SHAMap::version
SHAMap::get_version() const
{
if (is_v2())
return version{2};
return version{1};
}
void
SHAMap::invariants() const
{
(void)getHash(); // update node hashes
auto node = root_.get();
assert(node != nullptr);
assert(!node->isLeaf());
SharedPtrNodeStack stack;
for (auto leaf = peekFirstItem(stack); leaf != nullptr;
leaf = peekNextItem(leaf->peekItem()->key(), stack))
;
node->invariants(is_v2(), true);
}
bool
SHAMap::isInconsistentNode(std::shared_ptr<SHAMapAbstractNode> const& node) const
{
assert(root_);
assert(node);
if (std::dynamic_pointer_cast<SHAMapTreeNode>(node) != nullptr)
return false;
bool is_node_v2 = std::dynamic_pointer_cast<SHAMapInnerNodeV2>(node) != nullptr;
assert (! is_node_v2 || (std::dynamic_pointer_cast<SHAMapInnerNodeV2>(node)->depth() != 0));
if (is_v2() == is_node_v2)
return false;
state_ = SHAMapState::Invalid;
return true;
}
} // jbcoin
| 28.988431 | 105 | 0.541269 | trongnmchainos |
8b03845050e6020e129e3ddd7fa42fc94e38c713 | 3,146 | cpp | C++ | src/module/base/ServiceStatusManager/src/ServiceStatusManager.cpp | 403712387/cgf | f26d7fa16ec8c7ca7565109b0d7f483cc7ad6288 | [
"MIT"
] | 2 | 2020-03-04T06:54:45.000Z | 2021-07-21T05:59:08.000Z | src/module/base/ServiceStatusManager/src/ServiceStatusManager.cpp | 403712387/cgf | f26d7fa16ec8c7ca7565109b0d7f483cc7ad6288 | [
"MIT"
] | null | null | null | src/module/base/ServiceStatusManager/src/ServiceStatusManager.cpp | 403712387/cgf | f26d7fa16ec8c7ca7565109b0d7f483cc7ad6288 | [
"MIT"
] | 3 | 2019-12-23T02:13:27.000Z | 2021-12-09T08:28:50.000Z | #include <stdio.h>
#include <sstream>
#include "ServiceStatusInfo.h"
#include "GetServiceStatusMessage.h"
#include "ServiceStatusManager.h"
#include "curl/curl.h"
#include "jsoncpp/json.h"
#include "libmicrohttpd/microhttpd.h"
#include "log4cplus/version.h"
#include "cryptopp/cryptlib.h"
#define _STR(x) _VAL(x)
#define _VAL(x) #x
ServiceStatusManager::ServiceStatusManager(MessageRoute *messageRoute)
:BaseProcess(messageRoute, "ServiceStatusManager")
{
// 初始化状态信息
initServiceStatusInfo();
// 获取第三方库信息
initLibraryVersionInfo();
// 订阅消息
subscribeMessage(Service_Status_Message);
}
// 初始化服务的状态信息
void ServiceStatusManager::initServiceStatusInfo()
{
// 服务的状态信息
mServiceStatusInfo = std::make_shared<ServiceStatusInfo>();
mServiceStatusInfo->setStartupTime(QDateTime::currentDateTime());
#ifdef GIT_BRANCH
mServiceStatusInfo->setGitBranch(_STR(GIT_BRANCH));
#endif
#ifdef GIT_COMMIT_ID
mServiceStatusInfo->setGitCommitId(_STR(GIT_COMMIT_ID));
#endif
}
// 初始化模块的版本信息
void ServiceStatusManager::initLibraryVersionInfo()
{
std::string curlVersion = LIBCURL_VERSION;
std::string jsoncppVersion = JSONCPP_VERSION_STRING;
std::string libmicrohttpdVersion = MHD_get_version();
std::string log4cplusVersion = LOG4CPLUS_VERSION_STR;
std::string qtVersion = qVersion();
mServiceStatusInfo->setLibraryVersion("curl", curlVersion);
mServiceStatusInfo->setLibraryVersion("jsoncpp", jsoncppVersion);
mServiceStatusInfo->setLibraryVersion("libmicrohttpd", libmicrohttpdVersion);
mServiceStatusInfo->setLibraryVersion("log4cplus", log4cplusVersion);
mServiceStatusInfo->setLibraryVersion("Qt", qtVersion);
}
bool ServiceStatusManager::init()
{
LOG_I(mClassName, "init module " << getModuleName());
// 打印git信息
LOG_I(mClassName, "git info:" << mServiceStatusInfo->gitInfoToString());
// 打印库信息
LOG_I(mClassName, "library version info:" << mServiceStatusInfo->libraryInfoToString());
return true;
}
void ServiceStatusManager::beginWork()
{
LOG_I(mClassName, "begin work, module " << getModuleName());
}
// 卸载模块
void ServiceStatusManager::uninit()
{
LOG_I(mClassName, "begin uninit");
BaseProcess::uninit();
LOG_I(mClassName, "end uninit");
}
// 处理消息的函数
std::shared_ptr<BaseResponse> ServiceStatusManager::onProcessMessage(std::shared_ptr<BaseMessage> &message)
{
std::shared_ptr<BaseResponse> response;
switch(message->getMessageType())
{
case Service_Status_Message: // 获取服务状态信息
response = onProcessGetServiceStatusMessage(message);
}
return response;
}
// 偷窥消息的处理函数
bool ServiceStatusManager::onForeseeMessage(std::shared_ptr<BaseMessage> &message)
{
return false;
}
// 处理消息的回应
void ServiceStatusManager::onProcessResponse(std::shared_ptr<BaseResponse> &response)
{
}
// 处理获取服务状态消息
std::shared_ptr<BaseResponse> ServiceStatusManager::onProcessGetServiceStatusMessage(std::shared_ptr<BaseMessage> &message)
{
std::shared_ptr<GetServiceStatusResponse> response = std::make_shared<GetServiceStatusResponse>(mServiceStatusInfo, message, Common::noError());
return response;
}
| 27.596491 | 148 | 0.75143 | 403712387 |
8b03c5272547f3929145061f550d3e75e9897b76 | 4,405 | cpp | C++ | src/appleseedmaya/attributeutils.cpp | wielandrochel/appleseed-maya | 913b8e3212e81fd5993d30691c22d1d39f70bf53 | [
"MIT"
] | 1 | 2018-07-17T21:57:01.000Z | 2018-07-17T21:57:01.000Z | src/appleseedmaya/attributeutils.cpp | wielandrochel/appleseed-maya | 913b8e3212e81fd5993d30691c22d1d39f70bf53 | [
"MIT"
] | null | null | null | src/appleseedmaya/attributeutils.cpp | wielandrochel/appleseed-maya | 913b8e3212e81fd5993d30691c22d1d39f70bf53 | [
"MIT"
] | null | null | null |
//
// This source file is part of appleseed.
// Visit https://appleseedhq.net/ for additional information and resources.
//
// This software is released under the MIT license.
//
// Copyright (c) 2016-2018 Esteban Tovagliari, The appleseedhq Organization
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
// Interface header.
#include "attributeutils.h"
// Maya headers.
#include "appleseedmaya/_beginmayaheaders.h"
#include <maya/MFnMatrixData.h>
#include "appleseedmaya/_endmayaheaders.h"
namespace
{
template <typename T>
MStatus get3(const MPlug& plug, T& x, T& y, T& z)
{
if (!plug.isCompound())
return MS::kFailure;
if (plug.numChildren() != 3)
return MS::kFailure;
MStatus status;
if (status) status = plug.child(0).getValue(x);
if (status) status = plug.child(1).getValue(y);
if (status) status = plug.child(2).getValue(z);
return status;
}
}
namespace AttributeUtils
{
MStatus get(const MPlug& plug, MAngle& value)
{
return plug.getValue(value);
}
MStatus get(const MPlug& plug, MColor& value)
{
value.a = 1.0f;
return get3(plug, value.r, value.g, value.b);
}
MStatus get(const MPlug& plug, MPoint& value)
{
return get3(plug, value.x, value.y, value.z);
}
MStatus get(const MPlug& plug, MVector& value)
{
return get3(plug, value.x, value.y, value.z);
}
MStatus get(const MPlug& plug, MMatrix& value)
{
value.setToIdentity();
MObject matrixObject;
MStatus status = plug.getValue(matrixObject);
if (!status)
return status;
MFnMatrixData matrixDataFn(matrixObject);
value = matrixDataFn.matrix(&status);
return status;
}
MStatus getPlugConnectedTo(const MPlug& dstPlug, MPlug& srcPlug)
{
if (!dstPlug.isConnected())
return MS::kFailure;
MStatus status;
MPlugArray inputConnections;
dstPlug.connectedTo(inputConnections, true, false, &status);
if (status)
{
if (inputConnections.length() == 0)
return MS::kFailure;
srcPlug = inputConnections[0];
}
return status;
}
bool hasConnections(const MPlug& plug, bool input)
{
MStatus status;
if (!plug.isConnected(&status))
return false;
MPlugArray connections;
plug.connectedTo(
connections,
input ? true : false,
input ? false : true,
&status);
if (status)
return connections.length() != 0;
return false;
}
bool anyChildPlugConnected(const MPlug& plug, bool input)
{
MStatus status;
if (!plug.isCompound(&status))
return false;
if (!status)
return false;
int numChildren = plug.numChildren(&status);
if (!status)
return false;
for (int i = 0, e = plug.numChildren(); i < e; ++i)
{
MPlug c = plug.child(i, &status);
if (!status)
continue;
if (hasConnections(c, input))
return true;
}
return false;
}
MStatus makeInput(MFnAttribute& attr)
{
attr.setStorable(true);
attr.setReadable(false);
attr.setWritable(true);
attr.setKeyable(true);
return MS::kSuccess;
}
MStatus makeOutput(MFnAttribute& attr)
{
attr.setStorable(false);
attr.setReadable(true);
attr.setWritable(false);
attr.setKeyable(false);
//attr.setHidden(true);
return MS::kSuccess;
}
}
| 23.810811 | 80 | 0.665153 | wielandrochel |
8b0c422928ab095f72a9c9e4c116577beb2722c0 | 12,545 | cpp | C++ | homework2/prt/src/accel.cpp | QRWells/Games-202-Homework | 5308f57ffe4a1b2d011e43bd0e9890ad6501146d | [
"MIT"
] | 1 | 2022-03-12T11:48:30.000Z | 2022-03-12T11:48:30.000Z | homework2/prt/src/accel.cpp | QRWells/Games-202-Homework | 5308f57ffe4a1b2d011e43bd0e9890ad6501146d | [
"MIT"
] | null | null | null | homework2/prt/src/accel.cpp | QRWells/Games-202-Homework | 5308f57ffe4a1b2d011e43bd0e9890ad6501146d | [
"MIT"
] | null | null | null | /*
This file is part of Nori, a simple educational ray tracer
Copyright (c) 2015 by Wenzel Jakob
Nori is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License Version 3
as published by the Free Software Foundation.
Nori is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <nori/accel.h>
#include <Eigen/Geometry>
#include <chrono>
using namespace std::chrono;
NORI_NAMESPACE_BEGIN
void Accel::addMesh(Mesh *mesh) {
if (m_num_meshes >= MAX_NUM_MESHES)
throw NoriException("Accel: only %d meshes are supported!", MAX_NUM_MESHES);
m_meshes[m_num_meshes] = mesh;
m_bbox.expandBy(mesh->getBoundingBox());
m_num_meshes++;
}
void Accel::build() {
if (m_num_meshes == 0)
throw NoriException("No mesh found, could not build acceleration structure");
auto start = high_resolution_clock::now();
// delete old hierarchy if present
delete m_root;
uint32_t num_triangles = 0;
for (uint32_t mesh_idx = 0; mesh_idx < m_num_meshes; mesh_idx++) {
num_triangles += m_meshes[mesh_idx]->getTriangleCount();
}
std::vector<uint32_t> triangles(num_triangles);
std::vector<uint32_t> mesh_indices(num_triangles);
uint32_t offset = 0;
for (uint32_t current_mesh_idx = 0; current_mesh_idx < m_num_meshes; current_mesh_idx++) {
uint32_t num_triangles_mesh = m_meshes[current_mesh_idx]->getTriangleCount();
for (uint32_t i = 0; i < num_triangles_mesh; i++) {
triangles[offset + i] = i;
mesh_indices[offset + i] = current_mesh_idx;
}
offset += num_triangles_mesh;
}
m_root = buildRecursive(m_bbox, triangles, mesh_indices, 0);
printf("Octree build time: %ldms \n", duration_cast<milliseconds>(high_resolution_clock::now() - start).count());
printf("Num nodes: %d \n", m_num_nodes);
printf("Num leaf nodes: %d \n", m_num_leaf_nodes);
printf("Num non-empty leaf nodes: %d \n", m_num_nonempty_leaf_nodes);
printf("Total number of saved triangles: %d \n", m_num_triangles_saved);
printf("Avg triangles per node: %f \n", (float)m_num_triangles_saved / (float)m_num_nodes);
printf("Recursion depth: %d \n", m_recursion_depth);
}
bool Accel::rayIntersect(const Ray3f &ray_, Intersection &its, bool shadowRay) const {
bool foundIntersection; // Was an intersection found so far?
uint32_t f = (uint32_t) -1; // Triangle index of the closest intersection
Ray3f ray(ray_); /// Make a copy of the ray (we will need to update its '.maxt' value)
foundIntersection = traverseRecursive(*m_root, ray, its, shadowRay, f);
if (shadowRay)
return foundIntersection;
if (foundIntersection) {
/* At this point, we now know that there is an intersection,
and we know the triangle index of the closest such intersection.
The following computes a number of additional properties which
characterize the intersection (normals, texture coordinates, etc..)
*/
/* Find the barycentric coordinates */
Vector3f bary;
bary << 1-its.uv.sum(), its.uv;
/* References to all relevant mesh buffers */
const Mesh *mesh = its.mesh;
const MatrixXf &V = mesh->getVertexPositions();
const MatrixXf &N = mesh->getVertexNormals();
const MatrixXf &UV = mesh->getVertexTexCoords();
const MatrixXu &F = mesh->getIndices();
/* Vertex indices of the triangle */
uint32_t idx0 = F(0, f), idx1 = F(1, f), idx2 = F(2, f);
Point3f p0 = V.col(idx0), p1 = V.col(idx1), p2 = V.col(idx2);
its.bary = bary;
its.tri_index = Point3f(idx0, idx1, idx2);
/* Compute the intersection positon accurately
using barycentric coordinates */
its.p = bary.x() * p0 + bary.y() * p1 + bary.z() * p2;
/* Compute proper texture coordinates if provided by the mesh */
if (UV.size() > 0)
its.uv = bary.x() * UV.col(idx0) +
bary.y() * UV.col(idx1) +
bary.z() * UV.col(idx2);
/* Compute the geometry frame */
its.geoFrame = Frame((p1-p0).cross(p2-p0).normalized());
if (N.size() > 0) {
/* Compute the shading frame. Note that for simplicity,
the current implementation doesn't attempt to provide
tangents that are continuous across the surface. That
means that this code will need to be modified to be able
use anisotropic BRDFs, which need tangent continuity */
its.shFrame = Frame(
(bary.x() * N.col(idx0) +
bary.y() * N.col(idx1) +
bary.z() * N.col(idx2)).normalized());
} else {
its.shFrame = its.geoFrame;
}
}
return foundIntersection;
}
Accel::Node* Accel::buildRecursive(const BoundingBox3f& bbox, std::vector<uint32_t>& triangle_indices,
std::vector<uint32_t>& mesh_indices, uint32_t recursion_depth) {
// a node is created in any case
m_num_nodes++;
uint32_t num_triangles = triangle_indices.size();
// return empty node if no triangles are left
if (num_triangles == 0) {
Node* node = new Node();
node->bbox = BoundingBox3f(bbox);
// add to statistics
m_num_leaf_nodes++;
return node;
}
// create leaf node if 10 or less triangles are left or if the max recursion depth is reached.
if (num_triangles <= MAX_TRIANGLES_PER_NODE || recursion_depth >= MAX_RECURSION_DEPTH) {
Node* node = new Node();
node->num_triangles = num_triangles;
node->triangle_indices = new uint32_t[num_triangles];
node->mesh_indices = new uint32_t [num_triangles];
for (uint32_t i = 0; i < num_triangles; i++) {
node->triangle_indices[i] = triangle_indices[i];
node->mesh_indices[i] = mesh_indices[i];
}
node->bbox = BoundingBox3f(bbox);
// add to statistics
m_num_leaf_nodes++;
m_num_nonempty_leaf_nodes++;
m_num_triangles_saved += num_triangles;
return node;
}
// create new parent node
Node* node = new Node();
node->bbox = BoundingBox3f(bbox);
BoundingBox3f child_bboxes[8] = {};
subdivideBBox(bbox, child_bboxes);
std::vector<std::vector<uint32_t>> child_triangle_indices(8);
std::vector<std::vector<uint32_t>> child_mesh_indices(8);
uint32_t child_num_triangles[8] = {};
// place every triangle in the children it overlaps with
// for every child bbox
for (uint32_t i = 0; i < 8; i++) {
// for every triangle inside of the parent create triangle bounding box
for (uint32_t j = 0; j < num_triangles; j++) {
// for every triangle vertex expand triangle bbox
uint32_t triangle_idx = triangle_indices[j];
uint32_t mesh_idx = mesh_indices[j];
BoundingBox3f triangle_bbox = m_meshes[mesh_idx]->getBoundingBox(triangle_idx);
// check if triangle is in bbox, if so put triangle index into triangle list of child
if (child_bboxes[i].overlaps(triangle_bbox)) {
child_triangle_indices[i].emplace_back(triangle_idx);
child_mesh_indices[i].emplace_back(mesh_idx);
child_num_triangles[i]++;
}
}
}
// release memory to avoid stack overflow
triangle_indices = std::vector<uint32_t>();
mesh_indices = std::vector<uint32_t>();
// for every child bbox
Node* last_child = nullptr;
for (uint32_t i = 0; i < 8; i++) {
// first child
if (i == 0) {
node->child = buildRecursive(child_bboxes[i], child_triangle_indices[i], child_mesh_indices[i], recursion_depth + 1);
last_child = node->child;
// neighbour children
} else {
last_child->next = buildRecursive(child_bboxes[i], child_triangle_indices[i], child_mesh_indices[i], recursion_depth + 1);
last_child = last_child->next;
}
m_recursion_depth = std::max(m_recursion_depth, recursion_depth + 1);
}
return node;
}
bool Accel::traverseRecursive(const Node& node, Ray3f &ray, Intersection &its, bool shadowRay, uint32_t& hit_idx) const {
bool foundIntersection = false;
// only check triangles of node and its children if ray intersects with node bbox
if (!node.bbox.rayIntersect(ray)) {
return false;
}
// search through all triangles in node
for (uint32_t i = 0; i < node.num_triangles; ++i) {
float u, v, t;
uint32_t triangle_idx = node.triangle_indices[i];
uint32_t mesh_idx = node.mesh_indices[i];
if (m_meshes[mesh_idx]->rayIntersect(triangle_idx, ray, u, v, t) && t < ray.maxt) {
/* An intersection was found! Can terminate
immediately if this is a shadow ray query */
if (shadowRay)
return true;
ray.maxt = t;
its.t = t;
its.uv = Point2f(u, v);
its.mesh = m_meshes[mesh_idx];
hit_idx = triangle_idx;
foundIntersection = true;
}
}
if (node.child) {
std::pair<Node*, float> children[8];
Node* current_child = node.child;
int i = 0;
do {
children[i] = std::pair<Node*, float>(current_child, current_child->bbox.distanceTo(ray.o));
current_child = current_child->next;
i++;
} while (current_child);
std::sort(children, children + 8, [ray](const std::pair<Node*, float>& l, const std::pair<Node*, float>& r) {
return l.second < r.second;
});
for (auto child: children) {
foundIntersection = traverseRecursive(*child.first, ray, its, shadowRay, hit_idx) || foundIntersection;
if (shadowRay && foundIntersection)
return true;
}
}
return foundIntersection;
}
void Accel::subdivideBBox(const nori::BoundingBox3f &parent, nori::BoundingBox3f *bboxes) {
Point3f extents = parent.getExtents();
Point3f x0_y0_z0 = parent.min;
Point3f x1_y0_z0 = Point3f(parent.min.x() + extents.x() / 2.f, parent.min.y(), parent.min.z());
Point3f x0_y1_z0 = Point3f(parent.min.x(), parent.min.y() + extents.y() / 2.f, parent.min.z());
Point3f x1_y1_z0 = Point3f(parent.min.x() + extents.x() / 2.f, parent.min.y() + extents.y() / 2.f, parent.min.z());
Point3f x0_y0_z1 = Point3f(parent.min.x(), parent.min.y(), parent.min.z() + extents.z() / 2.f);
Point3f x1_y0_z1 = Point3f(parent.min.x() + extents.x() / 2.f, parent.min.y(), parent.min.z() + extents.z() / 2.f);
Point3f x0_y1_z1 = Point3f(parent.min.x(), parent.min.y() + extents.y() / 2.f, parent.min.z() + extents.z() / 2.f);
Point3f x1_y1_z1 = Point3f(parent.min.x() + extents.x() / 2.f, parent.min.y() + extents.y() / 2.f, parent.min.z() + extents.z() / 2.f);
Point3f x2_y1_z1 = Point3f(parent.max.x(), parent.min.y() + extents.y() / 2.f, parent.min.z() + extents.z() / 2.f);
Point3f x1_y2_z1 = Point3f(parent.min.x() + extents.x() / 2.f, parent.max.y(), parent.min.z() + extents.z() / 2.f);
Point3f x2_y2_z1 = Point3f(parent.max.x(), parent.max.y(), parent.min.z() + extents.z() / 2.f);
Point3f x1_y1_z2 = Point3f(parent.min.x() + extents.x() / 2.f, parent.min.y() + extents.y() / 2.f, parent.max.z());
Point3f x2_y1_z2 = Point3f(parent.max.x(), parent.min.y() + extents.y() / 2.f, parent.max.z());
Point3f x1_y2_z2 = Point3f(parent.min.x() + extents.x() / 2.f, parent.max.y(), parent.max.z());
Point3f x2_y2_z2 = Point3f(parent.max.x(), parent.max.y(), parent.max.z());
bboxes[0] = BoundingBox3f(x0_y0_z0, x1_y1_z1);
bboxes[1] = BoundingBox3f(x1_y0_z0, x2_y1_z1);
bboxes[2] = BoundingBox3f(x0_y1_z0, x1_y2_z1);
bboxes[3] = BoundingBox3f(x1_y1_z0, x2_y2_z1);
bboxes[4] = BoundingBox3f(x0_y0_z1, x1_y1_z2);
bboxes[5] = BoundingBox3f(x1_y0_z1, x2_y1_z2);
bboxes[6] = BoundingBox3f(x0_y1_z1, x1_y2_z2);
bboxes[7] = BoundingBox3f(x1_y1_z1, x2_y2_z2);
}
NORI_NAMESPACE_END
| 40.208333 | 139 | 0.626305 | QRWells |
8b0d0d5092ad2c186dc45eff5b1fc496f9fe7987 | 20,236 | cpp | C++ | test/entt/meta/meta_data.cpp | matthew-nagy/entt | 2ef98e1787d150fb2561edf3cc61270f46a41386 | [
"MIT"
] | 77 | 2021-09-22T17:00:44.000Z | 2022-03-18T03:47:47.000Z | test/entt/meta/meta_data.cpp | matthew-nagy/entt | 2ef98e1787d150fb2561edf3cc61270f46a41386 | [
"MIT"
] | 63 | 2021-09-26T05:24:55.000Z | 2022-03-27T04:15:52.000Z | test/entt/meta/meta_data.cpp | matthew-nagy/entt | 2ef98e1787d150fb2561edf3cc61270f46a41386 | [
"MIT"
] | 18 | 2021-09-26T17:52:07.000Z | 2022-02-19T12:25:15.000Z | #include <cstdlib>
#include <string>
#include <utility>
#include <gtest/gtest.h>
#include <entt/core/hashed_string.hpp>
#include <entt/core/type_traits.hpp>
#include <entt/meta/factory.hpp>
#include <entt/meta/meta.hpp>
#include <entt/meta/node.hpp>
#include <entt/meta/resolve.hpp>
struct base_t {
virtual ~base_t() = default;
static void destroy(base_t &) {
++counter;
}
inline static int counter = 0;
int value{3};
};
struct derived_t: base_t {
derived_t() {}
};
struct clazz_t {
clazz_t()
: i{0},
j{1},
base{} {}
operator int() const {
return h;
}
int i{0};
const int j{1};
base_t base{};
inline static int h{2};
inline static const int k{3};
};
struct setter_getter_t {
setter_getter_t()
: value{0} {}
int setter(double val) {
return value = static_cast<int>(val);
}
int getter() {
return value;
}
int setter_with_ref(const int &val) {
return value = val;
}
const int &getter_with_ref() {
return value;
}
static int static_setter(setter_getter_t &type, int value) {
return type.value = value;
}
static int static_getter(const setter_getter_t &type) {
return type.value;
}
int value;
};
struct multi_setter_t {
multi_setter_t()
: value{0} {}
void from_double(double val) {
value = val;
}
void from_string(const char *val) {
value = std::atoi(val);
}
int value;
};
struct array_t {
static inline int global[3];
int local[5];
};
enum class property_t {
random,
value
};
struct MetaData: ::testing::Test {
void SetUp() override {
using namespace entt::literals;
entt::meta<double>()
.type("double"_hs);
entt::meta<base_t>()
.type("base"_hs)
.dtor<base_t::destroy>()
.data<&base_t::value>("value"_hs);
entt::meta<derived_t>()
.type("derived"_hs)
.base<base_t>()
.dtor<derived_t::destroy>()
.data<&base_t::value>("value_from_base"_hs);
entt::meta<clazz_t>()
.type("clazz"_hs)
.data<&clazz_t::i, entt::as_ref_t>("i"_hs)
.prop(3, 0)
.data<&clazz_t::i, entt::as_cref_t>("ci"_hs)
.data<&clazz_t::j>("j"_hs)
.prop(true, 1)
.data<&clazz_t::h>("h"_hs)
.prop(property_t::random, 2)
.data<&clazz_t::k>("k"_hs)
.prop(property_t::value, 3)
.data<&clazz_t::base>("base"_hs)
.data<&clazz_t::i, entt::as_void_t>("void"_hs)
.conv<int>();
entt::meta<setter_getter_t>()
.type("setter_getter"_hs)
.data<&setter_getter_t::static_setter, &setter_getter_t::static_getter>("x"_hs)
.data<&setter_getter_t::setter, &setter_getter_t::getter>("y"_hs)
.data<&setter_getter_t::static_setter, &setter_getter_t::getter>("z"_hs)
.data<&setter_getter_t::setter_with_ref, &setter_getter_t::getter_with_ref>("w"_hs)
.data<nullptr, &setter_getter_t::getter>("z_ro"_hs)
.data<nullptr, &setter_getter_t::value>("value"_hs);
entt::meta<multi_setter_t>()
.type("multi_setter"_hs)
.data<entt::value_list<&multi_setter_t::from_double, &multi_setter_t::from_string>, &multi_setter_t::value>("value"_hs);
entt::meta<array_t>()
.type("array"_hs)
.data<&array_t::global>("global"_hs)
.data<&array_t::local>("local"_hs);
base_t::counter = 0;
}
void TearDown() override {
entt::meta_reset();
}
};
using MetaDataDeathTest = MetaData;
TEST_F(MetaData, Functionalities) {
using namespace entt::literals;
auto data = entt::resolve<clazz_t>().data("i"_hs);
clazz_t instance{};
ASSERT_TRUE(data);
ASSERT_EQ(data.arity(), 1u);
ASSERT_EQ(data.type(), entt::resolve<int>());
ASSERT_EQ(data.arg(0u), entt::resolve<int>());
ASSERT_EQ(data.id(), "i"_hs);
ASSERT_FALSE(data.is_const());
ASSERT_FALSE(data.is_static());
ASSERT_EQ(data.get(instance).cast<int>(), 0);
ASSERT_TRUE(data.set(instance, 42));
ASSERT_EQ(data.get(instance).cast<int>(), 42);
for(auto curr: data.prop()) {
ASSERT_EQ(curr.key(), 3);
ASSERT_EQ(curr.value(), 0);
}
ASSERT_FALSE(data.prop(2));
ASSERT_FALSE(data.prop('c'));
auto prop = data.prop(3);
ASSERT_TRUE(prop);
ASSERT_EQ(prop.key(), 3);
ASSERT_EQ(prop.value(), 0);
}
TEST_F(MetaData, Const) {
using namespace entt::literals;
auto data = entt::resolve<clazz_t>().data("j"_hs);
clazz_t instance{};
ASSERT_TRUE(data);
ASSERT_EQ(data.arity(), 1u);
ASSERT_EQ(data.type(), entt::resolve<int>());
ASSERT_EQ(data.arg(0u), entt::resolve<int>());
ASSERT_EQ(data.id(), "j"_hs);
ASSERT_TRUE(data.is_const());
ASSERT_FALSE(data.is_static());
ASSERT_EQ(data.get(instance).cast<int>(), 1);
ASSERT_FALSE(data.set(instance, 42));
ASSERT_EQ(data.get(instance).cast<int>(), 1);
for(auto curr: data.prop()) {
ASSERT_EQ(curr.key(), true);
ASSERT_EQ(curr.value(), 1);
}
ASSERT_FALSE(data.prop(false));
ASSERT_FALSE(data.prop('c'));
auto prop = data.prop(true);
ASSERT_TRUE(prop);
ASSERT_EQ(prop.key(), true);
ASSERT_EQ(prop.value(), 1);
}
TEST_F(MetaData, Static) {
using namespace entt::literals;
auto data = entt::resolve<clazz_t>().data("h"_hs);
ASSERT_TRUE(data);
ASSERT_EQ(data.arity(), 1u);
ASSERT_EQ(data.type(), entt::resolve<int>());
ASSERT_EQ(data.arg(0u), entt::resolve<int>());
ASSERT_EQ(data.id(), "h"_hs);
ASSERT_FALSE(data.is_const());
ASSERT_TRUE(data.is_static());
ASSERT_EQ(data.get({}).cast<int>(), 2);
ASSERT_TRUE(data.set({}, 42));
ASSERT_EQ(data.get({}).cast<int>(), 42);
for(auto curr: data.prop()) {
ASSERT_EQ(curr.key(), property_t::random);
ASSERT_EQ(curr.value(), 2);
}
ASSERT_FALSE(data.prop(property_t::value));
ASSERT_FALSE(data.prop('c'));
auto prop = data.prop(property_t::random);
ASSERT_TRUE(prop);
ASSERT_EQ(prop.key(), property_t::random);
ASSERT_EQ(prop.value(), 2);
}
TEST_F(MetaData, ConstStatic) {
using namespace entt::literals;
auto data = entt::resolve<clazz_t>().data("k"_hs);
ASSERT_TRUE(data);
ASSERT_EQ(data.arity(), 1u);
ASSERT_EQ(data.type(), entt::resolve<int>());
ASSERT_EQ(data.arg(0u), entt::resolve<int>());
ASSERT_EQ(data.id(), "k"_hs);
ASSERT_TRUE(data.is_const());
ASSERT_TRUE(data.is_static());
ASSERT_EQ(data.get({}).cast<int>(), 3);
ASSERT_FALSE(data.set({}, 42));
ASSERT_EQ(data.get({}).cast<int>(), 3);
for(auto curr: data.prop()) {
ASSERT_EQ(curr.key(), property_t::value);
ASSERT_EQ(curr.value(), 3);
}
ASSERT_FALSE(data.prop(property_t::random));
ASSERT_FALSE(data.prop('c'));
auto prop = data.prop(property_t::value);
ASSERT_TRUE(prop);
ASSERT_EQ(prop.key(), property_t::value);
ASSERT_EQ(prop.value(), 3);
}
TEST_F(MetaData, GetMetaAnyArg) {
using namespace entt::literals;
entt::meta_any any{clazz_t{}};
any.cast<clazz_t &>().i = 99;
const auto value = entt::resolve<clazz_t>().data("i"_hs).get(any);
ASSERT_TRUE(value);
ASSERT_TRUE(static_cast<bool>(value.cast<int>()));
ASSERT_EQ(value.cast<int>(), 99);
}
TEST_F(MetaData, GetInvalidArg) {
using namespace entt::literals;
auto instance = 0;
ASSERT_FALSE(entt::resolve<clazz_t>().data("i"_hs).get(instance));
}
TEST_F(MetaData, SetMetaAnyArg) {
using namespace entt::literals;
entt::meta_any any{clazz_t{}};
entt::meta_any value{42};
ASSERT_EQ(any.cast<clazz_t>().i, 0);
ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).set(any, value));
ASSERT_EQ(any.cast<clazz_t>().i, 42);
}
TEST_F(MetaData, SetInvalidArg) {
using namespace entt::literals;
ASSERT_FALSE(entt::resolve<clazz_t>().data("i"_hs).set({}, 'c'));
}
TEST_F(MetaData, SetCast) {
using namespace entt::literals;
clazz_t instance{};
ASSERT_EQ(base_t::counter, 0);
ASSERT_TRUE(entt::resolve<clazz_t>().data("base"_hs).set(instance, derived_t{}));
ASSERT_EQ(base_t::counter, 1);
}
TEST_F(MetaData, SetConvert) {
using namespace entt::literals;
clazz_t instance{};
instance.h = 42;
ASSERT_EQ(instance.i, 0);
ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).set(instance, instance));
ASSERT_EQ(instance.i, 42);
}
TEST_F(MetaData, SetByRef) {
using namespace entt::literals;
entt::meta_any any{clazz_t{}};
int value{42};
ASSERT_EQ(any.cast<clazz_t>().i, 0);
ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).set(any, entt::make_meta<int &>(value)));
ASSERT_EQ(any.cast<clazz_t>().i, 42);
value = 3;
auto wrapper = entt::make_meta<int &>(value);
ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).set(any, wrapper.as_ref()));
ASSERT_EQ(any.cast<clazz_t>().i, 3);
}
TEST_F(MetaData, SetByConstRef) {
using namespace entt::literals;
entt::meta_any any{clazz_t{}};
int value{42};
ASSERT_EQ(any.cast<clazz_t>().i, 0);
ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).set(any, entt::make_meta<const int &>(value)));
ASSERT_EQ(any.cast<clazz_t>().i, 42);
value = 3;
auto wrapper = entt::make_meta<const int &>(value);
ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).set(any, wrapper.as_ref()));
ASSERT_EQ(any.cast<clazz_t>().i, 3);
}
TEST_F(MetaData, SetterGetterAsFreeFunctions) {
using namespace entt::literals;
auto data = entt::resolve<setter_getter_t>().data("x"_hs);
setter_getter_t instance{};
ASSERT_TRUE(data);
ASSERT_EQ(data.arity(), 1u);
ASSERT_EQ(data.type(), entt::resolve<int>());
ASSERT_EQ(data.arg(0u), entt::resolve<int>());
ASSERT_EQ(data.id(), "x"_hs);
ASSERT_FALSE(data.is_const());
ASSERT_FALSE(data.is_static());
ASSERT_EQ(data.get(instance).cast<int>(), 0);
ASSERT_TRUE(data.set(instance, 42));
ASSERT_EQ(data.get(instance).cast<int>(), 42);
}
TEST_F(MetaData, SetterGetterAsMemberFunctions) {
using namespace entt::literals;
auto data = entt::resolve<setter_getter_t>().data("y"_hs);
setter_getter_t instance{};
ASSERT_TRUE(data);
ASSERT_EQ(data.arity(), 1u);
ASSERT_EQ(data.type(), entt::resolve<int>());
ASSERT_EQ(data.arg(0u), entt::resolve<double>());
ASSERT_EQ(data.id(), "y"_hs);
ASSERT_FALSE(data.is_const());
ASSERT_FALSE(data.is_static());
ASSERT_EQ(data.get(instance).cast<int>(), 0);
ASSERT_TRUE(data.set(instance, 42.));
ASSERT_EQ(data.get(instance).cast<int>(), 42);
ASSERT_TRUE(data.set(instance, 3));
ASSERT_EQ(data.get(instance).cast<int>(), 3);
}
TEST_F(MetaData, SetterGetterWithRefAsMemberFunctions) {
using namespace entt::literals;
auto data = entt::resolve<setter_getter_t>().data("w"_hs);
setter_getter_t instance{};
ASSERT_TRUE(data);
ASSERT_EQ(data.arity(), 1u);
ASSERT_EQ(data.type(), entt::resolve<int>());
ASSERT_EQ(data.arg(0u), entt::resolve<int>());
ASSERT_EQ(data.id(), "w"_hs);
ASSERT_FALSE(data.is_const());
ASSERT_FALSE(data.is_static());
ASSERT_EQ(data.get(instance).cast<int>(), 0);
ASSERT_TRUE(data.set(instance, 42));
ASSERT_EQ(data.get(instance).cast<int>(), 42);
}
TEST_F(MetaData, SetterGetterMixed) {
using namespace entt::literals;
auto data = entt::resolve<setter_getter_t>().data("z"_hs);
setter_getter_t instance{};
ASSERT_TRUE(data);
ASSERT_EQ(data.arity(), 1u);
ASSERT_EQ(data.type(), entt::resolve<int>());
ASSERT_EQ(data.arg(0u), entt::resolve<int>());
ASSERT_EQ(data.id(), "z"_hs);
ASSERT_FALSE(data.is_const());
ASSERT_FALSE(data.is_static());
ASSERT_EQ(data.get(instance).cast<int>(), 0);
ASSERT_TRUE(data.set(instance, 42));
ASSERT_EQ(data.get(instance).cast<int>(), 42);
}
TEST_F(MetaData, SetterGetterReadOnly) {
using namespace entt::literals;
auto data = entt::resolve<setter_getter_t>().data("z_ro"_hs);
setter_getter_t instance{};
ASSERT_TRUE(data);
ASSERT_EQ(data.arity(), 0u);
ASSERT_EQ(data.type(), entt::resolve<int>());
ASSERT_EQ(data.arg(0u), entt::meta_type{});
ASSERT_EQ(data.id(), "z_ro"_hs);
ASSERT_TRUE(data.is_const());
ASSERT_FALSE(data.is_static());
ASSERT_EQ(data.get(instance).cast<int>(), 0);
ASSERT_FALSE(data.set(instance, 42));
ASSERT_EQ(data.get(instance).cast<int>(), 0);
}
TEST_F(MetaData, SetterGetterReadOnlyDataMember) {
using namespace entt::literals;
auto data = entt::resolve<setter_getter_t>().data("value"_hs);
setter_getter_t instance{};
ASSERT_TRUE(data);
ASSERT_EQ(data.arity(), 0u);
ASSERT_EQ(data.type(), entt::resolve<int>());
ASSERT_EQ(data.arg(0u), entt::meta_type{});
ASSERT_EQ(data.id(), "value"_hs);
ASSERT_TRUE(data.is_const());
ASSERT_FALSE(data.is_static());
ASSERT_EQ(data.get(instance).cast<int>(), 0);
ASSERT_FALSE(data.set(instance, 42));
ASSERT_EQ(data.get(instance).cast<int>(), 0);
}
TEST_F(MetaData, MultiSetter) {
using namespace entt::literals;
auto data = entt::resolve<multi_setter_t>().data("value"_hs);
multi_setter_t instance{};
ASSERT_TRUE(data);
ASSERT_EQ(data.arity(), 2u);
ASSERT_EQ(data.type(), entt::resolve<int>());
ASSERT_EQ(data.arg(0u), entt::resolve<double>());
ASSERT_EQ(data.arg(1u), entt::resolve<const char *>());
ASSERT_EQ(data.arg(2u), entt::meta_type{});
ASSERT_EQ(data.id(), "value"_hs);
ASSERT_FALSE(data.is_const());
ASSERT_FALSE(data.is_static());
ASSERT_EQ(data.get(instance).cast<int>(), 0);
ASSERT_TRUE(data.set(instance, 42));
ASSERT_EQ(data.get(instance).cast<int>(), 42);
ASSERT_TRUE(data.set(instance, 3.));
ASSERT_EQ(data.get(instance).cast<int>(), 3);
ASSERT_FALSE(data.set(instance, std::string{"99"}));
ASSERT_TRUE(data.set(instance, std::string{"99"}.c_str()));
ASSERT_EQ(data.get(instance).cast<int>(), 99);
}
TEST_F(MetaData, ConstInstance) {
using namespace entt::literals;
clazz_t instance{};
ASSERT_NE(entt::resolve<clazz_t>().data("i"_hs).get(instance).try_cast<int>(), nullptr);
ASSERT_NE(entt::resolve<clazz_t>().data("i"_hs).get(instance).try_cast<const int>(), nullptr);
ASSERT_EQ(entt::resolve<clazz_t>().data("i"_hs).get(std::as_const(instance)).try_cast<int>(), nullptr);
// as_ref_t adapts to the constness of the passed object and returns const references in case
ASSERT_NE(entt::resolve<clazz_t>().data("i"_hs).get(std::as_const(instance)).try_cast<const int>(), nullptr);
ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).get(instance));
ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).set(instance, 3));
ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).get(std::as_const(instance)));
ASSERT_FALSE(entt::resolve<clazz_t>().data("i"_hs).set(std::as_const(instance), 3));
ASSERT_TRUE(entt::resolve<clazz_t>().data("ci"_hs).get(instance));
ASSERT_TRUE(entt::resolve<clazz_t>().data("ci"_hs).set(instance, 3));
ASSERT_TRUE(entt::resolve<clazz_t>().data("ci"_hs).get(std::as_const(instance)));
ASSERT_FALSE(entt::resolve<clazz_t>().data("ci"_hs).set(std::as_const(instance), 3));
ASSERT_TRUE(entt::resolve<clazz_t>().data("j"_hs).get(instance));
ASSERT_FALSE(entt::resolve<clazz_t>().data("j"_hs).set(instance, 3));
ASSERT_TRUE(entt::resolve<clazz_t>().data("j"_hs).get(std::as_const(instance)));
ASSERT_FALSE(entt::resolve<clazz_t>().data("j"_hs).set(std::as_const(instance), 3));
}
TEST_F(MetaData, ArrayStatic) {
using namespace entt::literals;
auto data = entt::resolve<array_t>().data("global"_hs);
ASSERT_TRUE(data);
ASSERT_EQ(data.arity(), 1u);
ASSERT_EQ(data.type(), entt::resolve<int[3]>());
ASSERT_EQ(data.arg(0u), entt::resolve<int[3]>());
ASSERT_EQ(data.id(), "global"_hs);
ASSERT_FALSE(data.is_const());
ASSERT_TRUE(data.is_static());
ASSERT_TRUE(data.type().is_array());
ASSERT_FALSE(data.get({}));
}
TEST_F(MetaData, Array) {
using namespace entt::literals;
auto data = entt::resolve<array_t>().data("local"_hs);
array_t instance{};
ASSERT_TRUE(data);
ASSERT_EQ(data.arity(), 1u);
ASSERT_EQ(data.type(), entt::resolve<int[5]>());
ASSERT_EQ(data.arg(0u), entt::resolve<int[5]>());
ASSERT_EQ(data.id(), "local"_hs);
ASSERT_FALSE(data.is_const());
ASSERT_FALSE(data.is_static());
ASSERT_TRUE(data.type().is_array());
ASSERT_FALSE(data.get(instance));
}
TEST_F(MetaData, AsVoid) {
using namespace entt::literals;
auto data = entt::resolve<clazz_t>().data("void"_hs);
clazz_t instance{};
ASSERT_TRUE(data);
ASSERT_EQ(data.arity(), 1u);
ASSERT_EQ(data.type(), entt::resolve<int>());
ASSERT_EQ(data.arg(0u), entt::resolve<int>());
ASSERT_TRUE(data.set(instance, 42));
ASSERT_EQ(instance.i, 42);
ASSERT_EQ(data.get(instance), entt::meta_any{std::in_place_type<void>});
}
TEST_F(MetaData, AsRef) {
using namespace entt::literals;
clazz_t instance{};
auto data = entt::resolve<clazz_t>().data("i"_hs);
ASSERT_TRUE(data);
ASSERT_EQ(data.arity(), 1u);
ASSERT_EQ(data.type(), entt::resolve<int>());
ASSERT_EQ(data.arg(0u), entt::resolve<int>());
ASSERT_EQ(instance.i, 0);
data.get(instance).cast<int &>() = 3;
ASSERT_EQ(instance.i, 3);
}
TEST_F(MetaData, AsConstRef) {
using namespace entt::literals;
clazz_t instance{};
auto data = entt::resolve<clazz_t>().data("ci"_hs);
ASSERT_EQ(instance.i, 0);
ASSERT_EQ(data.arity(), 1u);
ASSERT_EQ(data.type(), entt::resolve<int>());
ASSERT_EQ(data.arg(0u), entt::resolve<int>());
ASSERT_EQ(data.get(instance).cast<const int &>(), 0);
ASSERT_EQ(data.get(instance).cast<int>(), 0);
ASSERT_EQ(instance.i, 0);
}
TEST_F(MetaDataDeathTest, AsConstRef) {
using namespace entt::literals;
clazz_t instance{};
auto data = entt::resolve<clazz_t>().data("ci"_hs);
ASSERT_DEATH(data.get(instance).cast<int &>() = 3, "");
}
TEST_F(MetaData, SetGetBaseData) {
using namespace entt::literals;
auto type = entt::resolve<derived_t>();
derived_t instance{};
ASSERT_TRUE(type.data("value"_hs));
ASSERT_EQ(instance.value, 3);
ASSERT_TRUE(type.data("value"_hs).set(instance, 42));
ASSERT_EQ(type.data("value"_hs).get(instance).cast<int>(), 42);
ASSERT_EQ(instance.value, 42);
}
TEST_F(MetaData, SetGetFromBase) {
using namespace entt::literals;
auto type = entt::resolve<derived_t>();
derived_t instance{};
ASSERT_TRUE(type.data("value_from_base"_hs));
ASSERT_EQ(instance.value, 3);
ASSERT_TRUE(type.data("value_from_base"_hs).set(instance, 42));
ASSERT_EQ(type.data("value_from_base"_hs).get(instance).cast<int>(), 42);
ASSERT_EQ(instance.value, 42);
}
TEST_F(MetaData, ReRegistration) {
using namespace entt::literals;
SetUp();
auto *node = entt::internal::meta_node<base_t>::resolve();
auto type = entt::resolve<base_t>();
ASSERT_NE(node->data, nullptr);
ASSERT_EQ(node->data->next, nullptr);
ASSERT_TRUE(type.data("value"_hs));
entt::meta<base_t>().data<&base_t::value>("field"_hs);
ASSERT_NE(node->data, nullptr);
ASSERT_EQ(node->data->next, nullptr);
ASSERT_FALSE(type.data("value"_hs));
ASSERT_TRUE(type.data("field"_hs));
}
TEST_F(MetaData, NameCollision) {
using namespace entt::literals;
ASSERT_NO_FATAL_FAILURE(entt::meta<clazz_t>().data<&clazz_t::j>("j"_hs));
ASSERT_TRUE(entt::resolve<clazz_t>().data("j"_hs));
ASSERT_NO_FATAL_FAILURE(entt::meta<clazz_t>().data<&clazz_t::j>("cj"_hs));
ASSERT_FALSE(entt::resolve<clazz_t>().data("j"_hs));
ASSERT_TRUE(entt::resolve<clazz_t>().data("cj"_hs));
}
TEST_F(MetaDataDeathTest, NameCollision) {
using namespace entt::literals;
ASSERT_DEATH(entt::meta<clazz_t>().data<&clazz_t::j>("i"_hs), "");
}
| 29.200577 | 132 | 0.636687 | matthew-nagy |
8b0f1c8242522b4f5d8cb091c79f98b379c39b34 | 7,208 | cc | C++ | third_party/nucleus/io/reference_test.cc | fo40225/deepvariant | c2167e7c90f016905f309f118eb3897935ee7c5f | [
"BSD-3-Clause"
] | 1 | 2019-05-20T11:55:45.000Z | 2019-05-20T11:55:45.000Z | third_party/nucleus/io/reference_test.cc | fo40225/deepvariant | c2167e7c90f016905f309f118eb3897935ee7c5f | [
"BSD-3-Clause"
] | null | null | null | third_party/nucleus/io/reference_test.cc | fo40225/deepvariant | c2167e7c90f016905f309f118eb3897935ee7c5f | [
"BSD-3-Clause"
] | 1 | 2017-12-06T17:30:18.000Z | 2017-12-06T17:30:18.000Z | /*
* Copyright 2018 Google LLC.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "third_party/nucleus/io/reference.h"
#include <vector>
#include <gmock/gmock-generated-matchers.h>
#include <gmock/gmock-matchers.h>
#include <gmock/gmock-more-matchers.h>
#include "tensorflow/core/platform/test.h"
#include "third_party/nucleus/io/reference_test.h"
#include "third_party/nucleus/util/utils.h"
#include "third_party/nucleus/vendor/status_matchers.h"
#include "tensorflow/core/platform/logging.h"
namespace nucleus {
using ::testing::IsEmpty;
using ::testing::Not;
using ::testing::UnorderedElementsAre;
TEST_P(GenomeReferenceTest, TestBasic) {
EXPECT_THAT(Ref().ContigNames(),
UnorderedElementsAre("chrM", "chr1", "chr2"));
EXPECT_THAT(Ref().Contigs().size(), 3);
const auto& chrm = *Ref().Contig("chrM").ValueOrDie();
EXPECT_EQ(100, chrm.n_bases());
EXPECT_EQ("chrM", chrm.name());
EXPECT_EQ(0, chrm.pos_in_fasta());
const auto& chr1 = *Ref().Contig("chr1").ValueOrDie();
EXPECT_EQ(76, chr1.n_bases());
EXPECT_EQ("chr1", chr1.name());
EXPECT_EQ(1, chr1.pos_in_fasta());
const auto& chr2 = *Ref().Contig("chr2").ValueOrDie();
EXPECT_EQ(121, chr2.n_bases());
EXPECT_EQ("chr2", chr2.name());
EXPECT_EQ(2, chr2.pos_in_fasta());
}
TEST_P(GenomeReferenceTest, TestIsValidInterval) {
// Checks that we can check that an unknown chromosome isn't valid.
EXPECT_FALSE(Ref().IsValidInterval(MakeRange("unknown_chr", 0, 1)));
for (const auto& chr : Ref().ContigNames()) {
const auto n_bases = Ref().Contig(chr).ValueOrDie()->n_bases();
EXPECT_TRUE(Ref().IsValidInterval(MakeRange(chr, 0, n_bases)));
for (int i = 0; i < n_bases; ++i) {
EXPECT_TRUE(Ref().IsValidInterval(MakeRange(chr, 0, i+1)));
EXPECT_TRUE(Ref().IsValidInterval(MakeRange(chr, i, i+1)));
}
EXPECT_FALSE(Ref().IsValidInterval(MakeRange(chr, -10, 0)));
EXPECT_FALSE(Ref().IsValidInterval(MakeRange(chr, -1, 0)));
EXPECT_FALSE(Ref().IsValidInterval(MakeRange(chr, 10, 9)));
EXPECT_FALSE(Ref().IsValidInterval(MakeRange(chr, 0, n_bases + 1)));
EXPECT_FALSE(Ref().IsValidInterval(MakeRange(chr, 0, n_bases + 100)));
EXPECT_FALSE(Ref().IsValidInterval(MakeRange(chr, n_bases, n_bases)));
EXPECT_FALSE(
Ref().IsValidInterval(MakeRange(chr, n_bases + 100, n_bases + 100)));
}
}
TEST_P(GenomeReferenceTest, NotOKIfContigCalledWithBadName) {
EXPECT_THAT(Ref().Contig("missing"),
IsNotOKWithMessage("Unknown contig missing"));
}
TEST_P(GenomeReferenceTest, NotOKIfIntervalIsInvalid) {
// Asking for bad chromosome values produces death.
StatusOr<string> result = Ref().GetBases(MakeRange("missing", 0, 1));
EXPECT_THAT(result, IsNotOKWithCodeAndMessage(
tensorflow::error::INVALID_ARGUMENT,
"Invalid interval"));
// Starting before 0 is detected.
EXPECT_THAT(Ref().GetBases(MakeRange("chrM", -1, 1)),
IsNotOKWithMessage("Invalid interval"));
// chr1 exists, but this range's start is beyond the chr.
EXPECT_THAT(Ref().GetBases(MakeRange("chr1", 1000, 1010)),
IsNotOKWithMessage("Invalid interval"));
// chr1 exists, but this range's end is beyond the chr.
EXPECT_THAT(Ref().GetBases(MakeRange("chr1", 0, 1010)),
IsNotOKWithMessage("Invalid interval"));
}
TEST_P(GenomeReferenceTest, TestHasContig) {
EXPECT_TRUE(Ref().HasContig("chrM"));
EXPECT_TRUE(Ref().HasContig("chr1"));
EXPECT_TRUE(Ref().HasContig("chr2"));
EXPECT_FALSE(Ref().HasContig("chr3"));
EXPECT_FALSE(Ref().HasContig("chr"));
EXPECT_FALSE(Ref().HasContig(""));
}
// Checks that GetBases work in all its forms for the given arguments.
void CheckGetBases(const GenomeReference& ref,
const string& chrom, const int64 start, const int64 end,
const string& expected_bases) {
StatusOr<string> query = ref.GetBases(MakeRange(chrom, start, end));
ASSERT_THAT(query, IsOK());
EXPECT_THAT(query.ValueOrDie(), expected_bases);
}
TEST_P(GenomeReferenceTest, TestReferenceBases) {
CheckGetBases(Ref(), "chrM", 0, 100,
"GATCACAGGTCTATCACCCTATTAACCACTCACGGGAGCTCTCCATGCATTTGGTATTTTC"
"GTCTGGGGGGTGTGCACGCGATAGCATTGCGAGACGCTG");
CheckGetBases(Ref(), "chr1", 0, 76,
"ACCACCATCCTCCGTGAAATCAATATCCCGCACAAGAGTGCTACTCTCCTAAATCCCTTCT"
"CGTCCCCATGGATGA");
CheckGetBases(Ref(), "chr2", 0, 121,
"CGCTNCGGGCCCATAACACTTGGGGGTAGCTAAAGTGAACTGTATCCGAC"
"ATCTGGTTCCTACTTCAGGGCCATAAAGCCTAAATAGCCCACACGTTCCC"
"CTTAAATAAGACATCACGATG");
}
TEST_P(GenomeReferenceTest, TestGetBasesParts) {
CheckGetBases(Ref(), "chrM", 0, 10, "GATCACAGGT");
CheckGetBases(Ref(), "chrM", 0, 9, "GATCACAGG");
CheckGetBases(Ref(), "chrM", 1, 9, "ATCACAGG");
CheckGetBases(Ref(), "chrM", 3, 7, "CACA");
CheckGetBases(Ref(), "chrM", 90, 100, "CGAGACGCTG");
CheckGetBases(Ref(), "chrM", 90, 99, "CGAGACGCT");
CheckGetBases(Ref(), "chrM", 91, 100, "GAGACGCTG");
CheckGetBases(Ref(), "chrM", 92, 100, "AGACGCTG");
CheckGetBases(Ref(), "chrM", 92, 99, "AGACGCT");
CheckGetBases(Ref(), "chrM", 92, 98, "AGACGC");
CheckGetBases(Ref(), "chrM", 0, 1, "G");
CheckGetBases(Ref(), "chrM", 1, 2, "A");
CheckGetBases(Ref(), "chrM", 2, 3, "T");
CheckGetBases(Ref(), "chrM", 3, 4, "C");
CheckGetBases(Ref(), "chrM", 4, 5, "A");
CheckGetBases(Ref(), "chrM", 5, 6, "C");
// crosses the boundary of the index when max_bin_size is 5
CheckGetBases(Ref(), "chrM", 4, 6, "AC");
// 0-bp interval requests should return the empty string.
CheckGetBases(Ref(), "chrM", 0, 0, "");
CheckGetBases(Ref(), "chrM", 10, 10, "");
}
} // namespace nucleus
| 38.752688 | 79 | 0.694784 | fo40225 |
8b118d5923b101a336a65df34cb52e1d0087371b | 36,090 | hpp | C++ | ReactNativeFrontend/ios/Pods/boost/boost/phoenix/support/preprocessed/vector_20.hpp | Harshitha91/Tmdb-react-native-node | e06e3f25a7ee6946ef07a1f524fdf62e48424293 | [
"Apache-2.0"
] | 12,278 | 2015-01-29T17:11:33.000Z | 2022-03-31T21:12:00.000Z | ios/Pods/boost-for-react-native/boost/phoenix/support/preprocessed/vector_20.hpp | c7yrus/alyson-v3 | 5ad95a8f782f5f5d2fd543d44ca6a8b093395965 | [
"Apache-2.0"
] | 9,469 | 2015-01-30T05:33:07.000Z | 2022-03-31T16:17:21.000Z | ios/Pods/boost-for-react-native/boost/phoenix/support/preprocessed/vector_20.hpp | c7yrus/alyson-v3 | 5ad95a8f782f5f5d2fd543d44ca6a8b093395965 | [
"Apache-2.0"
] | 892 | 2015-01-29T16:26:19.000Z | 2022-03-20T07:44:30.000Z | /*==============================================================================
Copyright (c) 2005-2010 Joel de Guzman
Copyright (c) 2010 Thomas Heller
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
==============================================================================*/
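// This file is the preprocessed (macro-expanded) form of the Phoenix
// argument-vector support header. Each vectorN below is a plain aggregate
// holding N members a0..a(N-1) together with compile-time size information
// (size_type / size_value). Every vectorN with N >= 1 also provides args(),
// which returns a vector(N-1) copy containing a1..a(N-1), i.e. the original
// sequence with its first element dropped.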
namespace boost { namespace phoenix
{
template <typename Dummy = void>
struct vector0
{
typedef mpl::int_<0> size_type;
static const int size_value = 0;
};
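    // vector_chooser is a metafunction: vector_chooser<N>::apply<A0, ...>::type
    // selects the vectorN template matching the requested arity N.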
template <int> struct vector_chooser;
template <>
struct vector_chooser<0>
{
template <typename Dummy = void>
struct apply
{
typedef vector0<> type;
};
};
}}
namespace boost { namespace phoenix
{
template <typename A0>
struct vector1
{
typedef A0 member_type0; A0 a0;
typedef mpl::int_<1> size_type;
static const int size_value = 1;
typedef
vector0<>
args_type;
args_type args() const
{
args_type r = {};
return r;
}
};
template <>
struct vector_chooser<1>
{
template <typename A0>
struct apply
{
typedef vector1<A0> type;
};
};
}}
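// Each vectorN is additionally adapted as a Boost.Fusion sequence via
// BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL, so the stored members can be
// traversed generically with Fusion algorithms.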
BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL(
(A0)
, ( boost::phoenix::vector1 ) (A0)
, (A0, a0)
)
namespace boost { namespace phoenix
{
template <typename A0 , typename A1>
struct vector2
{
typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1;
typedef mpl::int_<2> size_type;
static const int size_value = 2;
typedef
vector1<A1>
args_type;
args_type args() const
{
args_type r = {a1};
return r;
}
};
template <>
struct vector_chooser<2>
{
template <typename A0 , typename A1>
struct apply
{
typedef vector2<A0 , A1> type;
};
};
}}
BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL(
(A0) (A1)
, ( boost::phoenix::vector2 ) (A0) (A1)
, (A0, a0) (A1, a1)
)
namespace boost { namespace phoenix
{
template <typename A0 , typename A1 , typename A2>
struct vector3
{
typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2;
typedef mpl::int_<3> size_type;
static const int size_value = 3;
typedef
vector2<A1 , A2>
args_type;
args_type args() const
{
args_type r = {a1 , a2};
return r;
}
};
template <>
struct vector_chooser<3>
{
template <typename A0 , typename A1 , typename A2>
struct apply
{
typedef vector3<A0 , A1 , A2> type;
};
};
}}
BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL(
(A0) (A1) (A2)
, ( boost::phoenix::vector3 ) (A0) (A1) (A2)
, (A0, a0) (A1, a1) (A2, a2)
)
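// Minimal usage sketch (illustrative only; not part of the original header).
// It shows aggregate initialization of a vector3 and the tail produced by
// args(); the variable names are hypothetical:
//
//   boost::phoenix::vector3<int, char, double> v = {1, 'a', 2.5};
//   // v.size_value == 3
//   boost::phoenix::vector2<char, double> tail = v.args();
//   // tail.a0 == 'a', tail.a1 == 2.5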
namespace boost { namespace phoenix
{
template <typename A0 , typename A1 , typename A2 , typename A3>
struct vector4
{
typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3;
typedef mpl::int_<4> size_type;
static const int size_value = 4;
typedef
vector3<A1 , A2 , A3>
args_type;
args_type args() const
{
args_type r = {a1 , a2 , a3};
return r;
}
};
template <>
struct vector_chooser<4>
{
template <typename A0 , typename A1 , typename A2 , typename A3>
struct apply
{
typedef vector4<A0 , A1 , A2 , A3> type;
};
};
}}
BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL(
(A0) (A1) (A2) (A3)
, ( boost::phoenix::vector4 ) (A0) (A1) (A2) (A3)
, (A0, a0) (A1, a1) (A2, a2) (A3, a3)
)
namespace boost { namespace phoenix
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4>
struct vector5
{
typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4;
typedef mpl::int_<5> size_type;
static const int size_value = 5;
typedef
vector4<A1 , A2 , A3 , A4>
args_type;
args_type args() const
{
args_type r = {a1 , a2 , a3 , a4};
return r;
}
};
template <>
struct vector_chooser<5>
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4>
struct apply
{
typedef vector5<A0 , A1 , A2 , A3 , A4> type;
};
};
}}
BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL(
(A0) (A1) (A2) (A3) (A4)
, ( boost::phoenix::vector5 ) (A0) (A1) (A2) (A3) (A4)
, (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4)
)
namespace boost { namespace phoenix
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5>
struct vector6
{
typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5;
typedef mpl::int_<6> size_type;
static const int size_value = 6;
typedef
vector5<A1 , A2 , A3 , A4 , A5>
args_type;
args_type args() const
{
args_type r = {a1 , a2 , a3 , a4 , a5};
return r;
}
};
template <>
struct vector_chooser<6>
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5>
struct apply
{
typedef vector6<A0 , A1 , A2 , A3 , A4 , A5> type;
};
};
}}
BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL(
(A0) (A1) (A2) (A3) (A4) (A5)
, ( boost::phoenix::vector6 ) (A0) (A1) (A2) (A3) (A4) (A5)
, (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5)
)
namespace boost { namespace phoenix
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6>
struct vector7
{
typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6;
typedef mpl::int_<7> size_type;
static const int size_value = 7;
typedef
vector6<A1 , A2 , A3 , A4 , A5 , A6>
args_type;
args_type args() const
{
args_type r = {a1 , a2 , a3 , a4 , a5 , a6};
return r;
}
};
template <>
struct vector_chooser<7>
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6>
struct apply
{
typedef vector7<A0 , A1 , A2 , A3 , A4 , A5 , A6> type;
};
};
}}
BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL(
(A0) (A1) (A2) (A3) (A4) (A5) (A6)
, ( boost::phoenix::vector7 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6)
, (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6)
)
namespace boost { namespace phoenix
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7>
struct vector8
{
typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7;
typedef mpl::int_<8> size_type;
static const int size_value = 8;
typedef
vector7<A1 , A2 , A3 , A4 , A5 , A6 , A7>
args_type;
args_type args() const
{
args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7};
return r;
}
};
template <>
struct vector_chooser<8>
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7>
struct apply
{
typedef vector8<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7> type;
};
};
}}
BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL(
(A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7)
, ( boost::phoenix::vector8 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7)
, (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7)
)
namespace boost { namespace phoenix
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8>
struct vector9
{
typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8;
typedef mpl::int_<9> size_type;
static const int size_value = 9;
typedef
vector8<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8>
args_type;
args_type args() const
{
args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8};
return r;
}
};
template <>
struct vector_chooser<9>
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8>
struct apply
{
typedef vector9<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8> type;
};
};
}}
BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL(
(A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8)
, ( boost::phoenix::vector9 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8)
, (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8)
)
namespace boost { namespace phoenix
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9>
struct vector10
{
typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9;
typedef mpl::int_<10> size_type;
static const int size_value = 10;
typedef
vector9<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9>
args_type;
args_type args() const
{
args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9};
return r;
}
};
template <>
struct vector_chooser<10>
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9>
struct apply
{
typedef vector10<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9> type;
};
};
}}
BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL(
(A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9)
, ( boost::phoenix::vector10 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9)
, (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9)
)
namespace boost { namespace phoenix
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10>
struct vector11
{
typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10;
typedef mpl::int_<11> size_type;
static const int size_value = 11;
typedef
vector10<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10>
args_type;
args_type args() const
{
args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10};
return r;
}
};
template <>
struct vector_chooser<11>
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10>
struct apply
{
typedef vector11<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10> type;
};
};
}}
BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL(
(A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10)
, ( boost::phoenix::vector11 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10)
, (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10)
)
namespace boost { namespace phoenix
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11>
struct vector12
{
typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11;
typedef mpl::int_<12> size_type;
static const int size_value = 12;
typedef
vector11<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11>
args_type;
args_type args() const
{
args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11};
return r;
}
};
template <>
struct vector_chooser<12>
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11>
struct apply
{
typedef vector12<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11> type;
};
};
}}
BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL(
(A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11)
, ( boost::phoenix::vector12 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11)
, (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11)
)
namespace boost { namespace phoenix
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12>
struct vector13
{
typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12;
typedef mpl::int_<13> size_type;
static const int size_value = 13;
typedef
vector12<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12>
args_type;
args_type args() const
{
args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12};
return r;
}
};
template <>
struct vector_chooser<13>
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12>
struct apply
{
typedef vector13<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12> type;
};
};
}}
BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL(
(A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12)
, ( boost::phoenix::vector13 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12)
, (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12)
)
namespace boost { namespace phoenix
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13>
struct vector14
{
typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13;
typedef mpl::int_<14> size_type;
static const int size_value = 14;
typedef
vector13<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13>
args_type;
args_type args() const
{
args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13};
return r;
}
};
template <>
struct vector_chooser<14>
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13>
struct apply
{
typedef vector14<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13> type;
};
};
}}
BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL(
(A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13)
, ( boost::phoenix::vector14 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13)
, (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13)
)
namespace boost { namespace phoenix
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14>
struct vector15
{
typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13; typedef A14 member_type14; A14 a14;
typedef mpl::int_<15> size_type;
static const int size_value = 15;
typedef
vector14<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14>
args_type;
args_type args() const
{
args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13 , a14};
return r;
}
};
template <>
struct vector_chooser<15>
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14>
struct apply
{
typedef vector15<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14> type;
};
};
}}
BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL(
(A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14)
, ( boost::phoenix::vector15 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14)
, (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13) (A14, a14)
)
namespace boost { namespace phoenix
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15>
struct vector16
{
typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13; typedef A14 member_type14; A14 a14; typedef A15 member_type15; A15 a15;
typedef mpl::int_<16> size_type;
static const int size_value = 16;
typedef
vector15<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15>
args_type;
args_type args() const
{
args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13 , a14 , a15};
return r;
}
};
template <>
struct vector_chooser<16>
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15>
struct apply
{
typedef vector16<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15> type;
};
};
}}
BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL(
(A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15)
, ( boost::phoenix::vector16 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15)
, (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13) (A14, a14) (A15, a15)
)
namespace boost { namespace phoenix
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16>
struct vector17
{
typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13; typedef A14 member_type14; A14 a14; typedef A15 member_type15; A15 a15; typedef A16 member_type16; A16 a16;
typedef mpl::int_<17> size_type;
static const int size_value = 17;
typedef
vector16<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16>
args_type;
args_type args() const
{
args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13 , a14 , a15 , a16};
return r;
}
};
template <>
struct vector_chooser<17>
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16>
struct apply
{
typedef vector17<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16> type;
};
};
}}
BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL(
(A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16)
, ( boost::phoenix::vector17 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16)
, (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13) (A14, a14) (A15, a15) (A16, a16)
)
namespace boost { namespace phoenix
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17>
struct vector18
{
typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13; typedef A14 member_type14; A14 a14; typedef A15 member_type15; A15 a15; typedef A16 member_type16; A16 a16; typedef A17 member_type17; A17 a17;
typedef mpl::int_<18> size_type;
static const int size_value = 18;
typedef
vector17<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17>
args_type;
args_type args() const
{
args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13 , a14 , a15 , a16 , a17};
return r;
}
};
template <>
struct vector_chooser<18>
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17>
struct apply
{
typedef vector18<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17> type;
};
};
}}
BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL(
(A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17)
, ( boost::phoenix::vector18 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17)
, (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13) (A14, a14) (A15, a15) (A16, a16) (A17, a17)
)
namespace boost { namespace phoenix
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17 , typename A18>
struct vector19
{
typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13; typedef A14 member_type14; A14 a14; typedef A15 member_type15; A15 a15; typedef A16 member_type16; A16 a16; typedef A17 member_type17; A17 a17; typedef A18 member_type18; A18 a18;
typedef mpl::int_<19> size_type;
static const int size_value = 19;
typedef
vector18<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17 , A18>
args_type;
args_type args() const
{
args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13 , a14 , a15 , a16 , a17 , a18};
return r;
}
};
template <>
struct vector_chooser<19>
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17 , typename A18>
struct apply
{
typedef vector19<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17 , A18> type;
};
};
}}
BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL(
(A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17) (A18)
, ( boost::phoenix::vector19 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17) (A18)
, (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13) (A14, a14) (A15, a15) (A16, a16) (A17, a17) (A18, a18)
)
namespace boost { namespace phoenix
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17 , typename A18 , typename A19>
struct vector20
{
typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13; typedef A14 member_type14; A14 a14; typedef A15 member_type15; A15 a15; typedef A16 member_type16; A16 a16; typedef A17 member_type17; A17 a17; typedef A18 member_type18; A18 a18; typedef A19 member_type19; A19 a19;
typedef mpl::int_<20> size_type;
static const int size_value = 20;
typedef
vector19<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17 , A18 , A19>
args_type;
args_type args() const
{
args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13 , a14 , a15 , a16 , a17 , a18 , a19};
return r;
}
};
template <>
struct vector_chooser<20>
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17 , typename A18 , typename A19>
struct apply
{
typedef vector20<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17 , A18 , A19> type;
};
};
}}
BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL(
(A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17) (A18) (A19)
, ( boost::phoenix::vector20 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17) (A18) (A19)
, (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13) (A14, a14) (A15, a15) (A16, a16) (A17, a17) (A18, a18) (A19, a19)
)
namespace boost { namespace phoenix
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17 , typename A18 , typename A19 , typename A20>
struct vector21
{
typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13; typedef A14 member_type14; A14 a14; typedef A15 member_type15; A15 a15; typedef A16 member_type16; A16 a16; typedef A17 member_type17; A17 a17; typedef A18 member_type18; A18 a18; typedef A19 member_type19; A19 a19; typedef A20 member_type20; A20 a20;
typedef mpl::int_<21> size_type;
static const int size_value = 21;
typedef
vector20<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17 , A18 , A19 , A20>
args_type;
args_type args() const
{
args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13 , a14 , a15 , a16 , a17 , a18 , a19 , a20};
return r;
}
};
template <>
struct vector_chooser<21>
{
template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17 , typename A18 , typename A19 , typename A20>
struct apply
{
typedef vector21<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17 , A18 , A19 , A20> type;
};
};
}}
BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL(
(A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17) (A18) (A19) (A20)
, ( boost::phoenix::vector21 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17) (A18) (A19) (A20)
, (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13) (A14, a14) (A15, a15) (A16, a16) (A17, a17) (A18, a18) (A19, a19) (A20, a20)
)
| 41.626298 | 723 | 0.577279 | Harshitha91 |
8b16464ad45baf650cbef0b4316b226b640b5558 | 741,483 | cpp | C++ | makepad/ChakraCore/lib/Backend/GlobOpt.cpp | makepaddev/makepad | 25d2f18c8a7c190fd1b199762817b6514118e045 | [
"MIT"
] | null | null | null | makepad/ChakraCore/lib/Backend/GlobOpt.cpp | makepaddev/makepad | 25d2f18c8a7c190fd1b199762817b6514118e045 | [
"MIT"
] | null | null | null | makepad/ChakraCore/lib/Backend/GlobOpt.cpp | makepaddev/makepad | 25d2f18c8a7c190fd1b199762817b6514118e045 | [
"MIT"
] | null | null | null | //-------------------------------------------------------------------------------------------------------
// Copyright (C) Microsoft Corporation and contributors. All rights reserved.
// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
//-------------------------------------------------------------------------------------------------------
#include "Backend.h"
#if ENABLE_DEBUG_CONFIG_OPTIONS
#define TESTTRACE_PHASE_INSTR(phase, instr, ...) \
if(PHASE_TESTTRACE(phase, this->func)) \
{ \
char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; \
Output::Print( \
_u("Testtrace: %s function %s (%s): "), \
Js::PhaseNames[phase], \
instr->m_func->GetJITFunctionBody()->GetDisplayName(), \
instr->m_func->GetDebugNumberSet(debugStringBuffer)); \
Output::Print(__VA_ARGS__); \
Output::Flush(); \
}
#else // ENABLE_DEBUG_CONFIG_OPTIONS
#define TESTTRACE_PHASE_INSTR(phase, instr, ...)
#endif // ENABLE_DEBUG_CONFIG_OPTIONS
#if DBG_DUMP
#define DO_MEMOP_TRACE() (PHASE_TRACE(Js::MemOpPhase, this->func) ||\
PHASE_TRACE(Js::MemSetPhase, this->func) ||\
PHASE_TRACE(Js::MemCopyPhase, this->func))
#define DO_MEMOP_TRACE_PHASE(phase) (PHASE_TRACE(Js::MemOpPhase, this->func) || PHASE_TRACE(Js::phase ## Phase, this->func))
#define OUTPUT_MEMOP_TRACE(loop, instr, ...) {\
char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];\
Output::Print(15, _u("Function: %s%s, Loop: %u: "), this->func->GetJITFunctionBody()->GetDisplayName(), this->func->GetDebugNumberSet(debugStringBuffer), loop->GetLoopNumber());\
Output::Print(__VA_ARGS__);\
IR::Instr* __instr__ = instr;\
if(__instr__) __instr__->DumpByteCodeOffset();\
if(__instr__) Output::Print(_u(" (%s)"), Js::OpCodeUtil::GetOpCodeName(__instr__->m_opcode));\
Output::Print(_u("\n"));\
Output::Flush(); \
}
#define TRACE_MEMOP(loop, instr, ...) \
if (DO_MEMOP_TRACE()) {\
Output::Print(_u("TRACE MemOp:"));\
OUTPUT_MEMOP_TRACE(loop, instr, __VA_ARGS__)\
}
#define TRACE_MEMOP_VERBOSE(loop, instr, ...) if(CONFIG_FLAG(Verbose)) {TRACE_MEMOP(loop, instr, __VA_ARGS__)}
#define TRACE_MEMOP_PHASE(phase, loop, instr, ...) \
if (DO_MEMOP_TRACE_PHASE(phase))\
{\
Output::Print(_u("TRACE ") _u(#phase) _u(":"));\
OUTPUT_MEMOP_TRACE(loop, instr, __VA_ARGS__)\
}
#define TRACE_MEMOP_PHASE_VERBOSE(phase, loop, instr, ...) if(CONFIG_FLAG(Verbose)) {TRACE_MEMOP_PHASE(phase, loop, instr, __VA_ARGS__)}
#else
#define DO_MEMOP_TRACE()
#define DO_MEMOP_TRACE_PHASE(phase)
#define OUTPUT_MEMOP_TRACE(loop, instr, ...)
#define TRACE_MEMOP(loop, instr, ...)
#define TRACE_MEMOP_VERBOSE(loop, instr, ...)
#define TRACE_MEMOP_PHASE(phase, loop, instr, ...)
#define TRACE_MEMOP_PHASE_VERBOSE(phase, loop, instr, ...)
#endif
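// RAII helper: on destruction, restores *valueRef to the original value, but only if
// it still holds the temporary value that was current at construction time.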
class AutoRestoreVal
{
private:
Value *const originalValue;
Value *const tempValue;
Value * *const valueRef;
public:
AutoRestoreVal(Value *const originalValue, Value * *const tempValueRef)
: originalValue(originalValue), tempValue(*tempValueRef), valueRef(tempValueRef)
{
}
~AutoRestoreVal()
{
if(*valueRef == tempValue)
{
*valueRef = originalValue;
}
}
PREVENT_COPY(AutoRestoreVal);
};
GlobOpt::GlobOpt(Func * func)
: func(func),
intConstantToStackSymMap(nullptr),
intConstantToValueMap(nullptr),
currentValue(FirstNewValueNumber),
prePassLoop(nullptr),
alloc(nullptr),
isCallHelper(false),
inInlinedBuiltIn(false),
rootLoopPrePass(nullptr),
noImplicitCallUsesToInsert(nullptr),
valuesCreatedForClone(nullptr),
valuesCreatedForMerge(nullptr),
instrCountSinceLastCleanUp(0),
isRecursiveCallOnLandingPad(false),
updateInductionVariableValueNumber(false),
isPerformingLoopBackEdgeCompensation(false),
currentRegion(nullptr),
changedSymsAfterIncBailoutCandidate(nullptr),
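    // The do* flags below cache this function's optimization decisions up front,
    // combining static phase switches with profile-based dynamic disables.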
doTypeSpec(
!IsTypeSpecPhaseOff(func)),
doAggressiveIntTypeSpec(
doTypeSpec &&
DoAggressiveIntTypeSpec(func)),
doAggressiveMulIntTypeSpec(
doTypeSpec &&
!PHASE_OFF(Js::AggressiveMulIntTypeSpecPhase, func) &&
(!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsAggressiveMulIntTypeSpecDisabled(func->IsLoopBody()))),
doDivIntTypeSpec(
doAggressiveIntTypeSpec &&
(!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsDivIntTypeSpecDisabled(func->IsLoopBody()))),
doLossyIntTypeSpec(
doTypeSpec &&
DoLossyIntTypeSpec(func)),
doFloatTypeSpec(
doTypeSpec &&
DoFloatTypeSpec(func)),
doArrayCheckHoist(
DoArrayCheckHoist(func)),
doArrayMissingValueCheckHoist(
doArrayCheckHoist &&
DoArrayMissingValueCheckHoist(func)),
doArraySegmentHoist(
doArrayCheckHoist &&
DoArraySegmentHoist(ValueType::GetObject(ObjectType::Int32Array), func)),
doJsArraySegmentHoist(
doArraySegmentHoist &&
DoArraySegmentHoist(ValueType::GetObject(ObjectType::Array), func)),
doArrayLengthHoist(
doArrayCheckHoist &&
DoArrayLengthHoist(func)),
doEliminateArrayAccessHelperCall(
doArrayCheckHoist &&
!PHASE_OFF(Js::EliminateArrayAccessHelperCallPhase, func)),
doTrackRelativeIntBounds(
doAggressiveIntTypeSpec &&
DoPathDependentValues() &&
!PHASE_OFF(Js::Phase::TrackRelativeIntBoundsPhase, func)),
doBoundCheckElimination(
doTrackRelativeIntBounds &&
!PHASE_OFF(Js::Phase::BoundCheckEliminationPhase, func)),
doBoundCheckHoist(
doEliminateArrayAccessHelperCall &&
doBoundCheckElimination &&
DoConstFold() &&
!PHASE_OFF(Js::Phase::BoundCheckHoistPhase, func) &&
(!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsBoundCheckHoistDisabled(func->IsLoopBody()))),
doLoopCountBasedBoundCheckHoist(
doBoundCheckHoist &&
!PHASE_OFF(Js::Phase::LoopCountBasedBoundCheckHoistPhase, func) &&
(!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsLoopCountBasedBoundCheckHoistDisabled(func->IsLoopBody()))),
doPowIntIntTypeSpec(
doAggressiveIntTypeSpec &&
(!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsPowIntIntTypeSpecDisabled())),
doTagChecks(
(!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsTagCheckDisabled())),
isAsmJSFunc(func->GetJITFunctionBody()->IsAsmJsMode())
{
}
void
GlobOpt::BackwardPass(Js::Phase tag)
{
BEGIN_CODEGEN_PHASE(this->func, tag);
::BackwardPass backwardPass(this->func, this, tag);
backwardPass.Optimize();
END_CODEGEN_PHASE(this->func, tag);
}
void
GlobOpt::Optimize()
{
this->objectTypeSyms = nullptr;
    this->func->argInsCount = this->func->GetInParamsCount() - 1; // Don't include "this" pointer in the count.
if (!func->DoGlobOpt())
{
this->lengthEquivBv = nullptr;
this->argumentsEquivBv = nullptr;
this->callerEquivBv = nullptr;
// Still need to run the dead store phase to calculate the live reg on back edge
this->BackwardPass(Js::DeadStorePhase);
CannotAllocateArgumentsObjectOnStack();
return;
}
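    // Full glob opt: a backward pass (use/liveness analysis), the main forward pass,
    // then a dead-store pass and tail duplication.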
{
this->lengthEquivBv = this->func->m_symTable->m_propertyEquivBvMap->Lookup(Js::PropertyIds::length, nullptr); // Used to kill live "length" properties
this->argumentsEquivBv = func->m_symTable->m_propertyEquivBvMap->Lookup(Js::PropertyIds::arguments, nullptr); // Used to kill live "arguments" properties
this->callerEquivBv = func->m_symTable->m_propertyEquivBvMap->Lookup(Js::PropertyIds::caller, nullptr); // Used to kill live "caller" properties
// The backward phase needs the glob opt's allocator to allocate the propertyTypeValueMap
// in GlobOpt::EnsurePropertyTypeValue and ranges of instructions where int overflow may be ignored.
// (see BackwardPass::TrackIntUsage)
PageAllocator * pageAllocator = this->func->m_alloc->GetPageAllocator();
NoRecoverMemoryJitArenaAllocator localAlloc(_u("BE-GlobOpt"), pageAllocator, Js::Throw::OutOfMemory);
this->alloc = &localAlloc;
NoRecoverMemoryJitArenaAllocator localTempAlloc(_u("BE-GlobOpt temp"), pageAllocator, Js::Throw::OutOfMemory);
this->tempAlloc = &localTempAlloc;
// The forward passes use info (upwardExposedUses) from the backward pass. This info
// isn't available for some of the symbols created during the backward pass, or the forward pass.
// Keep track of the last symbol for which we're guaranteed to have data.
this->maxInitialSymID = this->func->m_symTable->GetMaxSymID();
this->BackwardPass(Js::BackwardPhase);
this->ForwardPass();
}
this->BackwardPass(Js::DeadStorePhase);
this->TailDupPass();
}
bool GlobOpt::ShouldExpectConventionalArrayIndexValue(IR::IndirOpnd *const indirOpnd)
{
Assert(indirOpnd);
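    // No index operand means a constant offset; it is a conventional index iff it is non-negative.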
if(!indirOpnd->GetIndexOpnd())
{
return indirOpnd->GetOffset() >= 0;
}
IR::RegOpnd *const indexOpnd = indirOpnd->GetIndexOpnd();
if(indexOpnd->m_sym->m_isNotInt)
{
        // Typically, single-def or any sym-specific information for type-specialized syms should not be used because
        // not all of their defs will have been accounted for until after the forward pass. But m_isNotInt is only ever
        // changed from false to true, so it's okay in this case.
return false;
}
StackSym *indexVarSym = indexOpnd->m_sym;
if(indexVarSym->IsTypeSpec())
{
indexVarSym = indexVarSym->GetVarEquivSym(nullptr);
Assert(indexVarSym);
}
else if(!IsLoopPrePass())
{
        // Don't use single-def info or const flags for type-specialized syms, as not all of their defs will have been
        // accounted for until after the forward pass. Also, don't use the const flags in a loop prepass because the
        // const flags may not be up-to-date.
StackSym *const indexSym = indexOpnd->m_sym;
if(indexSym->IsIntConst())
{
return indexSym->GetIntConstValue() >= 0;
}
}
Value *const indexValue = CurrentBlockData()->FindValue(indexVarSym);
if(!indexValue)
{
// Treat it as Uninitialized, assume it's going to be valid
return true;
}
ValueInfo *const indexValueInfo = indexValue->GetValueInfo();
int32 indexConstantValue;
if(indexValueInfo->TryGetIntConstantValue(&indexConstantValue))
{
return indexConstantValue >= 0;
}
if(indexValueInfo->IsUninitialized())
{
// Assume it's going to be valid
return true;
}
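    // No constant or definite type info: expect a conventional index only if the value
    // has always been a number and never a float.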
return indexValueInfo->HasBeenNumber() && !indexValueInfo->HasBeenFloat();
}
//
// The result is typed Float when it is 1/x, or cst1/cst2 where cst1 % cst2 != 0;
// otherwise it is Number (or whatever the profile data says).
//
ValueType GlobOpt::GetDivValueType(IR::Instr* instr, Value* src1Val, Value* src2Val, bool specialize)
{
ValueInfo *src1ValueInfo = (src1Val ? src1Val->GetValueInfo() : nullptr);
ValueInfo *src2ValueInfo = (src2Val ? src2Val->GetValueInfo() : nullptr);
if (instr->IsProfiledInstr() && instr->m_func->HasProfileInfo())
{
ValueType resultType = instr->m_func->GetReadOnlyProfileInfo()->GetDivProfileInfo(static_cast<Js::ProfileId>(instr->AsProfiledInstr()->u.profileId));
if (resultType.IsLikelyInt())
{
if (specialize && src1ValueInfo && src2ValueInfo
&& ((src1ValueInfo->IsInt() && src2ValueInfo->IsInt()) ||
(this->DoDivIntTypeSpec() && src1ValueInfo->IsLikelyInt() && src2ValueInfo->IsLikelyInt())))
{
return ValueType::GetInt(true);
}
return resultType;
}
// Consider: Checking that the sources are numbers.
if (resultType.IsLikelyFloat())
{
return ValueType::Float;
}
return resultType;
}
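    // No usable profile data: fall back to analyzing constant operands.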
int32 src1IntConstantValue;
if(!src1ValueInfo || !src1ValueInfo->TryGetIntConstantValue(&src1IntConstantValue))
{
return ValueType::Number;
}
if (src1IntConstantValue == 1)
{
return ValueType::Float;
}
int32 src2IntConstantValue;
if(!src2Val || !src2ValueInfo->TryGetIntConstantValue(&src2IntConstantValue))
{
return ValueType::Number;
}
if (src2IntConstantValue // Avoid divide by zero
&& !(src1IntConstantValue == 0x80000000 && src2IntConstantValue == -1) // Avoid integer overflow
&& (src1IntConstantValue % src2IntConstantValue) != 0)
{
return ValueType::Float;
}
return ValueType::Number;
}
void
GlobOpt::ForwardPass()
{
BEGIN_CODEGEN_PHASE(this->func, Js::ForwardPhase);
#if DBG_DUMP
if (Js::Configuration::Global.flags.Trace.IsEnabled(Js::GlobOptPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId()))
{
this->func->DumpHeader();
}
if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::GlobOptPhase))
{
this->TraceSettings();
}
#endif
// GetConstantCount() gives us the right size to pick for the SparseArray, but we may need more if we've inlined
// functions with constants. There will be a gap in the symbol numbering between the main constants and
// the inlined ones, so we'll most likely need a new array chunk. Make the min size of the array chunks be 64
// in case we have a main function with very few constants and a bunch of constants from inlined functions.
this->byteCodeConstantValueArray = SparseArray<Value>::New(this->alloc, max(this->func->GetJITFunctionBody()->GetConstCount(), 64U));
this->byteCodeConstantValueNumbersBv = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc);
this->tempBv = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc);
this->prePassCopyPropSym = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc);
this->slotSyms = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc);
this->byteCodeUses = nullptr;
this->propertySymUse = nullptr;
// changedSymsAfterIncBailoutCandidate helps track building incremental bailout in ForwardPass
this->changedSymsAfterIncBailoutCandidate = JitAnew(alloc, BVSparse<JitArenaAllocator>, alloc);
#if DBG
this->byteCodeUsesBeforeOpt = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc);
if (Js::Configuration::Global.flags.Trace.IsEnabled(Js::FieldCopyPropPhase) && this->DoFunctionFieldCopyProp())
{
Output::Print(_u("TRACE: CanDoFieldCopyProp Func: "));
this->func->DumpFullFunctionName();
Output::Print(_u("\n"));
}
#endif
OpndList localNoImplicitCallUsesToInsert(alloc);
this->noImplicitCallUsesToInsert = &localNoImplicitCallUsesToInsert;
IntConstantToStackSymMap localIntConstantToStackSymMap(alloc);
this->intConstantToStackSymMap = &localIntConstantToStackSymMap;
IntConstantToValueMap localIntConstantToValueMap(alloc);
this->intConstantToValueMap = &localIntConstantToValueMap;
Int64ConstantToValueMap localInt64ConstantToValueMap(alloc);
this->int64ConstantToValueMap = &localInt64ConstantToValueMap;
AddrConstantToValueMap localAddrConstantToValueMap(alloc);
this->addrConstantToValueMap = &localAddrConstantToValueMap;
StringConstantToValueMap localStringConstantToValueMap(alloc);
this->stringConstantToValueMap = &localStringConstantToValueMap;
SymIdToInstrMap localPrePassInstrMap(alloc);
this->prePassInstrMap = &localPrePassInstrMap;
ValueSetByValueNumber localValuesCreatedForClone(alloc, 64);
this->valuesCreatedForClone = &localValuesCreatedForClone;
ValueNumberPairToValueMap localValuesCreatedForMerge(alloc, 64);
this->valuesCreatedForMerge = &localValuesCreatedForMerge;
#if DBG
BVSparse<JitArenaAllocator> localFinishedStackLiteralInitFld(alloc);
this->finishedStackLiteralInitFld = &localFinishedStackLiteralInitFld;
#endif
FOREACH_BLOCK_IN_FUNC_EDITING(block, this->func)
{
this->OptBlock(block);
} NEXT_BLOCK_IN_FUNC_EDITING;
if (!PHASE_OFF(Js::MemOpPhase, this->func))
{
ProcessMemOp();
}
this->noImplicitCallUsesToInsert = nullptr;
this->intConstantToStackSymMap = nullptr;
this->intConstantToValueMap = nullptr;
this->int64ConstantToValueMap = nullptr;
this->addrConstantToValueMap = nullptr;
this->stringConstantToValueMap = nullptr;
#if DBG
this->finishedStackLiteralInitFld = nullptr;
uint freedCount = 0;
uint spilledCount = 0;
#endif
FOREACH_BLOCK_IN_FUNC(block, this->func)
{
#if DBG
if (block->GetDataUseCount() == 0)
{
freedCount++;
}
else
{
spilledCount++;
}
#endif
block->SetDataUseCount(0);
if (block->cloneStrCandidates)
{
JitAdelete(this->alloc, block->cloneStrCandidates);
block->cloneStrCandidates = nullptr;
}
} NEXT_BLOCK_IN_FUNC;
// Make sure we free most of them.
Assert(freedCount >= spilledCount);
// this->alloc will be freed right after return, no need to free it here
this->changedSymsAfterIncBailoutCandidate = nullptr;
END_CODEGEN_PHASE(this->func, Js::ForwardPhase);
}
void
GlobOpt::OptBlock(BasicBlock *block)
{
if (this->func->m_fg->RemoveUnreachableBlock(block, this))
{
GOPT_TRACE(_u("Removing unreachable block #%d\n"), block->GetBlockNum());
return;
}
Loop * loop = block->loop;
if (loop && block->isLoopHeader)
{
if (loop != this->prePassLoop)
{
OptLoops(loop);
if (!this->IsLoopPrePass() && DoFieldPRE(loop))
{
// Note: !IsLoopPrePass means this was a root loop pre-pass. FieldPre() is called once per loop.
this->FieldPRE(loop);
// Re-optimize the landing pad
BasicBlock *landingPad = loop->landingPad;
this->isRecursiveCallOnLandingPad = true;
this->OptBlock(landingPad);
this->isRecursiveCallOnLandingPad = false;
}
}
}
this->currentBlock = block;
PrepareLoopArrayCheckHoist();
block->MergePredBlocksValueMaps(this);
this->intOverflowCurrentlyMattersInRange = true;
this->intOverflowDoesNotMatterRange = this->currentBlock->intOverflowDoesNotMatterRange;
if (loop && DoFieldHoisting(loop))
{
if (block->isLoopHeader)
{
if (!this->IsLoopPrePass())
{
this->PrepareFieldHoisting(loop);
}
else if (loop == this->rootLoopPrePass)
{
this->PreparePrepassFieldHoisting(loop);
}
}
}
else
{
Assert(!TrackHoistableFields() || !HasHoistableFields(CurrentBlockData()));
if (!DoFieldCopyProp() && !DoFieldRefOpts())
{
this->KillAllFields(CurrentBlockData()->liveFields);
}
}
this->tempAlloc->Reset();
if(loop && block->isLoopHeader)
{
loop->firstValueNumberInLoop = this->currentValue;
}
GOPT_TRACE_BLOCK(block, true);
FOREACH_INSTR_IN_BLOCK_EDITING(instr, instrNext, block)
{
GOPT_TRACE_INSTRTRACE(instr);
BailOutInfo* oldBailOutInfo = nullptr;
bool isCheckAuxBailoutNeeded = this->func->IsJitInDebugMode() && !this->IsLoopPrePass();
if (isCheckAuxBailoutNeeded && instr->HasAuxBailOut() && !instr->HasBailOutInfo())
{
oldBailOutInfo = instr->GetBailOutInfo();
Assert(oldBailOutInfo);
}
bool isInstrRemoved = false;
instrNext = this->OptInstr(instr, &isInstrRemoved);
// If we still have instrs with only aux bail out, convert aux bail out back to regular bail out and fill it.
        // During OptInstr some instrs can be moved out to a different block; in that case the bailout info is going to
        // be replaced with e.g. loop bailout info, which is filled as part of processing that block, so we don't need
        // to fill it here.
if (isCheckAuxBailoutNeeded && !isInstrRemoved && instr->HasAuxBailOut() && !instr->HasBailOutInfo())
{
if (instr->GetBailOutInfo() == oldBailOutInfo)
{
instr->PromoteAuxBailOut();
FillBailOutInfo(block, instr->GetBailOutInfo());
}
else
{
AssertMsg(instr->GetBailOutInfo(), "With aux bailout, the bailout info should not be removed by OptInstr.");
}
}
} NEXT_INSTR_IN_BLOCK_EDITING;
GOPT_TRACE_BLOCK(block, false);
if (block->loop)
{
if (IsLoopPrePass())
{
if (DoBoundCheckHoist())
{
DetectUnknownChangesToInductionVariables(&block->globOptData);
}
}
else
{
isPerformingLoopBackEdgeCompensation = true;
Assert(this->tempBv->IsEmpty());
BVSparse<JitArenaAllocator> tempBv2(this->tempAlloc);
// On loop back-edges, we need to restore the state of the type specialized
// symbols to that of the loop header.
FOREACH_SUCCESSOR_BLOCK(succ, block)
{
if (succ->isLoopHeader && succ->loop->IsDescendentOrSelf(block->loop))
{
BVSparse<JitArenaAllocator> *liveOnBackEdge = block->loop->regAlloc.liveOnBackEdgeSyms;
this->tempBv->Minus(block->loop->varSymsOnEntry, block->globOptData.liveVarSyms);
this->tempBv->And(liveOnBackEdge);
this->ToVar(this->tempBv, block);
// Lossy int in the loop header, and no int on the back-edge - need a lossy conversion to int
this->tempBv->Minus(block->loop->lossyInt32SymsOnEntry, block->globOptData.liveInt32Syms);
this->tempBv->And(liveOnBackEdge);
this->ToInt32(this->tempBv, block, true /* lossy */);
// Lossless int in the loop header, and no lossless int on the back-edge - need a lossless conversion to int
this->tempBv->Minus(block->loop->int32SymsOnEntry, block->loop->lossyInt32SymsOnEntry);
tempBv2.Minus(block->globOptData.liveInt32Syms, block->globOptData.liveLossyInt32Syms);
this->tempBv->Minus(&tempBv2);
this->tempBv->And(liveOnBackEdge);
this->ToInt32(this->tempBv, block, false /* lossy */);
this->tempBv->Minus(block->loop->float64SymsOnEntry, block->globOptData.liveFloat64Syms);
this->tempBv->And(liveOnBackEdge);
this->ToFloat64(this->tempBv, block);
#ifdef ENABLE_SIMDJS
// SIMD_JS
// Compensate on backedge if sym is live on loop entry but not on backedge
this->tempBv->Minus(block->loop->simd128F4SymsOnEntry, block->globOptData.liveSimd128F4Syms);
this->tempBv->And(liveOnBackEdge);
this->ToTypeSpec(this->tempBv, block, TySimd128F4, IR::BailOutSimd128F4Only);
this->tempBv->Minus(block->loop->simd128I4SymsOnEntry, block->globOptData.liveSimd128I4Syms);
this->tempBv->And(liveOnBackEdge);
this->ToTypeSpec(this->tempBv, block, TySimd128I4, IR::BailOutSimd128I4Only);
#endif
// For ints and floats, go aggressive and type specialize in the landing pad any symbol which was specialized on
// entry to the loop body (in the loop header), and is still specialized on this tail, but wasn't specialized in
// the landing pad.
// Lossy int in the loop header and no int in the landing pad - need a lossy conversion to int
// (entry.lossyInt32 - landingPad.int32)
this->tempBv->Minus(block->loop->lossyInt32SymsOnEntry, block->loop->landingPad->globOptData.liveInt32Syms);
this->tempBv->And(liveOnBackEdge);
this->ToInt32(this->tempBv, block->loop->landingPad, true /* lossy */);
// Lossless int in the loop header, and no lossless int in the landing pad - need a lossless conversion to int
// ((entry.int32 - entry.lossyInt32) - (landingPad.int32 - landingPad.lossyInt32))
this->tempBv->Minus(block->loop->int32SymsOnEntry, block->loop->lossyInt32SymsOnEntry);
tempBv2.Minus(
block->loop->landingPad->globOptData.liveInt32Syms,
block->loop->landingPad->globOptData.liveLossyInt32Syms);
this->tempBv->Minus(&tempBv2);
this->tempBv->And(liveOnBackEdge);
this->ToInt32(this->tempBv, block->loop->landingPad, false /* lossy */);
// ((entry.float64 - landingPad.float64) & block.float64)
this->tempBv->Minus(block->loop->float64SymsOnEntry, block->loop->landingPad->globOptData.liveFloat64Syms);
this->tempBv->And(block->globOptData.liveFloat64Syms);
this->tempBv->And(liveOnBackEdge);
this->ToFloat64(this->tempBv, block->loop->landingPad);
#ifdef ENABLE_SIMDJS
// SIMD_JS
                    // Compensate in the landing pad if the sym is live on loop entry and on the back-edge.
this->tempBv->Minus(block->loop->simd128F4SymsOnEntry, block->loop->landingPad->globOptData.liveSimd128F4Syms);
this->tempBv->And(block->globOptData.liveSimd128F4Syms);
this->tempBv->And(liveOnBackEdge);
this->ToTypeSpec(this->tempBv, block->loop->landingPad, TySimd128F4, IR::BailOutSimd128F4Only);
this->tempBv->Minus(block->loop->simd128I4SymsOnEntry, block->loop->landingPad->globOptData.liveSimd128I4Syms);
this->tempBv->And(block->globOptData.liveSimd128I4Syms);
this->tempBv->And(liveOnBackEdge);
this->ToTypeSpec(this->tempBv, block->loop->landingPad, TySimd128I4, IR::BailOutSimd128I4Only);
#endif
// Now that we're done with the liveFields within this loop, trim the set to those syms
// that the backward pass told us were live out of the loop.
// This assumes we have no further need of the liveFields within the loop.
if (block->loop->liveOutFields)
{
block->globOptData.liveFields->And(block->loop->liveOutFields);
}
}
} NEXT_SUCCESSOR_BLOCK;
this->tempBv->ClearAll();
isPerformingLoopBackEdgeCompensation = false;
}
}
#if DBG
// The set of live lossy int32 syms should be a subset of all live int32 syms
this->tempBv->And(block->globOptData.liveInt32Syms, block->globOptData.liveLossyInt32Syms);
Assert(this->tempBv->Count() == block->globOptData.liveLossyInt32Syms->Count());
// The set of live lossy int32 syms should be a subset of live var or float syms (var or float sym containing the lossless
// value of the sym should be live)
this->tempBv->Or(block->globOptData.liveVarSyms, block->globOptData.liveFloat64Syms);
this->tempBv->And(block->globOptData.liveLossyInt32Syms);
Assert(this->tempBv->Count() == block->globOptData.liveLossyInt32Syms->Count());
this->tempBv->ClearAll();
Assert(this->currentBlock == block);
#endif
}
void
GlobOpt::OptLoops(Loop *loop)
{
Assert(loop != nullptr);
#if DBG
if (Js::Configuration::Global.flags.Trace.IsEnabled(Js::FieldCopyPropPhase) &&
!DoFunctionFieldCopyProp() && DoFieldCopyProp(loop))
{
Output::Print(_u("TRACE: CanDoFieldCopyProp Loop: "));
this->func->DumpFullFunctionName();
uint loopNumber = loop->GetLoopNumber();
Assert(loopNumber != Js::LoopHeader::NoLoop);
Output::Print(_u(" Loop: %d\n"), loopNumber);
}
#endif
Loop *previousLoop = this->prePassLoop;
this->prePassLoop = loop;
if (previousLoop == nullptr)
{
Assert(this->rootLoopPrePass == nullptr);
this->rootLoopPrePass = loop;
this->prePassInstrMap->Clear();
if (loop->parent == nullptr)
{
            // Outermost loop...
this->prePassCopyPropSym->ClearAll();
}
}
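    // First pre-pass over this loop: allocate its tracking sets. On subsequent
    // pre-passes, just reset them.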
if (loop->symsUsedBeforeDefined == nullptr)
{
loop->symsUsedBeforeDefined = JitAnew(alloc, BVSparse<JitArenaAllocator>, this->alloc);
loop->likelyIntSymsUsedBeforeDefined = JitAnew(alloc, BVSparse<JitArenaAllocator>, this->alloc);
loop->likelyNumberSymsUsedBeforeDefined = JitAnew(alloc, BVSparse<JitArenaAllocator>, this->alloc);
loop->forceFloat64SymsOnEntry = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc);
#ifdef ENABLE_SIMDJS
loop->likelySimd128F4SymsUsedBeforeDefined = JitAnew(alloc, BVSparse<JitArenaAllocator>, this->alloc);
loop->likelySimd128I4SymsUsedBeforeDefined = JitAnew(alloc, BVSparse<JitArenaAllocator>, this->alloc);
loop->forceSimd128F4SymsOnEntry = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc);
loop->forceSimd128I4SymsOnEntry = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc);
#endif
loop->symsDefInLoop = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc);
loop->fieldKilled = JitAnew(alloc, BVSparse<JitArenaAllocator>, this->alloc);
loop->fieldPRESymStore = JitAnew(alloc, BVSparse<JitArenaAllocator>, this->alloc);
loop->allFieldsKilled = false;
}
else
{
loop->symsUsedBeforeDefined->ClearAll();
loop->likelyIntSymsUsedBeforeDefined->ClearAll();
loop->likelyNumberSymsUsedBeforeDefined->ClearAll();
loop->forceFloat64SymsOnEntry->ClearAll();
#ifdef ENABLE_SIMDJS
loop->likelySimd128F4SymsUsedBeforeDefined->ClearAll();
loop->likelySimd128I4SymsUsedBeforeDefined->ClearAll();
loop->forceSimd128F4SymsOnEntry->ClearAll();
loop->forceSimd128I4SymsOnEntry->ClearAll();
#endif
loop->symsDefInLoop->ClearAll();
loop->fieldKilled->ClearAll();
loop->allFieldsKilled = false;
loop->initialValueFieldMap.Reset();
}
FOREACH_BLOCK_IN_LOOP(block, loop)
{
block->SetDataUseCount(block->GetSuccList()->Count());
OptBlock(block);
} NEXT_BLOCK_IN_LOOP;
if (previousLoop == nullptr)
{
Assert(this->rootLoopPrePass == loop);
this->rootLoopPrePass = nullptr;
}
this->prePassLoop = previousLoop;
}
void
GlobOpt::TailDupPass()
{
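    // For each loop, locate the tail block (the in-loop predecessor of the header that
    // carries the back-edge) and, if it has multiple predecessors, try duplicating it
    // into them.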
FOREACH_LOOP_IN_FUNC_EDITING(loop, this->func)
{
BasicBlock* header = loop->GetHeadBlock();
BasicBlock* loopTail = nullptr;
FOREACH_PREDECESSOR_BLOCK(pred, header)
{
if (loop->IsDescendentOrSelf(pred->loop))
{
loopTail = pred;
break;
}
} NEXT_PREDECESSOR_BLOCK;
if (loopTail)
{
            AssertMsg(loopTail->GetLastInstr()->IsBranchInstr(), "LastInstr of loop should always be a branch.");
if (!loopTail->GetPredList()->HasOne())
{
TryTailDup(loopTail->GetLastInstr()->AsBranchInstr());
}
}
} NEXT_LOOP_IN_FUNC_EDITING;
}
bool
GlobOpt::TryTailDup(IR::BranchInstr *tailBranch)
{
if (PHASE_OFF(Js::TailDupPhase, tailBranch->m_func->GetTopFunc()))
{
return false;
}
if (tailBranch->IsConditional())
{
return false;
}
IR::Instr *instr;
uint instrCount = 0;
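    // Walk backwards from the unconditional branch: give up unless at most one
    // side-effect-free (CSE-able), non-bailout instruction precedes it in the block.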
for (instr = tailBranch->GetPrevRealInstrOrLabel(); !instr->IsLabelInstr(); instr = instr->GetPrevRealInstrOrLabel())
{
if (instr->HasBailOutInfo())
{
break;
}
if (!OpCodeAttr::CanCSE(instr->m_opcode))
{
// Consider: We could be more aggressive here
break;
}
instrCount++;
if (instrCount > 1)
{
// Consider: If copy handled single-def tmps renaming, we could do more instrs
break;
}
}
if (!instr->IsLabelInstr())
{
return false;
}
IR::LabelInstr *mergeLabel = instr->AsLabelInstr();
IR::Instr *mergeLabelPrev = mergeLabel->m_prev;
// Skip unreferenced labels
while (mergeLabelPrev->IsLabelInstr() && mergeLabelPrev->AsLabelInstr()->labelRefs.Empty())
{
mergeLabelPrev = mergeLabelPrev->m_prev;
}
BasicBlock* labelBlock = mergeLabel->GetBasicBlock();
uint origPredCount = labelBlock->GetPredList()->Count();
uint dupCount = 0;
// We are good to go. Let's do the tail duplication.
FOREACH_SLISTCOUNTED_ENTRY_EDITING(IR::BranchInstr*, branchEntry, &mergeLabel->labelRefs, iter)
{
if (branchEntry->IsUnconditional() && !branchEntry->IsMultiBranch() && branchEntry != mergeLabelPrev && branchEntry != tailBranch)
{
for (instr = mergeLabel->m_next; instr != tailBranch; instr = instr->m_next)
{
branchEntry->InsertBefore(instr->Copy());
}
instr = branchEntry;
branchEntry->ReplaceTarget(mergeLabel, tailBranch->GetTarget());
while(!instr->IsLabelInstr())
{
instr = instr->m_prev;
}
BasicBlock* branchBlock = instr->AsLabelInstr()->GetBasicBlock();
labelBlock->RemovePred(branchBlock, func->m_fg);
func->m_fg->AddEdge(branchBlock, tailBranch->GetTarget()->GetBasicBlock());
dupCount++;
}
} NEXT_SLISTCOUNTED_ENTRY_EDITING;
// If we've duplicated everywhere, tail block is dead and should be removed.
if (dupCount == origPredCount)
{
AssertMsg(mergeLabel->labelRefs.Empty(), "Should not remove block with referenced label.");
func->m_fg->RemoveBlock(labelBlock, nullptr, true);
}
return true;
}
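// ToVar (bit-vector form): re-box each sym in the set as a var at the end of
// the block. Sketch of the effect (pseudo-IR): if s1 is currently live only
// as int32 (s1#i), we emit
//   s1 = ToVar s1#i
// just before the block's branch (or after its last instruction), so
// successors that expect a var see one.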
void
GlobOpt::ToVar(BVSparse<JitArenaAllocator> *bv, BasicBlock *block)
{
FOREACH_BITSET_IN_SPARSEBV(id, bv)
{
StackSym *stackSym = this->func->m_symTable->FindStackSym(id);
IR::RegOpnd *newOpnd = IR::RegOpnd::New(stackSym, TyVar, this->func);
IR::Instr *lastInstr = block->GetLastInstr();
if (lastInstr->IsBranchInstr() || lastInstr->m_opcode == Js::OpCode::BailTarget)
{
// If branch is using this symbol, hoist the operand as the ToVar load will get
// inserted right before the branch.
IR::Opnd *src1 = lastInstr->GetSrc1();
if (src1)
{
if (src1->IsRegOpnd() && src1->AsRegOpnd()->m_sym == stackSym)
{
lastInstr->HoistSrc1(Js::OpCode::Ld_A);
}
IR::Opnd *src2 = lastInstr->GetSrc2();
if (src2)
{
if (src2->IsRegOpnd() && src2->AsRegOpnd()->m_sym == stackSym)
{
lastInstr->HoistSrc2(Js::OpCode::Ld_A);
}
}
}
this->ToVar(lastInstr, newOpnd, block, nullptr, false);
}
else
{
IR::Instr *lastNextInstr = lastInstr->m_next;
this->ToVar(lastNextInstr, newOpnd, block, nullptr, false);
}
} NEXT_BITSET_IN_SPARSEBV;
}
void
GlobOpt::ToInt32(BVSparse<JitArenaAllocator> *bv, BasicBlock *block, bool lossy, IR::Instr *insertBeforeInstr)
{
return this->ToTypeSpec(bv, block, TyInt32, IR::BailOutIntOnly, lossy, insertBeforeInstr);
}
void
GlobOpt::ToFloat64(BVSparse<JitArenaAllocator> *bv, BasicBlock *block)
{
return this->ToTypeSpec(bv, block, TyFloat64, IR::BailOutNumberOnly);
}
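// ToTypeSpec: convert each sym in the bit-vector from whatever representation
// it is currently live in (var, int32, float64, ...) to toType, attaching
// bailOutKind for the case where the runtime value doesn't fit. E.g. a var
// sym s1 being specialized to int32 becomes, roughly:
//   s1#i = FromVar s1   [BailOutIntOnly]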
void
GlobOpt::ToTypeSpec(BVSparse<JitArenaAllocator> *bv, BasicBlock *block, IRType toType, IR::BailOutKind bailOutKind, bool lossy, IR::Instr *insertBeforeInstr)
{
FOREACH_BITSET_IN_SPARSEBV(id, bv)
{
StackSym *stackSym = this->func->m_symTable->FindStackSym(id);
IRType fromType = TyIllegal;
// Win8 bug: 757126. If we are trying to type specialize the arguments object,
// make sure stack args optimization is not enabled. This is a problem particularly
// when the instruction comes from an unreachable block. In other cases, the pass on
// the instruction itself should disable arguments object optimization.
if(block->globOptData.argObjSyms && block->globOptData.IsArgumentsSymID(id))
{
CannotAllocateArgumentsObjectOnStack();
}
if (block->globOptData.liveVarSyms->Test(id))
{
fromType = TyVar;
}
else if (block->globOptData.liveInt32Syms->Test(id) && !block->globOptData.liveLossyInt32Syms->Test(id))
{
fromType = TyInt32;
stackSym = stackSym->GetInt32EquivSym(this->func);
}
else if (block->globOptData.liveFloat64Syms->Test(id))
{
fromType = TyFloat64;
stackSym = stackSym->GetFloat64EquivSym(this->func);
}
else
{
#ifdef ENABLE_SIMDJS
Assert(block->globOptData.IsLiveAsSimd128(stackSym));
if (block->globOptData.IsLiveAsSimd128F4(stackSym))
{
fromType = TySimd128F4;
stackSym = stackSym->GetSimd128F4EquivSym(this->func);
}
else
{
fromType = TySimd128I4;
stackSym = stackSym->GetSimd128I4EquivSym(this->func);
}
#else
Assert(UNREACHED);
#endif
}
IR::RegOpnd *newOpnd = IR::RegOpnd::New(stackSym, fromType, this->func);
IR::Instr *lastInstr = block->GetLastInstr();
if (!insertBeforeInstr && lastInstr->IsBranchInstr())
{
// If branch is using this symbol, hoist the operand as the ToInt32 load will get
// inserted right before the branch.
IR::Instr *instrPrev = lastInstr->m_prev;
IR::Opnd *src1 = lastInstr->GetSrc1();
if (src1)
{
if (src1->IsRegOpnd() && src1->AsRegOpnd()->m_sym == stackSym)
{
lastInstr->HoistSrc1(Js::OpCode::Ld_A);
}
IR::Opnd *src2 = lastInstr->GetSrc2();
if (src2)
{
if (src2->IsRegOpnd() && src2->AsRegOpnd()->m_sym == stackSym)
{
lastInstr->HoistSrc2(Js::OpCode::Ld_A);
}
}
// Did we insert anything?
if (lastInstr->m_prev != instrPrev)
{
// If we had ByteCodeUses right before the branch, move them back down.
IR::Instr *insertPoint = lastInstr;
for (IR::Instr *instrBytecode = instrPrev; instrBytecode->m_opcode == Js::OpCode::ByteCodeUses; instrBytecode = instrBytecode->m_prev)
{
instrBytecode->Unlink();
insertPoint->InsertBefore(instrBytecode);
insertPoint = instrBytecode;
}
}
}
}
this->ToTypeSpecUse(nullptr, newOpnd, block, nullptr, nullptr, toType, bailOutKind, lossy, insertBeforeInstr);
} NEXT_BITSET_IN_SPARSEBV;
}
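// Field PRE (partial redundancy elimination): if a field load like o.x is
// available on every loop back-edge, the in-loop load is redundant on all but
// the first iteration. We preload the field in the landing pad so the loop
// can reuse the value; roughly:
//
//   while (...) { t = o.x; ... }   =>   t = o.x; while (...) { ... }
//
// (only valid as long as nothing in the loop can overwrite o.x).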
PRECandidatesList * GlobOpt::FindPossiblePRECandidates(Loop *loop, JitArenaAllocator *alloc)
{
// Find the set of PRE candidates
BasicBlock *loopHeader = loop->GetHeadBlock();
PRECandidatesList *candidates = nullptr;
bool firstBackEdge = true;
FOREACH_PREDECESSOR_BLOCK(blockPred, loopHeader)
{
if (!loop->IsDescendentOrSelf(blockPred->loop))
{
// Not a loop back-edge
continue;
}
if (firstBackEdge)
{
candidates = this->FindBackEdgePRECandidates(blockPred, alloc);
}
else
{
blockPred->globOptData.RemoveUnavailableCandidates(candidates);
}
} NEXT_PREDECESSOR_BLOCK;
return candidates;
}
BOOL GlobOpt::PreloadPRECandidate(Loop *loop, GlobHashBucket* candidate)
{
// Insert a load for each field PRE candidate.
PropertySym *propertySym = candidate->value->AsPropertySym();
StackSym *objPtrSym = propertySym->m_stackSym;
// If objPtr isn't live, we'll retry later.
// Another PRE candidate may insert a load for it.
if (!loop->landingPad->globOptData.IsLive(objPtrSym))
{
return false;
}
BasicBlock *landingPad = loop->landingPad;
Value *value = candidate->element;
Sym *symStore = value->GetValueInfo()->GetSymStore();
// The symStore can't be live into the loop
// The symStore needs to still have the same value
Assert(symStore && symStore->IsStackSym());
if (loop->landingPad->globOptData.IsLive(symStore))
{
// May have already been hoisted:
// o.x = t1;
// o.y = t1;
return false;
}
Value *landingPadValue = landingPad->globOptData.FindValue(propertySym);
// Value should be added as initial value or already be there.
Assert(landingPadValue);
IR::Instr * ldInstr = this->prePassInstrMap->Lookup(propertySym->m_id, nullptr);
Assert(ldInstr);
// Create instr to put in landing pad for compensation
Assert(IsPREInstrCandidateLoad(ldInstr->m_opcode));
IR::SymOpnd *ldSrc = ldInstr->GetSrc1()->AsSymOpnd();
if (ldSrc->m_sym != propertySym)
{
// It's possible that the propertySyms differ but have equivalent objPtrs. Verify their values.
Value *val1 = CurrentBlockData()->FindValue(ldSrc->m_sym->AsPropertySym()->m_stackSym);
Value *val2 = CurrentBlockData()->FindValue(propertySym->m_stackSym);
if (!val1 || !val2 || val1->GetValueNumber() != val2->GetValueNumber())
{
return false;
}
}
ldInstr = ldInstr->Copy();
// Consider: Shouldn't be necessary once we have copy-prop in prepass...
ldInstr->GetSrc1()->AsSymOpnd()->m_sym = propertySym;
ldSrc = ldInstr->GetSrc1()->AsSymOpnd();
if (ldSrc->IsPropertySymOpnd())
{
IR::PropertySymOpnd *propSymOpnd = ldSrc->AsPropertySymOpnd();
IR::PropertySymOpnd *newPropSymOpnd;
newPropSymOpnd = propSymOpnd->AsPropertySymOpnd()->CopyWithoutFlowSensitiveInfo(this->func);
ldInstr->ReplaceSrc1(newPropSymOpnd);
}
if (ldInstr->GetDst()->AsRegOpnd()->m_sym != symStore)
{
ldInstr->ReplaceDst(IR::RegOpnd::New(symStore->AsStackSym(), TyVar, this->func));
}
ldInstr->GetSrc1()->SetIsJITOptimizedReg(true);
ldInstr->GetDst()->SetIsJITOptimizedReg(true);
landingPad->globOptData.liveVarSyms->Set(symStore->m_id);
loop->fieldPRESymStore->Set(symStore->m_id);
ValueType valueType(ValueType::Uninitialized);
Value *initialValue = nullptr;
if (loop->initialValueFieldMap.TryGetValue(propertySym, &initialValue))
{
if (ldInstr->IsProfiledInstr())
{
if (initialValue->GetValueNumber() == value->GetValueNumber())
{
if (value->GetValueInfo()->IsUninitialized())
{
valueType = ldInstr->AsProfiledInstr()->u.FldInfo().valueType;
}
else
{
valueType = value->GetValueInfo()->Type();
}
}
else
{
valueType = ValueType::Uninitialized;
}
ldInstr->AsProfiledInstr()->u.FldInfo().valueType = valueType;
}
}
else
{
valueType = landingPadValue->GetValueInfo()->Type();
}
loop->symsUsedBeforeDefined->Set(symStore->m_id);
if (valueType.IsLikelyNumber())
{
loop->likelyNumberSymsUsedBeforeDefined->Set(symStore->m_id);
if (DoAggressiveIntTypeSpec() ? valueType.IsLikelyInt() : valueType.IsInt())
{
// Can only force int conversions in the landing pad based on likely-int values if aggressive int type
// specialization is enabled
loop->likelyIntSymsUsedBeforeDefined->Set(symStore->m_id);
}
}
// Insert in landing pad
if (ldInstr->HasAnyImplicitCalls())
{
IR::Instr * bailInstr = EnsureDisableImplicitCallRegion(loop);
bailInstr->InsertBefore(ldInstr);
}
else if (loop->endDisableImplicitCall)
{
loop->endDisableImplicitCall->InsertBefore(ldInstr);
}
else
{
loop->landingPad->InsertAfter(ldInstr);
}
ldInstr->ClearByteCodeOffset();
ldInstr->SetByteCodeOffset(landingPad->GetFirstInstr());
#if DBG_DUMP
if (Js::Configuration::Global.flags.Trace.IsEnabled(Js::FieldPREPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId()))
{
Output::Print(_u("** TRACE: Field PRE: field pre-loaded in landing pad of loop head #%-3d: "), loop->GetHeadBlock()->GetBlockNum());
ldInstr->Dump();
Output::Print(_u("\n"));
}
#endif
return true;
}
void GlobOpt::PreloadPRECandidates(Loop *loop, PRECandidatesList *candidates)
{
// Insert loads in landing pad for field PRE candidates. Iterate while(changed)
// for the o.x.y cases.
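// E.g. for t = o.x.y: preloading o.x makes its symStore live in the landing
// pad, which is what then allows (o.x).y to be preloaded on a later pass of
// the while(changed) loop below.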
BOOL changed = true;
if (!candidates)
{
return;
}
Assert(loop->landingPad->GetFirstInstr() == loop->landingPad->GetLastInstr());
while (changed)
{
changed = false;
FOREACH_SLIST_ENTRY_EDITING(GlobHashBucket*, candidate, (SList<GlobHashBucket*>*)candidates, iter)
{
if (this->PreloadPRECandidate(loop, candidate))
{
changed = true;
iter.RemoveCurrent();
}
} NEXT_SLIST_ENTRY_EDITING;
}
}
void GlobOpt::FieldPRE(Loop *loop)
{
if (!DoFieldPRE(loop))
{
return;
}
PRECandidatesList *candidates;
JitArenaAllocator *alloc = this->tempAlloc;
candidates = this->FindPossiblePRECandidates(loop, alloc);
this->PreloadPRECandidates(loop, candidates);
}
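// At a flow join, the merged array value info may name different helper syms
// (head segment, head segment length, length) than a given predecessor. We
// compensate on the predecessor edge with plain copies, e.g. (pseudo-IR):
//   mergedHeadSegLen = Ld_I4 predHeadSegLen
// so the successor's merged ArrayValueInfo only refers to syms that are live
// on every incoming path.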
void GlobOpt::InsertValueCompensation(
BasicBlock *const predecessor,
const SymToValueInfoMap &symsRequiringCompensationToMergedValueInfoMap)
{
Assert(predecessor);
Assert(symsRequiringCompensationToMergedValueInfoMap.Count() != 0);
IR::Instr *insertBeforeInstr = predecessor->GetLastInstr();
Func *const func = insertBeforeInstr->m_func;
bool setLastInstrInPredecessor;
if(insertBeforeInstr->IsBranchInstr() || insertBeforeInstr->m_opcode == Js::OpCode::BailTarget)
{
// Don't insert code between the branch and the corresponding ByteCodeUses instructions
while(insertBeforeInstr->m_prev->m_opcode == Js::OpCode::ByteCodeUses)
{
insertBeforeInstr = insertBeforeInstr->m_prev;
}
setLastInstrInPredecessor = false;
}
else
{
// Insert at the end of the block and set the last instruction
Assert(insertBeforeInstr->m_next);
insertBeforeInstr = insertBeforeInstr->m_next; // Instruction after the last instruction in the predecessor
setLastInstrInPredecessor = true;
}
GlobOptBlockData &predecessorBlockData = predecessor->globOptData;
GlobOptBlockData &successorBlockData = *CurrentBlockData();
struct DelayChangeValueInfo
{
Value* predecessorValue;
ArrayValueInfo* valueInfo;
void ChangeValueInfo(BasicBlock* predecessor, GlobOpt* g)
{
g->ChangeValueInfo(
predecessor,
predecessorValue,
valueInfo,
false /*allowIncompatibleType*/,
true /*compensated*/);
}
};
JsUtil::List<DelayChangeValueInfo, ArenaAllocator> delayChangeValueInfo(alloc);
for(auto it = symsRequiringCompensationToMergedValueInfoMap.GetIterator(); it.IsValid(); it.MoveNext())
{
const auto &entry = it.Current();
Sym *const sym = entry.Key();
Value *const predecessorValue = predecessorBlockData.FindValue(sym);
Assert(predecessorValue);
ValueInfo *const predecessorValueInfo = predecessorValue->GetValueInfo();
// Currently, array value infos are the only ones that require compensation based on values
Assert(predecessorValueInfo->IsAnyOptimizedArray());
const ArrayValueInfo *const predecessorArrayValueInfo = predecessorValueInfo->AsArrayValueInfo();
StackSym *const predecessorHeadSegmentSym = predecessorArrayValueInfo->HeadSegmentSym();
StackSym *const predecessorHeadSegmentLengthSym = predecessorArrayValueInfo->HeadSegmentLengthSym();
StackSym *const predecessorLengthSym = predecessorArrayValueInfo->LengthSym();
ValueInfo *const mergedValueInfo = entry.Value();
const ArrayValueInfo *const mergedArrayValueInfo = mergedValueInfo->AsArrayValueInfo();
StackSym *const mergedHeadSegmentSym = mergedArrayValueInfo->HeadSegmentSym();
StackSym *const mergedHeadSegmentLengthSym = mergedArrayValueInfo->HeadSegmentLengthSym();
StackSym *const mergedLengthSym = mergedArrayValueInfo->LengthSym();
Assert(!mergedHeadSegmentSym || predecessorHeadSegmentSym);
Assert(!mergedHeadSegmentLengthSym || predecessorHeadSegmentLengthSym);
Assert(!mergedLengthSym || predecessorLengthSym);
bool compensated = false;
if(mergedHeadSegmentSym && predecessorHeadSegmentSym != mergedHeadSegmentSym)
{
IR::Instr *const newInstr =
IR::Instr::New(
Js::OpCode::Ld_A,
IR::RegOpnd::New(mergedHeadSegmentSym, mergedHeadSegmentSym->GetType(), func),
IR::RegOpnd::New(predecessorHeadSegmentSym, predecessorHeadSegmentSym->GetType(), func),
func);
newInstr->GetDst()->SetIsJITOptimizedReg(true);
newInstr->GetSrc1()->SetIsJITOptimizedReg(true);
newInstr->SetByteCodeOffset(insertBeforeInstr);
insertBeforeInstr->InsertBefore(newInstr);
compensated = true;
}
if(mergedHeadSegmentLengthSym && predecessorHeadSegmentLengthSym != mergedHeadSegmentLengthSym)
{
IR::Instr *const newInstr =
IR::Instr::New(
Js::OpCode::Ld_I4,
IR::RegOpnd::New(mergedHeadSegmentLengthSym, mergedHeadSegmentLengthSym->GetType(), func),
IR::RegOpnd::New(predecessorHeadSegmentLengthSym, predecessorHeadSegmentLengthSym->GetType(), func),
func);
newInstr->GetDst()->SetIsJITOptimizedReg(true);
newInstr->GetSrc1()->SetIsJITOptimizedReg(true);
newInstr->SetByteCodeOffset(insertBeforeInstr);
insertBeforeInstr->InsertBefore(newInstr);
compensated = true;
// Merge the head segment length value
Assert(predecessorBlockData.liveVarSyms->Test(predecessorHeadSegmentLengthSym->m_id));
predecessorBlockData.liveVarSyms->Set(mergedHeadSegmentLengthSym->m_id);
successorBlockData.liveVarSyms->Set(mergedHeadSegmentLengthSym->m_id);
Value *const predecessorHeadSegmentLengthValue =
predecessorBlockData.FindValue(predecessorHeadSegmentLengthSym);
Assert(predecessorHeadSegmentLengthValue);
predecessorBlockData.SetValue(predecessorHeadSegmentLengthValue, mergedHeadSegmentLengthSym);
Value *const mergedHeadSegmentLengthValue = successorBlockData.FindValue(mergedHeadSegmentLengthSym);
if(mergedHeadSegmentLengthValue)
{
Assert(mergedHeadSegmentLengthValue->GetValueNumber() != predecessorHeadSegmentLengthValue->GetValueNumber());
if(predecessorHeadSegmentLengthValue->GetValueInfo() != mergedHeadSegmentLengthValue->GetValueInfo())
{
mergedHeadSegmentLengthValue->SetValueInfo(
ValueInfo::MergeLikelyIntValueInfo(
this->alloc,
mergedHeadSegmentLengthValue,
predecessorHeadSegmentLengthValue,
mergedHeadSegmentLengthValue->GetValueInfo()->Type()
.Merge(predecessorHeadSegmentLengthValue->GetValueInfo()->Type())));
}
}
else
{
successorBlockData.SetValue(CopyValue(predecessorHeadSegmentLengthValue), mergedHeadSegmentLengthSym);
}
}
if(mergedLengthSym && predecessorLengthSym != mergedLengthSym)
{
IR::Instr *const newInstr =
IR::Instr::New(
Js::OpCode::Ld_I4,
IR::RegOpnd::New(mergedLengthSym, mergedLengthSym->GetType(), func),
IR::RegOpnd::New(predecessorLengthSym, predecessorLengthSym->GetType(), func),
func);
newInstr->GetDst()->SetIsJITOptimizedReg(true);
newInstr->GetSrc1()->SetIsJITOptimizedReg(true);
newInstr->SetByteCodeOffset(insertBeforeInstr);
insertBeforeInstr->InsertBefore(newInstr);
compensated = true;
// Merge the length value
Assert(predecessorBlockData.liveVarSyms->Test(predecessorLengthSym->m_id));
predecessorBlockData.liveVarSyms->Set(mergedLengthSym->m_id);
successorBlockData.liveVarSyms->Set(mergedLengthSym->m_id);
Value *const predecessorLengthValue = predecessorBlockData.FindValue(predecessorLengthSym);
Assert(predecessorLengthValue);
predecessorBlockData.SetValue(predecessorLengthValue, mergedLengthSym);
Value *const mergedLengthValue = successorBlockData.FindValue(mergedLengthSym);
if(mergedLengthValue)
{
Assert(mergedLengthValue->GetValueNumber() != predecessorLengthValue->GetValueNumber());
if(predecessorLengthValue->GetValueInfo() != mergedLengthValue->GetValueInfo())
{
mergedLengthValue->SetValueInfo(
ValueInfo::MergeLikelyIntValueInfo(
this->alloc,
mergedLengthValue,
predecessorLengthValue,
mergedLengthValue->GetValueInfo()->Type().Merge(predecessorLengthValue->GetValueInfo()->Type())));
}
}
else
{
successorBlockData.SetValue(CopyValue(predecessorLengthValue), mergedLengthSym);
}
}
if(compensated)
{
// Save the new ValueInfo for later.
// We don't want other symbols needing compensation to see this new one
delayChangeValueInfo.Add({
predecessorValue,
ArrayValueInfo::New(
alloc,
predecessorValueInfo->Type(),
mergedHeadSegmentSym ? mergedHeadSegmentSym : predecessorHeadSegmentSym,
mergedHeadSegmentLengthSym ? mergedHeadSegmentLengthSym : predecessorHeadSegmentLengthSym,
mergedLengthSym ? mergedLengthSym : predecessorLengthSym,
predecessorValueInfo->GetSymStore())
});
}
}
// Once we've compensated all the symbols, update the new ValueInfo.
delayChangeValueInfo.Map([predecessor, this](int, DelayChangeValueInfo d) { d.ChangeValueInfo(predecessor, this); });
if(setLastInstrInPredecessor)
{
predecessor->SetLastInstr(insertBeforeInstr->m_prev);
}
}
bool
GlobOpt::AreFromSameBytecodeFunc(IR::RegOpnd const* src1, IR::RegOpnd const* dst) const
{
Assert(this->func->m_symTable->FindStackSym(src1->m_sym->m_id) == src1->m_sym);
Assert(this->func->m_symTable->FindStackSym(dst->m_sym->m_id) == dst->m_sym);
if (dst->m_sym->HasByteCodeRegSlot() && src1->m_sym->HasByteCodeRegSlot())
{
return src1->m_sym->GetByteCodeFunc() == dst->m_sym->GetByteCodeFunc();
}
return false;
}
/*
 * This is for scope object removal along with the Heap Arguments optimization.
 * We track several instructions to facilitate the removal of the scope object:
 * - LdSlotArr - tracked to keep track of the formals array (the dest).
 * - InlineeStart - tracked to keep track of the stack syms for the formals of the inlinee.
*/
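// Rough example: for f(a, b) { ... } compiled with a scope object for its
// formals, tracking LdSlotArr and InlineeStart lets us map each formal slot
// back to a stack sym, so the scope object can be elided when it provably
// doesn't escape.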
void
GlobOpt::TrackInstrsForScopeObjectRemoval(IR::Instr * instr)
{
IR::Opnd* dst = instr->GetDst();
IR::Opnd* src1 = instr->GetSrc1();
if (instr->m_opcode == Js::OpCode::Ld_A && src1->IsRegOpnd())
{
AssertMsg(!instr->m_func->IsStackArgsEnabled() || !src1->IsScopeObjOpnd(instr->m_func), "There can be no aliasing for scope object.");
}
// The following is to track formals array for Stack Arguments optimization with Formals
if (instr->m_func->IsStackArgsEnabled() && !this->IsLoopPrePass())
{
if (instr->m_opcode == Js::OpCode::LdSlotArr)
{
if (instr->GetSrc1()->IsScopeObjOpnd(instr->m_func))
{
AssertMsg(!instr->m_func->GetJITFunctionBody()->HasImplicitArgIns(), "No mapping is required in this case. So it should already be generating ArgIns.");
instr->m_func->TrackFormalsArraySym(dst->GetStackSym()->m_id);
}
}
else if (instr->m_opcode == Js::OpCode::InlineeStart)
{
Assert(instr->m_func->IsInlined());
Js::ArgSlot actualsCount = instr->m_func->actualCount - 1;
Js::ArgSlot formalsCount = instr->m_func->GetJITFunctionBody()->GetInParamsCount() - 1;
Func * func = instr->m_func;
Func * inlinerFunc = func->GetParentFunc(); //Inliner's func
IR::Instr * argOutInstr = instr->GetSrc2()->GetStackSym()->GetInstrDef();
//The argout immediately before the InlineeStart will be the ArgOut for NewScObject,
//so we don't want to track the stack sym for that argout - skip it here.
if (instr->m_func->IsInlinedConstructor())
{
//PRE might introduce a second definition for the Src1. So assert for the opcode only when it has a single definition.
Assert(argOutInstr->GetSrc1()->GetStackSym()->GetInstrDef() == nullptr ||
argOutInstr->GetSrc1()->GetStackSym()->GetInstrDef()->m_opcode == Js::OpCode::NewScObjectNoCtor);
argOutInstr = argOutInstr->GetSrc2()->GetStackSym()->GetInstrDef();
}
if (formalsCount < actualsCount)
{
Js::ArgSlot extraActuals = actualsCount - formalsCount;
//Skipping extra actuals passed
for (Js::ArgSlot i = 0; i < extraActuals; i++)
{
argOutInstr = argOutInstr->GetSrc2()->GetStackSym()->GetInstrDef();
}
}
StackSym * undefinedSym = nullptr;
for (Js::ArgSlot param = formalsCount; param > 0; param--)
{
StackSym * argOutSym = nullptr;
if (argOutInstr->GetSrc1())
{
if (argOutInstr->GetSrc1()->IsRegOpnd())
{
argOutSym = argOutInstr->GetSrc1()->GetStackSym();
}
else
{
// We will always have ArgOut instr - so the source operand will not be removed.
argOutSym = StackSym::New(inlinerFunc);
IR::Opnd * srcOpnd = argOutInstr->GetSrc1();
IR::Opnd * dstOpnd = IR::RegOpnd::New(argOutSym, TyVar, inlinerFunc);
IR::Instr * assignInstr = IR::Instr::New(Js::OpCode::Ld_A, dstOpnd, srcOpnd, inlinerFunc);
instr->InsertBefore(assignInstr);
}
}
Assert(!func->HasStackSymForFormal(param - 1));
if (param <= actualsCount)
{
Assert(argOutSym);
func->TrackStackSymForFormalIndex(param - 1, argOutSym);
argOutInstr = argOutInstr->GetSrc2()->GetStackSym()->GetInstrDef();
}
else
{
/* When param is out of range of the actuals count, load undefined */
// TODO: saravind: This will insert undefined for each param that has no actual. Clean this up by having a sym for undefined on func?
Assert(formalsCount > actualsCount);
if (undefinedSym == nullptr)
{
undefinedSym = StackSym::New(inlinerFunc);
IR::Opnd * srcOpnd = IR::AddrOpnd::New(inlinerFunc->GetScriptContextInfo()->GetUndefinedAddr(), IR::AddrOpndKindDynamicMisc, inlinerFunc);
IR::Opnd * dstOpnd = IR::RegOpnd::New(undefinedSym, TyVar, inlinerFunc);
IR::Instr * assignUndefined = IR::Instr::New(Js::OpCode::Ld_A, dstOpnd, srcOpnd, inlinerFunc);
instr->InsertBefore(assignUndefined);
}
func->TrackStackSymForFormalIndex(param - 1, undefinedSym);
}
}
}
}
}
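// OptArguments: track the arguments object and all of its aliases so that, if
// every use turns out to be benign (arguments[i], arguments.length, use in
// f.apply), the heap arguments object allocation can be replaced with direct
// stack accesses. Any escaping use calls CannotAllocateArgumentsObjectOnStack
// and gives up, e.g.
//   function f() { return g(arguments); }  // arguments escapes: no stack opt
//   function h(x) { return arguments[0]; } // can stay on the stack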
void
GlobOpt::OptArguments(IR::Instr *instr)
{
IR::Opnd* dst = instr->GetDst();
IR::Opnd* src1 = instr->GetSrc1();
IR::Opnd* src2 = instr->GetSrc2();
TrackInstrsForScopeObjectRemoval(instr);
if (!TrackArgumentsObject())
{
return;
}
if (instr->HasAnyLoadHeapArgsOpCode())
{
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
if (instr->m_func->IsStackArgsEnabled())
{
if (instr->GetSrc1()->IsRegOpnd() && instr->m_func->GetJITFunctionBody()->GetInParamsCount() > 1)
{
StackSym * scopeObjSym = instr->GetSrc1()->GetStackSym();
Assert(scopeObjSym);
Assert(scopeObjSym->GetInstrDef()->m_opcode == Js::OpCode::InitCachedScope || scopeObjSym->GetInstrDef()->m_opcode == Js::OpCode::NewScopeObject);
Assert(instr->m_func->GetScopeObjSym() == scopeObjSym);
if (PHASE_VERBOSE_TRACE1(Js::StackArgFormalsOptPhase))
{
Output::Print(_u("StackArgFormals : %s (%d) :Setting scopeObjSym in forward pass. \n"), instr->m_func->GetJITFunctionBody()->GetDisplayName(), instr->m_func->GetJITFunctionBody()->GetFunctionNumber());
Output::Flush();
}
}
}
#endif
if (instr->m_func->GetJITFunctionBody()->GetInParamsCount() != 1 && !instr->m_func->IsStackArgsEnabled())
{
CannotAllocateArgumentsObjectOnStack();
}
else
{
CurrentBlockData()->TrackArgumentsSym(dst->AsRegOpnd());
}
return;
}
// Keep track of the arguments object and its aliases.
// LdHeapArguments loads the arguments object and Ld_A tracks the aliases.
if ((instr->m_opcode == Js::OpCode::Ld_A || instr->m_opcode == Js::OpCode::BytecodeArgOutCapture) && (src1->IsRegOpnd() && CurrentBlockData()->IsArgumentsOpnd(src1)))
{
// In the debug mode, we don't want to optimize away the aliases. Since we may have to show them on the inspection.
if (((!AreFromSameBytecodeFunc(src1->AsRegOpnd(), dst->AsRegOpnd()) || this->currentBlock->loop) && instr->m_opcode != Js::OpCode::BytecodeArgOutCapture) || this->func->IsJitInDebugMode())
{
CannotAllocateArgumentsObjectOnStack();
return;
}
if(!dst->AsRegOpnd()->GetStackSym()->m_nonEscapingArgObjAlias)
{
CurrentBlockData()->TrackArgumentsSym(dst->AsRegOpnd());
}
return;
}
if (!CurrentBlockData()->TestAnyArgumentsSym())
{
// There are no syms to track yet, don't start tracking arguments sym.
return;
}
// Avoid loop prepass
if (this->currentBlock->loop && this->IsLoopPrePass())
{
return;
}
SymID id = 0;
switch(instr->m_opcode)
{
case Js::OpCode::LdElemI_A:
case Js::OpCode::TypeofElem:
{
Assert(src1->IsIndirOpnd());
IR::RegOpnd *indexOpnd = src1->AsIndirOpnd()->GetIndexOpnd();
if (indexOpnd && CurrentBlockData()->IsArgumentsSymID(indexOpnd->m_sym->m_id))
{
// Pathological test cases such as a[arguments]
CannotAllocateArgumentsObjectOnStack();
return;
}
IR::RegOpnd *baseOpnd = src1->AsIndirOpnd()->GetBaseOpnd();
id = baseOpnd->m_sym->m_id;
if (CurrentBlockData()->IsArgumentsSymID(id))
{
instr->usesStackArgumentsObject = true;
}
break;
}
case Js::OpCode::LdLen_A:
{
Assert(src1->IsRegOpnd());
if(CurrentBlockData()->IsArgumentsOpnd(src1))
{
instr->usesStackArgumentsObject = true;
}
break;
}
case Js::OpCode::ArgOut_A_InlineBuiltIn:
{
if (CurrentBlockData()->IsArgumentsOpnd(src1))
{
instr->usesStackArgumentsObject = true;
}
if (CurrentBlockData()->IsArgumentsOpnd(src1) &&
src1->AsRegOpnd()->m_sym->GetInstrDef()->m_opcode == Js::OpCode::BytecodeArgOutCapture)
{
// Apply inlining results in such usage - ignore this sym that is def'd by BytecodeArgOutCapture.
// It's needed because we do not have block-level merging of the arguments object, and this def due to inlining can turn off the stack args opt.
IR::Instr* builtinStart = instr->GetNextRealInstr();
if (builtinStart->m_opcode == Js::OpCode::InlineBuiltInStart)
{
IR::Opnd* builtinOpnd = builtinStart->GetSrc1();
if (builtinStart->GetSrc1()->IsAddrOpnd())
{
Assert(builtinOpnd->AsAddrOpnd()->m_isFunction);
Js::BuiltinFunction builtinFunction = Js::JavascriptLibrary::GetBuiltInForFuncInfo(((FixedFieldInfo*)builtinOpnd->AsAddrOpnd()->m_metadata)->GetFuncInfoAddr(), func->GetThreadContextInfo());
if (builtinFunction == Js::BuiltinFunction::JavascriptFunction_Apply)
{
CurrentBlockData()->ClearArgumentsSym(src1->AsRegOpnd());
}
}
else if (builtinOpnd->IsRegOpnd())
{
if (builtinOpnd->AsRegOpnd()->m_sym->m_builtInIndex == Js::BuiltinFunction::JavascriptFunction_Apply)
{
CurrentBlockData()->ClearArgumentsSym(src1->AsRegOpnd());
}
}
}
}
break;
}
case Js::OpCode::BailOnNotStackArgs:
case Js::OpCode::ArgOut_A_FromStackArgs:
case Js::OpCode::BytecodeArgOutUse:
{
if (src1 && CurrentBlockData()->IsArgumentsOpnd(src1))
{
instr->usesStackArgumentsObject = true;
}
break;
}
default:
{
// Be super conservative here: if we see the arguments object or any of its aliases
// being used in any other opcode, just don't do this optimization. Revisit this to
// optimize further if we find a common case being missed.
if (src1)
{
if (src1->IsRegOpnd() || src1->IsSymOpnd() || src1->IsIndirOpnd())
{
if (CurrentBlockData()->IsArgumentsOpnd(src1))
{
#ifdef PERF_HINT
if (PHASE_TRACE1(Js::PerfHintPhase))
{
WritePerfHint(PerfHints::HeapArgumentsCreated, instr->m_func, instr->GetByteCodeOffset());
}
#endif
CannotAllocateArgumentsObjectOnStack();
return;
}
}
}
if (src2)
{
if (src2->IsRegOpnd() || src2->IsSymOpnd() || src2->IsIndirOpnd())
{
if (CurrentBlockData()->IsArgumentsOpnd(src2))
{
#ifdef PERF_HINT
if (PHASE_TRACE1(Js::PerfHintPhase))
{
WritePerfHint(PerfHints::HeapArgumentsCreated, instr->m_func, instr->GetByteCodeOffset());
}
#endif
CannotAllocateArgumentsObjectOnStack();
return;
}
}
}
// We should look at dst last to correctly handle cases where it's the same as one of the src operands.
if (dst)
{
if (dst->IsIndirOpnd() || dst->IsSymOpnd())
{
if (CurrentBlockData()->IsArgumentsOpnd(dst))
{
#ifdef PERF_HINT
if (PHASE_TRACE1(Js::PerfHintPhase))
{
WritePerfHint(PerfHints::HeapArgumentsModification, instr->m_func, instr->GetByteCodeOffset());
}
#endif
CannotAllocateArgumentsObjectOnStack();
return;
}
}
else if (dst->IsRegOpnd())
{
if (this->currentBlock->loop && CurrentBlockData()->IsArgumentsOpnd(dst))
{
#ifdef PERF_HINT
if (PHASE_TRACE1(Js::PerfHintPhase))
{
WritePerfHint(PerfHints::HeapArgumentsModification, instr->m_func, instr->GetByteCodeOffset());
}
#endif
CannotAllocateArgumentsObjectOnStack();
return;
}
CurrentBlockData()->ClearArgumentsSym(dst->AsRegOpnd());
}
}
}
break;
}
return;
}
void
GlobOpt::MarkArgumentsUsedForBranch(IR::Instr * instr)
{
// If it's a conditional branch instruction and the operand used for branching is one of the arguments
// to the function, tag the m_argUsedForBranch of the functionBody so that it can be used later for inlining decisions.
if (instr->IsBranchInstr() && !instr->AsBranchInstr()->IsUnconditional())
{
IR::BranchInstr * bInstr = instr->AsBranchInstr();
IR::Opnd *src1 = bInstr->GetSrc1();
IR::Opnd *src2 = bInstr->GetSrc2();
// These are used because we don't want to rely on src1 or src2 to always be the register/constant
IR::RegOpnd *regOpnd = nullptr;
if (!src2 && (instr->m_opcode == Js::OpCode::BrFalse_A || instr->m_opcode == Js::OpCode::BrTrue_A) && src1->IsRegOpnd())
{
regOpnd = src1->AsRegOpnd();
}
// We need to check for (0===arg) and (arg===0); this is especially important since some minifiers
// change all instances of one to the other.
else if (src2 && src2->IsConstOpnd() && src1->IsRegOpnd())
{
regOpnd = src1->AsRegOpnd();
}
else if (src2 && src2->IsRegOpnd() && src1->IsConstOpnd())
{
regOpnd = src2->AsRegOpnd();
}
if (regOpnd != nullptr)
{
if (regOpnd->m_sym->IsSingleDef())
{
IR::Instr * defInst = regOpnd->m_sym->GetInstrDef();
IR::Opnd *defSym = defInst->GetSrc1();
if (defSym && defSym->IsSymOpnd() && defSym->AsSymOpnd()->m_sym->IsStackSym()
&& defSym->AsSymOpnd()->m_sym->AsStackSym()->IsParamSlotSym())
{
uint16 param = defSym->AsSymOpnd()->m_sym->AsStackSym()->GetParamSlotNum();
// We only support functions with up to 13 arguments to ensure optimal size of callSiteInfo
if (param < Js::Constants::MaximumArgumentCountForConstantArgumentInlining)
{
this->func->GetJITOutput()->SetArgUsedForBranch((uint8)param);
}
}
}
}
}
}
const InductionVariable*
GlobOpt::GetInductionVariable(SymID sym, Loop *loop)
{
if (loop->inductionVariables)
{
for (auto it = loop->inductionVariables->GetIterator(); it.IsValid(); it.MoveNext())
{
InductionVariable* iv = &it.CurrentValueReference();
if (!iv->IsChangeDeterminate() || !iv->IsChangeUnidirectional())
{
continue;
}
if (iv->Sym()->m_id == sym)
{
return iv;
}
}
}
return nullptr;
}
bool
GlobOpt::IsSymIDInductionVariable(SymID sym, Loop *loop)
{
return GetInductionVariable(sym, loop) != nullptr;
}
SymID
GlobOpt::GetVarSymID(StackSym *sym)
{
if (sym && sym->m_type != TyVar)
{
sym = sym->GetVarEquivSym(nullptr);
}
if (!sym)
{
return Js::Constants::InvalidSymID;
}
return sym->m_id;
}
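// MemOp eligibility: a candidate loop memory operation needs an invariant
// array base and an index that is a unidirectional induction variable
// stepping by exactly 1, e.g.
//   for (var i = 0; i < n; i++) { a[i] = 0; }   // memset candidate
// Anything else (computed steps, possible value-type kills, bound checks not
// eliminated) disqualifies the instruction.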
bool
GlobOpt::IsAllowedForMemOpt(IR::Instr* instr, bool isMemset, IR::RegOpnd *baseOpnd, IR::Opnd *indexOpnd)
{
Assert(instr);
if (!baseOpnd || !indexOpnd)
{
return false;
}
Loop* loop = this->currentBlock->loop;
const ValueType baseValueType(baseOpnd->GetValueType());
const ValueType indexValueType(indexOpnd->GetValueType());
// Validate the array and index types
if (
!indexValueType.IsInt() ||
!(
baseValueType.IsTypedIntOrFloatArray() ||
baseValueType.IsArray()
)
)
{
#if DBG_DUMP
wchar indexValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE];
indexValueType.ToString(indexValueTypeStr);
wchar baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE];
baseValueType.ToString(baseValueTypeStr);
TRACE_MEMOP_VERBOSE(loop, instr, _u("Index[%s] or Array[%s] value type is invalid"), indexValueTypeStr, baseValueTypeStr);
#endif
return false;
}
// The following is conservative and works around a bug in induction variable analysis.
if (baseOpnd->IsArrayRegOpnd())
{
IR::ArrayRegOpnd *baseArrayOp = baseOpnd->AsArrayRegOpnd();
bool hasBoundChecksRemoved = (
baseArrayOp->EliminatedLowerBoundCheck() &&
baseArrayOp->EliminatedUpperBoundCheck() &&
!instr->extractedUpperBoundCheckWithoutHoisting &&
!instr->loadedArrayHeadSegment &&
!instr->loadedArrayHeadSegmentLength
);
if (!hasBoundChecksRemoved)
{
TRACE_MEMOP_VERBOSE(loop, instr, _u("Missing bounds check optimization"));
return false;
}
}
if (!baseValueType.IsTypedArray())
{
// Check if the instr can kill the value type of the array
JsArrayKills arrayKills = CheckJsArrayKills(instr);
if (arrayKills.KillsValueType(baseValueType))
{
TRACE_MEMOP_VERBOSE(loop, instr, _u("The array (s%d) can lose its value type"), GetVarSymID(baseOpnd->GetStackSym()));
return false;
}
}
// Process the Index Operand
if (!this->OptIsInvariant(baseOpnd, this->currentBlock, loop, CurrentBlockData()->FindValue(baseOpnd->m_sym), false, true))
{
TRACE_MEMOP_VERBOSE(loop, instr, _u("Base (s%d) is not invariant"), GetVarSymID(baseOpnd->GetStackSym()));
return false;
}
// Validate the index
Assert(indexOpnd->GetStackSym());
SymID indexSymID = GetVarSymID(indexOpnd->GetStackSym());
const InductionVariable* iv = GetInductionVariable(indexSymID, loop);
if (!iv)
{
// If the index is not an induction variable return
TRACE_MEMOP_VERBOSE(loop, instr, _u("Index (s%d) is not an induction variable"), indexSymID);
return false;
}
Assert(iv->IsChangeDeterminate() && iv->IsChangeUnidirectional());
const IntConstantBounds & bounds = iv->ChangeBounds();
if (loop->memOpInfo)
{
// Only accept induction variables that increment by 1
Loop::InductionVariableChangeInfo inductionVariableChangeInfo = { 0, 0 };
inductionVariableChangeInfo = loop->memOpInfo->inductionVariableChangeInfoMap->Lookup(indexSymID, inductionVariableChangeInfo);
if (
(bounds.LowerBound() != 1 && bounds.LowerBound() != -1) ||
(bounds.UpperBound() != bounds.LowerBound()) ||
inductionVariableChangeInfo.unroll > 1 // Must be 0 (not seen yet) or 1 (already seen)
)
{
TRACE_MEMOP_VERBOSE(loop, instr, _u("The index does not change by 1: %d><%d, unroll=%d"), bounds.LowerBound(), bounds.UpperBound(), inductionVariableChangeInfo.unroll);
return false;
}
// Check that the index is the same across all MemOp optimizations in this loop
if (!loop->memOpInfo->candidates->Empty())
{
Loop::MemOpCandidate* previousCandidate = loop->memOpInfo->candidates->Head();
// All MemOp operations within the same loop must use the same index
if (previousCandidate->index != indexSymID)
{
TRACE_MEMOP_VERBOSE(loop, instr, _u("The index is not the same as other MemOp in the loop"));
return false;
}
}
}
return true;
}
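// Memcopy detection spans the load/store pair: the LdElemI_A side records a
// MemCopyCandidate carrying the transfer sym, and the matching StElemI_A
// later fills in the destination base. E.g. for
//   for (var i = 0; i < n; i++) { b[i] = a[i]; }
// the a[i] load is collected here and the b[i] store in
// CollectMemcopyStElementI.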
bool
GlobOpt::CollectMemcopyLdElementI(IR::Instr *instr, Loop *loop)
{
Assert(instr->GetSrc1()->IsIndirOpnd());
IR::IndirOpnd *src1 = instr->GetSrc1()->AsIndirOpnd();
IR::Opnd *indexOpnd = src1->GetIndexOpnd();
IR::RegOpnd *baseOpnd = src1->GetBaseOpnd()->AsRegOpnd();
SymID baseSymID = GetVarSymID(baseOpnd->GetStackSym());
if (!IsAllowedForMemOpt(instr, false, baseOpnd, indexOpnd))
{
return false;
}
SymID inductionSymID = GetVarSymID(indexOpnd->GetStackSym());
Assert(IsSymIDInductionVariable(inductionSymID, loop));
loop->EnsureMemOpVariablesInitialized();
bool isIndexPreIncr = loop->memOpInfo->inductionVariableChangeInfoMap->ContainsKey(inductionSymID);
IR::Opnd * dst = instr->GetDst();
if (!dst->IsRegOpnd() || !dst->AsRegOpnd()->GetStackSym()->IsSingleDef())
{
return false;
}
Loop::MemCopyCandidate* memcopyInfo = JitAnewStruct(this->func->GetTopFunc()->m_fg->alloc, Loop::MemCopyCandidate);
memcopyInfo->ldBase = baseSymID;
memcopyInfo->ldCount = 1;
memcopyInfo->count = 0;
memcopyInfo->bIndexAlreadyChanged = isIndexPreIncr;
memcopyInfo->base = Js::Constants::InvalidSymID; // need to find the stElem first
memcopyInfo->index = inductionSymID;
memcopyInfo->transferSym = dst->AsRegOpnd()->GetStackSym();
loop->memOpInfo->candidates->Prepend(memcopyInfo);
return true;
}
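// Memset detection: the stored value must be loop-invariant - either a
// constant (int/float/addr) or an invariant sym, e.g.
//   for (var i = 0; i < n; i++) { a[i] = v; }   // v not written in the loop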
bool
GlobOpt::CollectMemsetStElementI(IR::Instr *instr, Loop *loop)
{
Assert(instr->GetDst()->IsIndirOpnd());
IR::IndirOpnd *dst = instr->GetDst()->AsIndirOpnd();
IR::Opnd *indexOp = dst->GetIndexOpnd();
IR::RegOpnd *baseOp = dst->GetBaseOpnd()->AsRegOpnd();
if (!IsAllowedForMemOpt(instr, true, baseOp, indexOp))
{
return false;
}
SymID baseSymID = GetVarSymID(baseOp->GetStackSym());
IR::Opnd *srcDef = instr->GetSrc1();
StackSym *srcSym = nullptr;
if (srcDef->IsRegOpnd())
{
IR::RegOpnd* opnd = srcDef->AsRegOpnd();
if (this->OptIsInvariant(opnd, this->currentBlock, loop, CurrentBlockData()->FindValue(opnd->m_sym), true, true))
{
srcSym = opnd->GetStackSym();
}
}
BailoutConstantValue constant = {TyIllegal, 0};
if (srcDef->IsFloatConstOpnd())
{
constant.InitFloatConstValue(srcDef->AsFloatConstOpnd()->m_value);
}
else if (srcDef->IsIntConstOpnd())
{
constant.InitIntConstValue(srcDef->AsIntConstOpnd()->GetValue(), srcDef->AsIntConstOpnd()->GetType());
}
else if (srcDef->IsAddrOpnd())
{
constant.InitVarConstValue(srcDef->AsAddrOpnd()->m_address);
}
else if(!srcSym)
{
TRACE_MEMOP_PHASE_VERBOSE(MemSet, loop, instr, _u("Source is not an invariant"));
return false;
}
// Process the Index Operand
Assert(indexOp->GetStackSym());
SymID inductionSymID = GetVarSymID(indexOp->GetStackSym());
Assert(IsSymIDInductionVariable(inductionSymID, loop));
loop->EnsureMemOpVariablesInitialized();
bool isIndexPreIncr = loop->memOpInfo->inductionVariableChangeInfoMap->ContainsKey(inductionSymID);
Loop::MemSetCandidate* memsetInfo = JitAnewStruct(this->func->GetTopFunc()->m_fg->alloc, Loop::MemSetCandidate);
memsetInfo->base = baseSymID;
memsetInfo->index = inductionSymID;
memsetInfo->constant = constant;
memsetInfo->srcSym = srcSym;
memsetInfo->count = 1;
memsetInfo->bIndexAlreadyChanged = isIndexPreIncr;
loop->memOpInfo->candidates->Prepend(memsetInfo);
return true;
}
bool GlobOpt::CollectMemcopyStElementI(IR::Instr *instr, Loop *loop)
{
if (!loop->memOpInfo || loop->memOpInfo->candidates->Empty())
{
// There is no ldElem matching this stElem
return false;
}
Assert(instr->GetDst()->IsIndirOpnd());
IR::IndirOpnd *dst = instr->GetDst()->AsIndirOpnd();
IR::Opnd *indexOp = dst->GetIndexOpnd();
IR::RegOpnd *baseOp = dst->GetBaseOpnd()->AsRegOpnd();
SymID baseSymID = GetVarSymID(baseOp->GetStackSym());
if (!instr->GetSrc1()->IsRegOpnd())
{
return false;
}
IR::RegOpnd* src1 = instr->GetSrc1()->AsRegOpnd();
if (!src1->GetIsDead())
{
// This must be the last use of the register.
// It will invalidate `var m = a[i]; b[i] = m;` but this is not a very interesting case.
TRACE_MEMOP_PHASE_VERBOSE(MemCopy, loop, instr, _u("Source (s%d) is still alive after StElemI"), baseSymID);
return false;
}
if (!IsAllowedForMemOpt(instr, false, baseOp, indexOp))
{
return false;
}
SymID srcSymID = GetVarSymID(src1->GetStackSym());
// Prepare the memcopyCandidate entry
Loop::MemOpCandidate* previousCandidate = loop->memOpInfo->candidates->Head();
if (!previousCandidate->IsMemCopy())
{
return false;
}
Loop::MemCopyCandidate* memcopyInfo = previousCandidate->AsMemCopy();
// The previous candidate has to have been created by the matching ldElem
if (
memcopyInfo->base != Js::Constants::InvalidSymID ||
GetVarSymID(memcopyInfo->transferSym) != srcSymID
)
{
TRACE_MEMOP_PHASE_VERBOSE(MemCopy, loop, instr, _u("No matching LdElem found (s%d)"), baseSymID);
return false;
}
Assert(indexOp->GetStackSym());
SymID inductionSymID = GetVarSymID(indexOp->GetStackSym());
Assert(IsSymIDInductionVariable(inductionSymID, loop));
bool isIndexPreIncr = loop->memOpInfo->inductionVariableChangeInfoMap->ContainsKey(inductionSymID);
if (isIndexPreIncr != memcopyInfo->bIndexAlreadyChanged)
{
// The index changed between the load and the store
TRACE_MEMOP_PHASE_VERBOSE(MemCopy, loop, instr, _u("Index value changed between ldElem and stElem"));
return false;
}
// Consider: Can we remove the count field?
memcopyInfo->count++;
memcopyInfo->base = baseSymID;
return true;
}
bool
GlobOpt::CollectMemOpLdElementI(IR::Instr *instr, Loop *loop)
{
Assert(instr->m_opcode == Js::OpCode::LdElemI_A);
return (!PHASE_OFF(Js::MemCopyPhase, this->func) && CollectMemcopyLdElementI(instr, loop));
}
bool
GlobOpt::CollectMemOpStElementI(IR::Instr *instr, Loop *loop)
{
Assert(instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict);
Assert(instr->GetSrc1());
return (!PHASE_OFF(Js::MemSetPhase, this->func) && CollectMemsetStElementI(instr, loop)) ||
(!PHASE_OFF(Js::MemCopyPhase, this->func) && CollectMemcopyStElementI(instr, loop));
}
bool
GlobOpt::CollectMemOpInfo(IR::Instr *instrBegin, IR::Instr *instr, Value *src1Val, Value *src2Val)
{
Assert(this->currentBlock->loop);
Loop *loop = this->currentBlock->loop;
if (!loop->blockList.HasTwo())
{
// We support memcopy and memset for loops which have only two blocks.
return false;
}
if (loop->GetLoopFlags().isInterpreted && !loop->GetLoopFlags().memopMinCountReached)
{
TRACE_MEMOP_VERBOSE(loop, instr, _u("minimum loop count not reached"))
loop->doMemOp = false;
return false;
}
Assert(loop->doMemOp);
bool isIncr = true, isChangedByOne = false;
switch (instr->m_opcode)
{
case Js::OpCode::StElemI_A:
case Js::OpCode::StElemI_A_Strict:
if (!CollectMemOpStElementI(instr, loop))
{
loop->doMemOp = false;
return false;
}
break;
case Js::OpCode::LdElemI_A:
if (!CollectMemOpLdElementI(instr, loop))
{
loop->doMemOp = false;
return false;
}
break;
case Js::OpCode::Decr_A:
isIncr = false;
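// Fallthrough: treated like Incr_A, with isIncr now false.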
case Js::OpCode::Incr_A:
isChangedByOne = true;
goto MemOpCheckInductionVariable;
case Js::OpCode::Sub_I4:
case Js::OpCode::Sub_A:
isIncr = false;
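// Fallthrough: treated like Add_A / Add_I4, with isIncr now false.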
case Js::OpCode::Add_A:
case Js::OpCode::Add_I4:
{
MemOpCheckInductionVariable:
StackSym *sym = instr->GetSrc1()->GetStackSym();
if (!sym)
{
sym = instr->GetSrc2()->GetStackSym();
}
SymID inductionSymID = GetVarSymID(sym);
if (IsSymIDInductionVariable(inductionSymID, this->currentBlock->loop))
{
if (!isChangedByOne)
{
IR::Opnd *src1, *src2;
src1 = instr->GetSrc1();
src2 = instr->GetSrc2();
if (src2->IsRegOpnd())
{
Value *val = CurrentBlockData()->FindValue(src2->AsRegOpnd()->m_sym);
if (val)
{
ValueInfo *vi = val->GetValueInfo();
int constValue;
if (vi && vi->TryGetIntConstantValue(&constValue))
{
if (constValue == 1)
{
isChangedByOne = true;
}
}
}
}
else if (src2->IsIntConstOpnd())
{
if (src2->AsIntConstOpnd()->GetValue() == 1)
{
isChangedByOne = true;
}
}
}
loop->EnsureMemOpVariablesInitialized();
if (!isChangedByOne)
{
Loop::InductionVariableChangeInfo inductionVariableChangeInfo = { Js::Constants::InvalidLoopUnrollFactor, 0 };
if (!loop->memOpInfo->inductionVariableChangeInfoMap->ContainsKey(inductionSymID))
{
loop->memOpInfo->inductionVariableChangeInfoMap->Add(inductionSymID, inductionVariableChangeInfo);
}
else
{
loop->memOpInfo->inductionVariableChangeInfoMap->Item(inductionSymID, inductionVariableChangeInfo);
}
}
else
{
if (!loop->memOpInfo->inductionVariableChangeInfoMap->ContainsKey(inductionSymID))
{
Loop::InductionVariableChangeInfo inductionVariableChangeInfo = { 1, isIncr };
loop->memOpInfo->inductionVariableChangeInfoMap->Add(inductionSymID, inductionVariableChangeInfo);
}
else
{
Loop::InductionVariableChangeInfo inductionVariableChangeInfo = { 0, 0 };
inductionVariableChangeInfo = loop->memOpInfo->inductionVariableChangeInfoMap->Lookup(inductionSymID, inductionVariableChangeInfo);
inductionVariableChangeInfo.unroll++;
inductionVariableChangeInfo.isIncremental = isIncr;
loop->memOpInfo->inductionVariableChangeInfoMap->Item(inductionSymID, inductionVariableChangeInfo);
}
}
break;
}
// Fallthrough if not an induction variable
}
default:
FOREACH_INSTR_IN_RANGE(chkInstr, instrBegin->m_next, instr)
{
if (IsInstrInvalidForMemOp(chkInstr, loop, src1Val, src2Val))
{
loop->doMemOp = false;
return false;
}
// Make sure this instruction doesn't use the memcopy transfer sym before it is checked by StElemI
if (loop->memOpInfo && !loop->memOpInfo->candidates->Empty())
{
Loop::MemOpCandidate* prevCandidate = loop->memOpInfo->candidates->Head();
if (prevCandidate->IsMemCopy())
{
Loop::MemCopyCandidate* memcopyCandidate = prevCandidate->AsMemCopy();
if (memcopyCandidate->base == Js::Constants::InvalidSymID)
{
if (chkInstr->HasSymUse(memcopyCandidate->transferSym))
{
loop->doMemOp = false;
TRACE_MEMOP_PHASE_VERBOSE(MemCopy, loop, chkInstr, _u("Found illegal use of LdElemI value(s%d)"), GetVarSymID(memcopyCandidate->transferSym));
return false;
}
}
}
}
}
NEXT_INSTR_IN_RANGE;
}
return true;
}
bool
GlobOpt::IsInstrInvalidForMemOp(IR::Instr *instr, Loop *loop, Value *src1Val, Value *src2Val)
{
// List of instructions that are valid with memop (i.e., instrs that get removed if memop is emitted)
if (
this->currentBlock != loop->GetHeadBlock() &&
!instr->IsLabelInstr() &&
instr->IsRealInstr() &&
instr->m_opcode != Js::OpCode::IncrLoopBodyCount &&
instr->m_opcode != Js::OpCode::StLoopBodyCount &&
instr->m_opcode != Js::OpCode::Ld_A &&
instr->m_opcode != Js::OpCode::Ld_I4 &&
!(instr->IsBranchInstr() && instr->AsBranchInstr()->IsUnconditional())
)
{
TRACE_MEMOP_VERBOSE(loop, instr, _u("Instruction not accepted for memop"));
return true;
}
// Check prev instr because it could have been added by an optimization and we won't see it here.
if (OpCodeAttr::FastFldInstr(instr->m_opcode) || (instr->m_prev && OpCodeAttr::FastFldInstr(instr->m_prev->m_opcode)))
{
// Refuse any operations interacting with Fields
TRACE_MEMOP_VERBOSE(loop, instr, _u("Field interaction detected"));
return true;
}
if (Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::ElementSlot)
{
// Refuse any operations interacting with slots
TRACE_MEMOP_VERBOSE(loop, instr, _u("Slot interaction detected"));
return true;
}
if (this->MayNeedBailOnImplicitCall(instr, src1Val, src2Val))
{
TRACE_MEMOP_VERBOSE(loop, instr, _u("Implicit call bailout detected"));
return true;
}
return false;
}
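// TryReplaceLdLen: rewrite a (Profiled)LdFld/ScopedLdFld of the .length
// property into LdLen_A when the base is likely an array, a string, or the
// arguments object, e.g.
//   t = o.length   (LdFld)   =>   t = LdLen_A o
// so lowering can emit the fast inline length read instead of a generic
// property load.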
void
GlobOpt::TryReplaceLdLen(IR::Instr *& instr)
{
// Change LdFld on arrays, strings, and 'arguments' to LdLen when we're accessing the .length field
if ((instr->GetSrc1() && instr->GetSrc1()->IsSymOpnd() && instr->m_opcode == Js::OpCode::ProfiledLdFld) || instr->m_opcode == Js::OpCode::LdFld || instr->m_opcode == Js::OpCode::ScopedLdFld)
{
IR::SymOpnd * opnd = instr->GetSrc1()->AsSymOpnd();
Sym *sym = opnd->m_sym;
if (sym->IsPropertySym())
{
PropertySym *originalPropertySym = sym->AsPropertySym();
// only on .length
if (this->lengthEquivBv != nullptr && this->lengthEquivBv->Test(originalPropertySym->m_id))
{
IR::RegOpnd* newopnd = IR::RegOpnd::New(originalPropertySym->m_stackSym, IRType::TyVar, instr->m_func);
ValueInfo *const objectValueInfo = CurrentBlockData()->FindValue(originalPropertySym->m_stackSym)->GetValueInfo();
// Only for things we'd emit a fast path for
if (
objectValueInfo->IsLikelyAnyArray() ||
objectValueInfo->HasHadStringTag() ||
objectValueInfo->IsLikelyString() ||
newopnd->IsArgumentsObject() ||
(CurrentBlockData()->argObjSyms && CurrentBlockData()->IsArgumentsOpnd(newopnd))
)
{
// We need to properly transfer over the information from the old operand, which is
// a SymOpnd, to the new one, which is a RegOpnd. Unfortunately, the types mean the
// normal copy methods won't work here, so we're going to directly copy data.
newopnd->SetIsJITOptimizedReg(opnd->GetIsJITOptimizedReg());
newopnd->SetValueType(objectValueInfo->Type());
newopnd->SetIsDead(opnd->GetIsDead());
// Now that we have the operand we need, we can go ahead and make the new instr.
IR::Instr *newinstr = IR::Instr::New(Js::OpCode::LdLen_A, instr->m_func);
instr->TransferTo(newinstr);
newinstr->UnlinkSrc1();
newinstr->SetSrc1(newopnd);
instr->InsertAfter(newinstr);
instr->Remove();
instr = newinstr;
}
}
}
}
}
IR::Instr *
GlobOpt::OptInstr(IR::Instr *&instr, bool* isInstrRemoved)
{
Assert(instr->m_func->IsTopFunc() || instr->m_func->isGetterSetter || instr->m_func->callSiteIdInParentFunc != UINT16_MAX);
IR::Opnd *src1, *src2;
Value *src1Val = nullptr, *src2Val = nullptr, *dstVal = nullptr;
Value *src1IndirIndexVal = nullptr, *dstIndirIndexVal = nullptr;
IR::Instr *instrPrev = instr->m_prev;
IR::Instr *instrNext = instr->m_next;
if (instr->IsLabelInstr() && this->func->HasTry() && this->func->DoOptimizeTry())
{
this->currentRegion = instr->AsLabelInstr()->GetRegion();
Assert(this->currentRegion);
}
if(PrepareForIgnoringIntOverflow(instr))
{
if(!IsLoopPrePass())
{
*isInstrRemoved = true;
currentBlock->RemoveInstr(instr);
}
return instrNext;
}
if (!instr->IsRealInstr() || instr->IsByteCodeUsesInstr() || instr->m_opcode == Js::OpCode::Conv_Bool)
{
return instrNext;
}
if (instr->m_opcode == Js::OpCode::Yield)
{
// TODO[generators][ianhall]: Can this and the FillBailOutInfo call below be moved to after Src1 and Src2 so that Yield can be optimized right up to the actual yield?
CurrentBlockData()->KillStateForGeneratorYield();
}
// Change LdFld on arrays, strings, and 'arguments' to LdLen when we're accessing the .length field
this->TryReplaceLdLen(instr);
// Consider: Do we ever get post-op bailout here, and if so is the FillBailOutInfo call in the right place?
if (instr->HasBailOutInfo() && !this->IsLoopPrePass())
{
this->FillBailOutInfo(this->currentBlock, instr->GetBailOutInfo());
}
this->instrCountSinceLastCleanUp++;
instr = this->PreOptPeep(instr);
this->OptArguments(instr);
//StackArguments Optimization - We bail out if the index is out of range of actuals.
if ((instr->m_opcode == Js::OpCode::LdElemI_A || instr->m_opcode == Js::OpCode::TypeofElem) &&
instr->DoStackArgsOpt(this->func) && !this->IsLoopPrePass())
{
GenerateBailAtOperation(&instr, IR::BailOnStackArgsOutOfActualsRange);
}
#if DBG
PropertySym *propertySymUseBefore = nullptr;
Assert(this->byteCodeUses == nullptr);
this->byteCodeUsesBeforeOpt->ClearAll();
GlobOpt::TrackByteCodeSymUsed(instr, this->byteCodeUsesBeforeOpt, &propertySymUseBefore);
Assert(noImplicitCallUsesToInsert->Count() == 0);
#endif
this->ignoredIntOverflowForCurrentInstr = false;
this->ignoredNegativeZeroForCurrentInstr = false;
src1 = instr->GetSrc1();
src2 = instr->GetSrc2();
if (src1)
{
src1Val = this->OptSrc(src1, &instr, &src1IndirIndexVal);
instr = this->SetTypeCheckBailOut(instr->GetSrc1(), instr, nullptr);
if (src2)
{
src2Val = this->OptSrc(src2, &instr);
}
}
if(instr->GetDst() && instr->GetDst()->IsIndirOpnd())
{
this->OptSrc(instr->GetDst(), &instr, &dstIndirIndexVal);
}
MarkArgumentsUsedForBranch(instr);
CSEOptimize(this->currentBlock, &instr, &src1Val, &src2Val, &src1IndirIndexVal);
OptimizeChecks(instr);
OptArraySrc(&instr);
OptNewScObject(&instr, src1Val);
instr = this->OptPeep(instr, src1Val, src2Val);
if (instr->m_opcode == Js::OpCode::Nop ||
(instr->m_opcode == Js::OpCode::CheckThis &&
instr->GetSrc1()->IsRegOpnd() &&
instr->GetSrc1()->AsRegOpnd()->m_sym->m_isSafeThis))
{
instrNext = instr->m_next;
InsertNoImplicitCallUses(instr);
if (this->byteCodeUses)
{
this->InsertByteCodeUses(instr);
}
*isInstrRemoved = true;
this->currentBlock->RemoveInstr(instr);
return instrNext;
}
else if (instr->m_opcode == Js::OpCode::GetNewScObject && !this->IsLoopPrePass() && src1Val->GetValueInfo()->IsPrimitive())
{
// Constructor returned (src1) a primitive value, so fold this into "dst = Ld_A src2", where src2 is the new object that
// was passed into the constructor as its 'this' parameter
instr->FreeSrc1();
instr->SetSrc1(instr->UnlinkSrc2());
instr->m_opcode = Js::OpCode::Ld_A;
src1Val = src2Val;
src2Val = nullptr;
}
else if ((instr->m_opcode == Js::OpCode::TryCatch && this->func->DoOptimizeTry()) || (instr->m_opcode == Js::OpCode::TryFinally && this->func->DoOptimizeTry()))
{
ProcessTryHandler(instr);
}
else if (instr->m_opcode == Js::OpCode::BrOnException || instr->m_opcode == Js::OpCode::BrOnNoException)
{
if (this->ProcessExceptionHandlingEdges(instr))
{
*isInstrRemoved = true;
return instrNext;
}
}
bool isAlreadyTypeSpecialized = false;
if (!IsLoopPrePass() && instr->HasBailOutInfo())
{
if (instr->GetBailOutKind() == IR::BailOutExpectingInteger)
{
isAlreadyTypeSpecialized = TypeSpecializeBailoutExpectedInteger(instr, src1Val, &dstVal);
}
else if (instr->GetBailOutKind() == IR::BailOutExpectingString)
{
if (instr->GetSrc1()->IsRegOpnd())
{
if (!src1Val || !src1Val->GetValueInfo()->IsLikelyString())
{
// Disable SwitchOpt if the source is definitely not a string - this may only be realized in GlobOpt
Assert(IsSwitchOptEnabled());
throw Js::RejitException(RejitReason::DisableSwitchOptExpectingString);
}
}
}
}
bool forceInvariantHoisting = false;
const bool ignoreIntOverflowInRangeForInstr = instr->ignoreIntOverflowInRange; // Save it since the instr can change
if (!isAlreadyTypeSpecialized)
{
bool redoTypeSpec;
instr = this->TypeSpecialization(instr, &src1Val, &src2Val, &dstVal, &redoTypeSpec, &forceInvariantHoisting);
if(redoTypeSpec && instr->m_opcode != Js::OpCode::Nop)
{
forceInvariantHoisting = false;
instr = this->TypeSpecialization(instr, &src1Val, &src2Val, &dstVal, &redoTypeSpec, &forceInvariantHoisting);
Assert(!redoTypeSpec);
}
if (instr->m_opcode == Js::OpCode::Nop)
{
InsertNoImplicitCallUses(instr);
if (this->byteCodeUses)
{
this->InsertByteCodeUses(instr);
}
instrNext = instr->m_next;
*isInstrRemoved = true;
this->currentBlock->RemoveInstr(instr);
return instrNext;
}
}
if (ignoreIntOverflowInRangeForInstr)
{
VerifyIntSpecForIgnoringIntOverflow(instr);
}
// Track calls after any pre-op bailouts have been inserted before the call, because they will need to restore out params.
this->TrackCalls(instr);
if (instr->GetSrc1())
{
this->UpdateObjPtrValueType(instr->GetSrc1(), instr);
}
IR::Opnd *dst = instr->GetDst();
if (dst)
{
// Copy prop dst uses and mark live/available type syms before tracking kills.
CopyPropDstUses(dst, instr, src1Val);
}
// Track mark temp object before we process the dst so we can generate pre-op bailout
instr = this->TrackMarkTempObject(instrPrev->m_next, instr);
bool removed = OptTagChecks(instr);
if (removed)
{
*isInstrRemoved = true;
return instrNext;
}
dstVal = this->OptDst(&instr, dstVal, src1Val, src2Val, dstIndirIndexVal, src1IndirIndexVal);
dst = instr->GetDst();
instrNext = instr->m_next;
if (dst)
{
if (this->func->HasTry() && this->func->DoOptimizeTry())
{
this->InsertToVarAtDefInTryRegion(instr, dst);
}
instr = this->SetTypeCheckBailOut(dst, instr, nullptr);
this->UpdateObjPtrValueType(dst, instr);
}
BVSparse<JitArenaAllocator> instrByteCodeStackSymUsedAfter(this->alloc);
PropertySym *propertySymUseAfter = nullptr;
if (this->byteCodeUses != nullptr)
{
GlobOpt::TrackByteCodeSymUsed(instr, &instrByteCodeStackSymUsedAfter, &propertySymUseAfter);
}
#if DBG
else
{
GlobOpt::TrackByteCodeSymUsed(instr, &instrByteCodeStackSymUsedAfter, &propertySymUseAfter);
instrByteCodeStackSymUsedAfter.Equal(this->byteCodeUsesBeforeOpt);
Assert(propertySymUseAfter == propertySymUseBefore);
}
#endif
bool isHoisted = false;
if (this->currentBlock->loop && !this->IsLoopPrePass())
{
isHoisted = this->TryHoistInvariant(instr, this->currentBlock, dstVal, src1Val, src2Val, true, false, forceInvariantHoisting);
}
src1 = instr->GetSrc1();
if (!this->IsLoopPrePass() && src1)
{
// instr const, nonConst => canonicalize by swapping operands
// This simplifies lowering. (somewhat machine dependent)
// Note that because of Var overflows, src1 may not have been constant prop'd to an IntConst
this->PreLowerCanonicalize(instr, &src1Val, &src2Val);
}
if (!PHASE_OFF(Js::MemOpPhase, this->func) &&
!isHoisted &&
!(instr->IsJitProfilingInstr()) &&
this->currentBlock->loop && !IsLoopPrePass() &&
!func->IsJitInDebugMode() &&
(func->HasProfileInfo() && !func->GetReadOnlyProfileInfo()->IsMemOpDisabled()) &&
this->currentBlock->loop->doMemOp)
{
CollectMemOpInfo(instrPrev, instr, src1Val, src2Val);
}
InsertNoImplicitCallUses(instr);
if (this->byteCodeUses != nullptr)
{
// Optimization removed some uses from the instruction.
// Need to insert fake uses so we can get the correct live register to restore in bailout.
this->byteCodeUses->Minus(&instrByteCodeStackSymUsedAfter);
if (this->propertySymUse == propertySymUseAfter)
{
this->propertySymUse = nullptr;
}
this->InsertByteCodeUses(instr);
}
if (!this->IsLoopPrePass() && !isHoisted && this->IsImplicitCallBailOutCurrentlyNeeded(instr, src1Val, src2Val))
{
IR::BailOutKind kind = IR::BailOutOnImplicitCalls;
if(instr->HasBailOutInfo())
{
Assert(instr->GetBailOutInfo()->bailOutOffset == instr->GetByteCodeOffset());
const IR::BailOutKind bailOutKind = instr->GetBailOutKind();
if((bailOutKind & ~IR::BailOutKindBits) != IR::BailOutOnImplicitCallsPreOp)
{
Assert(!(bailOutKind & ~IR::BailOutKindBits));
instr->SetBailOutKind(bailOutKind + IR::BailOutOnImplicitCallsPreOp);
}
}
else if (instr->forcePreOpBailOutIfNeeded || this->isRecursiveCallOnLandingPad)
{
// We can't have a byte code reg slot as dst to generate a
// pre-op implicit call after we have processed the dst.
// Consider: This might miss an opportunity to use a copy prop sym to restore
// some other byte code reg if the dst is that copy prop that we already killed.
Assert(!instr->GetDst()
|| !instr->GetDst()->IsRegOpnd()
|| instr->GetDst()->AsRegOpnd()->GetIsJITOptimizedReg()
|| !instr->GetDst()->AsRegOpnd()->m_sym->HasByteCodeRegSlot());
this->GenerateBailAtOperation(&instr, IR::BailOutOnImplicitCallsPreOp);
}
else
{
// Capture value of the bailout after the operation is done.
this->GenerateBailAfterOperation(&instr, kind);
}
}
if (CurrentBlockData()->capturedValuesCandidate && !this->IsLoopPrePass())
{
this->CommitCapturedValuesCandidate();
}
return instrNext;
}
bool
GlobOpt::OptTagChecks(IR::Instr *instr)
{
if (PHASE_OFF(Js::OptTagChecksPhase, this->func) || !this->DoTagChecks())
{
return false;
}
StackSym *stackSym = nullptr;
IR::SymOpnd *symOpnd = nullptr;
IR::RegOpnd *regOpnd = nullptr;
switch(instr->m_opcode)
{
case Js::OpCode::LdFld:
case Js::OpCode::LdMethodFld:
case Js::OpCode::CheckFixedFld:
case Js::OpCode::CheckPropertyGuardAndLoadType:
symOpnd = instr->GetSrc1()->AsSymOpnd();
stackSym = symOpnd->m_sym->AsPropertySym()->m_stackSym;
break;
case Js::OpCode::BailOnNotObject:
case Js::OpCode::BailOnNotArray:
if (instr->GetSrc1()->IsRegOpnd())
{
regOpnd = instr->GetSrc1()->AsRegOpnd();
stackSym = regOpnd->m_sym;
}
break;
case Js::OpCode::StFld:
symOpnd = instr->GetDst()->AsSymOpnd();
stackSym = symOpnd->m_sym->AsPropertySym()->m_stackSym;
break;
}
if (stackSym)
{
Value *value = CurrentBlockData()->FindValue(stackSym);
if (value)
{
ValueInfo *valInfo = value->GetValueInfo();
if (valInfo->GetSymStore() && valInfo->GetSymStore()->IsStackSym() && valInfo->GetSymStore()->AsStackSym()->IsFromByteCodeConstantTable())
{
return false;
}
ValueType valueType = value->GetValueInfo()->Type();
if (instr->m_opcode == Js::OpCode::BailOnNotObject)
{
if (valueType.CanBeTaggedValue())
{
// We're not adding new information to the value other than changing the value type. Preserve any existing
// information and just change the value type.
ChangeValueType(nullptr, value, valueType.SetCanBeTaggedValue(false), true /*preserveSubClassInfo*/);
return false;
}
if (this->byteCodeUses)
{
this->InsertByteCodeUses(instr);
}
this->currentBlock->RemoveInstr(instr);
return true;
}
if (valueType.CanBeTaggedValue() &&
!valueType.HasBeenNumber() &&
!this->IsLoopPrePass())
{
ValueType newValueType = valueType.SetCanBeTaggedValue(false);
// Split out the tag check as a separate instruction.
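// Illustrative sketch (hypothetical syms): for "t1 = LdFld [t0.foo]" where t0 may still be a tagged
// value, we insert "BailOnNotObject t0 (BailOutOnTaggedValue)" before the load, so downstream code
// can assume t0 is an object.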
IR::Instr *bailOutInstr;
bailOutInstr = IR::BailOutInstr::New(Js::OpCode::BailOnNotObject, IR::BailOutOnTaggedValue, instr, instr->m_func);
if (!this->IsLoopPrePass())
{
FillBailOutInfo(this->currentBlock, bailOutInstr->GetBailOutInfo());
}
IR::RegOpnd *srcOpnd = regOpnd;
if (!srcOpnd)
{
srcOpnd = IR::RegOpnd::New(stackSym, stackSym->GetType(), instr->m_func);
AnalysisAssert(symOpnd);
if (symOpnd->GetIsJITOptimizedReg())
{
srcOpnd->SetIsJITOptimizedReg(true);
}
}
bailOutInstr->SetSrc1(srcOpnd);
bailOutInstr->GetSrc1()->SetValueType(valueType);
instr->InsertBefore(bailOutInstr);
if (this->currentBlock->loop)
{
// Try hoisting the BailOnNotObject instr.
// But since this isn't the current instr being optimized, we need to play tricks with
// the byteCodeUse fields...
TrackByteCodeUsesForInstrAddedInOptInstr(bailOutInstr, [&]()
{
TryHoistInvariant(bailOutInstr, this->currentBlock, nullptr, value, nullptr, true, false, false, IR::BailOutOnTaggedValue);
});
}
if (symOpnd)
{
symOpnd->SetPropertyOwnerValueType(newValueType);
}
else
{
regOpnd->SetValueType(newValueType);
}
ChangeValueType(nullptr, value, newValueType, false);
}
}
}
return false;
}
bool
GlobOpt::TypeSpecializeBailoutExpectedInteger(IR::Instr* instr, Value* src1Val, Value** dstVal)
{
bool isAlreadyTypeSpecialized = false;
if(instr->GetSrc1()->IsRegOpnd())
{
if (!src1Val || !src1Val->GetValueInfo()->IsLikelyInt() || instr->GetSrc1()->AsRegOpnd()->m_sym->m_isNotInt)
{
Assert(IsSwitchOptEnabledForIntTypeSpec());
throw Js::RejitException(RejitReason::DisableSwitchOptExpectingInteger);
}
// Attach the BailOutExpectingInteger to the FromVar and remove the bailout info on the Ld_A (Begin Switch) instr.
this->ToTypeSpecUse(instr, instr->GetSrc1(), this->currentBlock, src1Val, nullptr, TyInt32, IR::BailOutExpectingInteger, false, instr);
// Type-specialize the dst of the Ld_A
TypeSpecializeIntDst(instr, instr->m_opcode, src1Val, src1Val, nullptr, IR::BailOutInvalid, INT32_MIN, INT32_MAX, dstVal);
isAlreadyTypeSpecialized = true;
}
instr->ClearBailOutInfo();
return isAlreadyTypeSpecialized;
}
Value*
GlobOpt::OptDst(
IR::Instr ** pInstr,
Value *dstVal,
Value *src1Val,
Value *src2Val,
Value *dstIndirIndexVal,
Value *src1IndirIndexVal)
{
IR::Instr *&instr = *pInstr;
IR::Opnd *opnd = instr->GetDst();
if (opnd)
{
if (opnd->IsSymOpnd() && opnd->AsSymOpnd()->IsPropertySymOpnd())
{
this->FinishOptPropOp(instr, opnd->AsPropertySymOpnd());
}
else if (instr->m_opcode == Js::OpCode::StElemI_A ||
instr->m_opcode == Js::OpCode::StElemI_A_Strict ||
instr->m_opcode == Js::OpCode::InitComputedProperty)
{
this->KillObjectHeaderInlinedTypeSyms(this->currentBlock, false);
}
if (opnd->IsIndirOpnd() && !this->IsLoopPrePass())
{
IR::RegOpnd *baseOpnd = opnd->AsIndirOpnd()->GetBaseOpnd();
const ValueType baseValueType(baseOpnd->GetValueType());
if ((
baseValueType.IsLikelyNativeArray() ||
#ifdef _M_IX86
(
!AutoSystemInfo::Data.SSE2Available() &&
baseValueType.IsLikelyObject() &&
(
baseValueType.GetObjectType() == ObjectType::Float32Array ||
baseValueType.GetObjectType() == ObjectType::Float64Array
)
)
#else
false
#endif
) &&
instr->GetSrc1()->IsVar())
{
if(instr->m_opcode == Js::OpCode::StElemC)
{
// StElemC has different code that handles native array conversion or missing value stores. Add a bailout
// for those cases.
Assert(baseValueType.IsLikelyNativeArray());
Assert(!instr->HasBailOutInfo());
GenerateBailAtOperation(&instr, IR::BailOutConventionalNativeArrayAccessOnly);
}
else if(instr->HasBailOutInfo())
{
// The lowerer is not going to generate a fast path for this case. Remove any bailouts that require the fast
// path. Note that the removed bailouts should not be necessary for correctness. Bailout on native array
// conversion will be handled automatically as normal.
IR::BailOutKind bailOutKind = instr->GetBailOutKind();
if(bailOutKind & IR::BailOutOnArrayAccessHelperCall)
{
bailOutKind -= IR::BailOutOnArrayAccessHelperCall;
}
if(bailOutKind == IR::BailOutOnImplicitCallsPreOp)
{
bailOutKind -= IR::BailOutOnImplicitCallsPreOp;
}
if(bailOutKind)
{
instr->SetBailOutKind(bailOutKind);
}
else
{
instr->ClearBailOutInfo();
}
}
}
}
}
this->ProcessKills(instr);
if (opnd)
{
if (dstVal == nullptr)
{
dstVal = ValueNumberDst(pInstr, src1Val, src2Val);
}
if (this->IsLoopPrePass())
{
// Keep track of symbols defined in the loop.
if (opnd->IsRegOpnd())
{
StackSym *symDst = opnd->AsRegOpnd()->m_sym;
rootLoopPrePass->symsDefInLoop->Set(symDst->m_id);
}
}
else if (dstVal)
{
opnd->SetValueType(dstVal->GetValueInfo()->Type());
if(currentBlock->loop &&
!IsLoopPrePass() &&
(instr->m_opcode == Js::OpCode::Ld_A || instr->m_opcode == Js::OpCode::Ld_I4) &&
instr->GetSrc1()->IsRegOpnd() &&
!func->IsJitInDebugMode() &&
func->DoGlobOptsForGeneratorFunc())
{
// Look for the following patterns:
//
// Pattern 1:
// s1[liveOnBackEdge] = s3[dead]
//
// Pattern 2:
// s3 = operation(s1[liveOnBackEdge], s2)
// s1[liveOnBackEdge] = s3
//
// In both patterns, s1 and s3 have the same value by the end. Prefer to use s1 as the sym store instead of s3
// since s1 is live on back-edge, as otherwise, their lifetimes overlap, requiring two registers to hold the
// value instead of one.
do
{
IR::RegOpnd *const src = instr->GetSrc1()->AsRegOpnd();
StackSym *srcVarSym = src->m_sym;
if(srcVarSym->IsTypeSpec())
{
srcVarSym = srcVarSym->GetVarEquivSym(nullptr);
Assert(srcVarSym);
}
if(dstVal->GetValueInfo()->GetSymStore() != srcVarSym)
{
break;
}
IR::RegOpnd *const dst = opnd->AsRegOpnd();
StackSym *dstVarSym = dst->m_sym;
if(dstVarSym->IsTypeSpec())
{
dstVarSym = dstVarSym->GetVarEquivSym(nullptr);
Assert(dstVarSym);
}
if(!currentBlock->loop->regAlloc.liveOnBackEdgeSyms->Test(dstVarSym->m_id))
{
break;
}
Value *const srcValue = CurrentBlockData()->FindValue(srcVarSym);
if(srcValue->GetValueNumber() != dstVal->GetValueNumber())
{
break;
}
if(!src->GetIsDead())
{
IR::Instr *const prevInstr = instr->GetPrevRealInstrOrLabel();
IR::Opnd *const prevDst = prevInstr->GetDst();
if(!prevDst ||
!src->IsEqualInternal(prevDst) ||
!(
(prevInstr->GetSrc1() && dst->IsEqual(prevInstr->GetSrc1())) ||
(prevInstr->GetSrc2() && dst->IsEqual(prevInstr->GetSrc2()))
))
{
break;
}
}
this->SetSymStoreDirect(dstVal->GetValueInfo(), dstVarSym);
} while(false);
}
}
this->ValueNumberObjectType(opnd, instr);
}
this->CSEAddInstr(this->currentBlock, *pInstr, dstVal, src1Val, src2Val, dstIndirIndexVal, src1IndirIndexVal);
return dstVal;
}
void
GlobOpt::CopyPropDstUses(IR::Opnd *opnd, IR::Instr *instr, Value *src1Val)
{
if (opnd->IsSymOpnd())
{
IR::SymOpnd *symOpnd = opnd->AsSymOpnd();
if (symOpnd->m_sym->IsPropertySym())
{
PropertySym * originalPropertySym = symOpnd->m_sym->AsPropertySym();
Value *const objectValue = CurrentBlockData()->FindValue(originalPropertySym->m_stackSym);
symOpnd->SetPropertyOwnerValueType(objectValue ? objectValue->GetValueInfo()->Type() : ValueType::Uninitialized);
this->FieldHoistOptDst(instr, originalPropertySym, src1Val);
PropertySym * sym = this->CopyPropPropertySymObj(symOpnd, instr);
if (sym != originalPropertySym && !this->IsLoopPrePass())
{
// Consider: This doesn't detect hoistability of a property sym after object pointer copy prop
// on loop prepass. But if it so happened that the property sym is hoisted, we might as well do so.
this->FieldHoistOptDst(instr, sym, src1Val);
}
}
}
}
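// Create an assumed (profile-based) initial value for a field in the loop landing pad, enabling
// field PRE. Sketch (hypothetical syms): the property sym for o.x gets a value V with a fresh
// symStore in the landing pad and in the already-processed loop blocks, so a load of o.x inside
// the loop can reuse V instead of being treated as unknown.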
void
GlobOpt::SetLoopFieldInitialValue(Loop *loop, IR::Instr *instr, PropertySym *propertySym, PropertySym *originalPropertySym)
{
Value *initialValue = nullptr;
StackSym *symStore;
if (loop->allFieldsKilled || loop->fieldKilled->Test(originalPropertySym->m_id))
{
return;
}
Assert(!loop->fieldKilled->Test(propertySym->m_id));
// Value already exists
if (CurrentBlockData()->FindValue(propertySym))
{
return;
}
// If this initial value had already been added, we would find it in the current value table.
Assert(!loop->initialValueFieldMap.TryGetValue(propertySym, &initialValue));
// If propertySym is live in landingPad, we don't need an initial value.
if (loop->landingPad->globOptData.liveFields->Test(propertySym->m_id))
{
return;
}
Value *landingPadObjPtrVal, *currentObjPtrVal;
landingPadObjPtrVal = loop->landingPad->globOptData.FindValue(propertySym->m_stackSym);
currentObjPtrVal = CurrentBlockData()->FindValue(propertySym->m_stackSym);
if (!currentObjPtrVal || !landingPadObjPtrVal || currentObjPtrVal->GetValueNumber() != landingPadObjPtrVal->GetValueNumber())
{
// objPtr has a different value in the landing pad.
return;
}
// The opnd's value type has not yet been initialized. Since the property sym doesn't have a value, it effectively has an
// Uninitialized value type. Use the profiled value type from the instruction.
const ValueType profiledValueType =
instr->IsProfiledInstr() ? instr->AsProfiledInstr()->u.FldInfo().valueType : ValueType::Uninitialized;
Assert(!profiledValueType.IsDefinite()); // Hence the values created here don't need to be tracked for kills
initialValue = this->NewGenericValue(profiledValueType, propertySym);
symStore = StackSym::New(this->func);
initialValue->GetValueInfo()->SetSymStore(symStore);
loop->initialValueFieldMap.Add(propertySym, initialValue->Copy(this->alloc, initialValue->GetValueNumber()));
// Copy the initial value into the landing pad, but without a symStore
Value *landingPadInitialValue = Value::New(this->alloc, initialValue->GetValueNumber(),
ValueInfo::New(this->alloc, initialValue->GetValueInfo()->Type()));
loop->landingPad->globOptData.SetValue(landingPadInitialValue, propertySym);
loop->landingPad->globOptData.liveFields->Set(propertySym->m_id);
#if DBG_DUMP
if (PHASE_TRACE(Js::FieldPREPhase, this->func))
{
Output::Print(_u("** TRACE: Field PRE initial value for loop head #%d. Val:%d symStore:"),
loop->GetHeadBlock()->GetBlockNum(), initialValue->GetValueNumber());
symStore->Dump();
Output::Print(_u("\n Instr: "));
instr->Dump();
}
#endif
// Add initial value to all the previous blocks in the loop.
FOREACH_BLOCK_BACKWARD_IN_RANGE(block, this->currentBlock->GetPrev(), loop->GetHeadBlock())
{
if (block->GetDataUseCount() == 0)
{
// All successor blocks have been processed, no point in adding the value.
continue;
}
Value *newValue = initialValue->Copy(this->alloc, initialValue->GetValueNumber());
block->globOptData.SetValue(newValue, propertySym);
block->globOptData.liveFields->Set(propertySym->m_id);
block->globOptData.SetValue(newValue, symStore);
block->globOptData.liveVarSyms->Set(symStore->m_id);
} NEXT_BLOCK_BACKWARD_IN_RANGE;
CurrentBlockData()->SetValue(initialValue, symStore);
CurrentBlockData()->liveVarSyms->Set(symStore->m_id);
CurrentBlockData()->liveFields->Set(propertySym->m_id);
}
// Examine src, apply copy prop and value number it
Value*
GlobOpt::OptSrc(IR::Opnd *opnd, IR::Instr * *pInstr, Value **indirIndexValRef, IR::IndirOpnd *parentIndirOpnd)
{
IR::Instr * &instr = *pInstr;
Assert(!indirIndexValRef || !*indirIndexValRef);
Assert(
parentIndirOpnd
? opnd == parentIndirOpnd->GetBaseOpnd() || opnd == parentIndirOpnd->GetIndexOpnd()
: opnd == instr->GetSrc1() || opnd == instr->GetSrc2() || (opnd == instr->GetDst() && opnd->IsIndirOpnd()));
Sym *sym;
Value *val;
PropertySym *originalPropertySym = nullptr;
switch(opnd->GetKind())
{
case IR::OpndKindIntConst:
val = this->GetIntConstantValue(opnd->AsIntConstOpnd()->AsInt32(), instr);
opnd->SetValueType(val->GetValueInfo()->Type());
return val;
case IR::OpndKindInt64Const:
val = this->GetIntConstantValue(opnd->AsInt64ConstOpnd()->GetValue(), instr);
opnd->SetValueType(val->GetValueInfo()->Type());
return val;
case IR::OpndKindFloatConst:
{
const FloatConstType floatValue = opnd->AsFloatConstOpnd()->m_value;
int32 int32Value;
if(Js::JavascriptNumber::TryGetInt32Value(floatValue, &int32Value))
{
val = GetIntConstantValue(int32Value, instr);
}
else
{
val = NewFloatConstantValue(floatValue);
}
opnd->SetValueType(val->GetValueInfo()->Type());
return val;
}
case IR::OpndKindAddr:
{
IR::AddrOpnd *addrOpnd = opnd->AsAddrOpnd();
if (addrOpnd->m_isFunction)
{
AssertMsg(!PHASE_OFF(Js::FixedMethodsPhase, instr->m_func), "Fixed function address operand with fixed method calls phase disabled?");
val = NewFixedFunctionValue((Js::JavascriptFunction *)addrOpnd->m_address, addrOpnd);
opnd->SetValueType(val->GetValueInfo()->Type());
return val;
}
else if (addrOpnd->IsVar() && Js::TaggedInt::Is(addrOpnd->m_address))
{
val = this->GetIntConstantValue(Js::TaggedInt::ToInt32(addrOpnd->m_address), instr);
opnd->SetValueType(val->GetValueInfo()->Type());
return val;
}
val = this->GetVarConstantValue(addrOpnd);
return val;
}
case IR::OpndKindSym:
{
// Clear the opnd's value type up-front, so that this code cannot accidentally use the value type set from a previous
// OptSrc on the same instruction (for instance, from an earlier loop prepass). The value type will be set from the
// value if available, before returning from this function.
opnd->SetValueType(ValueType::Uninitialized);
sym = opnd->AsSymOpnd()->m_sym;
// Don't create a new value for ArgSlots and don't copy prop them away.
if (sym->IsStackSym() && sym->AsStackSym()->IsArgSlotSym())
{
return nullptr;
}
// Unless we have profile info, don't create a new value for param slot syms and don't copy prop them away.
if (sym->IsStackSym() && sym->AsStackSym()->IsParamSlotSym())
{
if (!instr->m_func->IsLoopBody() && instr->m_func->HasProfileInfo())
{
// Subtract 2 to skip the "this" pointer (param slot 1) and make the index zero-based.
int paramSlotNum = sym->AsStackSym()->GetParamSlotNum() - 2;
if (paramSlotNum >= 0)
{
const auto parameterType = instr->m_func->GetReadOnlyProfileInfo()->GetParameterInfo(static_cast<Js::ArgSlot>(paramSlotNum));
val = NewGenericValue(parameterType);
opnd->SetValueType(val->GetValueInfo()->Type());
return val;
}
}
return nullptr;
}
if (!sym->IsPropertySym())
{
break;
}
originalPropertySym = sym->AsPropertySym();
// Don't give a value to the 'arguments' property sym, to prevent field copy prop of 'arguments'.
if (originalPropertySym->m_propertyId == Js::PropertyIds::arguments &&
originalPropertySym->m_fieldKind == PropertyKindData)
{
return nullptr;
}
Value *const objectValue = CurrentBlockData()->FindValue(originalPropertySym->m_stackSym);
opnd->AsSymOpnd()->SetPropertyOwnerValueType(
objectValue ? objectValue->GetValueInfo()->Type() : ValueType::Uninitialized);
if (!FieldHoistOptSrc(opnd->AsSymOpnd(), instr, originalPropertySym))
{
sym = this->CopyPropPropertySymObj(opnd->AsSymOpnd(), instr);
// Consider: This doesn't detect hoistability of a property sym after object pointer copy prop
// on loop prepass. But if it so happened that the property sym is hoisted, we might as well do so.
if (originalPropertySym == sym || this->IsLoopPrePass() ||
!FieldHoistOptSrc(opnd->AsSymOpnd(), instr, sym->AsPropertySym()))
{
if (!DoFieldCopyProp())
{
if (opnd->AsSymOpnd()->IsPropertySymOpnd())
{
this->FinishOptPropOp(instr, opnd->AsPropertySymOpnd());
}
return nullptr;
}
switch (instr->m_opcode)
{
// These need the symbolic reference to the field, don't copy prop the value of the field
case Js::OpCode::DeleteFld:
case Js::OpCode::DeleteRootFld:
case Js::OpCode::DeleteFldStrict:
case Js::OpCode::DeleteRootFldStrict:
case Js::OpCode::ScopedDeleteFld:
case Js::OpCode::ScopedDeleteFldStrict:
case Js::OpCode::LdMethodFromFlags:
case Js::OpCode::BrOnNoProperty:
case Js::OpCode::BrOnHasProperty:
case Js::OpCode::LdMethodFldPolyInlineMiss:
case Js::OpCode::StSlotChkUndecl:
return nullptr;
}
if (instr->CallsGetter())
{
return nullptr;
}
if (this->IsLoopPrePass() && this->DoFieldPRE(this->rootLoopPrePass))
{
if (!this->prePassLoop->allFieldsKilled && !this->prePassLoop->fieldKilled->Test(sym->m_id))
{
this->SetLoopFieldInitialValue(this->rootLoopPrePass, instr, sym->AsPropertySym(), originalPropertySym);
}
if (this->IsPREInstrCandidateLoad(instr->m_opcode))
{
// For each property sym, remember the first instruction that loads it.
// Can this be done in one call?
if (!this->prePassInstrMap->ContainsKey(sym->m_id))
{
this->prePassInstrMap->AddNew(sym->m_id, instr);
}
}
}
break;
}
}
// We field-hoisted, so we can continue as a reg.
opnd = instr->GetSrc1();
}
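// fall-through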
case IR::OpndKindReg:
// Clear the opnd's value type up-front, so that this code cannot accidentally use the value type set from a previous
// OptSrc on the same instruction (for instance, from an earlier loop prepass). The value type will be set from the
// value if available, before returning from this function.
opnd->SetValueType(ValueType::Uninitialized);
sym = opnd->AsRegOpnd()->m_sym;
CurrentBlockData()->MarkTempLastUse(instr, opnd->AsRegOpnd());
if (sym->AsStackSym()->IsTypeSpec())
{
sym = sym->AsStackSym()->GetVarEquivSym(this->func);
}
break;
case IR::OpndKindIndir:
this->OptimizeIndirUses(opnd->AsIndirOpnd(), &instr, indirIndexValRef);
return nullptr;
default:
return nullptr;
}
val = CurrentBlockData()->FindValue(sym);
if (val)
{
Assert(CurrentBlockData()->IsLive(sym) || (sym->IsPropertySym()));
if (instr)
{
opnd = this->CopyProp(opnd, instr, val, parentIndirOpnd);
}
// Check if we freed the operand.
if (opnd == nullptr)
{
return nullptr;
}
// In a loop prepass, determine stack syms that are used before they are defined in the root loop for which the prepass
// is being done. This information is used to do type specialization conversions in the landing pad where appropriate.
if(IsLoopPrePass() &&
sym->IsStackSym() &&
!rootLoopPrePass->symsUsedBeforeDefined->Test(sym->m_id) &&
rootLoopPrePass->landingPad->globOptData.IsLive(sym) && !isAsmJSFunc) // no typespec in asmjs and hence skipping this
{
Value *const landingPadValue = rootLoopPrePass->landingPad->globOptData.FindValue(sym);
if(landingPadValue && val->GetValueNumber() == landingPadValue->GetValueNumber())
{
rootLoopPrePass->symsUsedBeforeDefined->Set(sym->m_id);
ValueInfo *landingPadValueInfo = landingPadValue->GetValueInfo();
if(landingPadValueInfo->IsLikelyNumber())
{
rootLoopPrePass->likelyNumberSymsUsedBeforeDefined->Set(sym->m_id);
if(DoAggressiveIntTypeSpec() ? landingPadValueInfo->IsLikelyInt() : landingPadValueInfo->IsInt())
{
// Can only force int conversions in the landing pad based on likely-int values if aggressive int type
// specialization is enabled.
rootLoopPrePass->likelyIntSymsUsedBeforeDefined->Set(sym->m_id);
}
}
#ifdef ENABLE_SIMDJS
// SIMD_JS
// For uses before defs, we set likelySimd128*SymsUsedBeforeDefined bits for syms that have landing pad value info that allow type-spec to happen in the loop body.
// The BV will be added to loop header if the backedge has a live matching type-spec value. We then compensate in the loop header to unbox the value.
// This allows type-spec in the landing pad instead of boxing/unboxing on each iteration.
if (Js::IsSimd128Opcode(instr->m_opcode))
{
// Simd ops are strongly typed. We type-spec only if the type is likely/definitely the expected type, or if we have an Object, which can result from merging different Simd types.
// The Simd value must be initialized properly on all paths before the loop entry; it cannot be merged with Undefined/Null.
ThreadContext::SimdFuncSignature funcSignature;
instr->m_func->GetScriptContext()->GetThreadContext()->GetSimdFuncSignatureFromOpcode(instr->m_opcode, funcSignature);
Assert(funcSignature.valid);
ValueType expectedType = funcSignature.args[opnd == instr->GetSrc1() ? 0 : 1];
if (expectedType.IsSimd128Float32x4())
{
if (
(landingPadValueInfo->IsLikelySimd128Float32x4() || (landingPadValueInfo->IsLikelyObject() && landingPadValueInfo->GetObjectType() == ObjectType::Object))
&&
!landingPadValueInfo->HasBeenUndefined() && !landingPadValueInfo->HasBeenNull()
)
{
rootLoopPrePass->likelySimd128F4SymsUsedBeforeDefined->Set(sym->m_id);
}
}
else if (expectedType.IsSimd128Int32x4())
{
if (
(landingPadValueInfo->IsLikelySimd128Int32x4() || (landingPadValueInfo->IsLikelyObject() && landingPadValueInfo->GetObjectType() == ObjectType::Object))
&&
!landingPadValueInfo->HasBeenUndefined() && !landingPadValueInfo->HasBeenNull()
)
{
rootLoopPrePass->likelySimd128I4SymsUsedBeforeDefined->Set(sym->m_id);
}
}
}
else if (instr->m_opcode == Js::OpCode::ExtendArg_A && opnd == instr->GetSrc1() && instr->GetDst()->GetValueType().IsSimd128())
{
// Extended_Args for Simd ops are annotated with the expected type by the inliner. Use this info to find out if type-spec is supposed to happen.
ValueType expectedType = instr->GetDst()->GetValueType();
if ((landingPadValueInfo->IsLikelySimd128Float32x4() || (landingPadValueInfo->IsLikelyObject() && landingPadValueInfo->GetObjectType() == ObjectType::Object))
&& expectedType.IsSimd128Float32x4())
{
rootLoopPrePass->likelySimd128F4SymsUsedBeforeDefined->Set(sym->m_id);
}
else if ((landingPadValueInfo->IsLikelySimd128Int32x4() || (landingPadValueInfo->IsLikelyObject() && landingPadValueInfo->GetObjectType() == ObjectType::Object))
&& expectedType.IsSimd128Int32x4())
{
rootLoopPrePass->likelySimd128I4SymsUsedBeforeDefined->Set(sym->m_id);
}
}
#endif
}
}
}
else if ((instr->TransfersSrcValue() || OpCodeAttr::CanCSE(instr->m_opcode)) && (opnd == instr->GetSrc1() || opnd == instr->GetSrc2()))
{
if (sym->IsPropertySym())
{
val = this->CreateFieldSrcValue(sym->AsPropertySym(), originalPropertySym, &opnd, instr);
}
else
{
val = this->NewGenericValue(ValueType::Uninitialized, opnd);
}
}
if (opnd->IsSymOpnd() && opnd->AsSymOpnd()->IsPropertySymOpnd())
{
TryOptimizeInstrWithFixedDataProperty(&instr);
this->FinishOptPropOp(instr, opnd->AsPropertySymOpnd());
}
if (val)
{
ValueType valueType(val->GetValueInfo()->Type());
// This block uses local profiling data to optimize the case of a native array being passed to a function that fills it with other types. When the function is inlined
// into different call paths that use different types, this can cause a perf hit from unnecessary array conversions, so only perform this optimization when
// the function is not inlined.
if (valueType.IsLikelyNativeArray() && !valueType.IsObject() && instr->IsProfiledInstr() && !instr->m_func->IsInlined())
{
// See if we have profile data for the array type
IR::ProfiledInstr *const profiledInstr = instr->AsProfiledInstr();
ValueType profiledArrayType;
switch(instr->m_opcode)
{
case Js::OpCode::LdElemI_A:
if(instr->GetSrc1()->IsIndirOpnd() && opnd == instr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd())
{
profiledArrayType = profiledInstr->u.ldElemInfo->GetArrayType();
}
break;
case Js::OpCode::StElemI_A:
case Js::OpCode::StElemI_A_Strict:
case Js::OpCode::StElemC:
if(instr->GetDst()->IsIndirOpnd() && opnd == instr->GetDst()->AsIndirOpnd()->GetBaseOpnd())
{
profiledArrayType = profiledInstr->u.stElemInfo->GetArrayType();
}
break;
case Js::OpCode::LdLen_A:
if(instr->GetSrc1()->IsRegOpnd() && opnd == instr->GetSrc1())
{
profiledArrayType = profiledInstr->u.ldElemInfo->GetArrayType();
}
break;
}
if(profiledArrayType.IsLikelyObject() &&
profiledArrayType.GetObjectType() == valueType.GetObjectType() &&
(profiledArrayType.HasVarElements() || (valueType.HasIntElements() && profiledArrayType.HasFloatElements())))
{
// Merge array type we pulled from profile with type propagated by dataflow.
valueType = valueType.Merge(profiledArrayType).SetHasNoMissingValues(valueType.HasNoMissingValues());
ChangeValueType(this->currentBlock, CurrentBlockData()->FindValue(opnd->AsRegOpnd()->m_sym), valueType, false);
}
}
opnd->SetValueType(valueType);
if(!IsLoopPrePass() && opnd->IsSymOpnd() && valueType.IsDefinite())
{
if (opnd->AsSymOpnd()->m_sym->IsPropertySym())
{
// A property sym can only be guaranteed to have a definite value type when implicit calls are disabled from the
// point where the sym was defined with the definite value type. Insert an instruction to indicate to the
// dead-store pass that implicit calls need to be kept disabled until after this instruction.
Assert(DoFieldCopyProp());
CaptureNoImplicitCallUses(opnd, false, instr);
}
}
}
else
{
opnd->SetValueType(ValueType::Uninitialized);
}
return val;
}
/*
* GlobOpt::TryOptimizeInstrWithFixedDataProperty
* Converts Ld[Root]Fld instr to
* * CheckFixedFld
* * Dst = Ld_A <int Constant value>
* This API assumes that the source operand is a Sym/PropertySym kind.
*/
void
GlobOpt::TryOptimizeInstrWithFixedDataProperty(IR::Instr ** const pInstr)
{
Assert(pInstr);
IR::Instr * &instr = *pInstr;
IR::Opnd * src1 = instr->GetSrc1();
Assert(src1 && src1->IsSymOpnd() && src1->AsSymOpnd()->IsPropertySymOpnd());
if(PHASE_OFF(Js::UseFixedDataPropsPhase, instr->m_func))
{
return;
}
if (!this->IsLoopPrePass() && !this->isRecursiveCallOnLandingPad &&
OpCodeAttr::CanLoadFixedFields(instr->m_opcode))
{
instr->TryOptimizeInstrWithFixedDataProperty(&instr, this);
}
}
// Constant prop if possible, otherwise if this value already resides in another
// symbol, reuse this previous symbol. This should help register allocation.
IR::Opnd *
GlobOpt::CopyProp(IR::Opnd *opnd, IR::Instr *instr, Value *val, IR::IndirOpnd *parentIndirOpnd)
{
Assert(
parentIndirOpnd
? opnd == parentIndirOpnd->GetBaseOpnd() || opnd == parentIndirOpnd->GetIndexOpnd()
: opnd == instr->GetSrc1() || opnd == instr->GetSrc2() || (opnd == instr->GetDst() && opnd->IsIndirOpnd()));
if (this->IsLoopPrePass())
{
// Transformations are not legal in prepass...
return opnd;
}
if (!this->func->DoGlobOptsForGeneratorFunc())
{
// Don't copy prop in generator functions because non-bytecode temps that span a yield
// cannot be saved and restored by the current bail-out mechanics utilized by generator
// yield/resume.
// TODO[generators][ianhall]: Enable copy-prop at least for in between yields.
return opnd;
}
if (instr->m_opcode == Js::OpCode::CheckFixedFld || instr->m_opcode == Js::OpCode::CheckPropertyGuardAndLoadType)
{
// Don't copy prop into CheckFixedFld or CheckPropertyGuardAndLoadType
return opnd;
}
// Don't copy-prop link operands of ExtendedArgs
if (instr->m_opcode == Js::OpCode::ExtendArg_A && opnd == instr->GetSrc2())
{
return opnd;
}
// Don't copy-prop operand of SIMD instr with ExtendedArg operands. Each instr should have its exclusive EA sequence.
if (
Js::IsSimd128Opcode(instr->m_opcode) &&
instr->GetSrc1() != nullptr &&
instr->GetSrc1()->IsRegOpnd() &&
instr->GetSrc2() == nullptr
)
{
StackSym *sym = instr->GetSrc1()->GetStackSym();
if (sym && sym->IsSingleDef() && sym->GetInstrDef()->m_opcode == Js::OpCode::ExtendArg_A)
{
return opnd;
}
}
ValueInfo *valueInfo = val->GetValueInfo();
if (this->func->HasFinally())
{
// s0 = undefined is added to functions with early exit in try-finally; it can get copy-propped and cause incorrect results.
if (instr->m_opcode == Js::OpCode::ArgOut_A_Inline && valueInfo->GetSymStore() &&
valueInfo->GetSymStore()->m_id == 0)
{
// We don't want to copy-prop s0 (return symbol) into inlinee code
return opnd;
}
}
// Constant prop?
int32 intConstantValue;
int64 int64ConstantValue;
if (valueInfo->TryGetIntConstantValue(&intConstantValue))
{
if (PHASE_OFF(Js::ConstPropPhase, this->func))
{
return opnd;
}
if ((
instr->m_opcode == Js::OpCode::StElemI_A ||
instr->m_opcode == Js::OpCode::StElemI_A_Strict ||
instr->m_opcode == Js::OpCode::StElemC
) && instr->GetSrc1() == opnd)
{
// Disabling prop to src of native array store, because we were losing the chance to type specialize.
// Is it possible to type specialize this src if we allow constants, etc., to be prop'd here?
if (instr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->GetValueType().IsLikelyNativeArray())
{
return opnd;
}
}
if(opnd != instr->GetSrc1() && opnd != instr->GetSrc2())
{
if(PHASE_OFF(Js::IndirCopyPropPhase, instr->m_func))
{
return opnd;
}
// Const-prop an indir opnd's constant index into its offset
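// Illustrative example: if t2 is known to be the constant 8, "[t1 + t2]" becomes "[t1 + 8]",
// and any existing offset is folded in, e.g. "[t1 + t2 + 4]" becomes "[t1 + 12]".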
IR::Opnd *srcs[] = { instr->GetSrc1(), instr->GetSrc2(), instr->GetDst() };
for(int i = 0; i < sizeof(srcs) / sizeof(srcs[0]); ++i)
{
const auto src = srcs[i];
if(!src || !src->IsIndirOpnd())
{
continue;
}
const auto indir = src->AsIndirOpnd();
if ((int64)indir->GetOffset() + intConstantValue > INT32_MAX)
{
continue;
}
if(opnd == indir->GetIndexOpnd())
{
Assert(indir->GetScale() == 0);
GOPT_TRACE_OPND(opnd, _u("Constant prop indir index into offset (value: %d)\n"), intConstantValue);
this->CaptureByteCodeSymUses(instr);
indir->SetOffset(indir->GetOffset() + intConstantValue);
indir->SetIndexOpnd(nullptr);
}
}
return opnd;
}
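// Values outside the tagged-int range can't be materialized as a tagged Var constant below; skip const prop for them.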
if (Js::TaggedInt::IsOverflow(intConstantValue))
{
return opnd;
}
IR::Opnd *constOpnd;
if (opnd->IsVar())
{
IR::AddrOpnd *addrOpnd = IR::AddrOpnd::New(Js::TaggedInt::ToVarUnchecked((int)intConstantValue), IR::AddrOpndKindConstantVar, instr->m_func);
GOPT_TRACE_OPND(opnd, _u("Constant prop %d (value:%d)\n"), addrOpnd->m_address, intConstantValue);
constOpnd = addrOpnd;
}
else
{
// Note: Jit loop body generates some i32 operands...
Assert(opnd->IsInt32() || opnd->IsInt64() || opnd->IsUInt32());
IRType opndType;
IntConstType constVal;
if (opnd->IsUInt32())
{
// avoid sign extension
constVal = (uint32)intConstantValue;
opndType = TyUint32;
}
else
{
constVal = intConstantValue;
opndType = TyInt32;
}
IR::IntConstOpnd *intOpnd = IR::IntConstOpnd::New(constVal, opndType, instr->m_func);
GOPT_TRACE_OPND(opnd, _u("Constant prop %d (value:%d)\n"), intOpnd->GetImmediateValue(instr->m_func), intConstantValue);
constOpnd = intOpnd;
}
#if ENABLE_DEBUG_CONFIG_OPTIONS
// Need to update DumpFieldCopyPropTestTrace for every new opcode that is added for field copy prop
if(Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::FieldCopyPropPhase))
{
instr->DumpFieldCopyPropTestTrace();
}
#endif
this->CaptureByteCodeSymUses(instr);
opnd = instr->ReplaceSrc(opnd, constOpnd);
switch (instr->m_opcode)
{
case Js::OpCode::LdSlot:
case Js::OpCode::LdSlotArr:
case Js::OpCode::LdFld:
case Js::OpCode::LdFldForTypeOf:
case Js::OpCode::LdRootFldForTypeOf:
case Js::OpCode::LdFldForCallApplyTarget:
case Js::OpCode::LdRootFld:
case Js::OpCode::LdMethodFld:
case Js::OpCode::LdRootMethodFld:
case Js::OpCode::LdMethodFromFlags:
case Js::OpCode::ScopedLdMethodFld:
instr->m_opcode = Js::OpCode::Ld_A;
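// fall-through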
case Js::OpCode::Ld_A:
{
IR::Opnd * dst = instr->GetDst();
if (dst->IsRegOpnd() && dst->AsRegOpnd()->m_sym->IsSingleDef())
{
dst->AsRegOpnd()->m_sym->SetIsIntConst((int)intConstantValue);
}
break;
}
case Js::OpCode::ArgOut_A:
case Js::OpCode::ArgOut_A_Inline:
case Js::OpCode::ArgOut_A_FixupForStackArgs:
case Js::OpCode::ArgOut_A_InlineBuiltIn:
if (instr->GetDst()->IsRegOpnd())
{
Assert(instr->GetDst()->AsRegOpnd()->m_sym->m_isSingleDef);
instr->GetDst()->AsRegOpnd()->m_sym->AsStackSym()->SetIsIntConst((int)intConstantValue);
}
else
{
instr->GetDst()->AsSymOpnd()->m_sym->AsStackSym()->SetIsIntConst((int)intConstantValue);
}
break;
case Js::OpCode::TypeofElem:
instr->m_opcode = Js::OpCode::Typeof;
break;
case Js::OpCode::StSlotChkUndecl:
if (instr->GetSrc2() == opnd)
{
// Src2 here should refer to the same location as the Dst operand, which we need to keep live
// due to the implicit read for ChkUndecl.
instr->m_opcode = Js::OpCode::StSlot;
instr->FreeSrc2();
opnd = nullptr;
}
break;
}
return opnd;
}
else if (valueInfo->TryGetIntConstantValue(&int64ConstantValue, false))
{
if (PHASE_OFF(Js::ConstPropPhase, this->func) || !PHASE_ON(Js::Int64ConstPropPhase, this->func))
{
return opnd;
}
Assert(this->func->GetJITFunctionBody()->IsWasmFunction());
if (this->func->GetJITFunctionBody()->IsWasmFunction() && opnd->IsInt64())
{
IR::Int64ConstOpnd *intOpnd = IR::Int64ConstOpnd::New(int64ConstantValue, opnd->GetType(), instr->m_func);
GOPT_TRACE_OPND(opnd, _u("Constant prop %lld (value:%lld)\n"), intOpnd->GetImmediateValue(instr->m_func), int64ConstantValue);
this->CaptureByteCodeSymUses(instr);
opnd = instr->ReplaceSrc(opnd, intOpnd);
}
return opnd;
}
Sym *opndSym = nullptr;
if (opnd->IsRegOpnd())
{
IR::RegOpnd *regOpnd = opnd->AsRegOpnd();
opndSym = regOpnd->m_sym;
}
else if (opnd->IsSymOpnd())
{
IR::SymOpnd *symOpnd = opnd->AsSymOpnd();
opndSym = symOpnd->m_sym;
}
if (!opndSym)
{
return opnd;
}
if (PHASE_OFF(Js::CopyPropPhase, this->func))
{
this->SetSymStoreDirect(valueInfo, opndSym);
return opnd;
}
// We should have dealt with field hoist already
Assert(!instr->TransfersSrcValue() || !opndSym->IsPropertySym() ||
!this->IsHoistedPropertySym(opndSym->AsPropertySym()));
StackSym *copySym = CurrentBlockData()->GetCopyPropSym(opndSym, val);
if (copySym != nullptr)
{
// Copy prop.
return CopyPropReplaceOpnd(instr, opnd, copySym, parentIndirOpnd);
}
else
{
if (valueInfo->GetSymStore() && instr->m_opcode == Js::OpCode::Ld_A && instr->GetDst()->IsRegOpnd()
&& valueInfo->GetSymStore() == instr->GetDst()->AsRegOpnd()->m_sym)
{
// Avoid resetting symStore after fieldHoisting:
// t1 = LdFld field <- set symStore to fieldHoistSym
// fieldHoistSym = Ld_A t1 <- we're looking at t1 now, but want to copy-prop fieldHoistSym forward
return opnd;
}
this->SetSymStoreDirect(valueInfo, opndSym);
}
return opnd;
}
IR::Opnd *
GlobOpt::CopyPropReplaceOpnd(IR::Instr * instr, IR::Opnd * opnd, StackSym * copySym, IR::IndirOpnd *parentIndirOpnd)
{
Assert(
parentIndirOpnd
? opnd == parentIndirOpnd->GetBaseOpnd() || opnd == parentIndirOpnd->GetIndexOpnd()
: opnd == instr->GetSrc1() || opnd == instr->GetSrc2() || (opnd == instr->GetDst() && opnd->IsIndirOpnd()));
Assert(CurrentBlockData()->IsLive(copySym));
IR::RegOpnd *regOpnd;
StackSym *newSym = copySym;
GOPT_TRACE_OPND(opnd, _u("Copy prop s%d\n"), newSym->m_id);
#if ENABLE_DEBUG_CONFIG_OPTIONS
// Need to update DumpFieldCopyPropTestTrace for every new opcode that is added for field copy prop
if(Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::FieldCopyPropPhase))
{
instr->DumpFieldCopyPropTestTrace();
}
#endif
this->CaptureByteCodeSymUses(instr);
if (opnd->IsRegOpnd())
{
regOpnd = opnd->AsRegOpnd();
regOpnd->m_sym = newSym;
regOpnd->SetIsJITOptimizedReg(true);
// The dead bit on the opnd is specific to the sym it is referencing. Since we replaced the sym, the bit is reset.
regOpnd->SetIsDead(false);
if(parentIndirOpnd)
{
return regOpnd;
}
}
else
{
// If this is an object type specialized field load inside a loop, and it produces a type value which wasn't live
// before, make sure the type check is left in the loop, because it may be the last type check in the loop protecting
// other fields which are not hoistable and are lexically upstream in the loop. If the check is not ultimately
// needed, the dead store pass will remove it.
if (this->currentBlock->loop != nullptr && opnd->IsSymOpnd() && opnd->AsSymOpnd()->IsPropertySymOpnd())
{
IR::PropertySymOpnd* propertySymOpnd = opnd->AsPropertySymOpnd();
if (CheckIfPropOpEmitsTypeCheck(instr, propertySymOpnd))
{
// We only set guarded properties in the dead store pass, so they shouldn't be set here yet. If they were
// we would need to move them from this operand to the operand which is being copy propagated.
Assert(propertySymOpnd->GetGuardedPropOps() == nullptr);
// We're creating a copy of this operand to be reused in the same spot in the flow, so we can copy all
// flow sensitive fields. However, we will do only a type check here (no property access) and only for
// the sake of downstream instructions, so the flags pertaining to this property access are irrelevant.
IR::PropertySymOpnd* checkObjTypeOpnd = CreateOpndForTypeCheckOnly(propertySymOpnd, instr->m_func);
IR::Instr* checkObjTypeInstr = IR::Instr::New(Js::OpCode::CheckObjType, instr->m_func);
checkObjTypeInstr->SetSrc1(checkObjTypeOpnd);
checkObjTypeInstr->SetByteCodeOffset(instr);
instr->InsertBefore(checkObjTypeInstr);
// Since we inserted this instruction before the one that is being processed in natural flow, we must process
// it for object type spec explicitly here.
FinishOptPropOp(checkObjTypeInstr, checkObjTypeOpnd);
Assert(!propertySymOpnd->IsTypeChecked());
checkObjTypeInstr = this->SetTypeCheckBailOut(checkObjTypeOpnd, checkObjTypeInstr, nullptr);
Assert(checkObjTypeInstr->HasBailOutInfo());
if (this->currentBlock->loop && !this->IsLoopPrePass())
{
// Try hoisting this checkObjType.
// But since this isn't the current instr being optimized, we need to play tricks with
// the byteCodeUse fields...
TrackByteCodeUsesForInstrAddedInOptInstr(checkObjTypeInstr, [&]()
{
TryHoistInvariant(checkObjTypeInstr, this->currentBlock, NULL, CurrentBlockData()->FindValue(copySym), NULL, true);
});
}
}
}
if (opnd->IsSymOpnd() && opnd->GetIsDead())
{
// Take the property sym out of the live fields set
this->EndFieldLifetime(opnd->AsSymOpnd());
}
regOpnd = IR::RegOpnd::New(newSym, opnd->GetType(), instr->m_func);
regOpnd->SetIsJITOptimizedReg(true);
instr->ReplaceSrc(opnd, regOpnd);
}
switch (instr->m_opcode)
{
case Js::OpCode::Ld_A:
if (instr->GetDst()->IsRegOpnd() && instr->GetSrc1()->IsRegOpnd() &&
instr->GetDst()->AsRegOpnd()->GetStackSym() == instr->GetSrc1()->AsRegOpnd()->GetStackSym())
{
this->InsertByteCodeUses(instr, true);
instr->m_opcode = Js::OpCode::Nop;
}
break;
case Js::OpCode::LdSlot:
case Js::OpCode::LdSlotArr:
if (instr->GetDst()->IsRegOpnd() && instr->GetSrc1()->IsRegOpnd() &&
instr->GetDst()->AsRegOpnd()->GetStackSym() == instr->GetSrc1()->AsRegOpnd()->GetStackSym())
{
this->InsertByteCodeUses(instr, true);
instr->m_opcode = Js::OpCode::Nop;
}
else
{
instr->m_opcode = Js::OpCode::Ld_A;
}
break;
case Js::OpCode::StSlotChkUndecl:
if (instr->GetSrc2()->IsRegOpnd())
{
// Src2 here should refer to the same location as the Dst operand, which we need to keep live
// due to the implicit read for ChkUndecl.
instr->m_opcode = Js::OpCode::StSlot;
instr->FreeSrc2();
return nullptr;
}
break;
case Js::OpCode::LdFld:
case Js::OpCode::LdFldForTypeOf:
case Js::OpCode::LdRootFldForTypeOf:
case Js::OpCode::LdFldForCallApplyTarget:
case Js::OpCode::LdRootFld:
case Js::OpCode::LdMethodFld:
case Js::OpCode::LdRootMethodFld:
case Js::OpCode::ScopedLdMethodFld:
instr->m_opcode = Js::OpCode::Ld_A;
break;
case Js::OpCode::LdMethodFromFlags:
// The bailout is checked at the loop top, so we don't need to check it again inside the loop.
instr->m_opcode = Js::OpCode::Ld_A;
instr->ClearBailOutInfo();
break;
case Js::OpCode::TypeofElem:
instr->m_opcode = Js::OpCode::Typeof;
break;
}
CurrentBlockData()->MarkTempLastUse(instr, regOpnd);
return regOpnd;
}
ValueNumber
GlobOpt::NewValueNumber()
{
ValueNumber valueNumber = this->currentValue++;
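// If the counter wrapped around to 0, we can no longer hand out unique value numbers; treat it as out-of-memory.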
if (valueNumber == 0)
{
Js::Throw::OutOfMemory();
}
return valueNumber;
}
Value *GlobOpt::NewValue(ValueInfo *const valueInfo)
{
return NewValue(NewValueNumber(), valueInfo);
}
Value *GlobOpt::NewValue(const ValueNumber valueNumber, ValueInfo *const valueInfo)
{
Assert(valueInfo);
return Value::New(alloc, valueNumber, valueInfo);
}
Value *GlobOpt::CopyValue(Value const *const value)
{
return CopyValue(value, NewValueNumber());
}
Value *GlobOpt::CopyValue(Value const *const value, const ValueNumber valueNumber)
{
Assert(value);
return value->Copy(alloc, valueNumber);
}
Value *
GlobOpt::NewGenericValue(const ValueType valueType)
{
return NewGenericValue(valueType, static_cast<IR::Opnd *>(nullptr));
}
Value *
GlobOpt::NewGenericValue(const ValueType valueType, IR::Opnd *const opnd)
{
// Shouldn't assign a likely-int value to something that is definitely not an int
Assert(!(valueType.IsLikelyInt() && opnd && opnd->IsRegOpnd() && opnd->AsRegOpnd()->m_sym->m_isNotInt));
ValueInfo *valueInfo = ValueInfo::New(this->alloc, valueType);
Value *val = NewValue(valueInfo);
TrackNewValueForKills(val);
CurrentBlockData()->InsertNewValue(val, opnd);
return val;
}
Value *
GlobOpt::NewGenericValue(const ValueType valueType, Sym *const sym)
{
ValueInfo *valueInfo = ValueInfo::New(this->alloc, valueType);
Value *val = NewValue(valueInfo);
TrackNewValueForKills(val);
CurrentBlockData()->SetValue(val, sym);
return val;
}
Value *
GlobOpt::GetIntConstantValue(const int32 intConst, IR::Instr * instr, IR::Opnd *const opnd)
{
Value *value = nullptr;
Value *const cachedValue = this->intConstantToValueMap->Lookup(intConst, nullptr);
if(cachedValue)
{
// The cached value could be from a different block since this is a global (as opposed to a per-block) cache. Since
// values are cloned for each block, we can't use the same value object. We also can't have two values with the same
// number in one block, so we can't simply copy the cached value either. And finally, there is no deterministic and fast
// way to determine if a value with the same value number exists for this block. So the best we can do with a global
// cache is to check the sym-store's value in the current block to see if it has a value with the same number.
// Otherwise, we have to create a new value with a new value number.
Sym *const symStore = cachedValue->GetValueInfo()->GetSymStore();
if (symStore && CurrentBlockData()->IsLive(symStore))
{
Value *const symStoreValue = CurrentBlockData()->FindValue(symStore);
int32 symStoreIntConstantValue;
if (symStoreValue &&
symStoreValue->GetValueNumber() == cachedValue->GetValueNumber() &&
symStoreValue->GetValueInfo()->TryGetIntConstantValue(&symStoreIntConstantValue) &&
symStoreIntConstantValue == intConst)
{
value = symStoreValue;
}
}
}
if (!value)
{
value = NewIntConstantValue(intConst, instr, !Js::TaggedInt::IsOverflow(intConst));
}
return CurrentBlockData()->InsertNewValue(value, opnd);
}
Value *
GlobOpt::GetIntConstantValue(const int64 intConst, IR::Instr * instr, IR::Opnd *const opnd)
{
Assert(instr->m_func->GetJITFunctionBody()->IsWasmFunction());
Value *value = nullptr;
Value *const cachedValue = this->int64ConstantToValueMap->Lookup(intConst, nullptr);
if (cachedValue)
{
// The cached value could be from a different block since this is a global (as opposed to a per-block) cache. Since
// values are cloned for each block, we can't use the same value object. We also can't have two values with the same
// number in one block, so we can't simply copy the cached value either. And finally, there is no deterministic and fast
// way to determine if a value with the same value number exists for this block. So the best we can do with a global
// cache is to check the sym-store's value in the current block to see if it has a value with the same number.
// Otherwise, we have to create a new value with a new value number.
Sym *const symStore = cachedValue->GetValueInfo()->GetSymStore();
if (symStore && this->currentBlock->globOptData.IsLive(symStore))
{
Value *const symStoreValue = this->currentBlock->globOptData.FindValue(symStore);
int64 symStoreIntConstantValue;
if (symStoreValue &&
symStoreValue->GetValueNumber() == cachedValue->GetValueNumber() &&
symStoreValue->GetValueInfo()->TryGetInt64ConstantValue(&symStoreIntConstantValue, false) &&
symStoreIntConstantValue == intConst)
{
value = symStoreValue;
}
}
}
if (!value)
{
value = NewInt64ConstantValue(intConst, instr);
}
return this->currentBlock->globOptData.InsertNewValue(value, opnd);
}
Value *
GlobOpt::NewInt64ConstantValue(const int64 intConst, IR::Instr* instr)
{
Value * value = NewValue(Int64ConstantValueInfo::New(this->alloc, intConst));
this->int64ConstantToValueMap->Item(intConst, value);
if (!value->GetValueInfo()->GetSymStore() &&
(instr->m_opcode == Js::OpCode::LdC_A_I4 || instr->m_opcode == Js::OpCode::Ld_I4))
{
StackSym * sym = instr->GetDst()->GetStackSym();
Assert(sym && !sym->IsTypeSpec());
this->currentBlock->globOptData.SetValue(value, sym);
this->currentBlock->globOptData.liveVarSyms->Set(sym->m_id);
}
return value;
}
Value *
GlobOpt::NewIntConstantValue(const int32 intConst, IR::Instr * instr, bool isTaggable)
{
Value * value = NewValue(IntConstantValueInfo::New(this->alloc, intConst));
this->intConstantToValueMap->Item(intConst, value);
if (isTaggable &&
!PHASE_OFF(Js::HoistConstIntPhase, this->func))
{
// When creating a new int constant value, make sure it gets a symstore. If the int const doesn't have a symstore,
// any downstream instruction using the same int will have to create a new value (object) for the int.
// This gets in the way of CSE.
value = HoistConstantLoadAndPropagateValueBackward(Js::TaggedInt::ToVarUnchecked(intConst), instr, value);
if (!value->GetValueInfo()->GetSymStore() &&
(instr->m_opcode == Js::OpCode::LdC_A_I4 || instr->m_opcode == Js::OpCode::Ld_I4))
{
StackSym * sym = instr->GetDst()->GetStackSym();
Assert(sym);
if (sym->IsTypeSpec())
{
Assert(sym->IsInt32());
StackSym * varSym = sym->GetVarEquivSym(instr->m_func);
CurrentBlockData()->SetValue(value, varSym);
CurrentBlockData()->liveInt32Syms->Set(varSym->m_id);
}
else
{
CurrentBlockData()->SetValue(value, sym);
CurrentBlockData()->liveVarSyms->Set(sym->m_id);
}
}
}
return value;
}
ValueInfo *
GlobOpt::NewIntRangeValueInfo(const int32 min, const int32 max, const bool wasNegativeZeroPreventedByBailout)
{
return ValueInfo::NewIntRangeValueInfo(this->alloc, min, max, wasNegativeZeroPreventedByBailout);
}
ValueInfo *GlobOpt::NewIntRangeValueInfo(
const ValueInfo *const originalValueInfo,
const int32 min,
const int32 max) const
{
Assert(originalValueInfo);
ValueInfo *valueInfo;
if(min == max)
{
// Since int constant values are const-propped, negative zero tracking does not track them, and so it's okay to ignore
// 'wasNegativeZeroPreventedByBailout'
valueInfo = IntConstantValueInfo::New(alloc, min);
}
else
{
valueInfo =
IntRangeValueInfo::New(
alloc,
min,
max,
min <= 0 && max >= 0 && originalValueInfo->WasNegativeZeroPreventedByBailout());
}
valueInfo->SetSymStore(originalValueInfo->GetSymStore());
return valueInfo;
}
Value *
GlobOpt::NewIntRangeValue(
const int32 min,
const int32 max,
const bool wasNegativeZeroPreventedByBailout,
IR::Opnd *const opnd)
{
ValueInfo *valueInfo = this->NewIntRangeValueInfo(min, max, wasNegativeZeroPreventedByBailout);
Value *val = NewValue(valueInfo);
if (opnd)
{
GOPT_TRACE_OPND(opnd, _u("Range %d (0x%X) to %d (0x%X)\n"), min, min, max, max);
}
CurrentBlockData()->InsertNewValue(val, opnd);
return val;
}
IntBoundedValueInfo *GlobOpt::NewIntBoundedValueInfo(
const ValueInfo *const originalValueInfo,
const IntBounds *const bounds) const
{
Assert(originalValueInfo);
bounds->Verify();
IntBoundedValueInfo *const valueInfo =
IntBoundedValueInfo::New(
originalValueInfo->Type(),
bounds,
(
bounds->ConstantLowerBound() <= 0 &&
bounds->ConstantUpperBound() >= 0 &&
originalValueInfo->WasNegativeZeroPreventedByBailout()
),
alloc);
valueInfo->SetSymStore(originalValueInfo->GetSymStore());
return valueInfo;
}
Value *GlobOpt::NewIntBoundedValue(
const ValueType valueType,
const IntBounds *const bounds,
const bool wasNegativeZeroPreventedByBailout,
IR::Opnd *const opnd)
{
Value *const value = NewValue(IntBoundedValueInfo::New(valueType, bounds, wasNegativeZeroPreventedByBailout, alloc));
CurrentBlockData()->InsertNewValue(value, opnd);
return value;
}
Value *
GlobOpt::NewFloatConstantValue(const FloatConstType floatValue, IR::Opnd *const opnd)
{
FloatConstantValueInfo *valueInfo = FloatConstantValueInfo::New(this->alloc, floatValue);
Value *val = NewValue(valueInfo);
CurrentBlockData()->InsertNewValue(val, opnd);
return val;
}
Value *
GlobOpt::GetVarConstantValue(IR::AddrOpnd *addrOpnd)
{
bool isVar = addrOpnd->IsVar();
bool isString = isVar && addrOpnd->m_localAddress && JITJavascriptString::Is(addrOpnd->m_localAddress);
Value *val = nullptr;
Value *cachedValue = nullptr;
if(this->addrConstantToValueMap->TryGetValue(addrOpnd->m_address, &cachedValue))
{
// The cached value could be from a different block since this is a global (as opposed to a per-block) cache. Since
// values are cloned for each block, we can't use the same value object. We also can't have two values with the same
// number in one block, so we can't simply copy the cached value either. And finally, there is no deterministic and fast
// way to determine if a value with the same value number exists for this block. So the best we can do with a global
// cache is to check the sym-store's value in the current block to see if it has a value with the same number.
// Otherwise, we have to create a new value with a new value number.
Sym *symStore = cachedValue->GetValueInfo()->GetSymStore();
if(symStore && CurrentBlockData()->IsLive(symStore))
{
Value *const symStoreValue = CurrentBlockData()->FindValue(symStore);
if(symStoreValue && symStoreValue->GetValueNumber() == cachedValue->GetValueNumber())
{
ValueInfo *const symStoreValueInfo = symStoreValue->GetValueInfo();
if(symStoreValueInfo->IsVarConstant() && symStoreValueInfo->AsVarConstant()->VarValue() == addrOpnd->m_address)
{
val = symStoreValue;
}
}
}
}
else if (isString)
{
JITJavascriptString* jsString = JITJavascriptString::FromVar(addrOpnd->m_localAddress);
Js::InternalString internalString(jsString->GetString(), jsString->GetLength());
if (this->stringConstantToValueMap->TryGetValue(internalString, &cachedValue))
{
Sym *symStore = cachedValue->GetValueInfo()->GetSymStore();
if (symStore && CurrentBlockData()->IsLive(symStore))
{
Value *const symStoreValue = CurrentBlockData()->FindValue(symStore);
if (symStoreValue && symStoreValue->GetValueNumber() == cachedValue->GetValueNumber())
{
ValueInfo *const symStoreValueInfo = symStoreValue->GetValueInfo();
if (symStoreValueInfo->IsVarConstant())
{
JITJavascriptString * cachedString = JITJavascriptString::FromVar(symStoreValue->GetValueInfo()->AsVarConstant()->VarValue(true));
Js::InternalString cachedInternalString(cachedString->GetString(), cachedString->GetLength());
if (Js::InternalStringComparer::Equals(internalString, cachedInternalString))
{
val = symStoreValue;
}
}
}
}
}
}
if(!val)
{
val = NewVarConstantValue(addrOpnd, isString);
}
addrOpnd->SetValueType(val->GetValueInfo()->Type());
return val;
}
Value *
GlobOpt::NewVarConstantValue(IR::AddrOpnd *addrOpnd, bool isString)
{
VarConstantValueInfo *valueInfo = VarConstantValueInfo::New(this->alloc, addrOpnd->m_address, addrOpnd->GetValueType(), false, addrOpnd->m_localAddress);
Value * value = NewValue(valueInfo);
this->addrConstantToValueMap->Item(addrOpnd->m_address, value);
if (isString)
{
JITJavascriptString* jsString = JITJavascriptString::FromVar(addrOpnd->m_localAddress);
Js::InternalString internalString(jsString->GetString(), jsString->GetLength());
this->stringConstantToValueMap->Item(internalString, value);
}
return value;
}
Value *
GlobOpt::HoistConstantLoadAndPropagateValueBackward(Js::Var varConst, IR::Instr * origInstr, Value * value)
{
if (this->IsLoopPrePass() ||
((this->currentBlock == this->func->m_fg->blockList) &&
origInstr->TransfersSrcValue()))
{
return value;
}
// Only hoisting taggable int const loads for now. Could be extended to other constants (floats, strings, addr opnds) if we see some benefit.
Assert(Js::TaggedInt::Is(varConst));
// Insert a load of the constant at the top of the function
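// Sketch (hypothetical syms): for a tagged-int constant 5 used deep in the function, emit
//     s42 = Ld_I4 5   (or a constant load of the tagged Var)
// right after the function entry, then propagate its value backward so earlier blocks see it on merges.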
StackSym * dstSym = StackSym::New(this->func);
IR::RegOpnd * constRegOpnd = IR::RegOpnd::New(dstSym, TyVar, this->func);
IR::Instr * loadInstr = IR::Instr::NewConstantLoad(constRegOpnd, (intptr_t)varConst, ValueType::GetInt(true), this->func);
this->func->m_fg->blockList->GetFirstInstr()->InsertAfter(loadInstr);
// Type-spec the load (Support for floats needs to be added when we start hoisting float constants).
bool typeSpecedToInt = false;
if (Js::TaggedInt::Is(varConst) && !IsTypeSpecPhaseOff(this->func))
{
typeSpecedToInt = true;
loadInstr->m_opcode = Js::OpCode::Ld_I4;
ToInt32Dst(loadInstr, loadInstr->GetDst()->AsRegOpnd(), this->currentBlock);
loadInstr->GetDst()->GetStackSym()->SetIsConst();
}
else
{
CurrentBlockData()->liveVarSyms->Set(dstSym->m_id);
}
// Add the value (object) to the current block's symToValueMap and propagate the value backward to all relevant blocks so it is available on merges.
value = CurrentBlockData()->InsertNewValue(value, constRegOpnd);
BVSparse<JitArenaAllocator>* GlobOptBlockData::*bv;
bv = typeSpecedToInt ? &GlobOptBlockData::liveInt32Syms : &GlobOptBlockData::liveVarSyms; // Will need to be expanded when we start hoisting float constants.
if (this->currentBlock != this->func->m_fg->blockList)
{
for (InvariantBlockBackwardIterator it(this, this->currentBlock, this->func->m_fg->blockList, nullptr);
it.IsValid();
it.MoveNext())
{
BasicBlock * block = it.Block();
(block->globOptData.*bv)->Set(dstSym->m_id);
Assert(!block->globOptData.FindValue(dstSym));
Value *const valueCopy = CopyValue(value, value->GetValueNumber());
block->globOptData.SetValue(valueCopy, dstSym);
}
}
return value;
}
Value *
GlobOpt::NewFixedFunctionValue(Js::JavascriptFunction *function, IR::AddrOpnd *addrOpnd)
{
Assert(function != nullptr);
Value *val = nullptr;
Value *cachedValue = nullptr;
if(this->addrConstantToValueMap->TryGetValue(addrOpnd->m_address, &cachedValue))
{
// The cached value could be from a different block since this is a global (as opposed to a per-block) cache. Since
// values are cloned for each block, we can't use the same value object. We also can't have two values with the same
// number in one block, so we can't simply copy the cached value either. And finally, there is no deterministic and fast
// way to determine if a value with the same value number exists for this block. So the best we can do with a global
// cache is to check the sym-store's value in the current block to see if it has a value with the same number.
// Otherwise, we have to create a new value with a new value number.
Sym *symStore = cachedValue->GetValueInfo()->GetSymStore();
if(symStore && CurrentBlockData()->IsLive(symStore))
{
Value *const symStoreValue = CurrentBlockData()->FindValue(symStore);
if(symStoreValue && symStoreValue->GetValueNumber() == cachedValue->GetValueNumber())
{
ValueInfo *const symStoreValueInfo = symStoreValue->GetValueInfo();
if(symStoreValueInfo->IsVarConstant())
{
VarConstantValueInfo *const symStoreVarConstantValueInfo = symStoreValueInfo->AsVarConstant();
if(symStoreVarConstantValueInfo->VarValue() == addrOpnd->m_address &&
symStoreVarConstantValueInfo->IsFunction())
{
val = symStoreValue;
}
}
}
}
}
if(!val)
{
VarConstantValueInfo *valueInfo = VarConstantValueInfo::New(this->alloc, function, addrOpnd->GetValueType(), true, addrOpnd->m_localAddress);
val = NewValue(valueInfo);
this->addrConstantToValueMap->AddNew(addrOpnd->m_address, val);
}
CurrentBlockData()->InsertNewValue(val, addrOpnd);
return val;
}
StackSym *GlobOpt::GetTaggedIntConstantStackSym(const int32 intConstantValue) const
{
Assert(!Js::TaggedInt::IsOverflow(intConstantValue));
return intConstantToStackSymMap->Lookup(intConstantValue, nullptr);
}
StackSym *GlobOpt::GetOrCreateTaggedIntConstantStackSym(const int32 intConstantValue) const
{
StackSym *stackSym = GetTaggedIntConstantStackSym(intConstantValue);
if(stackSym)
{
return stackSym;
}
stackSym = StackSym::New(TyVar, func);
intConstantToStackSymMap->Add(intConstantValue, stackSym);
return stackSym;
}
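// Record 'sym' as the value's home location (sym store). Type-spec syms are normalized to their
// var equivalents, and an existing stack sym store is kept: only a null or property sym store is replaced.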
Sym *
GlobOpt::SetSymStore(ValueInfo *valueInfo, Sym *sym)
{
if (sym->IsStackSym())
{
StackSym *stackSym = sym->AsStackSym();
if (stackSym->IsTypeSpec())
{
stackSym = stackSym->GetVarEquivSym(this->func);
sym = stackSym;
}
}
if (valueInfo->GetSymStore() == nullptr || valueInfo->GetSymStore()->IsPropertySym())
{
SetSymStoreDirect(valueInfo, sym);
}
return sym;
}
void
GlobOpt::SetSymStoreDirect(ValueInfo * valueInfo, Sym * sym)
{
Sym * prevSymStore = valueInfo->GetSymStore();
CurrentBlockData()->SetChangedSym(prevSymStore);
valueInfo->SetSymStore(sym);
}
// Figure out the Value of this dst.
Value *
GlobOpt::ValueNumberDst(IR::Instr **pInstr, Value *src1Val, Value *src2Val)
{
IR::Instr *&instr = *pInstr;
IR::Opnd *dst = instr->GetDst();
Value *dstVal = nullptr;
Sym *sym;
if (instr->CallsSetter())
{
return nullptr;
}
if (dst == nullptr)
{
return nullptr;
}
switch (dst->GetKind())
{
case IR::OpndKindSym:
sym = dst->AsSymOpnd()->m_sym;
break;
case IR::OpndKindReg:
sym = dst->AsRegOpnd()->m_sym;
if (OpCodeAttr::TempNumberProducing(instr->m_opcode))
{
CurrentBlockData()->isTempSrc->Set(sym->m_id);
}
else if (OpCodeAttr::TempNumberTransfer(instr->m_opcode))
{
IR::Opnd *src1 = instr->GetSrc1();
if (src1->IsRegOpnd() && CurrentBlockData()->isTempSrc->Test(src1->AsRegOpnd()->m_sym->m_id))
{
StackSym *src1Sym = src1->AsRegOpnd()->m_sym;
// isTempSrc is used for marking isTempLastUse, which is used to generate AddLeftDead()
// calls instead of the normal Add helpers. It tells the runtime that concats can use string
// builders.
// We need to be careful in the case where src1 points to a string builder and is getting aliased.
// Clear the bit on src and dst of the transfer instr in this case, unless we can prove src1
// isn't pointing at a string builder, like if it is single def and the def instr is not an Add,
// but TempProducing.
if (src1Sym->IsSingleDef() && src1Sym->m_instrDef->m_opcode != Js::OpCode::Add_A
&& OpCodeAttr::TempNumberProducing(src1Sym->m_instrDef->m_opcode))
{
CurrentBlockData()->isTempSrc->Set(sym->m_id);
}
else
{
CurrentBlockData()->isTempSrc->Clear(src1->AsRegOpnd()->m_sym->m_id);
CurrentBlockData()->isTempSrc->Clear(sym->m_id);
}
}
else
{
CurrentBlockData()->isTempSrc->Clear(sym->m_id);
}
}
else
{
CurrentBlockData()->isTempSrc->Clear(sym->m_id);
}
break;
case IR::OpndKindIndir:
return nullptr;
default:
return nullptr;
}
int32 min1, max1, min2, max2, newMin, newMax;
ValueInfo *src1ValueInfo = (src1Val ? src1Val->GetValueInfo() : nullptr);
ValueInfo *src2ValueInfo = (src2Val ? src2Val->GetValueInfo() : nullptr);
switch (instr->m_opcode)
{
case Js::OpCode::Conv_PrimStr:
AssertMsg(instr->GetDst()->GetValueType().IsString(),
"Creator of this instruction should have set the type");
if (this->IsLoopPrePass() || src1ValueInfo == nullptr || !src1ValueInfo->IsPrimitive())
{
break;
}
instr->m_opcode = Js::OpCode::Conv_Str;
// fall-through
case Js::OpCode::Conv_Str:
// This opcode is commented out since we don't track regex information in GlobOpt now.
//case Js::OpCode::Coerce_Regex:
case Js::OpCode::Coerce_Str:
AssertMsg(instr->GetDst()->GetValueType().IsString(),
"Creator of this instruction should have set the type");
// fall-through
case Js::OpCode::Coerce_StrOrRegex:
// We don't set the ValueType of src1 for Coerce_StrOrRegex, hence skip the ASSERT
if (this->IsLoopPrePass() || src1ValueInfo == nullptr || !src1ValueInfo->IsString())
{
break;
}
instr->m_opcode = Js::OpCode::Ld_A;
// fall-through
case Js::OpCode::BytecodeArgOutCapture:
case Js::OpCode::InitConst:
case Js::OpCode::LdAsmJsFunc:
case Js::OpCode::Ld_A:
case Js::OpCode::Ld_I4:
// Propagate sym attributes across the reg copy.
if (!this->IsLoopPrePass() && instr->GetSrc1()->IsRegOpnd())
{
if (dst->AsRegOpnd()->m_sym->IsSingleDef())
{
dst->AsRegOpnd()->m_sym->CopySymAttrs(instr->GetSrc1()->AsRegOpnd()->m_sym);
}
}
if (instr->IsProfiledInstr())
{
const ValueType profiledValueType(instr->AsProfiledInstr()->u.FldInfo().valueType);
if(!(
profiledValueType.IsLikelyInt() &&
(
(dst->IsRegOpnd() && dst->AsRegOpnd()->m_sym->m_isNotInt) ||
(instr->GetSrc1()->IsRegOpnd() && instr->GetSrc1()->AsRegOpnd()->m_sym->m_isNotInt)
)
))
{
if(!src1ValueInfo)
{
dstVal = this->NewGenericValue(profiledValueType, dst);
}
else if(src1ValueInfo->IsUninitialized())
{
if(IsLoopPrePass())
{
dstVal = this->NewGenericValue(profiledValueType, dst);
}
else
{
// Assuming the profile data gives more precise value types based on the path it took at runtime, we
// can improve the original value type.
src1ValueInfo->Type() = profiledValueType;
instr->GetSrc1()->SetValueType(profiledValueType);
}
}
}
}
if (dstVal == nullptr)
{
// Ld_A is just transferring the value
dstVal = this->ValueNumberTransferDst(instr, src1Val);
}
break;
case Js::OpCode::ExtendArg_A:
{
// SIMD_JS
// We avoid transforming EAs to Lds to keep the IR shape consistent and avoid CSEing of EAs.
        // CSEOptimize only assigns a Value to the EA dst, and doesn't turn it into a Ld. If that happened, we shouldn't assign a new Value here.
if (DoCSE())
{
IR::Opnd * currDst = instr->GetDst();
Value * currDstVal = CurrentBlockData()->FindValue(currDst->GetStackSym());
if (currDstVal != nullptr)
{
return currDstVal;
}
}
break;
}
case Js::OpCode::CheckFixedFld:
AssertMsg(false, "CheckFixedFld doesn't have a dst, so we should never get here");
break;
case Js::OpCode::LdSlot:
case Js::OpCode::LdSlotArr:
case Js::OpCode::LdFld:
case Js::OpCode::LdFldForTypeOf:
case Js::OpCode::LdFldForCallApplyTarget:
    // Do not transfer the value type on LdFldForTypeOf, to prevent copy-prop to LdRootFld when the field doesn't exist, since LdRootFldForTypeOf does not throw
//case Js::OpCode::LdRootFldForTypeOf:
case Js::OpCode::LdRootFld:
case Js::OpCode::LdMethodFld:
case Js::OpCode::LdRootMethodFld:
case Js::OpCode::ScopedLdMethodFld:
case Js::OpCode::LdMethodFromFlags:
if (instr->IsProfiledInstr())
{
ValueType profiledValueType(instr->AsProfiledInstr()->u.FldInfo().valueType);
if(!(profiledValueType.IsLikelyInt() && dst->IsRegOpnd() && dst->AsRegOpnd()->m_sym->m_isNotInt))
{
if(!src1ValueInfo)
{
dstVal = this->NewGenericValue(profiledValueType, dst);
}
else if(src1ValueInfo->IsUninitialized())
{
if(IsLoopPrePass() && (!dst->IsRegOpnd() || !dst->AsRegOpnd()->m_sym->IsSingleDef() || DoFieldHoisting()))
{
dstVal = this->NewGenericValue(profiledValueType, dst);
}
else
{
// Assuming the profile data gives more precise value types based on the path it took at runtime, we
// can improve the original value type.
src1ValueInfo->Type() = profiledValueType;
instr->GetSrc1()->SetValueType(profiledValueType);
}
}
}
}
if (dstVal == nullptr)
{
dstVal = this->ValueNumberTransferDst(instr, src1Val);
}
if(!this->IsLoopPrePass())
{
            // We cannot transfer the value if the field hasn't been copy-prop'd, because we don't generate
            // an implicit call bailout between those values if we don't have "live fields", unless we are hoisting the field.
PropertySym *propertySym = instr->GetSrc1()->AsSymOpnd()->m_sym->AsPropertySym();
StackSym * fieldHoistSym;
Loop * loop = this->FindFieldHoistStackSym(this->currentBlock->loop, propertySym->m_id, &fieldHoistSym, instr);
ValueInfo *dstValueInfo = (dstVal ? dstVal->GetValueInfo() : nullptr);
// Update symStore for field hoisting
if (loop != nullptr && (dstValueInfo != nullptr))
{
this->SetSymStoreDirect(dstValueInfo, fieldHoistSym);
}
// Update symStore if it isn't a stackSym
if (dstVal && (!dstValueInfo->GetSymStore() || !dstValueInfo->GetSymStore()->IsStackSym()))
{
Assert(dst->IsRegOpnd());
this->SetSymStoreDirect(dstValueInfo, dst->AsRegOpnd()->m_sym);
}
if (src1Val != dstVal)
{
CurrentBlockData()->SetValue(dstVal, instr->GetSrc1());
}
}
break;
case Js::OpCode::LdC_A_R8:
case Js::OpCode::LdC_A_I4:
case Js::OpCode::ArgIn_A:
dstVal = src1Val;
break;
case Js::OpCode::LdStr:
if (src1Val == nullptr)
{
src1Val = NewGenericValue(ValueType::String, dst);
}
dstVal = src1Val;
break;
    // LdElemUndef only assigns undef if the field doesn't exist.
    // So we don't actually know what the value is, and we can't really copy-prop it.
//case Js::OpCode::LdElemUndef:
case Js::OpCode::StSlot:
case Js::OpCode::StSlotChkUndecl:
case Js::OpCode::StFld:
case Js::OpCode::StRootFld:
case Js::OpCode::StFldStrict:
case Js::OpCode::StRootFldStrict:
case Js::OpCode::InitFld:
case Js::OpCode::InitComputedProperty:
if (DoFieldCopyProp())
{
if (src1Val == nullptr)
{
// src1 may have no value if it's not a valid var, e.g., NULL for let/const initialization.
// Consider creating generic values for such things.
return nullptr;
}
AssertMsg(!src2Val, "Bad src Values...");
Assert(sym->IsPropertySym());
SymID symId = sym->m_id;
Assert(instr->m_opcode == Js::OpCode::StSlot || instr->m_opcode == Js::OpCode::StSlotChkUndecl || !CurrentBlockData()->liveFields->Test(symId));
if (IsHoistablePropertySym(symId))
{
                // We have changed the value of a hoistable field; loads afterwards shouldn't get hoisted,
                // but we will still copy-prop the pre-assign sym to them if we have a live value.
Assert((instr->m_opcode == Js::OpCode::StSlot || instr->m_opcode == Js::OpCode::StSlotChkUndecl) && CurrentBlockData()->liveFields->Test(symId));
CurrentBlockData()->hoistableFields->Clear(symId);
}
CurrentBlockData()->liveFields->Set(symId);
if (!this->IsLoopPrePass() && dst->GetIsDead())
{
// Take the property sym out of the live fields set (with special handling for loops).
this->EndFieldLifetime(dst->AsSymOpnd());
}
dstVal = this->ValueNumberTransferDst(instr, src1Val);
}
else
{
return nullptr;
}
break;
case Js::OpCode::Conv_Num:
if(src1ValueInfo->IsNumber())
{
dstVal = ValueNumberTransferDst(instr, src1Val);
}
else
{
return NewGenericValue(src1ValueInfo->Type().ToDefiniteAnyNumber(), dst);
}
break;
case Js::OpCode::Not_A:
{
if (!src1Val || !src1ValueInfo->GetIntValMinMax(&min1, &max1, this->DoAggressiveIntTypeSpec()))
{
min1 = INT32_MIN;
max1 = INT32_MAX;
}
this->PropagateIntRangeForNot(min1, max1, &newMin, &newMax);
return CreateDstUntransferredIntValue(newMin, newMax, instr, src1Val, src2Val);
}
case Js::OpCode::Xor_A:
case Js::OpCode::Or_A:
case Js::OpCode::And_A:
case Js::OpCode::Shl_A:
case Js::OpCode::Shr_A:
case Js::OpCode::ShrU_A:
{
if (!src1Val || !src1ValueInfo->GetIntValMinMax(&min1, &max1, this->DoAggressiveIntTypeSpec()))
{
min1 = INT32_MIN;
max1 = INT32_MAX;
}
if (!src2Val || !src2ValueInfo->GetIntValMinMax(&min2, &max2, this->DoAggressiveIntTypeSpec()))
{
min2 = INT32_MIN;
max2 = INT32_MAX;
}
if (instr->m_opcode == Js::OpCode::ShrU_A &&
min1 < 0 &&
IntConstantBounds(min2, max2).And_0x1f().Contains(0))
{
// Src1 may be too large to represent as a signed int32, and src2 may be zero.
// Since the result can therefore be too large to represent as a signed int32,
// include Number in the value type.
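            // (For example, in JS, -1 >>> 0 === 4294967295, which cannot be represented as a signed int32.)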
return CreateDstUntransferredValue(
ValueType::AnyNumber.SetCanBeTaggedValue(true), instr, src1Val, src2Val);
}
this->PropagateIntRangeBinary(instr, min1, max1, min2, max2, &newMin, &newMax);
return CreateDstUntransferredIntValue(newMin, newMax, instr, src1Val, src2Val);
}
case Js::OpCode::Incr_A:
case Js::OpCode::Decr_A:
{
ValueType valueType;
if(src1Val)
{
valueType = src1Val->GetValueInfo()->Type().ToDefiniteAnyNumber();
}
else
{
valueType = ValueType::Number;
}
return CreateDstUntransferredValue(valueType, instr, src1Val, src2Val);
}
case Js::OpCode::Add_A:
{
ValueType valueType;
if (src1Val && src1ValueInfo->IsLikelyNumber() && src2Val && src2ValueInfo->IsLikelyNumber())
{
if(src1ValueInfo->IsLikelyInt() && src2ValueInfo->IsLikelyInt())
{
                    // When doing aggressive int type spec, just assume the result is likely going to be int
                    // if both inputs are int.
const bool isLikelyTagged = src1ValueInfo->IsLikelyTaggedInt() && src2ValueInfo->IsLikelyTaggedInt();
if(src1ValueInfo->IsNumber() && src2ValueInfo->IsNumber())
{
// If both of them are numbers then we can definitely say that the result is a number.
valueType = ValueType::GetNumberAndLikelyInt(isLikelyTagged);
}
else
{
// This is only likely going to be int but can be a string as well.
valueType = ValueType::GetInt(isLikelyTagged).ToLikely();
}
}
else
{
                    // We can only be certain of anything if both of them are numbers.
                    // Otherwise, the result could be a string.
if (src1ValueInfo->IsNumber() && src2ValueInfo->IsNumber())
{
if (src1ValueInfo->IsFloat() || src2ValueInfo->IsFloat())
{
// If one of them is a float, the result probably is a float instead of just int
// but should always be a number.
valueType = ValueType::Float;
}
else
{
// Could be int, could be number
valueType = ValueType::Number;
}
}
else if (src1ValueInfo->IsLikelyFloat() || src2ValueInfo->IsLikelyFloat())
{
// Result is likely a float (but can be anything)
valueType = ValueType::Float.ToLikely();
}
else
{
// Otherwise it is a likely int or float (but can be anything)
valueType = ValueType::Number.ToLikely();
}
}
}
else if((src1Val && src1ValueInfo->IsString()) || (src2Val && src2ValueInfo->IsString()))
{
// String + anything should always result in a string
valueType = ValueType::String;
}
else if((src1Val && src1ValueInfo->IsNotString() && src1ValueInfo->IsPrimitive())
&& (src2Val && src2ValueInfo->IsNotString() && src2ValueInfo->IsPrimitive()))
{
            // If src1 and src2 are primitives and not strings, add should yield a number.
valueType = ValueType::Number;
}
else if((src1Val && src1ValueInfo->IsLikelyString()) || (src2Val && src2ValueInfo->IsLikelyString()))
{
            // LikelyString + anything should always result in a LikelyString
valueType = ValueType::String.ToLikely();
}
else
{
// Number or string. Could make the value a merge of Number and String, but Uninitialized is more useful at the moment.
Assert(valueType.IsUninitialized());
}
return CreateDstUntransferredValue(valueType, instr, src1Val, src2Val);
}
case Js::OpCode::Div_A:
{
ValueType divValueType = GetDivValueType(instr, src1Val, src2Val, false);
if (divValueType.IsLikelyInt() || divValueType.IsFloat())
{
return CreateDstUntransferredValue(divValueType, instr, src1Val, src2Val);
}
}
// fall-through
case Js::OpCode::Sub_A:
case Js::OpCode::Mul_A:
case Js::OpCode::Rem_A:
{
ValueType valueType;
if( src1Val &&
src1ValueInfo->IsLikelyInt() &&
src2Val &&
src2ValueInfo->IsLikelyInt() &&
instr->m_opcode != Js::OpCode::Div_A)
{
const bool isLikelyTagged =
src1ValueInfo->IsLikelyTaggedInt() && (src2ValueInfo->IsLikelyTaggedInt() || instr->m_opcode == Js::OpCode::Rem_A);
if(src1ValueInfo->IsNumber() && src2ValueInfo->IsNumber())
{
valueType = ValueType::GetNumberAndLikelyInt(isLikelyTagged);
}
else
{
valueType = ValueType::GetInt(isLikelyTagged).ToLikely();
}
}
else if ((src1Val && src1ValueInfo->IsLikelyFloat()) || (src2Val && src2ValueInfo->IsLikelyFloat()))
{
// This should ideally be NewNumberAndLikelyFloatValue since we know the result is a number but not sure if it will
// be a float value. However, that Number/LikelyFloat value type doesn't exist currently and all the necessary
// checks are done for float values (tagged int checks, etc.) so it's sufficient to just create a float value here.
valueType = ValueType::Float;
}
else
{
valueType = ValueType::Number;
}
return CreateDstUntransferredValue(valueType, instr, src1Val, src2Val);
}
case Js::OpCode::CallI:
Assert(dst->IsRegOpnd());
return NewGenericValue(dst->AsRegOpnd()->GetValueType(), dst);
case Js::OpCode::LdElemI_A:
{
dstVal = ValueNumberLdElemDst(pInstr, src1Val);
const ValueType baseValueType(instr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->GetValueType());
if( (
baseValueType.IsLikelyNativeArray() ||
#ifdef _M_IX86
(
!AutoSystemInfo::Data.SSE2Available() &&
baseValueType.IsLikelyObject() &&
(
baseValueType.GetObjectType() == ObjectType::Float32Array ||
baseValueType.GetObjectType() == ObjectType::Float64Array
)
)
#else
false
#endif
) &&
instr->GetDst()->IsVar() &&
instr->HasBailOutInfo())
{
// The lowerer is not going to generate a fast path for this case. Remove any bailouts that require the fast
// path. Note that the removed bailouts should not be necessary for correctness.
IR::BailOutKind bailOutKind = instr->GetBailOutKind();
if(bailOutKind & IR::BailOutOnArrayAccessHelperCall)
{
bailOutKind -= IR::BailOutOnArrayAccessHelperCall;
}
if(bailOutKind == IR::BailOutOnImplicitCallsPreOp)
{
bailOutKind -= IR::BailOutOnImplicitCallsPreOp;
}
if(bailOutKind)
{
instr->SetBailOutKind(bailOutKind);
}
else
{
instr->ClearBailOutInfo();
}
}
return dstVal;
}
case Js::OpCode::LdMethodElem:
// Not worth profiling this, just assume it's likely object (should be likely function but ValueType does not track
// functions currently, so using ObjectType::Object instead)
dstVal = NewGenericValue(ValueType::GetObject(ObjectType::Object).ToLikely(), dst);
if(instr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->GetValueType().IsLikelyNativeArray() && instr->HasBailOutInfo())
{
// The lowerer is not going to generate a fast path for this case. Remove any bailouts that require the fast
// path. Note that the removed bailouts should not be necessary for correctness.
IR::BailOutKind bailOutKind = instr->GetBailOutKind();
if(bailOutKind & IR::BailOutOnArrayAccessHelperCall)
{
bailOutKind -= IR::BailOutOnArrayAccessHelperCall;
}
if(bailOutKind == IR::BailOutOnImplicitCallsPreOp)
{
bailOutKind -= IR::BailOutOnImplicitCallsPreOp;
}
if(bailOutKind)
{
instr->SetBailOutKind(bailOutKind);
}
else
{
instr->ClearBailOutInfo();
}
}
return dstVal;
case Js::OpCode::StElemI_A:
case Js::OpCode::StElemI_A_Strict:
dstVal = this->ValueNumberTransferDst(instr, src1Val);
break;
case Js::OpCode::LdLen_A:
if (instr->IsProfiledInstr())
{
const ValueType profiledValueType(instr->AsProfiledInstr()->u.ldElemInfo->GetElementType());
if(!(profiledValueType.IsLikelyInt() && dst->AsRegOpnd()->m_sym->m_isNotInt))
{
return this->NewGenericValue(profiledValueType, dst);
}
}
break;
case Js::OpCode::BrOnEmpty:
case Js::OpCode::BrOnNotEmpty:
Assert(dst->IsRegOpnd());
Assert(dst->GetValueType().IsString());
return this->NewGenericValue(ValueType::String, dst);
case Js::OpCode::IsInst:
case Js::OpCode::LdTrue:
case Js::OpCode::LdFalse:
return this->NewGenericValue(ValueType::Boolean, dst);
case Js::OpCode::LdUndef:
return this->NewGenericValue(ValueType::Undefined, dst);
case Js::OpCode::LdC_A_Null:
return this->NewGenericValue(ValueType::Null, dst);
case Js::OpCode::LdThis:
if (!PHASE_OFF(Js::OptTagChecksPhase, this->func) &&
(src1ValueInfo == nullptr || src1ValueInfo->IsUninitialized()))
{
return this->NewGenericValue(ValueType::GetObject(ObjectType::Object), dst);
}
break;
case Js::OpCode::Typeof:
case Js::OpCode::TypeofElem:
return this->NewGenericValue(ValueType::String, dst);
case Js::OpCode::InitLocalClosure:
Assert(instr->GetDst());
Assert(instr->GetDst()->IsRegOpnd());
IR::RegOpnd *regOpnd = instr->GetDst()->AsRegOpnd();
StackSym *opndStackSym = regOpnd->m_sym;
Assert(opndStackSym != nullptr);
ObjectSymInfo *objectSymInfo = opndStackSym->m_objectInfo;
Assert(objectSymInfo != nullptr);
for (PropertySym *localVarSlotList = objectSymInfo->m_propertySymList; localVarSlotList; localVarSlotList = localVarSlotList->m_nextInStackSymList)
{
this->slotSyms->Set(localVarSlotList->m_id);
}
break;
}
#ifdef ENABLE_SIMDJS
// SIMD_JS
if (Js::IsSimd128Opcode(instr->m_opcode) && !func->GetJITFunctionBody()->IsAsmJsMode())
{
ThreadContext::SimdFuncSignature simdFuncSignature;
instr->m_func->GetScriptContext()->GetThreadContext()->GetSimdFuncSignatureFromOpcode(instr->m_opcode, simdFuncSignature);
return this->NewGenericValue(simdFuncSignature.returnType, dst);
}
#endif
if (dstVal == nullptr)
{
return this->NewGenericValue(dst->GetValueType(), dst);
}
return CurrentBlockData()->SetValue(dstVal, dst);
}
Value *
GlobOpt::ValueNumberLdElemDst(IR::Instr **pInstr, Value *srcVal)
{
IR::Instr *&instr = *pInstr;
IR::Opnd *dst = instr->GetDst();
Value *dstVal = nullptr;
int32 newMin, newMax;
ValueInfo *srcValueInfo = (srcVal ? srcVal->GetValueInfo() : nullptr);
ValueType profiledElementType;
if (instr->IsProfiledInstr())
{
profiledElementType = instr->AsProfiledInstr()->u.ldElemInfo->GetElementType();
if(!(profiledElementType.IsLikelyInt() && dst->IsRegOpnd() && dst->AsRegOpnd()->m_sym->m_isNotInt) &&
srcVal &&
srcValueInfo->IsUninitialized())
{
if(IsLoopPrePass())
{
dstVal = NewGenericValue(profiledElementType, dst);
}
else
{
// Assuming the profile data gives more precise value types based on the path it took at runtime, we
// can improve the original value type.
srcValueInfo->Type() = profiledElementType;
instr->GetSrc1()->SetValueType(profiledElementType);
}
}
}
IR::IndirOpnd *src = instr->GetSrc1()->AsIndirOpnd();
const ValueType baseValueType(src->GetBaseOpnd()->GetValueType());
if (instr->DoStackArgsOpt(this->func) ||
!(
baseValueType.IsLikelyOptimizedTypedArray() ||
(baseValueType.IsLikelyNativeArray() && instr->IsProfiledInstr()) // Specialized native array lowering for LdElem requires that it is profiled.
) ||
(!this->DoTypedArrayTypeSpec() && baseValueType.IsLikelyOptimizedTypedArray()) ||
        // Don't do type spec on a native array with a history of accessing gaps, as that causes a bailout
(!this->DoNativeArrayTypeSpec() && baseValueType.IsLikelyNativeArray()) ||
!ShouldExpectConventionalArrayIndexValue(src))
{
if(DoTypedArrayTypeSpec() && !IsLoopPrePass())
{
GOPT_TRACE_INSTR(instr, _u("Didn't specialize array access.\n"));
if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->func))
{
char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
char baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE];
baseValueType.ToString(baseValueTypeStr);
Output::Print(_u("Typed Array Optimization: function: %s (%s): instr: %s, base value type: %S, did not type specialize, because %s.\n"),
this->func->GetJITFunctionBody()->GetDisplayName(),
this->func->GetDebugNumberSet(debugStringBuffer),
Js::OpCodeUtil::GetOpCodeName(instr->m_opcode),
baseValueTypeStr,
instr->DoStackArgsOpt(this->func) ? _u("instruction uses the arguments object") :
baseValueType.IsLikelyOptimizedTypedArray() ? _u("index is negative or likely not int") : _u("of array type"));
Output::Flush();
}
}
if(!dstVal)
{
if(srcVal)
{
dstVal = this->ValueNumberTransferDst(instr, srcVal);
}
else
{
dstVal = NewGenericValue(profiledElementType, dst);
}
}
return dstVal;
}
Assert(instr->GetSrc1()->IsIndirOpnd());
IRType toType = TyVar;
IR::BailOutKind bailOutKind = IR::BailOutConventionalTypedArrayAccessOnly;
switch(baseValueType.GetObjectType())
{
case ObjectType::Int8Array:
case ObjectType::Int8VirtualArray:
case ObjectType::Int8MixedArray:
newMin = Int8ConstMin;
newMax = Int8ConstMax;
goto IntArrayCommon;
case ObjectType::Uint8Array:
case ObjectType::Uint8VirtualArray:
case ObjectType::Uint8MixedArray:
case ObjectType::Uint8ClampedArray:
case ObjectType::Uint8ClampedVirtualArray:
case ObjectType::Uint8ClampedMixedArray:
newMin = Uint8ConstMin;
newMax = Uint8ConstMax;
goto IntArrayCommon;
case ObjectType::Int16Array:
case ObjectType::Int16VirtualArray:
case ObjectType::Int16MixedArray:
newMin = Int16ConstMin;
newMax = Int16ConstMax;
goto IntArrayCommon;
case ObjectType::Uint16Array:
case ObjectType::Uint16VirtualArray:
case ObjectType::Uint16MixedArray:
newMin = Uint16ConstMin;
newMax = Uint16ConstMax;
goto IntArrayCommon;
case ObjectType::Int32Array:
case ObjectType::Int32VirtualArray:
case ObjectType::Int32MixedArray:
case ObjectType::Uint32Array: // int-specialized loads from uint32 arrays will bail out on values that don't fit in an int32
case ObjectType::Uint32VirtualArray:
case ObjectType::Uint32MixedArray:
Int32Array:
newMin = Int32ConstMin;
newMax = Int32ConstMax;
goto IntArrayCommon;
IntArrayCommon:
Assert(dst->IsRegOpnd());
// If int type spec is disabled, it is ok to load int values as they can help float type spec, and merging int32 with float64 => float64.
// But if float type spec is also disabled, we'll have problems because float64 merged with var => float64...
if (!this->DoAggressiveIntTypeSpec() && !this->DoFloatTypeSpec())
{
if (!dstVal)
{
if (srcVal)
{
dstVal = this->ValueNumberTransferDst(instr, srcVal);
}
else
{
dstVal = NewGenericValue(profiledElementType, dst);
}
}
return dstVal;
}
if (!this->IsLoopPrePass())
{
if (instr->HasBailOutInfo())
{
const IR::BailOutKind oldBailOutKind = instr->GetBailOutKind();
Assert(
(
!(oldBailOutKind & ~IR::BailOutKindBits) ||
(oldBailOutKind & ~IR::BailOutKindBits) == IR::BailOutOnImplicitCallsPreOp
) &&
!(oldBailOutKind & IR::BailOutKindBits & ~(IR::BailOutOnArrayAccessHelperCall | IR::BailOutMarkTempObject)));
if (bailOutKind == IR::BailOutConventionalTypedArrayAccessOnly)
{
// BailOutConventionalTypedArrayAccessOnly also bails out if the array access is outside the head
// segment bounds, and guarantees no implicit calls. Override the bailout kind so that the instruction
// bails out for the right reason.
instr->SetBailOutKind(
bailOutKind | (oldBailOutKind & (IR::BailOutKindBits - IR::BailOutOnArrayAccessHelperCall)));
}
else
{
// BailOutConventionalNativeArrayAccessOnly by itself may generate a helper call, and may cause implicit
// calls to occur, so it must be merged in to eliminate generating the helper call
Assert(bailOutKind == IR::BailOutConventionalNativeArrayAccessOnly);
instr->SetBailOutKind(oldBailOutKind | bailOutKind);
}
}
else
{
GenerateBailAtOperation(&instr, bailOutKind);
}
}
TypeSpecializeIntDst(instr, instr->m_opcode, nullptr, nullptr, nullptr, bailOutKind, newMin, newMax, &dstVal);
toType = TyInt32;
break;
case ObjectType::Float32Array:
case ObjectType::Float32VirtualArray:
case ObjectType::Float32MixedArray:
case ObjectType::Float64Array:
case ObjectType::Float64VirtualArray:
case ObjectType::Float64MixedArray:
Float64Array:
Assert(dst->IsRegOpnd());
// If float type spec is disabled, don't load float64 values
if (!this->DoFloatTypeSpec())
{
if (!dstVal)
{
if (srcVal)
{
dstVal = this->ValueNumberTransferDst(instr, srcVal);
}
else
{
dstVal = NewGenericValue(profiledElementType, dst);
}
}
return dstVal;
}
if (!this->IsLoopPrePass())
{
if (instr->HasBailOutInfo())
{
const IR::BailOutKind oldBailOutKind = instr->GetBailOutKind();
Assert(
(
!(oldBailOutKind & ~IR::BailOutKindBits) ||
(oldBailOutKind & ~IR::BailOutKindBits) == IR::BailOutOnImplicitCallsPreOp
) &&
!(oldBailOutKind & IR::BailOutKindBits & ~(IR::BailOutOnArrayAccessHelperCall | IR::BailOutMarkTempObject)));
if (bailOutKind == IR::BailOutConventionalTypedArrayAccessOnly)
{
// BailOutConventionalTypedArrayAccessOnly also bails out if the array access is outside the head
// segment bounds, and guarantees no implicit calls. Override the bailout kind so that the instruction
// bails out for the right reason.
instr->SetBailOutKind(
bailOutKind | (oldBailOutKind & (IR::BailOutKindBits - IR::BailOutOnArrayAccessHelperCall)));
}
else
{
// BailOutConventionalNativeArrayAccessOnly by itself may generate a helper call, and may cause implicit
// calls to occur, so it must be merged in to eliminate generating the helper call
Assert(bailOutKind == IR::BailOutConventionalNativeArrayAccessOnly);
instr->SetBailOutKind(oldBailOutKind | bailOutKind);
}
}
else
{
GenerateBailAtOperation(&instr, bailOutKind);
}
}
TypeSpecializeFloatDst(instr, nullptr, nullptr, nullptr, &dstVal);
toType = TyFloat64;
break;
default:
Assert(baseValueType.IsLikelyNativeArray());
bailOutKind = IR::BailOutConventionalNativeArrayAccessOnly;
if(baseValueType.HasIntElements())
{
goto Int32Array;
}
Assert(baseValueType.HasFloatElements());
goto Float64Array;
}
if(!dstVal)
{
dstVal = NewGenericValue(profiledElementType, dst);
}
Assert(toType != TyVar);
GOPT_TRACE_INSTR(instr, _u("Type specialized array access.\n"));
if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->func))
{
char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
char baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE];
baseValueType.ToString(baseValueTypeStr);
char dstValTypeStr[VALUE_TYPE_MAX_STRING_SIZE];
dstVal->GetValueInfo()->Type().ToString(dstValTypeStr);
Output::Print(_u("Typed Array Optimization: function: %s (%s): instr: %s, base value type: %S, type specialized to %s producing %S"),
this->func->GetJITFunctionBody()->GetDisplayName(),
this->func->GetDebugNumberSet(debugStringBuffer),
Js::OpCodeUtil::GetOpCodeName(instr->m_opcode),
baseValueTypeStr,
toType == TyInt32 ? _u("int32") : _u("float64"),
dstValTypeStr);
#if DBG_DUMP
Output::Print(_u(" ("));
dstVal->Dump();
Output::Print(_u(").\n"));
#else
Output::Print(_u(".\n"));
#endif
Output::Flush();
}
return dstVal;
}
ValueType
GlobOpt::GetPrepassValueTypeForDst(
const ValueType desiredValueType,
IR::Instr *const instr,
Value *const src1Value,
Value *const src2Value,
bool *const isValueInfoPreciseRef) const
{
// Values with definite types can be created in the loop prepass only when it is guaranteed that the value type will be the
// same on any iteration of the loop. The heuristics currently used are:
// - If the source sym is not live on the back-edge, then it acquires a new value for each iteration of the loop, so
// that value type can be definite
// - Consider: A better solution for this is to track values that originate in this loop, which can have definite value
    //       types. That catches more cases; we should look into it in the future.
// - If the source sym has a constant value that doesn't change for the duration of the function
// - The operation always results in a definite value type. For instance, signed bitwise operations always result in an
// int32, conv_num and ++ always result in a number, etc.
// - For operations that always result in an int32, the resulting int range is precise only if the source syms pass
// the above heuristics. Otherwise, the range must be expanded to the full int32 range.
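    // Illustrative example: for 'i = j | 0' inside a loop where j is live on the back edge, the bitwise-or still
    // guarantees a definite int32 result, but since j's value may differ on later iterations, any int range computed
    // from j on the first pass is not precise and must be widened to the full int32 range.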
Assert(IsLoopPrePass());
Assert(instr);
if(isValueInfoPreciseRef)
{
*isValueInfoPreciseRef = false;
}
if(!desiredValueType.IsDefinite())
{
return desiredValueType;
}
if((instr->GetSrc1() && !IsPrepassSrcValueInfoPrecise(instr->GetSrc1(), src1Value)) ||
(instr->GetSrc2() && !IsPrepassSrcValueInfoPrecise(instr->GetSrc2(), src2Value)))
{
// If the desired value type is not precise, the value type of the destination is derived from the value types of the
// sources. Since the value type of a source sym is not definite, the destination value type also cannot be definite.
if(desiredValueType.IsInt() && OpCodeAttr::IsInt32(instr->m_opcode))
{
// The op always produces an int32, but not always a tagged int
return ValueType::GetInt(desiredValueType.IsLikelyTaggedInt());
}
if(desiredValueType.IsNumber() && OpCodeAttr::ProducesNumber(instr->m_opcode))
{
// The op always produces a number, but not always an int
return desiredValueType.ToDefiniteAnyNumber();
}
return desiredValueType.ToLikely();
}
if(isValueInfoPreciseRef)
{
// The produced value info is derived from the sources, which have precise value infos
*isValueInfoPreciseRef = true;
}
return desiredValueType;
}
bool
GlobOpt::IsPrepassSrcValueInfoPrecise(IR::Opnd *const src, Value *const srcValue) const
{
Assert(IsLoopPrePass());
Assert(src);
if(!src->IsRegOpnd() || !srcValue)
{
return false;
}
ValueInfo *const srcValueInfo = srcValue->GetValueInfo();
if(!srcValueInfo->IsDefinite())
{
return false;
}
StackSym *srcSym = src->AsRegOpnd()->m_sym;
Assert(!srcSym->IsTypeSpec());
int32 intConstantValue;
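    // The value is precise in the prepass when the sym cannot change across iterations: it comes from the bytecode
    // constant table, it is the canonical stack sym for a tagged-int constant, or it is not live on the loop back
    // edge (so it is redefined on every iteration).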
return
srcSym->IsFromByteCodeConstantTable() ||
(
srcValueInfo->TryGetIntConstantValue(&intConstantValue) &&
!Js::TaggedInt::IsOverflow(intConstantValue) &&
GetTaggedIntConstantStackSym(intConstantValue) == srcSym
) ||
!currentBlock->loop->regAlloc.liveOnBackEdgeSyms->Test(srcSym->m_id);
}
Value *GlobOpt::CreateDstUntransferredIntValue(
const int32 min,
const int32 max,
IR::Instr *const instr,
Value *const src1Value,
Value *const src2Value)
{
Assert(instr);
Assert(instr->GetDst());
Assert(OpCodeAttr::ProducesNumber(instr->m_opcode)
|| (instr->m_opcode == Js::OpCode::Add_A && src1Value->GetValueInfo()->IsNumber()
&& src2Value->GetValueInfo()->IsNumber()));
ValueType valueType(ValueType::GetInt(IntConstantBounds(min, max).IsLikelyTaggable()));
Assert(valueType.IsInt());
bool isValueInfoPrecise;
if(IsLoopPrePass())
{
valueType = GetPrepassValueTypeForDst(valueType, instr, src1Value, src2Value, &isValueInfoPrecise);
}
else
{
isValueInfoPrecise = true;
}
IR::Opnd *const dst = instr->GetDst();
if(isValueInfoPrecise)
{
Assert(valueType == ValueType::GetInt(IntConstantBounds(min, max).IsLikelyTaggable()));
Assert(!(dst->IsRegOpnd() && dst->AsRegOpnd()->m_sym->IsTypeSpec()));
return NewIntRangeValue(min, max, false, dst);
}
return NewGenericValue(valueType, dst);
}
Value *
GlobOpt::CreateDstUntransferredValue(
const ValueType desiredValueType,
IR::Instr *const instr,
Value *const src1Value,
Value *const src2Value)
{
Assert(instr);
Assert(instr->GetDst());
Assert(!desiredValueType.IsInt()); // use CreateDstUntransferredIntValue instead
ValueType valueType(desiredValueType);
if(IsLoopPrePass())
{
valueType = GetPrepassValueTypeForDst(valueType, instr, src1Value, src2Value);
}
return NewGenericValue(valueType, instr->GetDst());
}
Value *
GlobOpt::ValueNumberTransferDst(IR::Instr *const instr, Value * src1Val)
{
Value *dstVal = this->IsLoopPrePass() ? this->ValueNumberTransferDstInPrepass(instr, src1Val) : src1Val;
// Don't copy-prop a temp over a user symbol. This is likely to extend the temp's lifetime, as the user symbol
// is more likely to already have later references.
// REVIEW: Enabling this does cause perf issues...
#if 0
if (dstVal != src1Val)
{
return dstVal;
}
Sym *dstSym = dst->GetStackSym();
if (dstVal && dstSym && dstSym->IsStackSym() && !dstSym->AsStackSym()->m_isBytecodeTmp)
{
Sym *dstValSym = dstVal->GetValueInfo()->GetSymStore();
if (dstValSym && dstValSym->AsStackSym()->m_isBytecodeTmp /* src->GetIsDead()*/)
{
dstVal->GetValueInfo()->SetSymStore(dstSym);
}
}
#endif
return dstVal;
}
bool
GlobOpt::IsSafeToTransferInPrePass(IR::Opnd *src, Value *srcValue)
{
if (this->DoFieldHoisting())
{
return false;
}
if (src->IsRegOpnd())
{
StackSym *srcSym = src->AsRegOpnd()->m_sym;
if (srcSym->IsFromByteCodeConstantTable())
{
return true;
}
ValueInfo *srcValueInfo = srcValue->GetValueInfo();
int32 srcIntConstantValue;
if (srcValueInfo->TryGetIntConstantValue(&srcIntConstantValue) && !Js::TaggedInt::IsOverflow(srcIntConstantValue)
&& GetTaggedIntConstantStackSym(srcIntConstantValue) == srcSym)
{
return true;
}
}
return false;
}
Value *
GlobOpt::ValueNumberTransferDstInPrepass(IR::Instr *const instr, Value *const src1Val)
{
Value *dstVal = nullptr;
if (!src1Val)
{
return nullptr;
}
bool isValueInfoPrecise;
ValueInfo *const src1ValueInfo = src1Val->GetValueInfo();
// TODO: This conflicts with new values created by the type specialization code
// We should re-enable if we change that code to avoid the new values.
#if 0
if (this->IsSafeToTransferInPrePass(instr->GetSrc1(), src1Val))
{
return src1Val;
}
if (this->IsPREInstrCandidateLoad(instr->m_opcode) && instr->GetDst())
{
StackSym *dstSym = instr->GetDst()->AsRegOpnd()->m_sym;
for (Loop *curLoop = this->currentBlock->loop; curLoop; curLoop = curLoop->parent)
{
if (curLoop->fieldPRESymStore->Test(dstSym->m_id))
{
return src1Val;
}
}
}
if (!this->DoFieldHoisting())
{
if (instr->GetDst()->IsRegOpnd())
{
StackSym *stackSym = instr->GetDst()->AsRegOpnd()->m_sym;
if (stackSym->IsSingleDef() || this->IsLive(stackSym, this->prePassLoop->landingPad))
{
IntConstantBounds src1IntConstantBounds;
if (src1ValueInfo->TryGetIntConstantBounds(&src1IntConstantBounds) &&
!(
src1IntConstantBounds.LowerBound() == INT32_MIN &&
src1IntConstantBounds.UpperBound() == INT32_MAX
))
{
const ValueType valueType(
GetPrepassValueTypeForDst(src1ValueInfo->Type(), instr, src1Val, nullptr, &isValueInfoPrecise));
if (isValueInfoPrecise)
{
return src1Val;
}
}
else
{
return src1Val;
}
}
}
}
#endif
// Src1's value could change later in the loop, so the value wouldn't be the same for each
// iteration. Since we don't iterate over loops "while (!changed)", go conservative on the
// first pass when transferring a value that is live on the back-edge.
// In prepass we are going to copy the value but with a different value number
// for aggressive int type spec.
const ValueType valueType(GetPrepassValueTypeForDst(src1ValueInfo->Type(), instr, src1Val, nullptr, &isValueInfoPrecise));
if(isValueInfoPrecise || (valueType == src1ValueInfo->Type() && src1ValueInfo->IsGeneric()))
{
Assert(valueType == src1ValueInfo->Type());
dstVal = CopyValue(src1Val);
TrackCopiedValueForKills(dstVal);
}
else
{
dstVal = NewGenericValue(valueType);
dstVal->GetValueInfo()->SetSymStore(src1ValueInfo->GetSymStore());
}
return dstVal;
}
void
GlobOpt::PropagateIntRangeForNot(int32 minimum, int32 maximum, int32 *pNewMin, int32* pNewMax)
{
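    // Bitwise NOT is order-reversing on signed ints (~x == -x - 1), so the range [minimum, maximum] maps to
    // [~maximum, ~minimum]; the min/max below keep the result well-formed regardless of the input ordering.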
int32 tmp;
Int32Math::Not(minimum, pNewMin);
*pNewMax = *pNewMin;
Int32Math::Not(maximum, &tmp);
*pNewMin = min(*pNewMin, tmp);
*pNewMax = max(*pNewMax, tmp);
}
void
GlobOpt::PropagateIntRangeBinary(IR::Instr *instr, int32 min1, int32 max1,
int32 min2, int32 max2, int32 *pNewMin, int32* pNewMax)
{
int32 min, max, tmp, tmp2;
min = INT32_MIN;
max = INT32_MAX;
switch (instr->m_opcode)
{
case Js::OpCode::Xor_A:
case Js::OpCode::Or_A:
        // Find the range with the highest high-order bit
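        // Neither Or nor Xor can set a bit above the highest bit set in either input, so for non-negative bounds we
        // saturate everything under that bit to ones (e.g., an upper bound of 0x1010 widens to 0x1111).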
tmp = ::max((uint32)min1, (uint32)max1);
tmp2 = ::max((uint32)min2, (uint32)max2);
if ((uint32)tmp > (uint32)tmp2)
{
max = tmp;
}
else
{
max = tmp2;
}
if (max < 0)
{
min = INT32_MIN; // REVIEW: conservative...
max = INT32_MAX;
}
else
{
// Turn values like 0x1010 into 0x1111
max = 1 << Math::Log2(max);
max = (uint32)(max << 1) - 1;
min = 0;
}
break;
case Js::OpCode::And_A:
if (min1 == INT32_MIN && min2 == INT32_MIN)
{
// Shortcut
break;
}
        // Find the range with the lowest high-order bit
tmp = ::max((uint32)min1, (uint32)max1);
tmp2 = ::max((uint32)min2, (uint32)max2);
if ((uint32)tmp < (uint32)tmp2)
{
min = min1;
max = max1;
}
else
{
min = min2;
max = max2;
}
        // To compute max, check whether min has a higher high-order bit
if ((uint32)min > (uint32)max)
{
max = min;
}
        // If max is negative, let's assume it could be -1, so the result could be INT32_MAX
if (max < 0)
{
max = INT32_MAX;
}
// If min is positive, the resulting min is zero
if (min >= 0)
{
min = 0;
}
else
{
min = INT32_MIN;
}
break;
case Js::OpCode::Shl_A:
{
// Shift count
if (min2 != max2 && ((uint32)min2 > 0x1F || (uint32)max2 > 0x1F))
{
min2 = 0;
max2 = 0x1F;
}
else
{
min2 &= 0x1F;
max2 &= 0x1F;
}
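        // (JS shift counts only use the low five bits: e.g., x << 33 behaves like x << 1, hence the & 0x1F above.)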
int32 min1FreeTopBitCount = min1 ? (sizeof(int32) * 8) - (Math::Log2(min1) + 1) : (sizeof(int32) * 8);
int32 max1FreeTopBitCount = max1 ? (sizeof(int32) * 8) - (Math::Log2(max1) + 1) : (sizeof(int32) * 8);
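        // Illustrative example: max1 == 0x300 has 22 free top bits, so a shift count of 22 or more could reach the
        // sign bit and forces the conservative full int32 range below.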
if (min1FreeTopBitCount <= max2 || max1FreeTopBitCount <= max2)
{
// If the shift is going to touch the sign bit return the max range
min = INT32_MIN;
max = INT32_MAX;
}
else
{
// Compute max
// Turn values like 0x1010 into 0x1111
if (min1)
{
min1 = 1 << Math::Log2(min1);
min1 = (min1 << 1) - 1;
}
if (max1)
{
max1 = 1 << Math::Log2(max1);
max1 = (uint32)(max1 << 1) - 1;
}
if (max1 > 0)
{
int32 nrTopBits = (sizeof(int32) * 8) - Math::Log2(max1);
if (nrTopBits < ::min(max2, 30))
max = INT32_MAX;
else
max = ::max((max1 << ::min(max2, 30)) & ~0x80000000, (min1 << min2) & ~0x80000000);
}
else
{
max = (max1 << min2) & ~0x80000000;
}
// Compute min
if (min1 < 0)
{
min = ::min(min1 << max2, max1 << max2);
}
else
{
min = ::min(min1 << min2, max1 << max2);
}
// Turn values like 0x1110 into 0x1000
if (min)
{
min = 1 << Math::Log2(min);
}
}
}
break;
case Js::OpCode::Shr_A:
// Shift count
if (min2 != max2 && ((uint32)min2 > 0x1F || (uint32)max2 > 0x1F))
{
min2 = 0;
max2 = 0x1F;
}
else
{
min2 &= 0x1F;
max2 &= 0x1F;
}
// Compute max
if (max1 < 0)
{
max = max1 >> max2;
}
else
{
max = max1 >> min2;
}
// Compute min
if (min1 < 0)
{
min = min1 >> min2;
}
else
{
min = min1 >> max2;
}
break;
case Js::OpCode::ShrU_A:
// shift count is constant zero
if ((min2 == max2) && (max2 & 0x1f) == 0)
{
            // We can't encode a uint32 result, so either the result is only used as an int32 or the original value is positive.
Assert(instr->ignoreIntOverflow || min1 >= 0);
// We can transfer the signed int32 range.
min = min1;
max = max1;
break;
}
const IntConstantBounds src2NewBounds = IntConstantBounds(min2, max2).And_0x1f();
// Zero is only allowed if result is always a signed int32 or always used as a signed int32
Assert(min1 >= 0 || instr->ignoreIntOverflow || !src2NewBounds.Contains(0));
min2 = src2NewBounds.LowerBound();
max2 = src2NewBounds.UpperBound();
Assert(min2 <= max2);
// zero shift count is only allowed if result is used as int32 and/or value is positive
Assert(min2 > 0 || instr->ignoreIntOverflow || min1 >= 0);
uint32 umin1 = (uint32)min1;
uint32 umax1 = (uint32)max1;
if (umin1 > umax1)
{
uint32 temp = umax1;
umax1 = umin1;
umin1 = temp;
}
Assert(min2 >= 0 && max2 < 32);
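        // Illustrative example: for src1 in [-4, 10] with a constant shift count of 1, src1 may be a large unsigned
        // value at runtime, so umax1 becomes UINT32_MAX and max == UINT32_MAX >> 1 == INT32_MAX; and because the
        // range [-4, 10] contains zero, min == 0.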
// Compute max
if (min1 < 0)
{
umax1 = UINT32_MAX;
}
max = umax1 >> min2;
// Compute min
        if (min1 <= 0 && max1 >= 0)
{
min = 0;
}
else
{
min = umin1 >> max2;
}
        // We should be able to fit the uint32 range in an int32
Assert(instr->ignoreIntOverflow || (min >= 0 && max >= 0) );
if (min > max)
{
// can only happen if shift count can be zero
Assert(min2 == 0 && (instr->ignoreIntOverflow || min1 >= 0));
min = Int32ConstMin;
max = Int32ConstMax;
}
break;
}
*pNewMin = min;
*pNewMax = max;
}
IR::Instr *
GlobOpt::TypeSpecialization(
IR::Instr *instr,
Value **pSrc1Val,
Value **pSrc2Val,
Value **pDstVal,
bool *redoTypeSpecRef,
bool *const forceInvariantHoistingRef)
{
Value *&src1Val = *pSrc1Val;
Value *&src2Val = *pSrc2Val;
*redoTypeSpecRef = false;
Assert(!*forceInvariantHoistingRef);
this->ignoredIntOverflowForCurrentInstr = false;
this->ignoredNegativeZeroForCurrentInstr = false;
// - Int32 values that can't be tagged are created as float constant values instead because a JavascriptNumber var is needed
// for that value at runtime. For the purposes of type specialization, recover the int32 values so that they will be
// treated as ints.
// - If int overflow does not matter for the instruction, we can additionally treat uint32 values as int32 values because
// the value resulting from the operation will eventually be converted to int32 anyway
Value *const src1OriginalVal = src1Val;
Value *const src2OriginalVal = src2Val;
#ifdef ENABLE_SIMDJS
// SIMD_JS
if (TypeSpecializeSimd128(instr, pSrc1Val, pSrc2Val, pDstVal))
{
return instr;
}
#endif
if(!instr->ShouldCheckForIntOverflow())
{
if(src1Val && src1Val->GetValueInfo()->IsFloatConstant())
{
int32 int32Value;
bool isInt32;
if(Js::JavascriptNumber::TryGetInt32OrUInt32Value(
src1Val->GetValueInfo()->AsFloatConstant()->FloatValue(),
&int32Value,
&isInt32))
{
src1Val = GetIntConstantValue(int32Value, instr);
if(!isInt32)
{
this->ignoredIntOverflowForCurrentInstr = true;
}
}
}
if(src2Val && src2Val->GetValueInfo()->IsFloatConstant())
{
int32 int32Value;
bool isInt32;
if(Js::JavascriptNumber::TryGetInt32OrUInt32Value(
src2Val->GetValueInfo()->AsFloatConstant()->FloatValue(),
&int32Value,
&isInt32))
{
src2Val = GetIntConstantValue(int32Value, instr);
if(!isInt32)
{
this->ignoredIntOverflowForCurrentInstr = true;
}
}
}
}
const AutoRestoreVal autoRestoreSrc1Val(src1OriginalVal, &src1Val);
const AutoRestoreVal autoRestoreSrc2Val(src2OriginalVal, &src2Val);
if (src1Val && instr->GetSrc2() == nullptr)
{
// Unary
        // Note: make sure that native array StElemI gets to TypeSpecializeStElem. Do this for typed arrays, too?
int32 intConstantValue;
if (!this->IsLoopPrePass() &&
!instr->IsBranchInstr() &&
src1Val->GetValueInfo()->TryGetIntConstantValue(&intConstantValue) &&
!(
// Nothing to fold for element stores. Go into type specialization to see if they can at least be specialized.
instr->m_opcode == Js::OpCode::StElemI_A ||
instr->m_opcode == Js::OpCode::StElemI_A_Strict ||
instr->m_opcode == Js::OpCode::StElemC ||
instr->m_opcode == Js::OpCode::MultiBr ||
instr->m_opcode == Js::OpCode::InlineArrayPop
))
{
if (OptConstFoldUnary(&instr, intConstantValue, src1Val == src1OriginalVal, pDstVal))
{
return instr;
}
}
else if (this->TypeSpecializeUnary(
&instr,
&src1Val,
pDstVal,
src1OriginalVal,
redoTypeSpecRef,
forceInvariantHoistingRef))
{
return instr;
}
else if(*redoTypeSpecRef)
{
return instr;
}
}
else if (instr->GetSrc2() && !instr->IsBranchInstr())
{
// Binary
if (!this->IsLoopPrePass())
{
if (GetIsAsmJSFunc())
{
if (CONFIG_FLAG(WasmFold))
{
bool success = instr->GetSrc1()->IsInt64() ?
this->OptConstFoldBinaryWasm<int64>(&instr, src1Val, src2Val, pDstVal) :
this->OptConstFoldBinaryWasm<int>(&instr, src1Val, src2Val, pDstVal);
if (success)
{
return instr;
}
}
}
else
{
                // OptConstFoldBinary doesn't do type spec, so only deal with things we are sure are int (IntConstant and IntRange),
                // not just likely ints. TypeSpecializeBinary will deal with type specializing them and folding them again.
IntConstantBounds src1IntConstantBounds, src2IntConstantBounds;
if (src1Val && src1Val->GetValueInfo()->TryGetIntConstantBounds(&src1IntConstantBounds))
{
if (src2Val && src2Val->GetValueInfo()->TryGetIntConstantBounds(&src2IntConstantBounds))
{
if (this->OptConstFoldBinary(&instr, src1IntConstantBounds, src2IntConstantBounds, pDstVal))
{
return instr;
}
}
}
}
}
}
if (instr->GetSrc2() && this->TypeSpecializeBinary(&instr, pSrc1Val, pSrc2Val, pDstVal, src1OriginalVal, src2OriginalVal, redoTypeSpecRef))
{
if (!this->IsLoopPrePass() &&
instr->m_opcode != Js::OpCode::Nop &&
instr->m_opcode != Js::OpCode::Br && // We may have const fold a branch
// Cannot const-peep if the result of the operation is required for a bailout check
!(instr->HasBailOutInfo() && instr->GetBailOutKind() & IR::BailOutOnResultConditions))
{
if (src1Val && src1Val->GetValueInfo()->HasIntConstantValue())
{
if (this->OptConstPeep(instr, instr->GetSrc1(), pDstVal, src1Val->GetValueInfo()))
{
return instr;
}
}
else if (src2Val && src2Val->GetValueInfo()->HasIntConstantValue())
{
if (this->OptConstPeep(instr, instr->GetSrc2(), pDstVal, src2Val->GetValueInfo()))
{
return instr;
}
}
}
return instr;
}
else if(*redoTypeSpecRef)
{
return instr;
}
if (instr->IsBranchInstr() && !this->IsLoopPrePass())
{
if (this->OptConstFoldBranch(instr, src1Val, src2Val, pDstVal))
{
return instr;
}
}
// We didn't type specialize, make sure the srcs are unspecialized
IR::Opnd *src1 = instr->GetSrc1();
if (src1)
{
instr = this->ToVarUses(instr, src1, false, src1Val);
IR::Opnd *src2 = instr->GetSrc2();
if (src2)
{
instr = this->ToVarUses(instr, src2, false, src2Val);
}
}
IR::Opnd *dst = instr->GetDst();
if (dst)
{
instr = this->ToVarUses(instr, dst, true, nullptr);
// Handling for instructions other than built-ins that may require only dst type specialization
// should be added here.
if(OpCodeAttr::IsInlineBuiltIn(instr->m_opcode) && !GetIsAsmJSFunc()) // don't need to do typespec for asmjs
{
this->TypeSpecializeInlineBuiltInDst(&instr, pDstVal);
return instr;
}
// Clear the int specialized bit on the dst.
if (dst->IsRegOpnd())
{
IR::RegOpnd *dstRegOpnd = dst->AsRegOpnd();
if (!dstRegOpnd->m_sym->IsTypeSpec())
{
this->ToVarRegOpnd(dstRegOpnd, this->currentBlock);
}
else if (dstRegOpnd->m_sym->IsInt32())
{
this->ToInt32Dst(instr, dstRegOpnd, this->currentBlock);
}
else if (dstRegOpnd->m_sym->IsUInt32() && GetIsAsmJSFunc())
{
this->ToUInt32Dst(instr, dstRegOpnd, this->currentBlock);
}
else if (dstRegOpnd->m_sym->IsFloat64())
{
this->ToFloat64Dst(instr, dstRegOpnd, this->currentBlock);
}
}
else if (dst->IsSymOpnd() && dst->AsSymOpnd()->m_sym->IsStackSym())
{
this->ToVarStackSym(dst->AsSymOpnd()->m_sym->AsStackSym(), this->currentBlock);
}
}
return instr;
}
bool
GlobOpt::OptConstPeep(IR::Instr *instr, IR::Opnd *constSrc, Value **pDstVal, ValueInfo *valueInfo)
{
int32 value;
IR::Opnd *src;
IR::Opnd *nonConstSrc = (constSrc == instr->GetSrc1() ? instr->GetSrc2() : instr->GetSrc1());
// Try to find the value from value info first
    if (valueInfo->TryGetIntConstantValue(&value))
{
}
else if (constSrc->IsAddrOpnd())
{
IR::AddrOpnd *addrOpnd = constSrc->AsAddrOpnd();
#ifdef _M_X64
Assert(addrOpnd->IsVar() || Math::FitsInDWord((size_t)addrOpnd->m_address));
#else
Assert(sizeof(value) == sizeof(addrOpnd->m_address));
#endif
if (addrOpnd->IsVar())
{
value = Js::TaggedInt::ToInt32(addrOpnd->m_address);
}
else
{
// We asserted that the address will fit in a DWORD above
value = ::Math::PointerCastToIntegral<int32>(constSrc->AsAddrOpnd()->m_address);
}
}
else if (constSrc->IsIntConstOpnd())
{
value = constSrc->AsIntConstOpnd()->AsInt32();
}
else
{
return false;
}
switch(instr->m_opcode)
{
// Can't do all Add_A because of string concats.
// Sub_A cannot be transformed to a NEG_A because 0 - 0 != -0
case Js::OpCode::Add_A:
src = nonConstSrc;
if (!src->GetValueType().IsInt())
{
// 0 + -0 != -0
// "Foo" + 0 != "Foo
return false;
}
// fall-through
case Js::OpCode::Add_I4:
if (value != 0)
{
return false;
}
if (constSrc == instr->GetSrc1())
{
src = instr->GetSrc2();
}
else
{
src = instr->GetSrc1();
}
break;
case Js::OpCode::Mul_A:
case Js::OpCode::Mul_I4:
if (value == 0)
{
// -0 * 0 != 0
return false;
}
else if (value == 1)
{
src = nonConstSrc;
}
else
{
return false;
}
break;
case Js::OpCode::Div_A:
if (value == 1 && constSrc == instr->GetSrc2())
{
src = instr->GetSrc1();
}
else
{
return false;
}
break;
case Js::OpCode::Or_I4:
if (value == -1)
{
src = constSrc;
}
else if (value == 0)
{
src = nonConstSrc;
}
else
{
return false;
}
break;
case Js::OpCode::And_I4:
if (value == -1)
{
src = nonConstSrc;
}
else if (value == 0)
{
src = constSrc;
}
else
{
return false;
}
break;
case Js::OpCode::Shl_I4:
case Js::OpCode::ShrU_I4:
case Js::OpCode::Shr_I4:
if (value != 0 || constSrc != instr->GetSrc2())
{
return false;
}
src = instr->GetSrc1();
break;
default:
return false;
}
this->CaptureByteCodeSymUses(instr);
if (src == instr->GetSrc1())
{
instr->FreeSrc2();
}
else
{
Assert(src == instr->GetSrc2());
instr->ReplaceSrc1(instr->UnlinkSrc2());
}
instr->m_opcode = Js::OpCode::Ld_A;
return true;
}
Js::Var // TODO: michhol OOP JIT, shouldn't play with Vars
GlobOpt::GetConstantVar(IR::Opnd *opnd, Value *val)
{
ValueInfo *valueInfo = val->GetValueInfo();
if (valueInfo->IsVarConstant() && valueInfo->IsPrimitive())
{
return valueInfo->AsVarConstant()->VarValue();
}
if (opnd->IsAddrOpnd())
{
IR::AddrOpnd *addrOpnd = opnd->AsAddrOpnd();
if (addrOpnd->IsVar())
{
return addrOpnd->m_address;
}
}
else if (opnd->IsIntConstOpnd())
{
if (!Js::TaggedInt::IsOverflow(opnd->AsIntConstOpnd()->AsInt32()))
{
return Js::TaggedInt::ToVarUnchecked(opnd->AsIntConstOpnd()->AsInt32());
}
}
else if (opnd->IsRegOpnd() && opnd->AsRegOpnd()->m_sym->IsSingleDef())
{
if (valueInfo->IsBoolean())
{
IR::Instr * defInstr = opnd->AsRegOpnd()->m_sym->GetInstrDef();
if (defInstr->m_opcode != Js::OpCode::Ld_A || !defInstr->GetSrc1()->IsAddrOpnd())
{
return nullptr;
}
Assert(defInstr->GetSrc1()->AsAddrOpnd()->IsVar());
return defInstr->GetSrc1()->AsAddrOpnd()->m_address;
}
else if (valueInfo->IsUndefined())
{
return (Js::Var)this->func->GetScriptContextInfo()->GetUndefinedAddr();
}
else if (valueInfo->IsNull())
{
return (Js::Var)this->func->GetScriptContextInfo()->GetNullAddr();
}
}
return nullptr;
}
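// Statically detects a number-vs-boolean comparison that can never be loosely equal: one side is a definite number
// whose constant var is neither 0 nor 1, while the other side is a boolean (booleans only loosely equal 0 or 1).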
bool BoolAndIntStaticAndTypeMismatch(Value* src1Val, Value* src2Val, Js::Var src1Var, Js::Var src2Var)
{
ValueInfo *src1ValInfo = src1Val->GetValueInfo();
ValueInfo *src2ValInfo = src2Val->GetValueInfo();
return (src1ValInfo->IsNumber() && src1Var && src2ValInfo->IsBoolean() && src1Var != Js::TaggedInt::ToVarUnchecked(0) && src1Var != Js::TaggedInt::ToVarUnchecked(1)) ||
(src2ValInfo->IsNumber() && src2Var && src1ValInfo->IsBoolean() && src2Var != Js::TaggedInt::ToVarUnchecked(0) && src2Var != Js::TaggedInt::ToVarUnchecked(1));
}
bool
GlobOpt::OptConstFoldBranch(IR::Instr *instr, Value *src1Val, Value*src2Val, Value **pDstVal)
{
if (!src1Val)
{
return false;
}
int64 left64, right64;
Js::Var src1Var = this->GetConstantVar(instr->GetSrc1(), src1Val);
Js::Var src2Var = nullptr;
if (instr->GetSrc2())
{
if (!src2Val)
{
return false;
}
src2Var = this->GetConstantVar(instr->GetSrc2(), src2Val);
}
auto AreSourcesEqual = [&](Value * val1, Value * val2) -> bool
{
// NaN !== NaN, and objects can have valueOf/toString
return val1->IsEqualTo(val2) &&
val1->GetValueInfo()->IsPrimitive() && val1->GetValueInfo()->IsNotFloat();
};
// Make sure GetConstantVar only returns primitives.
    // TODO: OOP JIT, enable these asserts
//Assert(!src1Var || !Js::JavascriptOperators::IsObject(src1Var));
//Assert(!src2Var || !Js::JavascriptOperators::IsObject(src2Var));
BOOL result;
int32 constVal;
switch (instr->m_opcode)
{
#define BRANCH(OPCODE,CMP,TYPE,UNSIGNEDNESS) \
case Js::OpCode::##OPCODE: \
if (src1Val->GetValueInfo()->TryGetInt64ConstantValue(&left64, UNSIGNEDNESS) && \
src2Val->GetValueInfo()->TryGetInt64ConstantValue(&right64, UNSIGNEDNESS)) \
{ \
result = (TYPE)left64 CMP (TYPE)right64; \
} \
else if (AreSourcesEqual(src1Val, src2Val)) \
{ \
result = 0 CMP 0; \
} \
else \
{ \
return false; \
} \
break;
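    // For instance, BRANCH(BrEq_I4, ==, int64, false) expands to a BrEq_I4 case that folds the branch when both
    // sources have known int64 constant values (or are provably equal), comparing them as signed int64 with ==.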
BRANCH(BrEq_I4, == , int64, false)
BRANCH(BrGe_I4, >= , int64, false)
BRANCH(BrGt_I4, >, int64, false)
BRANCH(BrLt_I4, <, int64, false)
BRANCH(BrLe_I4, <= , int64, false)
BRANCH(BrNeq_I4, != , int64, false)
BRANCH(BrUnGe_I4, >= , uint64, true)
BRANCH(BrUnGt_I4, >, uint64, true)
BRANCH(BrUnLt_I4, <, uint64, true)
BRANCH(BrUnLe_I4, <= , uint64, true)
case Js::OpCode::BrEq_A:
case Js::OpCode::BrNotNeq_A:
if (!src1Var || !src2Var)
{
if (BoolAndIntStaticAndTypeMismatch(src1Val, src2Val, src1Var, src2Var))
{
result = false;
}
else if (AreSourcesEqual(src1Val, src2Val))
{
result = true;
}
else
{
return false;
}
}
else
{
if (func->IsOOPJIT() || !CONFIG_FLAG(OOPJITMissingOpts))
{
// TODO: OOP JIT, const folding
return false;
}
result = Js::JavascriptOperators::Equal(src1Var, src2Var, this->func->GetScriptContext());
}
break;
case Js::OpCode::BrNeq_A:
case Js::OpCode::BrNotEq_A:
if (!src1Var || !src2Var)
{
if (BoolAndIntStaticAndTypeMismatch(src1Val, src2Val, src1Var, src2Var))
{
result = true;
}
else if (AreSourcesEqual(src1Val, src2Val))
{
result = false;
}
else
{
return false;
}
}
else
{
if (func->IsOOPJIT() || !CONFIG_FLAG(OOPJITMissingOpts))
{
// TODO: OOP JIT, const folding
return false;
}
result = Js::JavascriptOperators::NotEqual(src1Var, src2Var, this->func->GetScriptContext());
}
break;
case Js::OpCode::BrSrEq_A:
case Js::OpCode::BrSrNotNeq_A:
if (!src1Var || !src2Var)
{
ValueInfo *src1ValInfo = src1Val->GetValueInfo();
ValueInfo *src2ValInfo = src2Val->GetValueInfo();
if (
(src1ValInfo->IsUndefined() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenUndefined()) ||
(src1ValInfo->IsNull() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenNull()) ||
(src1ValInfo->IsBoolean() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenBoolean()) ||
(src1ValInfo->IsNumber() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenNumber()) ||
(src1ValInfo->IsString() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenString()) ||
(src2ValInfo->IsUndefined() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenUndefined()) ||
(src2ValInfo->IsNull() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenNull()) ||
(src2ValInfo->IsBoolean() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenBoolean()) ||
(src2ValInfo->IsNumber() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenNumber()) ||
(src2ValInfo->IsString() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenString())
)
{
result = false;
}
else if (AreSourcesEqual(src1Val, src2Val))
{
result = true;
}
else
{
return false;
}
}
else
{
if (func->IsOOPJIT() || !CONFIG_FLAG(OOPJITMissingOpts))
{
// TODO: OOP JIT, const folding
return false;
}
result = Js::JavascriptOperators::StrictEqual(src1Var, src2Var, this->func->GetScriptContext());
}
break;
case Js::OpCode::BrSrNeq_A:
case Js::OpCode::BrSrNotEq_A:
if (!src1Var || !src2Var)
{
ValueInfo *src1ValInfo = src1Val->GetValueInfo();
ValueInfo *src2ValInfo = src2Val->GetValueInfo();
if (
(src1ValInfo->IsUndefined() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenUndefined()) ||
(src1ValInfo->IsNull() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenNull()) ||
(src1ValInfo->IsBoolean() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenBoolean()) ||
(src1ValInfo->IsNumber() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenNumber()) ||
(src1ValInfo->IsString() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenString()) ||
(src2ValInfo->IsUndefined() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenUndefined()) ||
(src2ValInfo->IsNull() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenNull()) ||
(src2ValInfo->IsBoolean() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenBoolean()) ||
(src2ValInfo->IsNumber() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenNumber()) ||
(src2ValInfo->IsString() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenString())
)
{
result = true;
}
else if (AreSourcesEqual(src1Val, src2Val))
{
result = false;
}
else
{
return false;
}
}
else
{
if (func->IsOOPJIT() || !CONFIG_FLAG(OOPJITMissingOpts))
{
// TODO: OOP JIT, const folding
return false;
}
result = Js::JavascriptOperators::NotStrictEqual(src1Var, src2Var, this->func->GetScriptContext());
}
break;
case Js::OpCode::BrFalse_A:
case Js::OpCode::BrTrue_A:
{
ValueInfo *const src1ValueInfo = src1Val->GetValueInfo();
if(src1ValueInfo->IsNull() || src1ValueInfo->IsUndefined())
{
result = instr->m_opcode == Js::OpCode::BrFalse_A;
break;
}
if(src1ValueInfo->IsObject() && src1ValueInfo->GetObjectType() > ObjectType::Object)
{
// Specific object types that are tracked are equivalent to 'true'
result = instr->m_opcode == Js::OpCode::BrTrue_A;
break;
}
if (func->IsOOPJIT() || !CONFIG_FLAG(OOPJITMissingOpts))
{
// TODO: OOP JIT, const folding
return false;
}
if (!src1Var)
{
return false;
}
result = Js::JavascriptConversion::ToBoolean(src1Var, this->func->GetScriptContext());
if(instr->m_opcode == Js::OpCode::BrFalse_A)
{
result = !result;
}
break;
}
case Js::OpCode::BrFalse_I4:
// this path would probably work outside of asm.js, but we should verify that if we ever hit this scenario
Assert(GetIsAsmJSFunc());
constVal = 0;
if (!src1Val->GetValueInfo()->TryGetIntConstantValue(&constVal))
{
return false;
}
result = constVal == 0;
break;
default:
return false;
#undef BRANCH
}
this->OptConstFoldBr(!!result, instr);
return true;
}
bool
GlobOpt::OptConstFoldUnary(
IR::Instr * *pInstr,
const int32 intConstantValue,
const bool isUsingOriginalSrc1Value,
Value **pDstVal)
{
IR::Instr * &instr = *pInstr;
int32 value = 0;
IR::Opnd *constOpnd;
bool isInt = true;
bool doSetDstVal = true;
FloatConstType fValue = 0.0;
if (!DoConstFold())
{
return false;
}
if (instr->GetDst() && !instr->GetDst()->IsRegOpnd())
{
return false;
}
switch(instr->m_opcode)
{
case Js::OpCode::Neg_A:
if (intConstantValue == 0)
{
// Could fold to -0.0
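// (in JavaScript -0 is observable: 1 / -0 === -Infinity), and -0 is not representable as an int32 constant.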
return false;
}
if (Int32Math::Neg(intConstantValue, &value))
{
return false;
}
break;
case Js::OpCode::Not_A:
Int32Math::Not(intConstantValue, &value);
break;
case Js::OpCode::Ld_A:
if (instr->HasBailOutInfo())
{
// The profile data for a switch expression can say string, while in GlobOpt we realize it is an int.
if(instr->GetBailOutKind() == IR::BailOutExpectingString)
{
throw Js::RejitException(RejitReason::DisableSwitchOptExpectingString);
}
Assert(instr->GetBailOutKind() == IR::BailOutExpectingInteger);
instr->ClearBailOutInfo();
}
value = intConstantValue;
if(isUsingOriginalSrc1Value)
{
doSetDstVal = false; // Let OptDst do it by copying src1Val
}
break;
case Js::OpCode::Conv_Num:
case Js::OpCode::LdC_A_I4:
value = intConstantValue;
if(isUsingOriginalSrc1Value)
{
doSetDstVal = false; // Let OptDst do it by copying src1Val
}
break;
case Js::OpCode::Incr_A:
if (Int32Math::Inc(intConstantValue, &value))
{
return false;
}
break;
case Js::OpCode::Decr_A:
if (Int32Math::Dec(intConstantValue, &value))
{
return false;
}
break;
case Js::OpCode::InlineMathAcos:
fValue = Js::Math::Acos((double)intConstantValue);
isInt = false;
break;
case Js::OpCode::InlineMathAsin:
fValue = Js::Math::Asin((double)intConstantValue);
isInt = false;
break;
case Js::OpCode::InlineMathAtan:
fValue = Js::Math::Atan((double)intConstantValue);
isInt = false;
break;
case Js::OpCode::InlineMathCos:
fValue = Js::Math::Cos((double)intConstantValue);
isInt = false;
break;
case Js::OpCode::InlineMathExp:
fValue = Js::Math::Exp((double)intConstantValue);
isInt = false;
break;
case Js::OpCode::InlineMathLog:
fValue = Js::Math::Log((double)intConstantValue);
isInt = false;
break;
case Js::OpCode::InlineMathSin:
fValue = Js::Math::Sin((double)intConstantValue);
isInt = false;
break;
case Js::OpCode::InlineMathSqrt:
fValue = ::sqrt((double)intConstantValue);
isInt = false;
break;
case Js::OpCode::InlineMathTan:
fValue = ::tan((double)intConstantValue);
isInt = false;
break;
case Js::OpCode::InlineMathFround:
fValue = (double) (float) intConstantValue;
isInt = false;
break;
case Js::OpCode::InlineMathAbs:
if (intConstantValue == INT32_MIN)
{
if (instr->GetDst()->IsInt32())
{
// if dst is an int (e.g. in asm.js), we should coerce it, not convert to float
value = static_cast<int32>(2147483648U);
}
else
{
// Rejit with AggressiveIntTypeSpecDisabled for Math.abs(INT32_MIN) because it causes dst
// to be of float type, which could differ from the previous type spec result in LoopPrePass
throw Js::RejitException(RejitReason::AggressiveIntTypeSpecDisabled);
}
}
else
{
value = ::abs(intConstantValue);
}
break;
case Js::OpCode::InlineMathClz:
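// Math.clz32: _BitScanReverse returns the index of the most significant set bit, so the count of leading
// zeros is 31 - index; when the input is 0 no bit is set and the result is defined to be 32.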
DWORD clz;
if (_BitScanReverse(&clz, intConstantValue))
{
value = 31 - clz;
}
else
{
value = 32;
}
instr->ClearBailOutInfo();
break;
case Js::OpCode::Ctz:
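// Wasm i32.ctz: _BitScanForward returns the index of the least significant set bit, which is exactly the
// trailing-zero count; when the input is 0 no bit is set and the result is defined to be 32.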
Assert(func->GetJITFunctionBody()->IsWasmFunction());
Assert(!instr->HasBailOutInfo());
DWORD ctz;
if (_BitScanForward(&ctz, intConstantValue))
{
value = ctz;
}
else
{
value = 32;
}
break;
case Js::OpCode::InlineMathFloor:
value = intConstantValue;
instr->ClearBailOutInfo();
break;
case Js::OpCode::InlineMathCeil:
value = intConstantValue;
instr->ClearBailOutInfo();
break;
case Js::OpCode::InlineMathRound:
value = intConstantValue;
instr->ClearBailOutInfo();
break;
case Js::OpCode::ToVar:
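// ToVar of an int constant can be folded only when the value fits in a tagged int; otherwise it would need boxing.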
if (Js::TaggedInt::IsOverflow(intConstantValue))
{
return false;
}
else
{
value = intConstantValue;
instr->ClearBailOutInfo();
break;
}
default:
return false;
}
this->CaptureByteCodeSymUses(instr);
Assert(!instr->HasBailOutInfo()); // If we are, in fact, successful in constant folding the instruction, there is no point in keeping the bailout info around anymore.
// Make sure that it is cleared if it was initially present.
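// If the float result is exactly representable as an int32, prefer folding to an int constant.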
if (!isInt)
{
value = (int32)fValue;
if (fValue == (double)value)
{
isInt = true;
}
}
if (isInt)
{
constOpnd = IR::IntConstOpnd::New(value, TyInt32, instr->m_func);
GOPT_TRACE(_u("Constant folding to %d\n"), value);
}
else
{
constOpnd = IR::FloatConstOpnd::New(fValue, TyFloat64, instr->m_func);
GOPT_TRACE(_u("Constant folding to %f\n"), fValue);
}
instr->ReplaceSrc1(constOpnd);
this->OptSrc(constOpnd, &instr);
IR::Opnd *dst = instr->GetDst();
Assert(dst->IsRegOpnd());
StackSym *dstSym = dst->AsRegOpnd()->m_sym;
if (isInt)
{
if (dstSym->IsSingleDef())
{
dstSym->SetIsIntConst(value);
}
if (doSetDstVal)
{
*pDstVal = GetIntConstantValue(value, instr, dst);
}
if (IsTypeSpecPhaseOff(this->func))
{
instr->m_opcode = Js::OpCode::LdC_A_I4;
this->ToVarRegOpnd(dst->AsRegOpnd(), this->currentBlock);
}
else
{
instr->m_opcode = Js::OpCode::Ld_I4;
this->ToInt32Dst(instr, dst->AsRegOpnd(), this->currentBlock);
StackSym * currDstSym = instr->GetDst()->AsRegOpnd()->m_sym;
if (currDstSym->IsSingleDef())
{
currDstSym->SetIsIntConst(value);
}
}
}
else
{
*pDstVal = NewFloatConstantValue(fValue, dst);
if (IsTypeSpecPhaseOff(this->func))
{
instr->m_opcode = Js::OpCode::LdC_A_R8;
this->ToVarRegOpnd(dst->AsRegOpnd(), this->currentBlock);
}
else
{
instr->m_opcode = Js::OpCode::LdC_F8_R8;
this->ToFloat64Dst(instr, dst->AsRegOpnd(), this->currentBlock);
}
}
// If this is an induction variable, then treat it the way the prepass would have if it had seen
// the assignment and the resulting change to the value number, and mark it as indeterminate.
for (Loop * loop = this->currentBlock->loop; loop; loop = loop->parent)
{
InductionVariable *iv = nullptr;
if (loop->inductionVariables && loop->inductionVariables->TryGetReference(dstSym->m_id, &iv))
{
iv->SetChangeIsIndeterminate();
}
}
return true;
}
//------------------------------------------------------------------------------------------------------
// Type specialization
//------------------------------------------------------------------------------------------------------
bool
GlobOpt::IsWorthSpecializingToInt32DueToSrc(IR::Opnd *const src, Value *const val)
{
Assert(src);
Assert(val);
ValueInfo *valueInfo = val->GetValueInfo();
Assert(valueInfo->IsLikelyInt());
// If it is not known that the operand is definitely an int, the operand is not already type-specialized, and it's not live
// in the loop landing pad (if we're in a loop), it's probably not worth type-specializing this instruction. The common case
// where type-specializing this would be bad is where the operations are entirely on properties or array elements, where the
// ratio of FromVars and ToVars to the number of actual operations is high, and the conversions would dominate the time
// spent. On the other hand, if we're using a function formal parameter more than once, it would probably be worth
// type-specializing it, hence the IsDead check on the operands.
return
valueInfo->IsInt() ||
valueInfo->HasIntConstantValue(true) ||
!src->GetIsDead() ||
!src->IsRegOpnd() ||
CurrentBlockData()->IsInt32TypeSpecialized(src->AsRegOpnd()->m_sym) ||
(this->currentBlock->loop && this->currentBlock->loop->landingPad->globOptData.IsLive(src->AsRegOpnd()->m_sym));
}
bool
GlobOpt::IsWorthSpecializingToInt32DueToDst(IR::Opnd *const dst)
{
Assert(dst);
const auto sym = dst->AsRegOpnd()->m_sym;
return
CurrentBlockData()->IsInt32TypeSpecialized(sym) ||
(this->currentBlock->loop && this->currentBlock->loop->landingPad->globOptData.IsLive(sym));
}
bool
GlobOpt::IsWorthSpecializingToInt32(IR::Instr *const instr, Value *const src1Val, Value *const src2Val)
{
Assert(instr);
const auto src1 = instr->GetSrc1();
const auto src2 = instr->GetSrc2();
// In addition to checking each operand and the destination, if for any reason we only have to do a maximum of two
// conversions instead of the worst-case 3 conversions, it's probably worth specializing.
if (IsWorthSpecializingToInt32DueToSrc(src1, src1Val) ||
(src2Val && IsWorthSpecializingToInt32DueToSrc(src2, src2Val)))
{
return true;
}
IR::Opnd *dst = instr->GetDst();
if (!dst || IsWorthSpecializingToInt32DueToDst(dst))
{
return true;
}
if (dst->IsEqual(src1) || (src2Val && (dst->IsEqual(src2) || src1->IsEqual(src2))))
{
return true;
}
IR::Instr *instrNext = instr->GetNextRealInstrOrLabel();
// Skip useless Ld_A's
do
{
switch (instrNext->m_opcode)
{
case Js::OpCode::Ld_A:
if (!dst->IsEqual(instrNext->GetSrc1()))
{
goto done;
}
dst = instrNext->GetDst();
break;
case Js::OpCode::LdFld:
case Js::OpCode::LdRootFld:
case Js::OpCode::LdRootFldForTypeOf:
case Js::OpCode::LdFldForTypeOf:
case Js::OpCode::LdElemI_A:
case Js::OpCode::ByteCodeUses:
break;
default:
goto done;
}
instrNext = instrNext->GetNextRealInstrOrLabel();
} while (true);
done:
// If the next instr could also be type specialized, then it is probably worth it.
if ((instrNext->GetSrc1() && dst->IsEqual(instrNext->GetSrc1())) || (instrNext->GetSrc2() && dst->IsEqual(instrNext->GetSrc2())))
{
switch (instrNext->m_opcode)
{
case Js::OpCode::Add_A:
case Js::OpCode::Sub_A:
case Js::OpCode::Mul_A:
case Js::OpCode::Div_A:
case Js::OpCode::Rem_A:
case Js::OpCode::Xor_A:
case Js::OpCode::And_A:
case Js::OpCode::Or_A:
case Js::OpCode::Shl_A:
case Js::OpCode::Shr_A:
case Js::OpCode::Incr_A:
case Js::OpCode::Decr_A:
case Js::OpCode::Neg_A:
case Js::OpCode::Not_A:
case Js::OpCode::Conv_Num:
case Js::OpCode::BrEq_I4:
case Js::OpCode::BrTrue_I4:
case Js::OpCode::BrFalse_I4:
case Js::OpCode::BrGe_I4:
case Js::OpCode::BrGt_I4:
case Js::OpCode::BrLt_I4:
case Js::OpCode::BrLe_I4:
case Js::OpCode::BrNeq_I4:
return true;
}
}
return false;
}
bool
GlobOpt::TypeSpecializeNumberUnary(IR::Instr *instr, Value *src1Val, Value **pDstVal)
{
Assert(src1Val->GetValueInfo()->IsNumber());
if (this->IsLoopPrePass())
{
return false;
}
switch (instr->m_opcode)
{
case Js::OpCode::Conv_Num:
// Optimize Conv_Num away since we know this is a number
instr->m_opcode = Js::OpCode::Ld_A;
return false;
}
return false;
}
bool
GlobOpt::TypeSpecializeUnary(
IR::Instr **pInstr,
Value **pSrc1Val,
Value **pDstVal,
Value *const src1OriginalVal,
bool *redoTypeSpecRef,
bool *const forceInvariantHoistingRef)
{
Assert(pSrc1Val);
Value *&src1Val = *pSrc1Val;
Assert(src1Val);
// We don't need to do typespec for asmjs
if (IsTypeSpecPhaseOff(this->func) || GetIsAsmJSFunc())
{
return false;
}
IR::Instr *&instr = *pInstr;
int32 min, max;
// Inline built-ins explicitly specify how srcs/dst must be specialized.
if (OpCodeAttr::IsInlineBuiltIn(instr->m_opcode))
{
TypeSpecializeInlineBuiltInUnary(pInstr, &src1Val, pDstVal, src1OriginalVal, redoTypeSpecRef);
return true;
}
// Consider: If type spec wasn't completely done, make sure that we don't type-spec the dst 2nd time.
if(instr->m_opcode == Js::OpCode::LdLen_A && TypeSpecializeLdLen(&instr, &src1Val, pDstVal, forceInvariantHoistingRef))
{
return true;
}
if (!src1Val->GetValueInfo()->GetIntValMinMax(&min, &max, this->DoAggressiveIntTypeSpec()))
{
src1Val = src1OriginalVal;
if (src1Val->GetValueInfo()->IsLikelyFloat())
{
// Try to type specialize to float
return this->TypeSpecializeFloatUnary(pInstr, src1Val, pDstVal);
}
else if (src1Val->GetValueInfo()->IsNumber())
{
return TypeSpecializeNumberUnary(instr, src1Val, pDstVal);
}
return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal);
}
return this->TypeSpecializeIntUnary(pInstr, &src1Val, pDstVal, min, max, src1OriginalVal, redoTypeSpecRef);
}
// Type specializes the srcs and dst of an inline built-in unary instruction; the built-in's flags and opcode
// determine how they must be specialized (float for most Math built-ins, int where possible for abs/clz32, etc.).
void
GlobOpt::TypeSpecializeInlineBuiltInUnary(IR::Instr **pInstr, Value **pSrc1Val, Value **pDstVal, Value *const src1OriginalVal, bool *redoTypeSpecRef)
{
IR::Instr *&instr = *pInstr;
Assert(pSrc1Val);
Value *&src1Val = *pSrc1Val;
Assert(OpCodeAttr::IsInlineBuiltIn(instr->m_opcode));
Js::BuiltinFunction builtInId = Js::JavascriptLibrary::GetBuiltInInlineCandidateId(instr->m_opcode); // From actual instr, not profile based.
Assert(builtInId != Js::BuiltinFunction::None);
// Consider: use a different bailout for float/int FromVars, so that when the arg cannot be converted to a number
// we don't disable type spec for other parts of the big function but rather just don't inline that built-in instr.
// E.g. we could do that if the value is not likelyInt/likelyFloat.
Js::BuiltInFlags builtInFlags = Js::JavascriptLibrary::GetFlagsForBuiltIn(builtInId);
bool areAllArgsAlwaysFloat = (builtInFlags & Js::BuiltInFlags::BIF_Args) == Js::BuiltInFlags::BIF_TypeSpecUnaryToFloat;
if (areAllArgsAlwaysFloat)
{
// InlineMathAcos, InlineMathAsin, InlineMathAtan, InlineMathCos, InlineMathExp, InlineMathLog, InlineMathSin, InlineMathSqrt, InlineMathTan.
Assert(this->DoFloatTypeSpec());
// Type-spec the src.
src1Val = src1OriginalVal;
bool retVal = this->TypeSpecializeFloatUnary(pInstr, src1Val, pDstVal, /* skipDst = */ true);
AssertMsg(retVal, "For inline built-ins the args have to be type-specialized to float, but something failed during the process.");
// Type-spec the dst.
this->TypeSpecializeFloatDst(instr, nullptr, src1Val, nullptr, pDstVal);
}
else if (instr->m_opcode == Js::OpCode::InlineMathAbs)
{
// Consider: when the value is unknown, the bailout in abs may disable type spec for the whole function, which is too much.
// First, try int.
int minVal, maxVal;
bool shouldTypeSpecToInt = src1Val->GetValueInfo()->GetIntValMinMax(&minVal, &maxVal, /* doAggressiveIntTypeSpec = */ true);
if (shouldTypeSpecToInt)
{
Assert(this->DoAggressiveIntTypeSpec());
bool retVal = this->TypeSpecializeIntUnary(pInstr, &src1Val, pDstVal, minVal, maxVal, src1OriginalVal, redoTypeSpecRef, true);
AssertMsg(retVal, "For inline built-ins the args have to be type-specialized (int), but something failed during the process.");
if (!this->IsLoopPrePass())
{
// Create a bailout for INT_MIN, which does not have a corresponding int value on the positive side.
// Check the int range: if we know the range cannot include INT_MIN, we do not need the bailout at all.
if (minVal == INT32_MIN)
{
GenerateBailAtOperation(&instr, IR::BailOnIntMin);
}
}
// Account for ::abs(INT_MIN) == INT_MIN (which is less than 0).
maxVal = ::max(
::abs(Int32Math::NearestInRangeTo(minVal, INT_MIN + 1, INT_MAX)),
::abs(Int32Math::NearestInRangeTo(maxVal, INT_MIN + 1, INT_MAX)));
minVal = minVal >= 0 ? minVal : 0;
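// E.g., for an input range of [-5, 3] the resulting range is [0, 5].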
this->TypeSpecializeIntDst(instr, instr->m_opcode, nullptr, src1Val, nullptr, IR::BailOutInvalid, minVal, maxVal, pDstVal);
}
else
{
// If we couldn't do int, do float.
Assert(this->DoFloatTypeSpec());
src1Val = src1OriginalVal;
bool retVal = this->TypeSpecializeFloatUnary(pInstr, src1Val, pDstVal, true);
AssertMsg(retVal, "For inline built-ins the args have to be type-specialized (float), but something failed during the process.");
this->TypeSpecializeFloatDst(instr, nullptr, src1Val, nullptr, pDstVal);
}
}
else if (instr->m_opcode == Js::OpCode::InlineMathFloor || instr->m_opcode == Js::OpCode::InlineMathCeil || instr->m_opcode == Js::OpCode::InlineMathRound)
{
// Type specialize src to float
src1Val = src1OriginalVal;
bool retVal = this->TypeSpecializeFloatUnary(pInstr, src1Val, pDstVal, /* skipDst = */ true);
AssertMsg(retVal, "For inline Math.floor and Math.ceil the src has to be type-specialized to float, but something failed during the process.");
// Type specialize dst to int
this->TypeSpecializeIntDst(
instr,
instr->m_opcode,
nullptr,
src1Val,
nullptr,
IR::BailOutInvalid,
INT32_MIN,
INT32_MAX,
pDstVal);
}
else if(instr->m_opcode == Js::OpCode::InlineArrayPop)
{
IR::Opnd *const thisOpnd = instr->GetSrc1();
Assert(thisOpnd);
// Ensure src1 (Array) is a var
this->ToVarUses(instr, thisOpnd, false, src1Val);
if(!this->IsLoopPrePass() && thisOpnd->GetValueType().IsLikelyNativeArray())
{
// We bail out at run time if there is an illegal access or a mismatch in the native array type that is optimized for.
GenerateBailAtOperation(&instr, IR::BailOutConventionalNativeArrayAccessOnly);
}
if(!instr->GetDst())
{
return;
}
// Try Type Specializing the element (return item from Pop) based on the array's profile data.
if(thisOpnd->GetValueType().IsLikelyNativeIntArray())
{
this->TypeSpecializeIntDst(instr, instr->m_opcode, nullptr, nullptr, nullptr, IR::BailOutInvalid, INT32_MIN, INT32_MAX, pDstVal);
}
else if(thisOpnd->GetValueType().IsLikelyNativeFloatArray())
{
this->TypeSpecializeFloatDst(instr, nullptr, nullptr, nullptr, pDstVal);
}
else
{
// We reached here so the Element is not yet type specialized. Ensure element is a var
if(instr->GetDst()->IsRegOpnd())
{
this->ToVarRegOpnd(instr->GetDst()->AsRegOpnd(), currentBlock);
}
}
}
else if (instr->m_opcode == Js::OpCode::InlineMathClz)
{
Assert(this->DoAggressiveIntTypeSpec());
Assert(this->DoLossyIntTypeSpec());
// Type specialize to int
bool retVal = this->TypeSpecializeIntUnary(pInstr, &src1Val, pDstVal, INT32_MIN, INT32_MAX, src1OriginalVal, redoTypeSpecRef);
AssertMsg(retVal, "For clz32, the arg has to be type-specialized to int.");
}
else
{
AssertMsg(FALSE, "Unsupported built-in!");
}
}
void
GlobOpt::TypeSpecializeInlineBuiltInBinary(IR::Instr **pInstr, Value *src1Val, Value* src2Val, Value **pDstVal, Value *const src1OriginalVal, Value *const src2OriginalVal)
{
IR::Instr *&instr = *pInstr;
Assert(OpCodeAttr::IsInlineBuiltIn(instr->m_opcode));
switch(instr->m_opcode)
{
case Js::OpCode::InlineMathAtan2:
{
Js::BuiltinFunction builtInId = Js::JavascriptLibrary::GetBuiltInInlineCandidateId(instr->m_opcode); // From actual instr, not profile based.
Js::BuiltInFlags builtInFlags = Js::JavascriptLibrary::GetFlagsForBuiltIn(builtInId);
bool areAllArgsAlwaysFloat = (builtInFlags & Js::BuiltInFlags::BIF_TypeSpecAllToFloat) != 0;
Assert(areAllArgsAlwaysFloat);
Assert(this->DoFloatTypeSpec());
// Type-spec the src1, src2 and dst.
src1Val = src1OriginalVal;
src2Val = src2OriginalVal;
bool retVal = this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal);
AssertMsg(retVal, "For pow and atnan2 the args have to be type-specialized to float, but something failed during the process.");
break;
}
case Js::OpCode::InlineMathPow:
{
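// Math.pow: if the exponent is a likely int, convert it to int32; then, when the base is also a likely int and
// the exponent is known non-negative, use the int^int fast path with an overflow bailout, otherwise fall back
// to float specialization of the base.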
#ifndef _M_ARM32_OR_ARM64
if (src2Val->GetValueInfo()->IsLikelyInt())
{
bool lossy = false;
this->ToInt32(instr, instr->GetSrc2(), this->currentBlock, src2Val, nullptr, lossy);
IR::Opnd* src1 = instr->GetSrc1();
int32 valueMin, valueMax;
if (src1Val->GetValueInfo()->IsLikelyInt() &&
this->DoPowIntIntTypeSpec() &&
src2Val->GetValueInfo()->GetIntValMinMax(&valueMin, &valueMax, this->DoAggressiveIntTypeSpec()) &&
valueMin >= 0)
{
this->ToInt32(instr, src1, this->currentBlock, src1Val, nullptr, lossy);
this->TypeSpecializeIntDst(instr, instr->m_opcode, nullptr, src1Val, src2Val, IR::BailOutInvalid, INT32_MIN, INT32_MAX, pDstVal);
if(!this->IsLoopPrePass())
{
GenerateBailAtOperation(&instr, IR::BailOutOnPowIntIntOverflow);
}
}
else
{
this->ToFloat64(instr, src1, this->currentBlock, src1Val, nullptr, IR::BailOutPrimitiveButString);
TypeSpecializeFloatDst(instr, nullptr, src1Val, src2Val, pDstVal);
}
}
else
{
#endif
this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal);
#ifndef _M_ARM32_OR_ARM64
}
#endif
break;
}
case Js::OpCode::InlineMathImul:
{
Assert(this->DoAggressiveIntTypeSpec());
Assert(this->DoLossyIntTypeSpec());
// Type specialize to int
bool retVal = this->TypeSpecializeIntBinary(pInstr, src1Val, src2Val, pDstVal, INT32_MIN, INT32_MAX, false /* skipDst */);
AssertMsg(retVal, "For imul, the args have to be type-specialized to int but something failed during the process.");
break;
}
case Js::OpCode::InlineMathMin:
case Js::OpCode::InlineMathMax:
{
if(src1Val->GetValueInfo()->IsLikelyInt() && src2Val->GetValueInfo()->IsLikelyInt())
{
// Compute resulting range info
int32 min1 = INT32_MIN;
int32 max1 = INT32_MAX;
int32 min2 = INT32_MIN;
int32 max2 = INT32_MAX;
int32 newMin, newMax;
Assert(this->DoAggressiveIntTypeSpec());
src1Val->GetValueInfo()->GetIntValMinMax(&min1, &max1, this->DoAggressiveIntTypeSpec());
src2Val->GetValueInfo()->GetIntValMinMax(&min2, &max2, this->DoAggressiveIntTypeSpec());
if (instr->m_opcode == Js::OpCode::InlineMathMin)
{
newMin = min(min1, min2);
newMax = min(max1, max2);
}
else
{
Assert(instr->m_opcode == Js::OpCode::InlineMathMax);
newMin = max(min1, min2);
newMax = max(max1, max2);
}
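// E.g., Math.min over ranges [0, 10] and [5, 20] yields the result range [0, 10].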
// Type specialize to int
bool retVal = this->TypeSpecializeIntBinary(pInstr, src1Val, src2Val, pDstVal, newMin, newMax, false /* skipDst */);
AssertMsg(retVal, "For min and max, the args have to be type-specialized to int if any one of the sources is an int, but something failed during the process.");
}
// Couldn't type specialize to int, type specialize to float
else
{
Assert(this->DoFloatTypeSpec());
src1Val = src1OriginalVal;
src2Val = src2OriginalVal;
bool retVal = this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal);
AssertMsg(retVal, "For min and max, the args have to be type-specialized to float if any one of the sources is a float, but something failed during the process.");
}
break;
}
case Js::OpCode::InlineArrayPush:
{
IR::Opnd *const thisOpnd = instr->GetSrc1();
Assert(thisOpnd);
if(instr->GetDst() && instr->GetDst()->IsRegOpnd())
{
// Set the dst as live here, as the built-ins return early from the TypeSpecialization functions - before the dst is marked as live.
// Also, we are not specializing the dst separately and we are skipping the dst to be handled when we specialize the instruction above.
this->ToVarRegOpnd(instr->GetDst()->AsRegOpnd(), currentBlock);
}
// Ensure src1 (Array) is a var
this->ToVarUses(instr, thisOpnd, false, src1Val);
if(!this->IsLoopPrePass())
{
if(thisOpnd->GetValueType().IsLikelyNativeArray())
{
// We bail out at run time if there is an illegal access or a mismatch in the native array type that is optimized for.
GenerateBailAtOperation(&instr, IR::BailOutConventionalNativeArrayAccessOnly);
}
else
{
GenerateBailAtOperation(&instr, IR::BailOutOnImplicitCallsPreOp);
}
}
// Try Type Specializing the element based on the array's profile data.
if(thisOpnd->GetValueType().IsLikelyNativeFloatArray())
{
src1Val = src1OriginalVal;
src2Val = src2OriginalVal;
}
if((thisOpnd->GetValueType().IsLikelyNativeIntArray() && this->TypeSpecializeIntBinary(pInstr, src1Val, src2Val, pDstVal, INT32_MIN, INT32_MAX, true))
|| (thisOpnd->GetValueType().IsLikelyNativeFloatArray() && this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal)))
{
break;
}
// The Element is not yet type specialized. Ensure element is a var
this->ToVarUses(instr, instr->GetSrc2(), false, src2Val);
break;
}
}
}
void
GlobOpt::TypeSpecializeInlineBuiltInDst(IR::Instr **pInstr, Value **pDstVal)
{
IR::Instr *&instr = *pInstr;
Assert(OpCodeAttr::IsInlineBuiltIn(instr->m_opcode));
if (instr->m_opcode == Js::OpCode::InlineMathRandom)
{
Assert(this->DoFloatTypeSpec());
// Type specialize dst to float
this->TypeSpecializeFloatDst(instr, nullptr, nullptr, nullptr, pDstVal);
}
}
bool
GlobOpt::TryTypeSpecializeUnaryToFloatHelper(IR::Instr** pInstr, Value** pSrc1Val, Value* const src1OriginalVal, Value **pDstVal)
{
// It has been determined that this instruction cannot be int-specialized. We need to determine whether to attempt to
// float-specialize the instruction, or leave it unspecialized.
#if !INT32VAR
Value*& src1Val = *pSrc1Val;
if(src1Val->GetValueInfo()->IsLikelyUntaggedInt())
{
// An input range is completely outside the range of an int31. Even if the operation may overflow, it is
// unlikely to overflow on these operations, so we leave it unspecialized on 64-bit platforms. However, on
// 32-bit platforms, the value is untaggable and will be a JavascriptNumber, which is significantly slower to
// use in an unspecialized operation compared to a tagged int. So, try to float-specialize the instruction.
src1Val = src1OriginalVal;
return this->TypeSpecializeFloatUnary(pInstr, src1Val, pDstVal);
}
#endif
return false;
}
bool
GlobOpt::TypeSpecializeIntBinary(IR::Instr **pInstr, Value *src1Val, Value *src2Val, Value **pDstVal, int32 min, int32 max, bool skipDst /* = false */)
{
// Consider moving the code for int type spec-ing binary functions here.
IR::Instr *&instr = *pInstr;
bool lossy = false;
if(OpCodeAttr::IsInlineBuiltIn(instr->m_opcode))
{
if(instr->m_opcode == Js::OpCode::InlineArrayPush)
{
int32 intConstantValue;
bool isIntConstMissingItem = src2Val->GetValueInfo()->TryGetIntConstantValue(&intConstantValue);
if(isIntConstMissingItem)
{
isIntConstMissingItem = Js::SparseArraySegment<int>::IsMissingItem(&intConstantValue);
}
// Don't specialize if the element is not likely int, or if it is an int constant equal to the missing-item value.
if(!(src2Val->GetValueInfo()->IsLikelyInt()) || isIntConstMissingItem)
{
return false;
}
// We don't want to specialize both the source operands, though it is a binary instr.
IR::Opnd * elementOpnd = instr->GetSrc2();
this->ToInt32(instr, elementOpnd, this->currentBlock, src2Val, nullptr, lossy);
}
else
{
IR::Opnd *src1 = instr->GetSrc1();
this->ToInt32(instr, src1, this->currentBlock, src1Val, nullptr, lossy);
IR::Opnd *src2 = instr->GetSrc2();
this->ToInt32(instr, src2, this->currentBlock, src2Val, nullptr, lossy);
}
if(!skipDst)
{
IR::Opnd *dst = instr->GetDst();
if (dst)
{
TypeSpecializeIntDst(instr, instr->m_opcode, nullptr, src1Val, src2Val, IR::BailOutInvalid, min, max, pDstVal);
}
}
return true;
}
else
{
AssertMsg(false, "Yet to move code for other binary functions here");
return false;
}
}
bool
GlobOpt::TypeSpecializeIntUnary(
IR::Instr **pInstr,
Value **pSrc1Val,
Value **pDstVal,
int32 min,
int32 max,
Value *const src1OriginalVal,
bool *redoTypeSpecRef,
bool skipDst /* = false */)
{
IR::Instr *&instr = *pInstr;
Assert(pSrc1Val);
Value *&src1Val = *pSrc1Val;
bool isTransfer = false;
Js::OpCode opcode;
int32 newMin, newMax;
bool lossy = false;
IR::BailOutKind bailOutKind = IR::BailOutInvalid;
bool ignoredIntOverflow = this->ignoredIntOverflowForCurrentInstr;
bool ignoredNegativeZero = false;
bool checkTypeSpecWorth = false;
if(instr->GetSrc1()->IsRegOpnd() && instr->GetSrc1()->AsRegOpnd()->m_sym->m_isNotInt)
{
return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal);
}
AddSubConstantInfo addSubConstantInfo;
switch(instr->m_opcode)
{
case Js::OpCode::Ld_A:
if (instr->GetSrc1()->IsRegOpnd())
{
StackSym *sym = instr->GetSrc1()->AsRegOpnd()->m_sym;
if (CurrentBlockData()->IsInt32TypeSpecialized(sym) == false)
{
// Type specializing an Ld_A isn't worth it, unless the src
// is already type specialized.
return false;
}
}
newMin = min;
newMax = max;
opcode = Js::OpCode::Ld_I4;
isTransfer = true;
break;
case Js::OpCode::Conv_Num:
newMin = min;
newMax = max;
opcode = Js::OpCode::Ld_I4;
isTransfer = true;
break;
case Js::OpCode::LdC_A_I4:
newMin = newMax = instr->GetSrc1()->AsIntConstOpnd()->AsInt32();
opcode = Js::OpCode::Ld_I4;
break;
case Js::OpCode::Neg_A:
if (min <= 0 && max >= 0)
{
if(instr->ShouldCheckForNegativeZero())
{
// -0 matters since the sym is not a local, or is used in a way in which -0 would differ from +0
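// (e.g., 1 / -0 === -Infinity while 1 / +0 === Infinity, so the sign of zero is observable).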
if(!DoAggressiveIntTypeSpec())
{
// May result in -0
// Consider adding a dynamic check for src1 == 0
return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal);
}
if(min == 0 && max == 0)
{
// Always results in -0
return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal);
}
bailOutKind |= IR::BailOutOnNegativeZero;
}
else
{
ignoredNegativeZero = true;
}
}
if (Int32Math::Neg(min, &newMax))
{
if(instr->ShouldCheckForIntOverflow())
{
if(!DoAggressiveIntTypeSpec())
{
// May overflow
return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal);
}
if(min == max)
{
// Always overflows
return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal);
}
bailOutKind |= IR::BailOutOnOverflow;
newMax = INT32_MAX;
}
else
{
ignoredIntOverflow = true;
}
}
if (Int32Math::Neg(max, &newMin))
{
if(instr->ShouldCheckForIntOverflow())
{
if(!DoAggressiveIntTypeSpec())
{
// May overflow
return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal);
}
bailOutKind |= IR::BailOutOnOverflow;
newMin = INT32_MAX;
}
else
{
ignoredIntOverflow = true;
}
}
if(!instr->ShouldCheckForIntOverflow() && newMin > newMax)
{
// When ignoring overflow, the range needs to account for overflow. Since MIN_INT is the only int32 value that
// overflows on Neg, and the value resulting from overflow is also MIN_INT, if calculating only the new min or new
// max overflowed but not both, then the new min will be greater than the new max. In that case we need to consider
// the full range of int32s as possible resulting values.
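// E.g., negating [INT32_MIN, -5] wraps -min to INT32_MIN while -max is 5, leaving newMin(5) > newMax(INT32_MIN).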
newMin = INT32_MIN;
newMax = INT32_MAX;
}
opcode = Js::OpCode::Neg_I4;
checkTypeSpecWorth = true;
break;
case Js::OpCode::Not_A:
if(!DoLossyIntTypeSpec())
{
return false;
}
this->PropagateIntRangeForNot(min, max, &newMin, &newMax);
opcode = Js::OpCode::Not_I4;
lossy = true;
break;
case Js::OpCode::Incr_A:
do // while(false)
{
const auto CannotOverflowBasedOnRelativeBounds = [&]()
{
const ValueInfo *const src1ValueInfo = src1Val->GetValueInfo();
return
(src1ValueInfo->IsInt() || DoAggressiveIntTypeSpec()) &&
src1ValueInfo->IsIntBounded() &&
src1ValueInfo->AsIntBounded()->Bounds()->AddCannotOverflowBasedOnRelativeBounds(1);
};
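// Relative (symbolic) bounds can prove that adding 1 cannot overflow even when the constant range alone cannot.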
if (Int32Math::Inc(min, &newMin))
{
if(CannotOverflowBasedOnRelativeBounds())
{
newMin = INT32_MAX;
}
else if(instr->ShouldCheckForIntOverflow())
{
// Always overflows
return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal);
}
else
{
// When ignoring overflow, the range needs to account for overflow. For any Add or Sub, since overflow
// causes the value to wrap around, and we don't have a way to specify a lower and upper range of ints,
// we use the full range of int32s.
ignoredIntOverflow = true;
newMin = INT32_MIN;
newMax = INT32_MAX;
break;
}
}
if (Int32Math::Inc(max, &newMax))
{
if(CannotOverflowBasedOnRelativeBounds())
{
newMax = INT32_MAX;
}
else if(instr->ShouldCheckForIntOverflow())
{
if(!DoAggressiveIntTypeSpec())
{
// May overflow
return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal);
}
bailOutKind |= IR::BailOutOnOverflow;
newMax = INT32_MAX;
}
else
{
// See comment about ignoring overflow above
ignoredIntOverflow = true;
newMin = INT32_MIN;
newMax = INT32_MAX;
break;
}
}
} while(false);
if(!ignoredIntOverflow && instr->GetSrc1()->IsRegOpnd())
{
addSubConstantInfo.Set(instr->GetSrc1()->AsRegOpnd()->m_sym, src1Val, min == max, 1);
}
opcode = Js::OpCode::Add_I4;
if (!this->IsLoopPrePass())
{
instr->SetSrc2(IR::IntConstOpnd::New(1, TyInt32, instr->m_func));
}
checkTypeSpecWorth = true;
break;
case Js::OpCode::Decr_A:
do // while(false)
{
const auto CannotOverflowBasedOnRelativeBounds = [&]()
{
const ValueInfo *const src1ValueInfo = src1Val->GetValueInfo();
return
(src1ValueInfo->IsInt() || DoAggressiveIntTypeSpec()) &&
src1ValueInfo->IsIntBounded() &&
src1ValueInfo->AsIntBounded()->Bounds()->SubCannotOverflowBasedOnRelativeBounds(1);
};
if (Int32Math::Dec(max, &newMax))
{
if(CannotOverflowBasedOnRelativeBounds())
{
newMax = INT32_MIN;
}
else if(instr->ShouldCheckForIntOverflow())
{
// Always overflows
return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal);
}
else
{
// When ignoring overflow, the range needs to account for overflow. For any Add or Sub, since overflow
// causes the value to wrap around, and we don't have a way to specify a lower and upper range of ints, we
// use the full range of int32s.
ignoredIntOverflow = true;
newMin = INT32_MIN;
newMax = INT32_MAX;
break;
}
}
if (Int32Math::Dec(min, &newMin))
{
if(CannotOverflowBasedOnRelativeBounds())
{
newMin = INT32_MIN;
}
else if(instr->ShouldCheckForIntOverflow())
{
if(!DoAggressiveIntTypeSpec())
{
// May overflow
return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal);
}
bailOutKind |= IR::BailOutOnOverflow;
newMin = INT32_MIN;
}
else
{
// See comment about ignoring overflow above
ignoredIntOverflow = true;
newMin = INT32_MIN;
newMax = INT32_MAX;
break;
}
}
} while(false);
if(!ignoredIntOverflow && instr->GetSrc1()->IsRegOpnd())
{
addSubConstantInfo.Set(instr->GetSrc1()->AsRegOpnd()->m_sym, src1Val, min == max, -1);
}
opcode = Js::OpCode::Sub_I4;
if (!this->IsLoopPrePass())
{
instr->SetSrc2(IR::IntConstOpnd::New(1, TyInt32, instr->m_func));
}
checkTypeSpecWorth = true;
break;
case Js::OpCode::BrFalse_A:
case Js::OpCode::BrTrue_A:
{
if(DoConstFold() && !IsLoopPrePass() && TryOptConstFoldBrFalse(instr, src1Val, min, max))
{
return true;
}
bool specialize = true;
if (!src1Val->GetValueInfo()->HasIntConstantValue() && instr->GetSrc1()->IsRegOpnd())
{
StackSym *sym = instr->GetSrc1()->AsRegOpnd()->m_sym;
if (CurrentBlockData()->IsInt32TypeSpecialized(sym) == false)
{
// Type specializing a BrTrue_A/BrFalse_A isn't worth it, unless the src
// is already type specialized
specialize = false;
}
}
if(instr->m_opcode == Js::OpCode::BrTrue_A)
{
UpdateIntBoundsForNotEqualBranch(src1Val, nullptr, 0);
opcode = Js::OpCode::BrTrue_I4;
}
else
{
UpdateIntBoundsForEqualBranch(src1Val, nullptr, 0);
opcode = Js::OpCode::BrFalse_I4;
}
if(!specialize)
{
return false;
}
newMin = 2; newMax = 1; // We'll assert if we make a range where min > max
break;
}
case Js::OpCode::MultiBr:
newMin = min;
newMax = max;
opcode = instr->m_opcode;
break;
case Js::OpCode::StElemI_A:
case Js::OpCode::StElemI_A_Strict:
case Js::OpCode::StElemC:
if(instr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->GetValueType().IsLikelyAnyArrayWithNativeFloatValues())
{
src1Val = src1OriginalVal;
}
return TypeSpecializeStElem(pInstr, src1Val, pDstVal);
case Js::OpCode::NewScArray:
case Js::OpCode::NewScArrayWithMissingValues:
case Js::OpCode::InitFld:
case Js::OpCode::InitRootFld:
case Js::OpCode::StSlot:
case Js::OpCode::StSlotChkUndecl:
#if !FLOATVAR
case Js::OpCode::StSlotBoxTemp:
#endif
case Js::OpCode::StFld:
case Js::OpCode::StRootFld:
case Js::OpCode::StFldStrict:
case Js::OpCode::StRootFldStrict:
case Js::OpCode::ArgOut_A:
case Js::OpCode::ArgOut_A_Inline:
case Js::OpCode::ArgOut_A_FixupForStackArgs:
case Js::OpCode::ArgOut_A_Dynamic:
case Js::OpCode::ArgOut_A_FromStackArgs:
case Js::OpCode::ArgOut_A_SpreadArg:
// For this one we need to implement type specialization
//case Js::OpCode::ArgOut_A_InlineBuiltIn:
case Js::OpCode::Ret:
case Js::OpCode::LdElemUndef:
case Js::OpCode::LdElemUndefScoped:
return false;
default:
if (OpCodeAttr::IsInlineBuiltIn(instr->m_opcode))
{
newMin = min;
newMax = max;
opcode = instr->m_opcode;
break; // Note: we must keep checkTypeSpecWorth == false here so that the worth heuristic below never makes us return false for inline built-ins.
}
return false;
}
// If this instruction is in a range of instructions where int overflow does not matter, we will still specialize it (won't
// leave it unspecialized based on heuristics), since it is most likely worth specializing, and the dst value needs to be
// guaranteed to be an int
if(checkTypeSpecWorth &&
!ignoredIntOverflow &&
!ignoredNegativeZero &&
instr->ShouldCheckForIntOverflow() &&
!IsWorthSpecializingToInt32(instr, src1Val))
{
// Even though type specialization is being skipped since it may not be worth it, the proper value should still be
// maintained so that the result may be type specialized later. An int value is not created for the dst in any of
// the following cases.
// - A bailout check is necessary to specialize this instruction. The bailout check is what guarantees the result to be
// an int, but since we're not going to specialize this instruction, there won't be a bailout check.
// - Aggressive int type specialization is disabled and we're in a loop prepass. We're conservative on dst values in
// that case, especially if the dst sym is live on the back-edge.
if(bailOutKind == IR::BailOutInvalid &&
instr->GetDst() &&
(DoAggressiveIntTypeSpec() || !this->IsLoopPrePass()))
{
*pDstVal = CreateDstUntransferredIntValue(newMin, newMax, instr, src1Val, nullptr);
}
if(instr->GetSrc2())
{
instr->FreeSrc2();
}
return false;
}
this->ignoredIntOverflowForCurrentInstr = ignoredIntOverflow;
this->ignoredNegativeZeroForCurrentInstr = ignoredNegativeZero;
{
// Try CSE again before modifying the IR, in case some attributes are required for successful CSE
Value *src1IndirIndexVal = nullptr;
Value *src2Val = nullptr;
if(CSEOptimize(currentBlock, &instr, &src1Val, &src2Val, &src1IndirIndexVal, true /* intMathExprOnly */))
{
*redoTypeSpecRef = true;
return false;
}
}
const Js::OpCode originalOpCode = instr->m_opcode;
if (!this->IsLoopPrePass())
{
// No re-write on prepass
instr->m_opcode = opcode;
}
Value *src1ValueToSpecialize = src1Val;
if(lossy)
{
// Lossy conversions to int32 must be done based on the original source values. For instance, if one of the values is a
// float constant with a value that fits in a uint32 but not an int32, and the instruction can ignore int overflow, the
// source value for the purposes of int specialization would have been changed to an int constant value by ignoring
// overflow. If we were to specialize the sym using the int constant value, it would be treated as a lossless
// conversion, but since there may be subsequent uses of the same float constant value that may not ignore overflow,
// this must be treated as a lossy conversion by specializing the sym using the original float constant value.
src1ValueToSpecialize = src1OriginalVal;
}
// Make sure the srcs are specialized
IR::Opnd *src1 = instr->GetSrc1();
this->ToInt32(instr, src1, this->currentBlock, src1ValueToSpecialize, nullptr, lossy);
if(bailOutKind != IR::BailOutInvalid && !this->IsLoopPrePass())
{
GenerateBailAtOperation(&instr, bailOutKind);
}
if (!skipDst)
{
IR::Opnd *dst = instr->GetDst();
if (dst)
{
AssertMsg(!(isTransfer && !this->IsLoopPrePass()) || min == newMin && max == newMax, "If this is just a copy, old/new min/max should be the same");
TypeSpecializeIntDst(
instr,
originalOpCode,
isTransfer ? src1Val : nullptr,
src1Val,
nullptr,
bailOutKind,
newMin,
newMax,
pDstVal,
addSubConstantInfo.HasInfo() ? &addSubConstantInfo : nullptr);
}
}
if(bailOutKind == IR::BailOutInvalid)
{
GOPT_TRACE(_u("Type specialized to INT\n"));
#if ENABLE_DEBUG_CONFIG_OPTIONS
if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::AggressiveIntTypeSpecPhase))
{
Output::Print(_u("Type specialized to INT: "));
Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode));
}
#endif
}
else
{
GOPT_TRACE(_u("Type specialized to INT with bailout on:\n"));
if(bailOutKind & IR::BailOutOnOverflow)
{
GOPT_TRACE(_u(" Overflow\n"));
#if ENABLE_DEBUG_CONFIG_OPTIONS
if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::AggressiveIntTypeSpecPhase))
{
Output::Print(_u("Type specialized to INT with bailout (%S): "), "Overflow");
Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode));
}
#endif
}
if(bailOutKind & IR::BailOutOnNegativeZero)
{
GOPT_TRACE(_u(" Zero\n"));
#if ENABLE_DEBUG_CONFIG_OPTIONS
if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::AggressiveIntTypeSpecPhase))
{
Output::Print(_u("Type specialized to INT with bailout (%S): "), "Zero");
Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode));
}
#endif
}
}
return true;
}
void
GlobOpt::TypeSpecializeIntDst(IR::Instr* instr, Js::OpCode originalOpCode, Value* valToTransfer, Value *const src1Value, Value *const src2Value, const IR::BailOutKind bailOutKind, int32 newMin, int32 newMax, Value** pDstVal, const AddSubConstantInfo *const addSubConstantInfo)
{
this->TypeSpecializeIntDst(instr, originalOpCode, valToTransfer, src1Value, src2Value, bailOutKind, ValueType::GetInt(IntConstantBounds(newMin, newMax).IsLikelyTaggable()), newMin, newMax, pDstVal, addSubConstantInfo);
}
void
GlobOpt::TypeSpecializeIntDst(IR::Instr* instr, Js::OpCode originalOpCode, Value* valToTransfer, Value *const src1Value, Value *const src2Value, const IR::BailOutKind bailOutKind, ValueType valueType, Value** pDstVal, const AddSubConstantInfo *const addSubConstantInfo)
{
this->TypeSpecializeIntDst(instr, originalOpCode, valToTransfer, src1Value, src2Value, bailOutKind, valueType, 0, 0, pDstVal, addSubConstantInfo);
}
void
GlobOpt::TypeSpecializeIntDst(IR::Instr* instr, Js::OpCode originalOpCode, Value* valToTransfer, Value *const src1Value, Value *const src2Value, const IR::BailOutKind bailOutKind, ValueType valueType, int32 newMin, int32 newMax, Value** pDstVal, const AddSubConstantInfo *const addSubConstantInfo)
{
Assert(valueType.IsInt() || (valueType.IsNumber() && valueType.IsLikelyInt() && newMin == 0 && newMax == 0));
Assert(!valToTransfer || valToTransfer == src1Value);
Assert(!addSubConstantInfo || addSubConstantInfo->HasInfo());
IR::Opnd *dst = instr->GetDst();
Assert(dst);
bool isValueInfoPrecise;
if(IsLoopPrePass())
{
valueType = GetPrepassValueTypeForDst(valueType, instr, src1Value, src2Value, &isValueInfoPrecise);
}
else
{
isValueInfoPrecise = true;
}
// If dst has a circular reference in a loop, it probably won't get specialized. Don't mark the dst as type-specialized on
// the pre-pass. With aggressive int spec though, it will take care of bailing out if necessary so there's no need to assume
// that the dst will be a var even if it's live on the back-edge. Also if the op always produces an int32, then there's no
// ambiguity in the dst's value type even in the prepass.
if (!DoAggressiveIntTypeSpec() && this->IsLoopPrePass() && !valueType.IsInt())
{
if (dst->IsRegOpnd())
{
this->ToVarRegOpnd(dst->AsRegOpnd(), this->currentBlock);
}
return;
}
const IntBounds *dstBounds = nullptr;
if(addSubConstantInfo && !addSubConstantInfo->SrcValueIsLikelyConstant() && DoTrackRelativeIntBounds())
{
Assert(!ignoredIntOverflowForCurrentInstr);
// Track bounds for add or sub with a constant. For instance, consider (b = a + 2). The value of 'b' should track that
// it is equal to (the value of 'a') + 2. Additionally, the value of 'b' should inherit the bounds of 'a', offset by
// the constant value.
if(!valueType.IsInt() || !isValueInfoPrecise)
{
newMin = INT32_MIN;
newMax = INT32_MAX;
}
dstBounds =
IntBounds::Add(
addSubConstantInfo->SrcValue(),
addSubConstantInfo->Offset(),
isValueInfoPrecise,
IntConstantBounds(newMin, newMax),
alloc);
}
// Src1's value could change later in the loop, so the value wouldn't be the same for each
// iteration. Since we don't iterate over loops "while (!changed)", go conservative on the
// pre-pass.
if (valToTransfer)
{
// If this is just a copy, no need for creating a new value.
Assert(!addSubConstantInfo);
*pDstVal = this->ValueNumberTransferDst(instr, valToTransfer);
CurrentBlockData()->InsertNewValue(*pDstVal, dst);
}
else if (valueType.IsInt() && isValueInfoPrecise)
{
bool wasNegativeZeroPreventedByBailout = false;
if(newMin <= 0 && newMax >= 0)
{
switch(originalOpCode)
{
case Js::OpCode::Add_A:
// -0 + -0 == -0
Assert(src1Value);
Assert(src2Value);
wasNegativeZeroPreventedByBailout =
src1Value->GetValueInfo()->WasNegativeZeroPreventedByBailout() &&
src2Value->GetValueInfo()->WasNegativeZeroPreventedByBailout();
break;
case Js::OpCode::Sub_A:
// -0 - 0 == -0
Assert(src1Value);
wasNegativeZeroPreventedByBailout = src1Value->GetValueInfo()->WasNegativeZeroPreventedByBailout();
break;
case Js::OpCode::Neg_A:
case Js::OpCode::Mul_A:
case Js::OpCode::Div_A:
case Js::OpCode::Rem_A:
wasNegativeZeroPreventedByBailout = !!(bailOutKind & IR::BailOutOnNegativeZero);
break;
}
}
*pDstVal =
dstBounds
? NewIntBoundedValue(valueType, dstBounds, wasNegativeZeroPreventedByBailout, nullptr)
: NewIntRangeValue(newMin, newMax, wasNegativeZeroPreventedByBailout, nullptr);
}
else
{
*pDstVal = dstBounds ? NewIntBoundedValue(valueType, dstBounds, false, nullptr) : NewGenericValue(valueType);
}
if(addSubConstantInfo || updateInductionVariableValueNumber)
{
TrackIntSpecializedAddSubConstant(instr, addSubConstantInfo, *pDstVal, !!dstBounds);
}
CurrentBlockData()->SetValue(*pDstVal, dst);
AssertMsg(dst->IsRegOpnd(), "What else?");
this->ToInt32Dst(instr, dst->AsRegOpnd(), this->currentBlock);
}
bool
GlobOpt::TypeSpecializeBinary(IR::Instr **pInstr, Value **pSrc1Val, Value **pSrc2Val, Value **pDstVal, Value *const src1OriginalVal, Value *const src2OriginalVal, bool *redoTypeSpecRef)
{
IR::Instr *&instr = *pInstr;
int32 min1 = INT32_MIN, max1 = INT32_MAX, min2 = INT32_MIN, max2 = INT32_MAX, newMin, newMax, tmp;
Js::OpCode opcode;
Value *&src1Val = *pSrc1Val;
Value *&src2Val = *pSrc2Val;
// We don't need to do typespec for asmjs
if (IsTypeSpecPhaseOff(this->func) || GetIsAsmJSFunc())
{
return false;
}
if (OpCodeAttr::IsInlineBuiltIn(instr->m_opcode))
{
this->TypeSpecializeInlineBuiltInBinary(pInstr, src1Val, src2Val, pDstVal, src1OriginalVal, src2OriginalVal);
return true;
}
if (src1Val)
{
src1Val->GetValueInfo()->GetIntValMinMax(&min1, &max1, this->DoAggressiveIntTypeSpec());
}
if (src2Val)
{
src2Val->GetValueInfo()->GetIntValMinMax(&min2, &max2, this->DoAggressiveIntTypeSpec());
}
// Type specialize binary operators to int32
bool src1Lossy = true;
bool src2Lossy = true;
IR::BailOutKind bailOutKind = IR::BailOutInvalid;
bool ignoredIntOverflow = this->ignoredIntOverflowForCurrentInstr;
bool ignoredNegativeZero = false;
bool skipSrc2 = false;
bool skipDst = false;
bool needsBoolConv = false;
AddSubConstantInfo addSubConstantInfo;
switch (instr->m_opcode)
{
case Js::OpCode::Or_A:
if (!DoLossyIntTypeSpec())
{
return false;
}
this->PropagateIntRangeBinary(instr, min1, max1, min2, max2, &newMin, &newMax);
opcode = Js::OpCode::Or_I4;
break;
case Js::OpCode::And_A:
if (!DoLossyIntTypeSpec())
{
return false;
}
this->PropagateIntRangeBinary(instr, min1, max1, min2, max2, &newMin, &newMax);
opcode = Js::OpCode::And_I4;
break;
case Js::OpCode::Xor_A:
if (!DoLossyIntTypeSpec())
{
return false;
}
this->PropagateIntRangeBinary(instr, min1, max1, min2, max2, &newMin, &newMax);
opcode = Js::OpCode::Xor_I4;
break;
case Js::OpCode::Shl_A:
if (!DoLossyIntTypeSpec())
{
return false;
}
this->PropagateIntRangeBinary(instr, min1, max1, min2, max2, &newMin, &newMax);
opcode = Js::OpCode::Shl_I4;
break;
case Js::OpCode::Shr_A:
if (!DoLossyIntTypeSpec())
{
return false;
}
this->PropagateIntRangeBinary(instr, min1, max1, min2, max2, &newMin, &newMax);
opcode = Js::OpCode::Shr_I4;
break;
case Js::OpCode::ShrU_A:
if (!DoLossyIntTypeSpec())
{
return false;
}
if (min1 < 0 && IntConstantBounds(min2, max2).And_0x1f().Contains(0))
{
// Src1 may be too large to represent as a signed int32, and src2 may be zero. Unless the resulting value is only
// used as a signed int32 (hence allowing us to ignore the result's sign), don't specialize the instruction.
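// E.g., (-1 >>> 0) === 4294967295, which is not representable as a signed int32.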
if (!instr->ignoreIntOverflow)
{
return false;
}
ignoredIntOverflow = true;
}
this->PropagateIntRangeBinary(instr, min1, max1, min2, max2, &newMin, &newMax);
opcode = Js::OpCode::ShrU_I4;
break;
case Js::OpCode::BrUnLe_A:
// Folding the branch based on bounds will attempt a lossless int32 conversion of the sources if they are not definitely
// int already, so require that both sources are likely int for folding.
if (DoConstFold() &&
!IsLoopPrePass() &&
TryOptConstFoldBrUnsignedGreaterThan(instr, false, src1Val, min1, max1, src2Val, min2, max2))
{
return true;
}
if (min1 >= 0 && min2 >= 0)
{
// Only handle positive values since this is unsigned...
// Bounds are tracked only for likely int values. Only likely int values may have bounds that are not the defaults
// (INT32_MIN, INT32_MAX), so we're good.
Assert(src1Val);
Assert(src1Val->GetValueInfo()->IsLikelyInt());
Assert(src2Val);
Assert(src2Val->GetValueInfo()->IsLikelyInt());
UpdateIntBoundsForLessThanOrEqualBranch(src1Val, src2Val);
}
if (!DoLossyIntTypeSpec())
{
return false;
}
newMin = newMax = 0;
opcode = Js::OpCode::BrUnLe_I4;
break;
case Js::OpCode::BrUnLt_A:
// Folding the branch based on bounds will attempt a lossless int32 conversion of the sources if they are not definitely
// int already, so require that both sources are likely int for folding.
if (DoConstFold() &&
!IsLoopPrePass() &&
TryOptConstFoldBrUnsignedLessThan(instr, true, src1Val, min1, max1, src2Val, min2, max2))
{
return true;
}
if (min1 >= 0 && min2 >= 0)
{
// Only handle positive values since this is unsigned...
// Bounds are tracked only for likely int values. Only likely int values may have bounds that are not the defaults
// (INT32_MIN, INT32_MAX), so we're good.
Assert(src1Val);
Assert(src1Val->GetValueInfo()->IsLikelyInt());
Assert(src2Val);
Assert(src2Val->GetValueInfo()->IsLikelyInt());
UpdateIntBoundsForLessThanBranch(src1Val, src2Val);
}
if (!DoLossyIntTypeSpec())
{
return false;
}
newMin = newMax = 0;
opcode = Js::OpCode::BrUnLt_I4;
break;
case Js::OpCode::BrUnGe_A:
// Folding the branch based on bounds will attempt a lossless int32 conversion of the sources if they are not definitely
// int already, so require that both sources are likely int for folding.
if (DoConstFold() &&
!IsLoopPrePass() &&
TryOptConstFoldBrUnsignedLessThan(instr, false, src1Val, min1, max1, src2Val, min2, max2))
{
return true;
}
if (min1 >= 0 && min2 >= 0)
{
// Only handle positive values since this is unsigned...
// Bounds are tracked only for likely int values. Only likely int values may have bounds that are not the defaults
// (INT32_MIN, INT32_MAX), so we're good.
Assert(src1Val);
Assert(src1Val->GetValueInfo()->IsLikelyInt());
Assert(src2Val);
Assert(src2Val->GetValueInfo()->IsLikelyInt());
UpdateIntBoundsForGreaterThanOrEqualBranch(src1Val, src2Val);
}
if (!DoLossyIntTypeSpec())
{
return false;
}
newMin = newMax = 0;
opcode = Js::OpCode::BrUnGe_I4;
break;
case Js::OpCode::BrUnGt_A:
// Folding the branch based on bounds will attempt a lossless int32 conversion of the sources if they are not definitely
// int already, so require that both sources are likely int for folding.
if (DoConstFold() &&
!IsLoopPrePass() &&
TryOptConstFoldBrUnsignedGreaterThan(instr, true, src1Val, min1, max1, src2Val, min2, max2))
{
return true;
}
if (min1 >= 0 && min2 >= 0)
{
// Only handle positive values since this is unsigned...
// Bounds are tracked only for likely int values. Only likely int values may have bounds that are not the defaults
// (INT32_MIN, INT32_MAX), so we're good.
Assert(src1Val);
Assert(src1Val->GetValueInfo()->IsLikelyInt());
Assert(src2Val);
Assert(src2Val->GetValueInfo()->IsLikelyInt());
UpdateIntBoundsForGreaterThanBranch(src1Val, src2Val);
}
if (!DoLossyIntTypeSpec())
{
return false;
}
newMin = newMax = 0;
opcode = Js::OpCode::BrUnGt_I4;
break;
case Js::OpCode::CmUnLe_A:
if (!DoLossyIntTypeSpec())
{
return false;
}
newMin = 0;
newMax = 1;
opcode = Js::OpCode::CmUnLe_I4;
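// The int-specialized compare produces 0/1; the var dst will need a conversion back to a JS boolean.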
needsBoolConv = true;
break;
case Js::OpCode::CmUnLt_A:
if (!DoLossyIntTypeSpec())
{
return false;
}
newMin = 0;
newMax = 1;
opcode = Js::OpCode::CmUnLt_I4;
needsBoolConv = true;
break;
case Js::OpCode::CmUnGe_A:
if (!DoLossyIntTypeSpec())
{
return false;
}
newMin = 0;
newMax = 1;
opcode = Js::OpCode::CmUnGe_I4;
needsBoolConv = true;
break;
case Js::OpCode::CmUnGt_A:
if (!DoLossyIntTypeSpec())
{
return false;
}
newMin = 0;
newMax = 1;
opcode = Js::OpCode::CmUnGt_I4;
needsBoolConv = true;
break;
case Js::OpCode::Expo_A:
{
src1Val = src1OriginalVal;
src2Val = src2OriginalVal;
return this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal);
}
case Js::OpCode::Div_A:
{
ValueType specializedValueType = GetDivValueType(instr, src1Val, src2Val, true);
if (specializedValueType.IsFloat())
{
// Either result is float or 1/x or cst1/cst2 where cst1%cst2 != 0
// Note: We should really constant fold cst1%cst2...
src1Val = src1OriginalVal;
src2Val = src2OriginalVal;
return this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal);
}
#ifdef _M_ARM
if (!AutoSystemInfo::Data.ArmDivAvailable())
{
return false;
}
#endif
if (specializedValueType.IsInt())
{
if (max2 == 0x80000000 || (min2 == 0 && max2 == 0))
{
return false;
}
if (min1 == 0x80000000 && min2 <= -1 && max2 >= -1)
{
// Prevent integer overflow, as div by zero or MIN_INT / -1 will throw an exception
// Or we know we are dividing by zero (which is weird to have because the profile data
// say we got an int)
bailOutKind = IR::BailOutOnDivOfMinInt;
}
src1Lossy = false; // Detect -0 on the sources
src2Lossy = false;
opcode = Js::OpCode::Div_I4;
Assert(!instr->GetSrc1()->IsUnsigned());
bailOutKind |= IR::BailOnDivResultNotInt;
if (max2 >= 0 && min2 <= 0)
{
// Need to check for divide by zero if the denominator range includes 0
bailOutKind |= IR::BailOutOnDivByZero;
}
if (max1 >= 0 && min1 <= 0)
{
// Numerator contains 0 so the result contains 0
newMin = 0;
newMax = 0;
if (min2 < 0)
{
// Denominator may be negative, so the result could be negative 0
if (instr->ShouldCheckForNegativeZero())
{
bailOutKind |= IR::BailOutOnNegativeZero;
}
else
{
ignoredNegativeZero = true;
}
}
}
else
{
// Initialize to invalid values; one of the conditions below will update them correctly
newMin = INT_MAX;
newMax = INT_MIN;
}
// Deal with the positive and negative range separately for both the numerator and the denominator,
// and integrate to the overall min and max.
// If the result is positive (positive/positive or negative/negative):
// The min should be the smallest magnitude numerator (positive_Min1 | negative_Max1)
// divided by ---------------------------------------------------------------
// largest magnitude denominator (positive_Max2 | negative_Min2)
//
// The max should be the largest magnitude numerator (positive_Max1 | negative_Max1)
// divided by ---------------------------------------------------------------
// smallest magnitude denominator (positive_Min2 | negative_Max2)
// If the result is negative (positive/negative or negative/positive):
// The min should be the largest magnitude numerator (positive_Max1 | negative_Min1)
// divided by ---------------------------------------------------------------
// smallest magnitude denominator (negative_Max2 | positive_Min2)
//
// The max should be the smallest magnitude numerator (positive_Min1 | negative_Max1)
// divided by ---------------------------------------------------------------
// largest magnitude denominator (negative_Min2 | positive_Max2)
// Consider: The range can be slightly more precise if we take care of the rounding
if (max1 > 0)
{
// Take only the positive numerator range
int32 positive_Min1 = max(1, min1);
int32 positive_Max1 = max1;
if (max2 > 0)
{
// Take only the positive denominator range
int32 positive_Min2 = max(1, min2);
int32 positive_Max2 = max2;
// Positive / Positive
int32 quadrant1_Min = positive_Min1 <= positive_Max2? 1 : positive_Min1 / positive_Max2;
int32 quadrant1_Max = positive_Max1 <= positive_Min2? 1 : positive_Max1 / positive_Min2;
Assert(1 <= quadrant1_Min && quadrant1_Min <= quadrant1_Max);
// The result should be positive
newMin = min(newMin, quadrant1_Min);
newMax = max(newMax, quadrant1_Max);
}
if (min2 < 0)
{
// Take only the negative denominator range
int32 negative_Min2 = min2;
int32 negative_Max2 = min(-1, max2);
// Positive / Negative
int32 quadrant2_Min = -positive_Max1 >= negative_Max2? -1 : positive_Max1 / negative_Max2;
int32 quadrant2_Max = -positive_Min1 >= negative_Min2? -1 : positive_Min1 / negative_Min2;
// The result should be negative
Assert(quadrant2_Min <= quadrant2_Max && quadrant2_Max <= -1);
newMin = min(newMin, quadrant2_Min);
newMax = max(newMax, quadrant2_Max);
}
}
if (min1 < 0)
{
// Take only the negative numerator range
int32 negative_Min1 = min1;
int32 negative_Max1 = min(-1, max1);
if (max2 > 0)
{
// Take only the positive denominator range
int32 positive_Min2 = max(1, min2);
int32 positive_Max2 = max2;
// Negative / Positive
int32 quadrant4_Min = negative_Min1 >= -positive_Min2? -1 : negative_Min1 / positive_Min2;
int32 quadrant4_Max = negative_Max1 >= -positive_Max2? -1 : negative_Max1 / positive_Max2;
// The result should be negative
Assert(quadrant4_Min <= quadrant4_Max && quadrant4_Max <= -1);
newMin = min(newMin, quadrant4_Min);
newMax = max(newMax, quadrant4_Max);
}
if (min2 < 0)
{
// Take only the negative denominator range
int32 negative_Min2 = min2;
int32 negative_Max2 = min(-1, max2);
int32 quadrant3_Min;
int32 quadrant3_Max;
// Negative / Negative
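// INT32_MIN / -1 overflows int32 (the mathematical result is 2^31), so when that combination is possible
// nudge the numerator by one, giving INT32_MAX, to keep the computed bound in range.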
if (negative_Max1 == 0x80000000 && negative_Min2 == -1)
{
quadrant3_Min = negative_Max1 >= negative_Min2? 1 : (negative_Max1+1) / negative_Min2;
}
else
{
quadrant3_Min = negative_Max1 >= negative_Min2? 1 : negative_Max1 / negative_Min2;
}
if (negative_Min1 == 0x80000000 && negative_Max2 == -1)
{
quadrant3_Max = negative_Min1 >= negative_Max2? 1 : (negative_Min1+1) / negative_Max2;
}
else
{
quadrant3_Max = negative_Min1 >= negative_Max2? 1 : negative_Min1 / negative_Max2;
}
// The result should be positive
Assert(1 <= quadrant3_Min && quadrant3_Min <= quadrant3_Max);
newMin = min(newMin, quadrant3_Min);
newMax = max(newMax, quadrant3_Max);
}
}
Assert(newMin <= newMax);
// Continue to int type spec
break;
}
}
// fall-through
default:
{
const bool involesLargeInt32 =
(src1Val && src1Val->GetValueInfo()->IsLikelyUntaggedInt()) ||
(src2Val && src2Val->GetValueInfo()->IsLikelyUntaggedInt());
const auto trySpecializeToFloat =
[&](const bool mayOverflow) -> bool
{
// It has been determined that this instruction cannot be int-specialized. Need to determine whether to attempt
// to float-specialize the instruction, or leave it unspecialized.
if((involesLargeInt32
#if INT32VAR
&& mayOverflow
#endif
) || (instr->m_opcode == Js::OpCode::Mul_A && !this->DoAggressiveMulIntTypeSpec())
)
{
// An input range is completely outside the range of an int31 and the operation is likely to overflow.
// Additionally, on 32-bit platforms, the value is untaggable and will be a JavascriptNumber, which is
// significantly slower to use in an unspecialized operation compared to a tagged int. So, try to
// float-specialize the instruction.
src1Val = src1OriginalVal;
src2Val = src2OriginalVal;
return TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal);
}
return false;
};
if (instr->m_opcode != Js::OpCode::ArgOut_A_InlineBuiltIn)
{
if ((src1Val && src1Val->GetValueInfo()->IsLikelyFloat()) || (src2Val && src2Val->GetValueInfo()->IsLikelyFloat()))
{
// Try to type specialize to float
src1Val = src1OriginalVal;
src2Val = src2OriginalVal;
return this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal);
}
if (src1Val == nullptr ||
src2Val == nullptr ||
!src1Val->GetValueInfo()->IsLikelyInt() ||
!src2Val->GetValueInfo()->IsLikelyInt() ||
(
!DoAggressiveIntTypeSpec() &&
(
!(src1Val->GetValueInfo()->IsInt() || CurrentBlockData()->IsSwitchInt32TypeSpecialized(instr)) ||
!src2Val->GetValueInfo()->IsInt()
)
) ||
(instr->GetSrc1()->IsRegOpnd() && instr->GetSrc1()->AsRegOpnd()->m_sym->m_isNotInt) ||
(instr->GetSrc2()->IsRegOpnd() && instr->GetSrc2()->AsRegOpnd()->m_sym->m_isNotInt))
{
return trySpecializeToFloat(true);
}
}
// Try to type specialize to int32
// If one of the values is a float constant with a value that fits in a uint32 but not an int32,
// and the instruction can ignore int overflow, the source value for the purposes of int specialization
// would have been changed to an int constant value by ignoring overflow. But, the conversion is still lossy.
if (!(src1OriginalVal && src1OriginalVal->GetValueInfo()->IsFloatConstant() && src1Val && src1Val->GetValueInfo()->HasIntConstantValue()))
{
src1Lossy = false;
}
if (!(src2OriginalVal && src2OriginalVal->GetValueInfo()->IsFloatConstant() && src2Val && src2Val->GetValueInfo()->HasIntConstantValue()))
{
src2Lossy = false;
}
switch(instr->m_opcode)
{
case Js::OpCode::ArgOut_A_InlineBuiltIn:
            // If the src is already type-specialized and we don't type-specialize the ArgOut_A_InlineBuiltIn instr, we'll get an additional ToVar.
            // So, to avoid that, type-specialize the ArgOut_A_InlineBuiltIn instr.
            // Otherwise we don't need to type-specialize the instr; we're fine with the src being a Var.
if (instr->GetSrc1()->IsRegOpnd())
{
StackSym *sym = instr->GetSrc1()->AsRegOpnd()->m_sym;
if (CurrentBlockData()->IsInt32TypeSpecialized(sym))
{
opcode = instr->m_opcode;
skipDst = true; // We should keep dst as is, otherwise the link opnd for next ArgOut/InlineBuiltInStart would be broken.
skipSrc2 = true; // src2 is linkOpnd. We don't need to type-specialize it.
newMin = min1; newMax = max1; // Values don't matter, these are unused.
goto LOutsideSwitch; // Continue to int-type-specialize.
}
else if (CurrentBlockData()->IsFloat64TypeSpecialized(sym))
{
src1Val = src1OriginalVal;
src2Val = src2OriginalVal;
return this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal);
}
#ifdef ENABLE_SIMDJS
else if (CurrentBlockData()->IsSimd128F4TypeSpecialized(sym))
{
// SIMD_JS
// We should be already using the SIMD type-spec sym. See TypeSpecializeSimd128.
Assert(IRType_IsSimd128(instr->GetSrc1()->GetType()));
}
#endif
}
return false;
case Js::OpCode::Add_A:
do // while(false)
{
const auto CannotOverflowBasedOnRelativeBounds = [&](int32 *const constantValueRef)
{
Assert(constantValueRef);
if(min2 == max2 &&
src1Val->GetValueInfo()->IsIntBounded() &&
src1Val->GetValueInfo()->AsIntBounded()->Bounds()->AddCannotOverflowBasedOnRelativeBounds(min2))
{
*constantValueRef = min2;
return true;
}
else if(
min1 == max1 &&
src2Val->GetValueInfo()->IsIntBounded() &&
src2Val->GetValueInfo()->AsIntBounded()->Bounds()->AddCannotOverflowBasedOnRelativeBounds(min1))
{
*constantValueRef = min1;
return true;
}
return false;
};
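            // Illustrative case for the relative-bounds check above: in a loop guarded by
            // i < n (n an int32), the bound i <= n - 1 is the kind of fact that lets
            // AddCannotOverflowBasedOnRelativeBounds prove i + 1 cannot exceed INT32_MAX,
            // so no overflow bailout is needed.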
if (Int32Math::Add(min1, min2, &newMin))
{
int32 constantSrcValue;
if(CannotOverflowBasedOnRelativeBounds(&constantSrcValue))
{
newMin = constantSrcValue >= 0 ? INT32_MAX : INT32_MIN;
}
else if(instr->ShouldCheckForIntOverflow())
{
if(involesLargeInt32 || !DoAggressiveIntTypeSpec())
{
// May overflow
return trySpecializeToFloat(true);
}
bailOutKind |= IR::BailOutOnOverflow;
newMin = min1 < 0 ? INT32_MIN : INT32_MAX;
}
else
{
// When ignoring overflow, the range needs to account for overflow. For any Add or Sub, since
// overflow causes the value to wrap around, and we don't have a way to specify a lower and upper
// range of ints, we use the full range of int32s.
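                    // For example, with overflow ignored, INT32_MAX + 1 wraps to
                    // INT32_MIN, so no range narrower than [INT32_MIN, INT32_MAX] is safe.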
ignoredIntOverflow = true;
newMin = INT32_MIN;
newMax = INT32_MAX;
break;
}
}
if (Int32Math::Add(max1, max2, &newMax))
{
int32 constantSrcValue;
if(CannotOverflowBasedOnRelativeBounds(&constantSrcValue))
{
newMax = constantSrcValue >= 0 ? INT32_MAX : INT32_MIN;
}
else if(instr->ShouldCheckForIntOverflow())
{
if(involesLargeInt32 || !DoAggressiveIntTypeSpec())
{
// May overflow
return trySpecializeToFloat(true);
}
bailOutKind |= IR::BailOutOnOverflow;
newMax = max1 < 0 ? INT32_MIN : INT32_MAX;
}
else
{
// See comment about ignoring overflow above
ignoredIntOverflow = true;
newMin = INT32_MIN;
newMax = INT32_MAX;
break;
}
}
if(bailOutKind & IR::BailOutOnOverflow)
{
Assert(bailOutKind == IR::BailOutOnOverflow);
Assert(instr->ShouldCheckForIntOverflow());
int32 temp;
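                // NearestInRangeTo(0, min, max) effectively picks the value in [min, max]
                // closest to zero; if even the smallest-magnitude pair of operands
                // overflows, every pair in the ranges does. For example,
                // [2000000000, 2100000000] + [200000000, 300000000] always overflows,
                // since 2000000000 + 200000000 > INT32_MAX.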
if(Int32Math::Add(
Int32Math::NearestInRangeTo(0, min1, max1),
Int32Math::NearestInRangeTo(0, min2, max2),
&temp))
{
// Always overflows
return trySpecializeToFloat(true);
}
}
} while(false);
if (!this->IsLoopPrePass() && newMin == newMax && bailOutKind == IR::BailOutInvalid)
{
// Take care of Add with zero here, since we know we're dealing with 2 numbers.
this->CaptureByteCodeSymUses(instr);
IR::Opnd *src;
bool isAddZero = true;
int32 intConstantValue;
if (src1Val->GetValueInfo()->TryGetIntConstantValue(&intConstantValue) && intConstantValue == 0)
{
src = instr->UnlinkSrc2();
instr->FreeSrc1();
}
else if (src2Val->GetValueInfo()->TryGetIntConstantValue(&intConstantValue) && intConstantValue == 0)
{
src = instr->UnlinkSrc1();
instr->FreeSrc2();
}
else
{
// This should have been handled by const folding, unless:
// - A source's value was substituted with a different value here, which is after const folding happened
// - A value is not definitely int, but once converted to definite int, it would be zero due to a
// condition in the source code such as if(a === 0). Ideally, we would specialize the sources and
// remove the add, but doesn't seem too important for now.
Assert(
!DoConstFold() ||
src1Val != src1OriginalVal ||
src2Val != src2OriginalVal ||
!src1Val->GetValueInfo()->IsInt() ||
!src2Val->GetValueInfo()->IsInt());
isAddZero = false;
src = nullptr;
}
if (isAddZero)
{
IR::Instr *newInstr = IR::Instr::New(Js::OpCode::Ld_A, instr->UnlinkDst(), src, instr->m_func);
newInstr->SetByteCodeOffset(instr);
instr->m_opcode = Js::OpCode::Nop;
this->currentBlock->InsertInstrAfter(newInstr, instr);
return true;
}
}
if(!ignoredIntOverflow)
{
if(min2 == max2 &&
(!IsLoopPrePass() || IsPrepassSrcValueInfoPrecise(instr->GetSrc2(), src2Val)) &&
instr->GetSrc1()->IsRegOpnd())
{
addSubConstantInfo.Set(instr->GetSrc1()->AsRegOpnd()->m_sym, src1Val, min1 == max1, min2);
}
else if(
min1 == max1 &&
(!IsLoopPrePass() || IsPrepassSrcValueInfoPrecise(instr->GetSrc1(), src1Val)) &&
instr->GetSrc2()->IsRegOpnd())
{
addSubConstantInfo.Set(instr->GetSrc2()->AsRegOpnd()->m_sym, src2Val, min2 == max2, min1);
}
}
opcode = Js::OpCode::Add_I4;
break;
case Js::OpCode::Sub_A:
do // while(false)
{
const auto CannotOverflowBasedOnRelativeBounds = [&]()
{
return
min2 == max2 &&
src1Val->GetValueInfo()->IsIntBounded() &&
src1Val->GetValueInfo()->AsIntBounded()->Bounds()->SubCannotOverflowBasedOnRelativeBounds(min2);
};
if (Int32Math::Sub(min1, max2, &newMin))
{
if(CannotOverflowBasedOnRelativeBounds())
{
Assert(min2 == max2);
newMin = min2 >= 0 ? INT32_MIN : INT32_MAX;
}
else if(instr->ShouldCheckForIntOverflow())
{
if(involesLargeInt32 || !DoAggressiveIntTypeSpec())
{
// May overflow
return trySpecializeToFloat(true);
}
bailOutKind |= IR::BailOutOnOverflow;
newMin = min1 < 0 ? INT32_MIN : INT32_MAX;
}
else
{
// When ignoring overflow, the range needs to account for overflow. For any Add or Sub, since overflow
// causes the value to wrap around, and we don't have a way to specify a lower and upper range of ints,
// we use the full range of int32s.
ignoredIntOverflow = true;
newMin = INT32_MIN;
newMax = INT32_MAX;
break;
}
}
if (Int32Math::Sub(max1, min2, &newMax))
{
if(CannotOverflowBasedOnRelativeBounds())
{
Assert(min2 == max2);
                    newMax = min2 >= 0 ? INT32_MIN : INT32_MAX;
}
else if(instr->ShouldCheckForIntOverflow())
{
if(involesLargeInt32 || !DoAggressiveIntTypeSpec())
{
// May overflow
return trySpecializeToFloat(true);
}
bailOutKind |= IR::BailOutOnOverflow;
newMax = max1 < 0 ? INT32_MIN : INT32_MAX;
}
else
{
// See comment about ignoring overflow above
ignoredIntOverflow = true;
newMin = INT32_MIN;
newMax = INT32_MAX;
break;
}
}
if(bailOutKind & IR::BailOutOnOverflow)
{
Assert(bailOutKind == IR::BailOutOnOverflow);
Assert(instr->ShouldCheckForIntOverflow());
int32 temp;
if(Int32Math::Sub(
Int32Math::NearestInRangeTo(-1, min1, max1),
Int32Math::NearestInRangeTo(0, min2, max2),
&temp))
{
// Always overflows
return trySpecializeToFloat(true);
}
}
} while(false);
if(!ignoredIntOverflow &&
min2 == max2 &&
min2 != INT32_MIN &&
(!IsLoopPrePass() || IsPrepassSrcValueInfoPrecise(instr->GetSrc2(), src2Val)) &&
instr->GetSrc1()->IsRegOpnd())
{
addSubConstantInfo.Set(instr->GetSrc1()->AsRegOpnd()->m_sym, src1Val, min1 == max1, -min2);
}
opcode = Js::OpCode::Sub_I4;
break;
case Js::OpCode::Mul_A:
{
if (Int32Math::Mul(min1, min2, &newMin))
{
if (involesLargeInt32 || !DoAggressiveMulIntTypeSpec() || !DoAggressiveIntTypeSpec())
{
// May overflow
return trySpecializeToFloat(true);
}
bailOutKind |= IR::BailOutOnMulOverflow;
newMin = (min1 < 0) ^ (min2 < 0) ? INT32_MIN : INT32_MAX;
}
newMax = newMin;
if (Int32Math::Mul(max1, max2, &tmp))
{
if (involesLargeInt32 || !DoAggressiveMulIntTypeSpec() || !DoAggressiveIntTypeSpec())
{
// May overflow
return trySpecializeToFloat(true);
}
bailOutKind |= IR::BailOutOnMulOverflow;
tmp = (max1 < 0) ^ (max2 < 0) ? INT32_MIN : INT32_MAX;
}
newMin = min(newMin, tmp);
newMax = max(newMax, tmp);
if (Int32Math::Mul(min1, max2, &tmp))
{
if (involesLargeInt32 || !DoAggressiveMulIntTypeSpec() || !DoAggressiveIntTypeSpec())
{
// May overflow
return trySpecializeToFloat(true);
}
bailOutKind |= IR::BailOutOnMulOverflow;
tmp = (min1 < 0) ^ (max2 < 0) ? INT32_MIN : INT32_MAX;
}
newMin = min(newMin, tmp);
newMax = max(newMax, tmp);
if (Int32Math::Mul(max1, min2, &tmp))
{
if (involesLargeInt32 || !DoAggressiveMulIntTypeSpec() || !DoAggressiveIntTypeSpec())
{
// May overflow
return trySpecializeToFloat(true);
}
bailOutKind |= IR::BailOutOnMulOverflow;
tmp = (max1 < 0) ^ (min2 < 0) ? INT32_MIN : INT32_MAX;
}
newMin = min(newMin, tmp);
newMax = max(newMax, tmp);
if (bailOutKind & IR::BailOutOnMulOverflow)
{
// CSE only if two MULs have the same overflow check behavior.
                // Currently the behaviors are: ignore int32 overflow (but not 53-bit overflow), or int32 overflow matters.
if (!instr->ShouldCheckFor32BitOverflow() && instr->ShouldCheckForNon32BitOverflow())
{
// If we allow int to overflow then there can be anything in the resulting int
newMin = IntConstMin;
newMax = IntConstMax;
ignoredIntOverflow = true;
}
int32 temp, overflowValue;
if (Int32Math::Mul(
Int32Math::NearestInRangeTo(0, min1, max1),
Int32Math::NearestInRangeTo(0, min2, max2),
&temp,
&overflowValue))
{
Assert(instr->ignoreOverflowBitCount >= 32);
int overflowMatters = 64 - instr->ignoreOverflowBitCount;
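                    // Illustrative reading: overflowValue holds the high 32 bits of the
                    // 64-bit product; the shift-left-then-right check below verifies that
                    // the top overflowMatters bits are a plain sign extension, i.e. the
                    // product fits in ignoreOverflowBitCount bits. E.g. with
                    // ignoreOverflowBitCount == 53, overflowMatters == 11.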
if (!ignoredIntOverflow ||
                        // Use shift to check the high bits in case it's negative
((overflowValue << overflowMatters) >> overflowMatters) != overflowValue
)
{
// Always overflows
return trySpecializeToFloat(true);
}
}
}
if (newMin <= 0 && newMax >= 0 && // New range crosses zero
(min1 < 0 || min2 < 0) && // An operand's range contains a negative integer
!(min1 > 0 || min2 > 0) && // Neither operand's range contains only positive integers
!instr->GetSrc1()->IsEqual(instr->GetSrc2())) // The operands don't have the same value
{
if (instr->ShouldCheckForNegativeZero())
{
// -0 matters since the sym is not a local, or is used in a way in which -0 would differ from +0
if (!DoAggressiveIntTypeSpec())
{
// May result in -0
return trySpecializeToFloat(false);
}
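                    // For example, 0 * -5 evaluates to -0 in JavaScript, and -0 is
                    // observable (1 / -0 === -Infinity), which an int32 cannot represent.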
if (((min1 == 0 && max1 == 0) || (min2 == 0 && max2 == 0)) && (max1 < 0 || max2 < 0))
{
// Always results in -0
return trySpecializeToFloat(false);
}
bailOutKind |= IR::BailOutOnNegativeZero;
}
else
{
ignoredNegativeZero = true;
}
}
opcode = Js::OpCode::Mul_I4;
break;
}
case Js::OpCode::Rem_A:
{
IR::Opnd* src2 = instr->GetSrc2();
if (!this->IsLoopPrePass() && min2 == max2 && min1 >= 0)
{
int32 value = min2;
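                // For a non-negative dividend and a power-of-two divisor,
                // x % (1 << k) == x & ((1 << k) - 1) (e.g. x % 8 == x & 7); the
                // min1 >= 0 guard above is what makes the rewrite to And_A safe.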
if (value == (1 << Math::Log2(value)) && src2->IsAddrOpnd())
{
Assert(src2->AsAddrOpnd()->IsVar());
instr->m_opcode = Js::OpCode::And_A;
src2->AsAddrOpnd()->SetAddress(Js::TaggedInt::ToVarUnchecked(value - 1),
IR::AddrOpndKindConstantVar);
*pSrc2Val = GetIntConstantValue(value - 1, instr);
src2Val = *pSrc2Val;
return this->TypeSpecializeBinary(&instr, pSrc1Val, pSrc2Val, pDstVal, src1OriginalVal, src2Val, redoTypeSpecRef);
}
}
#ifdef _M_ARM
if (!AutoSystemInfo::Data.ArmDivAvailable())
{
return false;
}
#endif
if (min1 < 0)
{
// The most negative it can be is min1, unless limited by min2/max2
int32 negMaxAbs2;
if (min2 == INT32_MIN)
{
negMaxAbs2 = INT32_MIN;
}
else
{
negMaxAbs2 = -max(abs(min2), abs(max2)) + 1;
}
newMin = max(min1, negMaxAbs2);
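                // For example, min1 == -100 with a divisor range of [-7, 3]: the largest
                // |divisor| is 7, so the remainder is at least -(7 - 1) == -6, and
                // newMin == max(-100, -6) == -6.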
}
else
{
newMin = 0;
}
bool isModByPowerOf2 = (instr->IsProfiledInstr() && instr->m_func->HasProfileInfo() &&
instr->m_func->GetReadOnlyProfileInfo()->IsModulusOpByPowerOf2(static_cast<Js::ProfileId>(instr->AsProfiledInstr()->u.profileId)));
if(isModByPowerOf2)
{
Assert(bailOutKind == IR::BailOutInvalid);
bailOutKind = IR::BailOnModByPowerOf2;
newMin = 0;
}
else
{
if (min2 <= 0 && max2 >= 0)
{
// Consider: We could handle the zero case with a check and bailout...
return false;
}
if (min1 == 0x80000000 && (min2 <= -1 && max2 >= -1))
{
                    // Prevent integer overflow: div by zero or INT32_MIN / -1 raises a hardware exception
return false;
}
if (min1 < 0)
{
if(instr->ShouldCheckForNegativeZero())
{
if (!DoAggressiveIntTypeSpec())
{
return false;
}
bailOutKind |= IR::BailOutOnNegativeZero;
}
else
{
ignoredNegativeZero = true;
}
}
}
{
int32 absMax2;
if (min2 == INT32_MIN)
{
// abs(INT32_MIN) == INT32_MAX because of overflow
absMax2 = INT32_MAX;
}
else
{
absMax2 = max(abs(min2), abs(max2)) - 1;
}
newMax = min(absMax2, max(max1, 0));
newMax = max(newMin, newMax);
}
opcode = Js::OpCode::Rem_I4;
Assert(!instr->GetSrc1()->IsUnsigned());
break;
}
case Js::OpCode::CmEq_A:
case Js::OpCode::CmSrEq_A:
if (!IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val))
{
return false;
}
newMin = 0;
newMax = 1;
opcode = Js::OpCode::CmEq_I4;
needsBoolConv = true;
break;
case Js::OpCode::CmNeq_A:
case Js::OpCode::CmSrNeq_A:
if (!IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val))
{
return false;
}
newMin = 0;
newMax = 1;
opcode = Js::OpCode::CmNeq_I4;
needsBoolConv = true;
break;
case Js::OpCode::CmLe_A:
if (!IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val))
{
return false;
}
newMin = 0;
newMax = 1;
opcode = Js::OpCode::CmLe_I4;
needsBoolConv = true;
break;
case Js::OpCode::CmLt_A:
if (!IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val))
{
return false;
}
newMin = 0;
newMax = 1;
opcode = Js::OpCode::CmLt_I4;
needsBoolConv = true;
break;
case Js::OpCode::CmGe_A:
if (!IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val))
{
return false;
}
newMin = 0;
newMax = 1;
opcode = Js::OpCode::CmGe_I4;
needsBoolConv = true;
break;
case Js::OpCode::CmGt_A:
if (!IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val))
{
return false;
}
newMin = 0;
newMax = 1;
opcode = Js::OpCode::CmGt_I4;
needsBoolConv = true;
break;
case Js::OpCode::BrSrEq_A:
case Js::OpCode::BrEq_A:
case Js::OpCode::BrNotNeq_A:
case Js::OpCode::BrSrNotNeq_A:
{
if(DoConstFold() &&
!IsLoopPrePass() &&
TryOptConstFoldBrEqual(instr, true, src1Val, min1, max1, src2Val, min2, max2))
{
return true;
}
const bool specialize = IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val);
UpdateIntBoundsForEqualBranch(src1Val, src2Val);
if(!specialize)
{
return false;
}
opcode = Js::OpCode::BrEq_I4;
// We'll get a warning if we don't assign a value to these...
// We'll assert if we use them and make a range where min > max
newMin = 2; newMax = 1;
break;
}
case Js::OpCode::BrSrNeq_A:
case Js::OpCode::BrNeq_A:
case Js::OpCode::BrSrNotEq_A:
case Js::OpCode::BrNotEq_A:
{
if(DoConstFold() &&
!IsLoopPrePass() &&
TryOptConstFoldBrEqual(instr, false, src1Val, min1, max1, src2Val, min2, max2))
{
return true;
}
const bool specialize = IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val);
UpdateIntBoundsForNotEqualBranch(src1Val, src2Val);
if(!specialize)
{
return false;
}
opcode = Js::OpCode::BrNeq_I4;
// We'll get a warning if we don't assign a value to these...
// We'll assert if we use them and make a range where min > max
newMin = 2; newMax = 1;
break;
}
case Js::OpCode::BrGt_A:
case Js::OpCode::BrNotLe_A:
{
if(DoConstFold() &&
!IsLoopPrePass() &&
TryOptConstFoldBrGreaterThan(instr, true, src1Val, min1, max1, src2Val, min2, max2))
{
return true;
}
const bool specialize = IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val);
UpdateIntBoundsForGreaterThanBranch(src1Val, src2Val);
if(!specialize)
{
return false;
}
opcode = Js::OpCode::BrGt_I4;
// We'll get a warning if we don't assign a value to these...
// We'll assert if we use them and make a range where min > max
newMin = 2; newMax = 1;
break;
}
case Js::OpCode::BrGe_A:
case Js::OpCode::BrNotLt_A:
{
if(DoConstFold() &&
!IsLoopPrePass() &&
TryOptConstFoldBrGreaterThanOrEqual(instr, true, src1Val, min1, max1, src2Val, min2, max2))
{
return true;
}
const bool specialize = IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val);
UpdateIntBoundsForGreaterThanOrEqualBranch(src1Val, src2Val);
if(!specialize)
{
return false;
}
opcode = Js::OpCode::BrGe_I4;
// We'll get a warning if we don't assign a value to these...
// We'll assert if we use them and make a range where min > max
newMin = 2; newMax = 1;
break;
}
case Js::OpCode::BrLt_A:
case Js::OpCode::BrNotGe_A:
{
if(DoConstFold() &&
!IsLoopPrePass() &&
TryOptConstFoldBrGreaterThanOrEqual(instr, false, src1Val, min1, max1, src2Val, min2, max2))
{
return true;
}
const bool specialize = IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val);
UpdateIntBoundsForLessThanBranch(src1Val, src2Val);
if(!specialize)
{
return false;
}
opcode = Js::OpCode::BrLt_I4;
// We'll get a warning if we don't assign a value to these...
// We'll assert if we use them and make a range where min > max
newMin = 2; newMax = 1;
break;
}
case Js::OpCode::BrLe_A:
case Js::OpCode::BrNotGt_A:
{
if(DoConstFold() &&
!IsLoopPrePass() &&
TryOptConstFoldBrGreaterThan(instr, false, src1Val, min1, max1, src2Val, min2, max2))
{
return true;
}
const bool specialize = IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val);
UpdateIntBoundsForLessThanOrEqualBranch(src1Val, src2Val);
if(!specialize)
{
return false;
}
opcode = Js::OpCode::BrLe_I4;
// We'll get a warning if we don't assign a value to these...
// We'll assert if we use them and make a range where min > max
newMin = 2; newMax = 1;
break;
}
default:
return false;
}
// If this instruction is in a range of instructions where int overflow does not matter, we will still specialize it
// (won't leave it unspecialized based on heuristics), since it is most likely worth specializing, and the dst value
// needs to be guaranteed to be an int
if(!ignoredIntOverflow &&
!ignoredNegativeZero &&
!needsBoolConv &&
instr->ShouldCheckForIntOverflow() &&
!IsWorthSpecializingToInt32(instr, src1Val, src2Val))
{
// Even though type specialization is being skipped since it may not be worth it, the proper value should still be
// maintained so that the result may be type specialized later. An int value is not created for the dst in any of
// the following cases.
// - A bailout check is necessary to specialize this instruction. The bailout check is what guarantees the result to
// be an int, but since we're not going to specialize this instruction, there won't be a bailout check.
// - Aggressive int type specialization is disabled and we're in a loop prepass. We're conservative on dst values in
// that case, especially if the dst sym is live on the back-edge.
if(bailOutKind == IR::BailOutInvalid &&
instr->GetDst() &&
src1Val->GetValueInfo()->IsInt() &&
src2Val->GetValueInfo()->IsInt() &&
(DoAggressiveIntTypeSpec() || !this->IsLoopPrePass()))
{
*pDstVal = CreateDstUntransferredIntValue(newMin, newMax, instr, src1Val, src2Val);
}
return false;
}
} // case default
} // switch
LOutsideSwitch:
this->ignoredIntOverflowForCurrentInstr = ignoredIntOverflow;
this->ignoredNegativeZeroForCurrentInstr = ignoredNegativeZero;
{
// Try CSE again before modifying the IR, in case some attributes are required for successful CSE
Value *src1IndirIndexVal = nullptr;
if(CSEOptimize(currentBlock, &instr, &src1Val, &src2Val, &src1IndirIndexVal, true /* intMathExprOnly */))
{
*redoTypeSpecRef = true;
return false;
}
}
const Js::OpCode originalOpCode = instr->m_opcode;
if (!this->IsLoopPrePass())
{
// No re-write on prepass
instr->m_opcode = opcode;
}
Value *src1ValueToSpecialize = src1Val, *src2ValueToSpecialize = src2Val;
// Lossy conversions to int32 must be done based on the original source values. For instance, if one of the values is a
// float constant with a value that fits in a uint32 but not an int32, and the instruction can ignore int overflow, the
// source value for the purposes of int specialization would have been changed to an int constant value by ignoring
// overflow. If we were to specialize the sym using the int constant value, it would be treated as a lossless
// conversion, but since there may be subsequent uses of the same float constant value that may not ignore overflow,
// this must be treated as a lossy conversion by specializing the sym using the original float constant value.
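    // For example, the float constant 3e9 fits in a uint32 but not an int32; ignoring
    // overflow reinterprets it as the int32 -1294967296, so a sym specialized with that
    // int value must be marked as a lossy conversion from the original float constant.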
if(src1Lossy)
{
src1ValueToSpecialize = src1OriginalVal;
}
if (src2Lossy)
{
src2ValueToSpecialize = src2OriginalVal;
}
// Make sure the srcs are specialized
IR::Opnd* src1 = instr->GetSrc1();
this->ToInt32(instr, src1, this->currentBlock, src1ValueToSpecialize, nullptr, src1Lossy);
if (!skipSrc2)
{
IR::Opnd* src2 = instr->GetSrc2();
this->ToInt32(instr, src2, this->currentBlock, src2ValueToSpecialize, nullptr, src2Lossy);
}
if(bailOutKind != IR::BailOutInvalid && !this->IsLoopPrePass())
{
GenerateBailAtOperation(&instr, bailOutKind);
}
if (!skipDst && instr->GetDst())
{
if (needsBoolConv)
{
IR::RegOpnd *varDst;
if (this->IsLoopPrePass())
{
varDst = instr->GetDst()->AsRegOpnd();
this->ToVarRegOpnd(varDst, this->currentBlock);
}
else
{
// Generate:
// t1.i = CmCC t2.i, t3.i
// t1.v = Conv_bool t1.i
//
// If the only uses of t1 are ints, the conv_bool will get dead-stored
TypeSpecializeIntDst(instr, originalOpCode, nullptr, src1Val, src2Val, bailOutKind, newMin, newMax, pDstVal);
IR::RegOpnd *intDst = instr->GetDst()->AsRegOpnd();
intDst->SetIsJITOptimizedReg(true);
varDst = IR::RegOpnd::New(intDst->m_sym->GetVarEquivSym(this->func), TyVar, this->func);
IR::Instr *convBoolInstr = IR::Instr::New(Js::OpCode::Conv_Bool, varDst, intDst, this->func);
// In some cases (e.g. unsigned compare peep code), a comparison will use variables
// other than the ones initially intended for it, if we can determine that we would
// arrive at the same result. This means that we get a ByteCodeUses operation after
// the actual comparison. Since Inserting the Conv_bool just after the compare, and
// just before the ByteCodeUses, would cause issues later on with register lifetime
// calculation, we want to insert the Conv_bool after the whole compare instruction
// block.
IR::Instr *putAfter = instr;
while (putAfter->m_next && putAfter->m_next->m_opcode == Js::OpCode::ByteCodeUses)
{
putAfter = putAfter->m_next;
}
putAfter->InsertAfter(convBoolInstr);
convBoolInstr->SetByteCodeOffset(instr);
this->ToVarRegOpnd(varDst, this->currentBlock);
CurrentBlockData()->liveInt32Syms->Set(varDst->m_sym->m_id);
CurrentBlockData()->liveLossyInt32Syms->Set(varDst->m_sym->m_id);
}
*pDstVal = this->NewGenericValue(ValueType::Boolean, varDst);
}
else
{
TypeSpecializeIntDst(
instr,
originalOpCode,
nullptr,
src1Val,
src2Val,
bailOutKind,
newMin,
newMax,
pDstVal,
addSubConstantInfo.HasInfo() ? &addSubConstantInfo : nullptr);
}
}
if(bailOutKind == IR::BailOutInvalid)
{
GOPT_TRACE(_u("Type specialized to INT\n"));
#if ENABLE_DEBUG_CONFIG_OPTIONS
if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::AggressiveIntTypeSpecPhase))
{
Output::Print(_u("Type specialized to INT: "));
Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode));
}
#endif
}
else
{
GOPT_TRACE(_u("Type specialized to INT with bailout on:\n"));
if(bailOutKind & (IR::BailOutOnOverflow | IR::BailOutOnMulOverflow) )
{
GOPT_TRACE(_u(" Overflow\n"));
#if ENABLE_DEBUG_CONFIG_OPTIONS
if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::AggressiveIntTypeSpecPhase))
{
Output::Print(_u("Type specialized to INT with bailout (%S): "), "Overflow");
Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode));
}
#endif
}
if(bailOutKind & IR::BailOutOnNegativeZero)
{
GOPT_TRACE(_u(" Zero\n"));
#if ENABLE_DEBUG_CONFIG_OPTIONS
if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::AggressiveIntTypeSpecPhase))
{
Output::Print(_u("Type specialized to INT with bailout (%S): "), "Zero");
Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode));
}
#endif
}
}
return true;
}
bool
GlobOpt::IsWorthSpecializingToInt32Branch(IR::Instr const * instr, Value const * src1Val, Value const * src2Val) const
{
if (!src1Val->GetValueInfo()->HasIntConstantValue() && instr->GetSrc1()->IsRegOpnd())
{
StackSym const *sym1 = instr->GetSrc1()->AsRegOpnd()->m_sym;
if (CurrentBlockData()->IsInt32TypeSpecialized(sym1) == false)
{
if (!src2Val->GetValueInfo()->HasIntConstantValue() && instr->GetSrc2()->IsRegOpnd())
{
StackSym const *sym2 = instr->GetSrc2()->AsRegOpnd()->m_sym;
if (CurrentBlockData()->IsInt32TypeSpecialized(sym2) == false)
{
// Type specializing a Br itself isn't worth it, unless one src
// is already type specialized
return false;
}
}
}
}
return true;
}
bool
GlobOpt::TryOptConstFoldBrFalse(
IR::Instr *const instr,
Value *const srcValue,
const int32 min,
const int32 max)
{
Assert(instr);
Assert(instr->m_opcode == Js::OpCode::BrFalse_A || instr->m_opcode == Js::OpCode::BrTrue_A);
Assert(srcValue);
if(!(DoAggressiveIntTypeSpec() ? srcValue->GetValueInfo()->IsLikelyInt() : srcValue->GetValueInfo()->IsInt()))
{
return false;
}
if(ValueInfo::IsEqualTo(srcValue, min, max, nullptr, 0, 0))
{
OptConstFoldBr(instr->m_opcode == Js::OpCode::BrFalse_A, instr, srcValue);
return true;
}
if(ValueInfo::IsNotEqualTo(srcValue, min, max, nullptr, 0, 0))
{
OptConstFoldBr(instr->m_opcode == Js::OpCode::BrTrue_A, instr, srcValue);
return true;
}
return false;
}
bool
GlobOpt::TryOptConstFoldBrEqual(
IR::Instr *const instr,
const bool branchOnEqual,
Value *const src1Value,
const int32 min1,
const int32 max1,
Value *const src2Value,
const int32 min2,
const int32 max2)
{
Assert(instr);
Assert(src1Value);
Assert(DoAggressiveIntTypeSpec() ? src1Value->GetValueInfo()->IsLikelyInt() : src1Value->GetValueInfo()->IsInt());
Assert(src2Value);
Assert(DoAggressiveIntTypeSpec() ? src2Value->GetValueInfo()->IsLikelyInt() : src2Value->GetValueInfo()->IsInt());
if(ValueInfo::IsEqualTo(src1Value, min1, max1, src2Value, min2, max2))
{
OptConstFoldBr(branchOnEqual, instr, src1Value, src2Value);
return true;
}
if(ValueInfo::IsNotEqualTo(src1Value, min1, max1, src2Value, min2, max2))
{
OptConstFoldBr(!branchOnEqual, instr, src1Value, src2Value);
return true;
}
return false;
}
bool
GlobOpt::TryOptConstFoldBrGreaterThan(
IR::Instr *const instr,
const bool branchOnGreaterThan,
Value *const src1Value,
const int32 min1,
const int32 max1,
Value *const src2Value,
const int32 min2,
const int32 max2)
{
Assert(instr);
Assert(src1Value);
Assert(DoAggressiveIntTypeSpec() ? src1Value->GetValueInfo()->IsLikelyInt() : src1Value->GetValueInfo()->IsInt());
Assert(src2Value);
Assert(DoAggressiveIntTypeSpec() ? src2Value->GetValueInfo()->IsLikelyInt() : src2Value->GetValueInfo()->IsInt());
if(ValueInfo::IsGreaterThan(src1Value, min1, max1, src2Value, min2, max2))
{
OptConstFoldBr(branchOnGreaterThan, instr, src1Value, src2Value);
return true;
}
if(ValueInfo::IsLessThanOrEqualTo(src1Value, min1, max1, src2Value, min2, max2))
{
OptConstFoldBr(!branchOnGreaterThan, instr, src1Value, src2Value);
return true;
}
return false;
}
bool
GlobOpt::TryOptConstFoldBrGreaterThanOrEqual(
IR::Instr *const instr,
const bool branchOnGreaterThanOrEqual,
Value *const src1Value,
const int32 min1,
const int32 max1,
Value *const src2Value,
const int32 min2,
const int32 max2)
{
Assert(instr);
Assert(src1Value);
Assert(DoAggressiveIntTypeSpec() ? src1Value->GetValueInfo()->IsLikelyInt() : src1Value->GetValueInfo()->IsInt());
Assert(src2Value);
Assert(DoAggressiveIntTypeSpec() ? src2Value->GetValueInfo()->IsLikelyInt() : src2Value->GetValueInfo()->IsInt());
if(ValueInfo::IsGreaterThanOrEqualTo(src1Value, min1, max1, src2Value, min2, max2))
{
OptConstFoldBr(branchOnGreaterThanOrEqual, instr, src1Value, src2Value);
return true;
}
if(ValueInfo::IsLessThan(src1Value, min1, max1, src2Value, min2, max2))
{
OptConstFoldBr(!branchOnGreaterThanOrEqual, instr, src1Value, src2Value);
return true;
}
return false;
}
bool
GlobOpt::TryOptConstFoldBrUnsignedLessThan(
IR::Instr *const instr,
const bool branchOnLessThan,
Value *const src1Value,
const int32 min1,
const int32 max1,
Value *const src2Value,
const int32 min2,
const int32 max2)
{
Assert(DoConstFold());
Assert(!IsLoopPrePass());
if(!src1Value ||
!src2Value ||
!(
DoAggressiveIntTypeSpec()
? src1Value->GetValueInfo()->IsLikelyInt() && src2Value->GetValueInfo()->IsLikelyInt()
: src1Value->GetValueInfo()->IsInt() && src2Value->GetValueInfo()->IsInt()
))
{
return false;
}
uint uMin1 = (min1 < 0 ? (max1 < 0 ? min((uint)min1, (uint)max1) : 0) : min1);
uint uMax1 = max((uint)min1, (uint)max1);
uint uMin2 = (min2 < 0 ? (max2 < 0 ? min((uint)min2, (uint)max2) : 0) : min2);
uint uMax2 = max((uint)min2, (uint)max2);
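    // For example, a signed range of [-5, -1] reinterprets to the unsigned range
    // [0xFFFFFFFB, 0xFFFFFFFF], while a signed range that crosses zero gets uMin == 0.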
if (uMax1 < uMin2)
{
        // Range 1 is always less than Range 2
OptConstFoldBr(branchOnLessThan, instr, src1Value, src2Value);
return true;
}
if (uMin1 >= uMax2)
{
        // Range 1 is never less than Range 2
OptConstFoldBr(!branchOnLessThan, instr, src1Value, src2Value);
return true;
}
return false;
}
bool
GlobOpt::TryOptConstFoldBrUnsignedGreaterThan(
IR::Instr *const instr,
const bool branchOnGreaterThan,
Value *const src1Value,
const int32 min1,
const int32 max1,
Value *const src2Value,
const int32 min2,
const int32 max2)
{
Assert(DoConstFold());
Assert(!IsLoopPrePass());
if(!src1Value ||
!src2Value ||
!(
DoAggressiveIntTypeSpec()
? src1Value->GetValueInfo()->IsLikelyInt() && src2Value->GetValueInfo()->IsLikelyInt()
: src1Value->GetValueInfo()->IsInt() && src2Value->GetValueInfo()->IsInt()
))
{
return false;
}
uint uMin1 = (min1 < 0 ? (max1 < 0 ? min((uint)min1, (uint)max1) : 0) : min1);
uint uMax1 = max((uint)min1, (uint)max1);
uint uMin2 = (min2 < 0 ? (max2 < 0 ? min((uint)min2, (uint)max2) : 0) : min2);
uint uMax2 = max((uint)min2, (uint)max2);
if (uMin1 > uMax2)
{
// Range 1 is always greater than Range 2
OptConstFoldBr(branchOnGreaterThan, instr, src1Value, src2Value);
return true;
}
if (uMax1 <= uMin2)
{
        // Range 1 is never greater than Range 2
OptConstFoldBr(!branchOnGreaterThan, instr, src1Value, src2Value);
return true;
}
return false;
}
void
GlobOpt::SetPathDependentInfo(const bool conditionToBranch, const PathDependentInfo &info)
{
Assert(this->currentBlock->GetSuccList()->Count() == 2);
IR::Instr * fallthrough = this->currentBlock->GetNext()->GetFirstInstr();
FOREACH_SLISTBASECOUNTED_ENTRY(FlowEdge*, edge, this->currentBlock->GetSuccList())
{
if (conditionToBranch == (edge->GetSucc()->GetFirstInstr() != fallthrough))
{
edge->SetPathDependentInfo(info, alloc);
return;
}
}
NEXT_SLISTBASECOUNTED_ENTRY;
Assert(false);
}
PathDependentInfoToRestore
GlobOpt::UpdatePathDependentInfo(PathDependentInfo *const info)
{
Assert(info);
if(!info->HasInfo())
{
return PathDependentInfoToRestore();
}
decltype(&GlobOpt::UpdateIntBoundsForEqual) UpdateIntBoundsForLeftValue, UpdateIntBoundsForRightValue;
switch(info->Relationship())
{
case PathDependentRelationship::Equal:
UpdateIntBoundsForLeftValue = &GlobOpt::UpdateIntBoundsForEqual;
UpdateIntBoundsForRightValue = &GlobOpt::UpdateIntBoundsForEqual;
break;
case PathDependentRelationship::NotEqual:
UpdateIntBoundsForLeftValue = &GlobOpt::UpdateIntBoundsForNotEqual;
UpdateIntBoundsForRightValue = &GlobOpt::UpdateIntBoundsForNotEqual;
break;
case PathDependentRelationship::GreaterThanOrEqual:
UpdateIntBoundsForLeftValue = &GlobOpt::UpdateIntBoundsForGreaterThanOrEqual;
UpdateIntBoundsForRightValue = &GlobOpt::UpdateIntBoundsForLessThanOrEqual;
break;
case PathDependentRelationship::GreaterThan:
UpdateIntBoundsForLeftValue = &GlobOpt::UpdateIntBoundsForGreaterThan;
UpdateIntBoundsForRightValue = &GlobOpt::UpdateIntBoundsForLessThan;
break;
case PathDependentRelationship::LessThanOrEqual:
UpdateIntBoundsForLeftValue = &GlobOpt::UpdateIntBoundsForLessThanOrEqual;
UpdateIntBoundsForRightValue = &GlobOpt::UpdateIntBoundsForGreaterThanOrEqual;
break;
case PathDependentRelationship::LessThan:
UpdateIntBoundsForLeftValue = &GlobOpt::UpdateIntBoundsForLessThan;
UpdateIntBoundsForRightValue = &GlobOpt::UpdateIntBoundsForGreaterThan;
break;
default:
Assert(false);
__assume(false);
}
ValueInfo *leftValueInfo = info->LeftValue()->GetValueInfo();
IntConstantBounds leftConstantBounds;
AssertVerify(leftValueInfo->TryGetIntConstantBounds(&leftConstantBounds, true));
ValueInfo *rightValueInfo;
IntConstantBounds rightConstantBounds;
if(info->RightValue())
{
rightValueInfo = info->RightValue()->GetValueInfo();
AssertVerify(rightValueInfo->TryGetIntConstantBounds(&rightConstantBounds, true));
}
else
{
rightValueInfo = nullptr;
rightConstantBounds = IntConstantBounds(info->RightConstantValue(), info->RightConstantValue());
}
ValueInfo *const newLeftValueInfo =
(this->*UpdateIntBoundsForLeftValue)(
info->LeftValue(),
leftConstantBounds,
info->RightValue(),
rightConstantBounds,
true);
if(newLeftValueInfo)
{
ChangeValueInfo(nullptr, info->LeftValue(), newLeftValueInfo);
AssertVerify(newLeftValueInfo->TryGetIntConstantBounds(&leftConstantBounds, true));
}
else
{
leftValueInfo = nullptr;
}
ValueInfo *const newRightValueInfo =
(this->*UpdateIntBoundsForRightValue)(
info->RightValue(),
rightConstantBounds,
info->LeftValue(),
leftConstantBounds,
true);
if(newRightValueInfo)
{
ChangeValueInfo(nullptr, info->RightValue(), newRightValueInfo);
}
else
{
rightValueInfo = nullptr;
}
return PathDependentInfoToRestore(leftValueInfo, rightValueInfo);
}
void
GlobOpt::RestorePathDependentInfo(PathDependentInfo *const info, const PathDependentInfoToRestore infoToRestore)
{
Assert(info);
if(infoToRestore.LeftValueInfo())
{
Assert(info->LeftValue());
ChangeValueInfo(nullptr, info->LeftValue(), infoToRestore.LeftValueInfo());
}
if(infoToRestore.RightValueInfo())
{
Assert(info->RightValue());
ChangeValueInfo(nullptr, info->RightValue(), infoToRestore.RightValueInfo());
}
}
bool
GlobOpt::TypeSpecializeFloatUnary(IR::Instr **pInstr, Value *src1Val, Value **pDstVal, bool skipDst /* = false */)
{
IR::Instr *&instr = *pInstr;
IR::Opnd *src1;
IR::Opnd *dst;
Js::OpCode opcode = instr->m_opcode;
Value *valueToTransfer = nullptr;
Assert(src1Val && src1Val->GetValueInfo()->IsLikelyNumber() || OpCodeAttr::IsInlineBuiltIn(instr->m_opcode));
if (!this->DoFloatTypeSpec())
{
return false;
}
    // For inline built-ins we always type specialize; check for them upfront to avoid duplicating the same case labels.
if (!OpCodeAttr::IsInlineBuiltIn(instr->m_opcode))
{
switch (opcode)
{
case Js::OpCode::ArgOut_A_InlineBuiltIn:
skipDst = true;
// fall-through
case Js::OpCode::Ld_A:
case Js::OpCode::BrTrue_A:
case Js::OpCode::BrFalse_A:
if (instr->GetSrc1()->IsRegOpnd())
{
StackSym *sym = instr->GetSrc1()->AsRegOpnd()->m_sym;
if (CurrentBlockData()->IsFloat64TypeSpecialized(sym) == false)
{
// Type specializing an Ld_A isn't worth it, unless the src
// is already type specialized
return false;
}
}
if (instr->m_opcode == Js::OpCode::Ld_A)
{
valueToTransfer = src1Val;
}
break;
case Js::OpCode::Neg_A:
break;
case Js::OpCode::Conv_Num:
Assert(src1Val);
opcode = Js::OpCode::Ld_A;
valueToTransfer = src1Val;
if (!src1Val->GetValueInfo()->IsNumber())
{
StackSym *sym = instr->GetSrc1()->AsRegOpnd()->m_sym;
valueToTransfer = NewGenericValue(ValueType::Float, instr->GetDst()->GetStackSym());
if (CurrentBlockData()->IsFloat64TypeSpecialized(sym) == false)
{
// Set the dst as a nonDeadStore. We want to keep the Ld_A to prevent the FromVar from
// being dead-stored, as it could cause implicit calls.
dst = instr->GetDst();
dst->AsRegOpnd()->m_dontDeadStore = true;
}
}
break;
case Js::OpCode::StElemI_A:
case Js::OpCode::StElemI_A_Strict:
case Js::OpCode::StElemC:
return TypeSpecializeStElem(pInstr, src1Val, pDstVal);
default:
return false;
}
}
// Make sure the srcs are specialized
src1 = instr->GetSrc1();
// Use original val when calling toFloat64 as this is what we'll use to try hoisting the fromVar if we're in a loop.
this->ToFloat64(instr, src1, this->currentBlock, src1Val, nullptr, IR::BailOutPrimitiveButString);
if (!skipDst)
{
dst = instr->GetDst();
if (dst)
{
this->TypeSpecializeFloatDst(instr, valueToTransfer, src1Val, nullptr, pDstVal);
if (!this->IsLoopPrePass())
{
instr->m_opcode = opcode;
}
}
}
GOPT_TRACE_INSTR(instr, _u("Type specialized to FLOAT: "));
#if ENABLE_DEBUG_CONFIG_OPTIONS
if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::FloatTypeSpecPhase))
{
Output::Print(_u("Type specialized to FLOAT: "));
Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode));
}
#endif
return true;
}
// Unconditionally type-spec dst to float.
void
GlobOpt::TypeSpecializeFloatDst(IR::Instr *instr, Value *valToTransfer, Value *const src1Value, Value *const src2Value, Value **pDstVal)
{
IR::Opnd* dst = instr->GetDst();
Assert(dst);
AssertMsg(dst->IsRegOpnd(), "What else?");
this->ToFloat64Dst(instr, dst->AsRegOpnd(), this->currentBlock);
if(valToTransfer)
{
*pDstVal = this->ValueNumberTransferDst(instr, valToTransfer);
CurrentBlockData()->InsertNewValue(*pDstVal, dst);
}
else
{
*pDstVal = CreateDstUntransferredValue(ValueType::Float, instr, src1Value, src2Value);
}
}
#ifdef ENABLE_SIMDJS
void
GlobOpt::TypeSpecializeSimd128Dst(IRType type, IR::Instr *instr, Value *valToTransfer, Value *const src1Value, Value **pDstVal)
{
IR::Opnd* dst = instr->GetDst();
Assert(dst);
AssertMsg(dst->IsRegOpnd(), "What else?");
this->ToSimd128Dst(type, instr, dst->AsRegOpnd(), this->currentBlock);
if (valToTransfer)
{
*pDstVal = this->ValueNumberTransferDst(instr, valToTransfer);
CurrentBlockData()->InsertNewValue(*pDstVal, dst);
}
else
{
*pDstVal = NewGenericValue(GetValueTypeFromIRType(type), instr->GetDst());
}
}
#endif
bool
GlobOpt::TypeSpecializeLdLen(
IR::Instr * *const instrRef,
Value * *const src1ValueRef,
Value * *const dstValueRef,
bool *const forceInvariantHoistingRef)
{
Assert(instrRef);
IR::Instr *&instr = *instrRef;
Assert(instr);
Assert(instr->m_opcode == Js::OpCode::LdLen_A);
Assert(src1ValueRef);
Value *&src1Value = *src1ValueRef;
Assert(dstValueRef);
Value *&dstValue = *dstValueRef;
Assert(forceInvariantHoistingRef);
bool &forceInvariantHoisting = *forceInvariantHoistingRef;
if(!DoLdLenIntSpec(instr, instr->GetSrc1()->GetValueType()))
{
return false;
}
IR::BailOutKind bailOutKind = IR::BailOutOnIrregularLength;
if(!IsLoopPrePass())
{
IR::RegOpnd *const baseOpnd = instr->GetSrc1()->AsRegOpnd();
if(baseOpnd->IsArrayRegOpnd())
{
StackSym *const lengthSym = baseOpnd->AsArrayRegOpnd()->LengthSym();
if(lengthSym)
{
CaptureByteCodeSymUses(instr);
instr->m_opcode = Js::OpCode::Ld_I4;
instr->ReplaceSrc1(IR::RegOpnd::New(lengthSym, lengthSym->GetType(), func));
instr->ClearBailOutInfo();
// Find the hoisted length value
Value *const lengthValue = CurrentBlockData()->FindValue(lengthSym);
Assert(lengthValue);
src1Value = lengthValue;
ValueInfo *const lengthValueInfo = lengthValue->GetValueInfo();
Assert(lengthValueInfo->GetSymStore() != lengthSym);
IntConstantBounds lengthConstantBounds;
AssertVerify(lengthValueInfo->TryGetIntConstantBounds(&lengthConstantBounds));
Assert(lengthConstantBounds.LowerBound() >= 0);
// Int-specialize, and transfer the value to the dst
TypeSpecializeIntDst(
instr,
Js::OpCode::LdLen_A,
src1Value,
src1Value,
nullptr,
bailOutKind,
lengthConstantBounds.LowerBound(),
lengthConstantBounds.UpperBound(),
&dstValue);
// Try to force hoisting the Ld_I4 so that the length will have an invariant sym store that can be
// copy-propped. Invariant hoisting does not automatically hoist Ld_I4.
forceInvariantHoisting = true;
return true;
}
}
if (instr->HasBailOutInfo())
{
Assert(instr->GetBailOutKind() == IR::BailOutMarkTempObject);
bailOutKind = IR::BailOutOnIrregularLength | IR::BailOutMarkTempObject;
instr->SetBailOutKind(bailOutKind);
}
else
{
Assert(bailOutKind == IR::BailOutOnIrregularLength);
GenerateBailAtOperation(&instr, bailOutKind);
}
}
TypeSpecializeIntDst(
instr,
Js::OpCode::LdLen_A,
nullptr,
nullptr,
nullptr,
bailOutKind,
0,
INT32_MAX,
&dstValue);
return true;
}
bool
GlobOpt::TypeSpecializeFloatBinary(IR::Instr *instr, Value *src1Val, Value *src2Val, Value **pDstVal)
{
IR::Opnd *src1;
IR::Opnd *src2;
IR::Opnd *dst;
bool allowUndefinedOrNullSrc1 = true;
bool allowUndefinedOrNullSrc2 = true;
bool skipSrc1 = false;
bool skipSrc2 = false;
bool skipDst = false;
if (!this->DoFloatTypeSpec())
{
return false;
}
    // For inline built-ins we always type specialize; check for them upfront to avoid duplicating the same case labels.
if (!OpCodeAttr::IsInlineBuiltIn(instr->m_opcode))
{
switch (instr->m_opcode)
{
case Js::OpCode::Sub_A:
case Js::OpCode::Mul_A:
case Js::OpCode::Div_A:
case Js::OpCode::Expo_A:
// Avoid if one source is known not to be a number.
if (src1Val->GetValueInfo()->IsNotNumber() || src2Val->GetValueInfo()->IsNotNumber())
{
return false;
}
break;
case Js::OpCode::BrSrEq_A:
case Js::OpCode::BrSrNeq_A:
case Js::OpCode::BrEq_A:
case Js::OpCode::BrNeq_A:
case Js::OpCode::BrSrNotEq_A:
case Js::OpCode::BrNotEq_A:
case Js::OpCode::BrSrNotNeq_A:
case Js::OpCode::BrNotNeq_A:
// Avoid if one source is known not to be a number.
if (src1Val->GetValueInfo()->IsNotNumber() || src2Val->GetValueInfo()->IsNotNumber())
{
return false;
}
            // undefined == undefined, but +undefined != +undefined (NaN compares unequal to itself)
            // 0.0 != null, but 0.0 == +null (+null is 0)
            //
            // So bail out on anything but numbers for both src1 and src2
allowUndefinedOrNullSrc1 = false;
allowUndefinedOrNullSrc2 = false;
break;
case Js::OpCode::BrGt_A:
case Js::OpCode::BrGe_A:
case Js::OpCode::BrLt_A:
case Js::OpCode::BrLe_A:
case Js::OpCode::BrNotGt_A:
case Js::OpCode::BrNotGe_A:
case Js::OpCode::BrNotLt_A:
case Js::OpCode::BrNotLe_A:
// Avoid if one source is known not to be a number.
if (src1Val->GetValueInfo()->IsNotNumber() || src2Val->GetValueInfo()->IsNotNumber())
{
return false;
}
break;
case Js::OpCode::Add_A:
// For Add, we need both sources to be Numbers, otherwise it could be a string concat
if (!src1Val || !src2Val || !(src1Val->GetValueInfo()->IsLikelyNumber() && src2Val->GetValueInfo()->IsLikelyNumber()))
{
return false;
}
break;
case Js::OpCode::ArgOut_A_InlineBuiltIn:
skipSrc2 = true;
skipDst = true;
break;
default:
return false;
}
}
else
{
switch (instr->m_opcode)
{
case Js::OpCode::InlineArrayPush:
bool isFloatConstMissingItem = src2Val->GetValueInfo()->IsFloatConstant();
if(isFloatConstMissingItem)
{
FloatConstType floatValue = src2Val->GetValueInfo()->AsFloatConstant()->FloatValue();
isFloatConstMissingItem = Js::SparseArraySegment<double>::IsMissingItem(&floatValue);
}
            // Don't specialize if the element is not likely a number - we would surely bail out
if(!(src2Val->GetValueInfo()->IsLikelyNumber()) || isFloatConstMissingItem)
{
return false;
}
// Only specialize the Second source - element
skipSrc1 = true;
skipDst = true;
allowUndefinedOrNullSrc2 = false;
break;
}
}
// Make sure the srcs are specialized
if(!skipSrc1)
{
src1 = instr->GetSrc1();
this->ToFloat64(instr, src1, this->currentBlock, src1Val, nullptr, (allowUndefinedOrNullSrc1 ? IR::BailOutPrimitiveButString : IR::BailOutNumberOnly));
}
if (!skipSrc2)
{
src2 = instr->GetSrc2();
this->ToFloat64(instr, src2, this->currentBlock, src2Val, nullptr, (allowUndefinedOrNullSrc2 ? IR::BailOutPrimitiveButString : IR::BailOutNumberOnly));
}
if (!skipDst)
{
dst = instr->GetDst();
if (dst)
{
*pDstVal = CreateDstUntransferredValue(ValueType::Float, instr, src1Val, src2Val);
AssertMsg(dst->IsRegOpnd(), "What else?");
this->ToFloat64Dst(instr, dst->AsRegOpnd(), this->currentBlock);
}
}
GOPT_TRACE_INSTR(instr, _u("Type specialized to FLOAT: "));
#if ENABLE_DEBUG_CONFIG_OPTIONS
if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::FloatTypeSpecPhase))
{
Output::Print(_u("Type specialized to FLOAT: "));
Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode));
}
#endif
return true;
}
bool
GlobOpt::TypeSpecializeStElem(IR::Instr ** pInstr, Value *src1Val, Value **pDstVal)
{
IR::Instr *&instr = *pInstr;
IR::RegOpnd *baseOpnd = instr->GetDst()->AsIndirOpnd()->GetBaseOpnd();
ValueType baseValueType(baseOpnd->GetValueType());
if (instr->DoStackArgsOpt(this->func) ||
(!this->DoTypedArrayTypeSpec() && baseValueType.IsLikelyOptimizedTypedArray()) ||
(!this->DoNativeArrayTypeSpec() && baseValueType.IsLikelyNativeArray()) ||
!(baseValueType.IsLikelyOptimizedTypedArray() || baseValueType.IsLikelyNativeArray()))
{
GOPT_TRACE_INSTR(instr, _u("Didn't type specialize array access, because typed array type specialization is disabled, or base is not an optimized typed array.\n"));
if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->func))
{
char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
char baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE];
baseValueType.ToString(baseValueTypeStr);
Output::Print(_u("Typed Array Optimization: function: %s (%s): instr: %s, base value type: %S, did not specialize because %s.\n"),
this->func->GetJITFunctionBody()->GetDisplayName(),
this->func->GetDebugNumberSet(debugStringBuffer),
Js::OpCodeUtil::GetOpCodeName(instr->m_opcode),
baseValueTypeStr,
instr->DoStackArgsOpt(this->func) ?
_u("instruction uses the arguments object") :
_u("typed array type specialization is disabled, or base is not an optimized typed array"));
Output::Flush();
}
return false;
}
Assert(instr->GetSrc1()->IsRegOpnd() || (src1Val && src1Val->GetValueInfo()->HasIntConstantValue()));
StackSym *sym = instr->GetSrc1()->IsRegOpnd() ? instr->GetSrc1()->AsRegOpnd()->m_sym : nullptr;
// Only type specialize the source of store element if the source symbol is already type specialized to int or float.
if (sym)
{
if (baseValueType.IsLikelyNativeArray())
{
            // Gently coerce these srcs into native if it seems likely to work.
// Otherwise we can't use the fast path to store.
// But don't try to put a float-specialized number into an int array this way.
if (!(
CurrentBlockData()->IsInt32TypeSpecialized(sym) ||
(
src1Val &&
(
DoAggressiveIntTypeSpec()
? src1Val->GetValueInfo()->IsLikelyInt()
: src1Val->GetValueInfo()->IsInt()
)
)
))
{
if (!(
CurrentBlockData()->IsFloat64TypeSpecialized(sym) ||
(src1Val && src1Val->GetValueInfo()->IsLikelyNumber())
) ||
baseValueType.HasIntElements())
{
return false;
}
}
}
else if (!CurrentBlockData()->IsInt32TypeSpecialized(sym) && !CurrentBlockData()->IsFloat64TypeSpecialized(sym))
{
GOPT_TRACE_INSTR(instr, _u("Didn't specialize array access, because src is not type specialized.\n"));
if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->func))
{
char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
char baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE];
baseValueType.ToString(baseValueTypeStr);
Output::Print(_u("Typed Array Optimization: function: %s (%s): instr: %s, base value type: %S, did not specialize because src is not specialized.\n"),
this->func->GetJITFunctionBody()->GetDisplayName(),
this->func->GetDebugNumberSet(debugStringBuffer),
Js::OpCodeUtil::GetOpCodeName(instr->m_opcode),
baseValueTypeStr);
Output::Flush();
}
return false;
}
}
int32 src1IntConstantValue;
if(baseValueType.IsLikelyNativeIntArray() && src1Val && src1Val->GetValueInfo()->TryGetIntConstantValue(&src1IntConstantValue))
{
if(Js::SparseArraySegment<int32>::IsMissingItem(&src1IntConstantValue))
{
return false;
}
}
// Note: doing ToVarUses to make sure we do get the int32 version of the index before trying to access its value in
// ShouldExpectConventionalArrayIndexValue. Not sure why that never gave us a problem before.
Assert(instr->GetDst()->IsIndirOpnd());
IR::IndirOpnd *dst = instr->GetDst()->AsIndirOpnd();
// Make sure we use the int32 version of the index operand symbol, if available. Otherwise, ensure the var symbol is live (by
// potentially inserting a ToVar).
this->ToVarUses(instr, dst, /* isDst = */ true, nullptr);
if (!ShouldExpectConventionalArrayIndexValue(dst))
{
GOPT_TRACE_INSTR(instr, _u("Didn't specialize array access, because index is negative or likely not int.\n"));
if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->func))
{
char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
char baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE];
baseValueType.ToString(baseValueTypeStr);
Output::Print(_u("Typed Array Optimization: function: %s (%s): instr: %s, base value type: %S, did not specialize because index is negative or likely not int.\n"),
this->func->GetJITFunctionBody()->GetDisplayName(),
this->func->GetDebugNumberSet(debugStringBuffer),
Js::OpCodeUtil::GetOpCodeName(instr->m_opcode),
baseValueTypeStr);
Output::Flush();
}
return false;
}
IRType toType = TyVar;
bool isLossyAllowed = true;
IR::BailOutKind arrayBailOutKind = IR::BailOutConventionalTypedArrayAccessOnly;
switch(baseValueType.GetObjectType())
{
case ObjectType::Int8Array:
case ObjectType::Uint8Array:
case ObjectType::Int16Array:
case ObjectType::Uint16Array:
case ObjectType::Int32Array:
case ObjectType::Int8VirtualArray:
case ObjectType::Uint8VirtualArray:
case ObjectType::Int16VirtualArray:
case ObjectType::Uint16VirtualArray:
case ObjectType::Int32VirtualArray:
case ObjectType::Int8MixedArray:
case ObjectType::Uint8MixedArray:
case ObjectType::Int16MixedArray:
case ObjectType::Uint16MixedArray:
case ObjectType::Int32MixedArray:
Int32Array:
if (this->DoAggressiveIntTypeSpec() || this->DoFloatTypeSpec())
{
toType = TyInt32;
}
break;
case ObjectType::Uint32Array:
case ObjectType::Uint32VirtualArray:
case ObjectType::Uint32MixedArray:
// Uint32Arrays may store values that overflow int32. If the value being stored comes from a symbol that's
// already losslessly type specialized to int32, we'll use it. Otherwise, if we only have a float64 specialized
// value, we don't want to force bailout if it doesn't fit in int32. Instead, we'll emit conversion in the
// lowerer, and handle overflow, if necessary.
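        // For example, a float64-specialized value of 3e9 fits a Uint32Array element but
        // not an int32, so forcing an int32 bailout here would be wasted work.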
if (!sym || CurrentBlockData()->IsInt32TypeSpecialized(sym))
{
toType = TyInt32;
}
else if (CurrentBlockData()->IsFloat64TypeSpecialized(sym))
{
toType = TyFloat64;
}
break;
case ObjectType::Float32Array:
case ObjectType::Float64Array:
case ObjectType::Float32VirtualArray:
case ObjectType::Float32MixedArray:
case ObjectType::Float64VirtualArray:
case ObjectType::Float64MixedArray:
Float64Array:
if (this->DoFloatTypeSpec())
{
toType = TyFloat64;
}
break;
case ObjectType::Uint8ClampedArray:
case ObjectType::Uint8ClampedVirtualArray:
case ObjectType::Uint8ClampedMixedArray:
// Uint8ClampedArray requires rounding (as opposed to truncation) of floating point values. If source symbol is
// float type specialized, type specialize this instruction to float as well, and handle rounding in the
// lowerer.
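        // Per the ECMAScript ToUint8Clamp semantics, values are clamped to [0, 255] and
        // rounded to nearest with ties going to even: e.g. 250.5 -> 250 but 251.5 -> 252,
        // whereas truncation would give 250 and 251.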
if (!sym || CurrentBlockData()->IsInt32TypeSpecialized(sym))
{
toType = TyInt32;
isLossyAllowed = false;
}
else if (CurrentBlockData()->IsFloat64TypeSpecialized(sym))
{
toType = TyFloat64;
}
break;
default:
Assert(baseValueType.IsLikelyNativeArray());
isLossyAllowed = false;
arrayBailOutKind = IR::BailOutConventionalNativeArrayAccessOnly;
if(baseValueType.HasIntElements())
{
goto Int32Array;
}
Assert(baseValueType.HasFloatElements());
goto Float64Array;
}
if (toType != TyVar)
{
GOPT_TRACE_INSTR(instr, _u("Type specialized array access.\n"));
if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->func))
{
char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
char baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE];
baseValueType.ToString(baseValueTypeStr);
Output::Print(_u("Typed Array Optimization: function: %s (%s): instr: %s, base value type: %S, type specialized to %s.\n"),
this->func->GetJITFunctionBody()->GetDisplayName(),
this->func->GetDebugNumberSet(debugStringBuffer),
Js::OpCodeUtil::GetOpCodeName(instr->m_opcode),
baseValueTypeStr,
toType == TyInt32 ? _u("int32") : _u("float64"));
Output::Flush();
}
IR::BailOutKind bailOutKind = ((toType == TyInt32) ? IR::BailOutIntOnly : IR::BailOutNumberOnly);
this->ToTypeSpecUse(instr, instr->GetSrc1(), this->currentBlock, src1Val, nullptr, toType, bailOutKind, /* lossy = */ isLossyAllowed);
if (!this->IsLoopPrePass())
{
bool bConvertToBailoutInstr = true;
// Definite StElemC doesn't need bailout, because it can't fail or cause conversion.
if (instr->m_opcode == Js::OpCode::StElemC && baseValueType.IsObject())
{
if (baseValueType.HasIntElements())
{
                    // Native int array requires a missing-element check & bailout
int32 min = INT32_MIN;
int32 max = INT32_MAX;
if (src1Val->GetValueInfo()->GetIntValMinMax(&min, &max, false))
{
bConvertToBailoutInstr = ((min <= Js::JavascriptNativeIntArray::MissingItem) && (max >= Js::JavascriptNativeIntArray::MissingItem));
}
}
else
{
bConvertToBailoutInstr = false;
}
}
if (bConvertToBailoutInstr)
{
if(instr->HasBailOutInfo())
{
const IR::BailOutKind oldBailOutKind = instr->GetBailOutKind();
Assert(
(
!(oldBailOutKind & ~IR::BailOutKindBits) ||
(oldBailOutKind & ~IR::BailOutKindBits) == IR::BailOutOnImplicitCallsPreOp
) &&
!(oldBailOutKind & IR::BailOutKindBits & ~(IR::BailOutOnArrayAccessHelperCall | IR::BailOutMarkTempObject)));
if(arrayBailOutKind == IR::BailOutConventionalTypedArrayAccessOnly)
{
// BailOutConventionalTypedArrayAccessOnly also bails out if the array access is outside the head
// segment bounds, and guarantees no implicit calls. Override the bailout kind so that the instruction
// bails out for the right reason.
instr->SetBailOutKind(
arrayBailOutKind | (oldBailOutKind & (IR::BailOutKindBits - IR::BailOutOnArrayAccessHelperCall)));
}
else
{
// BailOutConventionalNativeArrayAccessOnly by itself may generate a helper call, and may cause implicit
// calls to occur, so it must be merged in to eliminate generating the helper call.
Assert(arrayBailOutKind == IR::BailOutConventionalNativeArrayAccessOnly);
instr->SetBailOutKind(oldBailOutKind | arrayBailOutKind);
}
}
else
{
GenerateBailAtOperation(&instr, arrayBailOutKind);
}
}
}
}
else
{
GOPT_TRACE_INSTR(instr, _u("Didn't specialize array access, because the source was not already specialized.\n"));
if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->func))
{
char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
char baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE];
baseValueType.ToString(baseValueTypeStr);
Output::Print(_u("Typed Array Optimization: function: %s (%s): instr: %s, base value type: %S, did not type specialize, because of array type.\n"),
this->func->GetJITFunctionBody()->GetDisplayName(),
this->func->GetDebugNumberSet(debugStringBuffer),
Js::OpCodeUtil::GetOpCodeName(instr->m_opcode),
baseValueTypeStr);
Output::Flush();
}
}
return toType != TyVar;
}
IR::Instr *
GlobOpt::ToVarUses(IR::Instr *instr, IR::Opnd *opnd, bool isDst, Value *val)
{
Sym *sym;
switch (opnd->GetKind())
{
case IR::OpndKindReg:
if (!isDst && !CurrentBlockData()->liveVarSyms->Test(opnd->AsRegOpnd()->m_sym->m_id))
{
instr = this->ToVar(instr, opnd->AsRegOpnd(), this->currentBlock, val, true);
}
break;
case IR::OpndKindSym:
sym = opnd->AsSymOpnd()->m_sym;
if (sym->IsPropertySym() && !CurrentBlockData()->liveVarSyms->Test(sym->AsPropertySym()->m_stackSym->m_id)
&& sym->AsPropertySym()->m_stackSym->IsVar())
{
StackSym *propertyBase = sym->AsPropertySym()->m_stackSym;
IR::RegOpnd *newOpnd = IR::RegOpnd::New(propertyBase, TyVar, instr->m_func);
instr = this->ToVar(instr, newOpnd, this->currentBlock, CurrentBlockData()->FindValue(propertyBase), true);
}
break;
case IR::OpndKindIndir:
IR::RegOpnd *baseOpnd = opnd->AsIndirOpnd()->GetBaseOpnd();
if (!CurrentBlockData()->liveVarSyms->Test(baseOpnd->m_sym->m_id))
{
instr = this->ToVar(instr, baseOpnd, this->currentBlock, CurrentBlockData()->FindValue(baseOpnd->m_sym), true);
}
IR::RegOpnd *indexOpnd = opnd->AsIndirOpnd()->GetIndexOpnd();
if (indexOpnd && !indexOpnd->m_sym->IsTypeSpec())
{
if((indexOpnd->GetValueType().IsInt()
? !IsTypeSpecPhaseOff(func)
: indexOpnd->GetValueType().IsLikelyInt() && DoAggressiveIntTypeSpec()) && !GetIsAsmJSFunc()) // typespec is disabled for asmjs
{
StackSym *const indexVarSym = indexOpnd->m_sym;
Value *const indexValue = CurrentBlockData()->FindValue(indexVarSym);
Assert(indexValue);
Assert(indexValue->GetValueInfo()->IsLikelyInt());
ToInt32(instr, indexOpnd, currentBlock, indexValue, opnd->AsIndirOpnd(), false);
Assert(indexValue->GetValueInfo()->IsInt());
if(!IsLoopPrePass())
{
indexOpnd = opnd->AsIndirOpnd()->GetIndexOpnd();
if(indexOpnd)
{
Assert(indexOpnd->m_sym->IsTypeSpec());
IntConstantBounds indexConstantBounds;
AssertVerify(indexValue->GetValueInfo()->TryGetIntConstantBounds(&indexConstantBounds));
if(ValueInfo::IsGreaterThanOrEqualTo(
indexValue,
indexConstantBounds.LowerBound(),
indexConstantBounds.UpperBound(),
nullptr,
0,
0))
{
indexOpnd->SetType(TyUint32);
}
}
}
}
else if (!CurrentBlockData()->liveVarSyms->Test(indexOpnd->m_sym->m_id))
{
instr = this->ToVar(instr, indexOpnd, this->currentBlock, CurrentBlockData()->FindValue(indexOpnd->m_sym), true);
}
}
break;
}
return instr;
}
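
// ToVar: make 'varSym' live as a var in 'block' by loading it from its type-specialized
// copy. Known int constants are materialized directly with LdC_A_I4; otherwise a
// ToVar/Conv_Prim is inserted. No bailout is needed in this direction, since int32/float64
// (and simd128) values can always be boxed back into vars.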
IR::Instr *
GlobOpt::ToVar(IR::Instr *instr, IR::RegOpnd *regOpnd, BasicBlock *block, Value *value, bool needsUpdate)
{
IR::Instr *newInstr;
StackSym *varSym = regOpnd->m_sym;
if (IsTypeSpecPhaseOff(this->func))
{
return instr;
}
if (this->IsLoopPrePass())
{
block->globOptData.liveVarSyms->Set(varSym->m_id);
return instr;
}
if (block->globOptData.liveVarSyms->Test(varSym->m_id))
{
// Already live, nothing to do
return instr;
}
if (!varSym->IsVar())
{
Assert(!varSym->IsTypeSpec());
// Leave non-vars alone.
return instr;
}
Assert(block->globOptData.IsTypeSpecialized(varSym));
if (!value)
{
value = block->globOptData.FindValue(varSym);
}
ValueInfo *valueInfo = value ? value->GetValueInfo() : nullptr;
if(valueInfo && valueInfo->IsInt())
{
// If two syms have the same value, one is lossy-int-specialized, and then the other is int-specialized, the value
// would have been updated to definitely int. Upon using the lossy-int-specialized sym later, it would be flagged as
// lossy while the value is definitely int. Since the bit-vectors are based on the sym and not the value, update the
// lossy state.
block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id);
}
IRType fromType = TyIllegal;
StackSym *typeSpecSym = nullptr;
if (block->globOptData.liveInt32Syms->Test(varSym->m_id) && !block->globOptData.liveLossyInt32Syms->Test(varSym->m_id))
{
fromType = TyInt32;
typeSpecSym = varSym->GetInt32EquivSym(this->func);
Assert(valueInfo);
Assert(valueInfo->IsInt());
}
else if (block->globOptData.liveFloat64Syms->Test(varSym->m_id))
{
fromType = TyFloat64;
typeSpecSym = varSym->GetFloat64EquivSym(this->func);
// Ensure that all bailout FromVars that generate a value for this type-specialized sym will bail out on any non-number
// value, even ones that have already been generated before. Float-specialized non-number values cannot be converted
// back to Var since they will not go back to the original non-number value. The dead-store pass will update the bailout
// kind on already-generated FromVars based on this bit.
typeSpecSym->m_requiresBailOnNotNumber = true;
// A previous float conversion may have used BailOutPrimitiveButString, which does not change the value type to say
// definitely float, since it can also be a non-string primitive. The convert back to Var though, will cause that
// bailout kind to be changed to BailOutNumberOnly in the dead-store phase, so from the point of the initial conversion
// to float, that the value is definitely number. Since we don't know where the FromVar is, change the value type here.
if(valueInfo)
{
if(!valueInfo->IsNumber())
{
valueInfo = valueInfo->SpecializeToFloat64(alloc);
ChangeValueInfo(block, value, valueInfo);
regOpnd->SetValueType(valueInfo->Type());
}
}
else
{
value = NewGenericValue(ValueType::Float);
valueInfo = value->GetValueInfo();
block->globOptData.SetValue(value, varSym);
regOpnd->SetValueType(valueInfo->Type());
}
}
else
{
#ifdef ENABLE_SIMDJS
// SIMD_JS
Assert(block->globOptData.IsLiveAsSimd128(varSym));
if (block->globOptData.IsLiveAsSimd128F4(varSym))
{
fromType = TySimd128F4;
}
else
{
Assert(block->globOptData.IsLiveAsSimd128I4(varSym));
fromType = TySimd128I4;
}
if (valueInfo)
{
if (fromType == TySimd128F4 && !valueInfo->Type().IsSimd128Float32x4())
{
valueInfo = valueInfo->SpecializeToSimd128F4(alloc);
ChangeValueInfo(block, value, valueInfo);
regOpnd->SetValueType(valueInfo->Type());
}
            else if (fromType == TySimd128I4 && !valueInfo->Type().IsSimd128Int32x4())
            {
                // The condition above already guarantees the value is not Simd128Int32x4.
                valueInfo = valueInfo->SpecializeToSimd128I4(alloc);
                ChangeValueInfo(block, value, valueInfo);
                regOpnd->SetValueType(valueInfo->Type());
            }
}
else
{
ValueType valueType = fromType == TySimd128F4 ? ValueType::GetSimd128(ObjectType::Simd128Float32x4) : ValueType::GetSimd128(ObjectType::Simd128Int32x4);
value = NewGenericValue(valueType);
valueInfo = value->GetValueInfo();
block->globOptData.SetValue(value, varSym);
regOpnd->SetValueType(valueInfo->Type());
}
ValueType valueType = valueInfo->Type();
// Should be definite if type-specialized
Assert(valueType.IsSimd128());
typeSpecSym = varSym->GetSimd128EquivSym(fromType, this->func);
#else
Assert(UNREACHED);
#endif
}
AssertOrFailFast(valueInfo);
int32 intConstantValue;
if (valueInfo->TryGetIntConstantValue(&intConstantValue))
{
// Lower will tag or create a number directly
newInstr = IR::Instr::New(Js::OpCode::LdC_A_I4, regOpnd,
IR::IntConstOpnd::New(intConstantValue, TyInt32, instr->m_func), instr->m_func);
}
else
{
IR::RegOpnd * regNew = IR::RegOpnd::New(typeSpecSym, fromType, instr->m_func);
Js::OpCode opcode = Js::OpCode::ToVar;
regNew->SetIsJITOptimizedReg(true);
newInstr = IR::Instr::New(opcode, regOpnd, regNew, instr->m_func);
}
newInstr->SetByteCodeOffset(instr);
newInstr->GetDst()->AsRegOpnd()->SetIsJITOptimizedReg(true);
ValueType valueType = valueInfo->Type();
if(fromType == TyInt32)
{
#if !INT32VAR // All 32-bit ints are taggable on 64-bit architectures
IntConstantBounds constantBounds;
AssertVerify(valueInfo->TryGetIntConstantBounds(&constantBounds));
if(constantBounds.IsTaggable())
#endif
{
// The value is within the taggable range, so set the opnd value types to TaggedInt to avoid the overflow check
valueType = ValueType::GetTaggedInt();
}
}
newInstr->GetDst()->SetValueType(valueType);
newInstr->GetSrc1()->SetValueType(valueType);
IR::Instr *insertAfterInstr = instr->m_prev;
if (instr == block->GetLastInstr() &&
(instr->IsBranchInstr() || instr->m_opcode == Js::OpCode::BailTarget))
{
// Don't insert code between the branch and the preceding ByteCodeUses instrs...
while(insertAfterInstr->m_opcode == Js::OpCode::ByteCodeUses)
{
insertAfterInstr = insertAfterInstr->m_prev;
}
}
block->InsertInstrAfter(newInstr, insertAfterInstr);
block->globOptData.liveVarSyms->Set(varSym->m_id);
GOPT_TRACE_OPND(regOpnd, _u("Converting to var\n"));
if (block->loop)
{
Assert(!this->IsLoopPrePass());
this->TryHoistInvariant(newInstr, block, value, value, nullptr, false);
}
if (needsUpdate)
{
// Make sure that the kill effect of the ToVar instruction is tracked and that the kill of a property
// type is reflected in the current instruction.
this->ProcessKills(newInstr);
this->ValueNumberObjectType(newInstr->GetDst(), newInstr);
if (instr->GetSrc1() && instr->GetSrc1()->IsSymOpnd() && instr->GetSrc1()->AsSymOpnd()->IsPropertySymOpnd())
{
// Reprocess the load source. We need to reset the PropertySymOpnd fields first.
IR::PropertySymOpnd *propertySymOpnd = instr->GetSrc1()->AsPropertySymOpnd();
if (propertySymOpnd->IsTypeCheckSeqCandidate())
{
propertySymOpnd->SetTypeChecked(false);
propertySymOpnd->SetTypeAvailable(false);
propertySymOpnd->SetWriteGuardChecked(false);
}
this->FinishOptPropOp(instr, propertySymOpnd);
instr = this->SetTypeCheckBailOut(instr->GetSrc1(), instr, nullptr);
}
}
return instr;
}
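
// ToInt32 / ToFloat64: thin wrappers around ToTypeSpecUse. ToInt32 always uses
// BailOutIntOnly for the (potentially lossless) conversion; ToFloat64 lets the caller
// choose the bailout kind (e.g. BailOutNumberOnly vs. BailOutPrimitiveButString).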
IR::Instr *
GlobOpt::ToInt32(IR::Instr *instr, IR::Opnd *opnd, BasicBlock *block, Value *val, IR::IndirOpnd *indir, bool lossy)
{
return this->ToTypeSpecUse(instr, opnd, block, val, indir, TyInt32, IR::BailOutIntOnly, lossy);
}
IR::Instr *
GlobOpt::ToFloat64(IR::Instr *instr, IR::Opnd *opnd, BasicBlock *block, Value *val, IR::IndirOpnd *indir, IR::BailOutKind bailOutKind)
{
return this->ToTypeSpecUse(instr, opnd, block, val, indir, TyFloat64, bailOutKind);
}
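
// ToTypeSpecUse: the workhorse for type-specializing a use. Depending on current liveness
// it either rewrites the operand to the already-live type-spec sym, or inserts a FromVar /
// Conv_Prim that produces it, attaching a bailout when the conversion can fail. Constants
// are folded into the load instead of converted.
//
// Illustrative examples (not from the original source):
//   a | 0    -> lossy ToInt32; no bailout unless 'a' may be a non-primitive whose
//               toString/valueOf could run as a side effect (BailOutOnNotPrimitive).
//   a * 0.5  -> ToFloat64 with a number-check bailout (e.g. BailOutNumberOnly) when 'a'
//               is not proven to be a number.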
IR::Instr *
GlobOpt::ToTypeSpecUse(IR::Instr *instr, IR::Opnd *opnd, BasicBlock *block, Value *val, IR::IndirOpnd *indir, IRType toType, IR::BailOutKind bailOutKind, bool lossy, IR::Instr *insertBeforeInstr)
{
Assert(bailOutKind != IR::BailOutInvalid);
IR::Instr *newInstr;
if (!val && opnd->IsRegOpnd())
{
val = block->globOptData.FindValue(opnd->AsRegOpnd()->m_sym);
}
ValueInfo *valueInfo = val ? val->GetValueInfo() : nullptr;
bool needReplaceSrc = false;
bool updateBlockLastInstr = false;
if (instr)
{
needReplaceSrc = true;
if (!insertBeforeInstr)
{
insertBeforeInstr = instr;
}
}
else if (!insertBeforeInstr)
{
// Insert it at the end of the block
insertBeforeInstr = block->GetLastInstr();
if (insertBeforeInstr->IsBranchInstr() || insertBeforeInstr->m_opcode == Js::OpCode::BailTarget)
{
// Don't insert code between the branch and the preceding ByteCodeUses instrs...
while(insertBeforeInstr->m_prev->m_opcode == Js::OpCode::ByteCodeUses)
{
insertBeforeInstr = insertBeforeInstr->m_prev;
}
}
else
{
insertBeforeInstr = insertBeforeInstr->m_next;
updateBlockLastInstr = true;
}
}
// Int constant values will be propagated into the instruction. For ArgOut_A_InlineBuiltIn, there's no benefit from
// const-propping, so those are excluded.
if (opnd->IsRegOpnd() &&
!(
valueInfo &&
(valueInfo->HasIntConstantValue() || valueInfo->IsFloatConstant()) &&
(!instr || instr->m_opcode != Js::OpCode::ArgOut_A_InlineBuiltIn)
))
{
IR::RegOpnd *regSrc = opnd->AsRegOpnd();
StackSym *varSym = regSrc->m_sym;
Js::OpCode opcode = Js::OpCode::FromVar;
if (varSym->IsTypeSpec() || !block->globOptData.liveVarSyms->Test(varSym->m_id))
{
// Conversion between int32 and float64
if (varSym->IsTypeSpec())
{
varSym = varSym->GetVarEquivSym(this->func);
}
opcode = Js::OpCode::Conv_Prim;
}
Assert(block->globOptData.liveVarSyms->Test(varSym->m_id) || block->globOptData.IsTypeSpecialized(varSym));
StackSym *typeSpecSym = nullptr;
BOOL isLive = FALSE;
BVSparse<JitArenaAllocator> *livenessBv = nullptr;
if(valueInfo && valueInfo->IsInt())
{
// If two syms have the same value, one is lossy-int-specialized, and then the other is int-specialized, the value
// would have been updated to definitely int. Upon using the lossy-int-specialized sym later, it would be flagged as
// lossy while the value is definitely int. Since the bit-vectors are based on the sym and not the value, update the
// lossy state.
block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id);
}
if (toType == TyInt32)
{
// Need to determine whether the conversion is actually lossy or lossless. If the value is an int, then it's a
// lossless conversion despite the type of conversion requested. The liveness of the converted int32 sym needs to be
// set to reflect the actual type of conversion done. Also, a lossless conversion needs the value to determine
// whether the conversion may need to bail out.
Assert(valueInfo);
if(valueInfo->IsInt())
{
lossy = false;
}
else
{
Assert(IsLoopPrePass() || !block->globOptData.IsInt32TypeSpecialized(varSym));
}
livenessBv = block->globOptData.liveInt32Syms;
isLive = livenessBv->Test(varSym->m_id) && (lossy || !block->globOptData.liveLossyInt32Syms->Test(varSym->m_id));
if (this->IsLoopPrePass())
{
if(!isLive)
{
livenessBv->Set(varSym->m_id);
if(lossy)
{
block->globOptData.liveLossyInt32Syms->Set(varSym->m_id);
}
else
{
block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id);
}
}
if(!lossy)
{
Assert(bailOutKind == IR::BailOutIntOnly || bailOutKind == IR::BailOutExpectingInteger);
valueInfo = valueInfo->SpecializeToInt32(alloc);
ChangeValueInfo(nullptr, val, valueInfo);
if(needReplaceSrc)
{
opnd->SetValueType(valueInfo->Type());
}
}
return instr;
}
typeSpecSym = varSym->GetInt32EquivSym(this->func);
if (!isLive)
{
if (!opnd->IsVar() ||
!block->globOptData.liveVarSyms->Test(varSym->m_id) ||
(block->globOptData.liveFloat64Syms->Test(varSym->m_id) && valueInfo && valueInfo->IsLikelyFloat()))
{
Assert(block->globOptData.liveFloat64Syms->Test(varSym->m_id));
if(!lossy && !valueInfo->IsInt())
{
// Shouldn't try to do a lossless conversion from float64 to int32 when the value is not known to be an
// int. There are cases where we need more than two passes over loops to flush out all dependencies.
// It's possible for the loop prepass to think that a sym s1 remains an int because it acquires the
// value of another sym s2 that is an int in the prepass at that time. However, s2 can become a float
// later in the loop body, in which case s1 would become a float on the second iteration of the loop. By
// that time, we would have already committed to having s1 live as a lossless int on entry into the
// loop, and we end up having to compensate by doing a lossless conversion from float to int, which will
// need a bailout and will most likely bail out.
//
// If s2 becomes a var instead of a float, then the compensation is legal although not ideal. After
// enough bailouts, rejit would be triggered with aggressive int type spec turned off. For the
// float-to-int conversion though, there's no point in emitting a bailout because we already know that
// the value is a float and has high probability of bailing out (whereas a var has a chance to be a
// tagged int), and so currently lossless conversion from float to int with bailout is not supported.
//
// So, treating this case as a compile-time bailout. The exception will trigger the jit work item to be
// restarted with aggressive int type specialization disabled.
if(bailOutKind == IR::BailOutExpectingInteger)
{
Assert(IsSwitchOptEnabledForIntTypeSpec());
throw Js::RejitException(RejitReason::DisableSwitchOptExpectingInteger);
}
else
{
Assert(DoAggressiveIntTypeSpec());
if(PHASE_TRACE(Js::BailOutPhase, this->func))
{
char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
                            Output::Print(
                                _u("BailOut (compile-time): function: %s (%s) varSym: "),
                                this->func->GetJITFunctionBody()->GetDisplayName(),
                                this->func->GetDebugNumberSet(debugStringBuffer));
#if DBG_DUMP
varSym->Dump();
#else
Output::Print(_u("s%u"), varSym->m_id);
#endif
if(varSym->HasByteCodeRegSlot())
{
Output::Print(_u(" byteCodeReg: R%u"), varSym->GetByteCodeRegSlot());
}
Output::Print(_u(" (lossless conversion from float64 to int32)\n"));
Output::Flush();
}
if(!DoAggressiveIntTypeSpec())
{
// Aggressive int type specialization is already off for some reason. Prevent trying to rejit again
// because it won't help and the same thing will happen again. Just abort jitting this function.
if(PHASE_TRACE(Js::BailOutPhase, this->func))
{
Output::Print(_u(" Aborting JIT because AggressiveIntTypeSpec is already off\n"));
Output::Flush();
}
throw Js::OperationAbortedException();
}
throw Js::RejitException(RejitReason::AggressiveIntTypeSpecDisabled);
}
}
if(opnd->IsVar())
{
regSrc->SetType(TyFloat64);
regSrc->m_sym = varSym->GetFloat64EquivSym(this->func);
opcode = Js::OpCode::Conv_Prim;
}
else
{
Assert(regSrc->IsFloat64());
Assert(regSrc->m_sym->IsFloat64());
Assert(opcode == Js::OpCode::Conv_Prim);
}
}
}
GOPT_TRACE_OPND(regSrc, _u("Converting to int32\n"));
}
else if (toType == TyFloat64)
{
// float64
typeSpecSym = varSym->GetFloat64EquivSym(this->func);
if(!IsLoopPrePass() && typeSpecSym->m_requiresBailOnNotNumber && block->globOptData.IsFloat64TypeSpecialized(varSym))
{
// This conversion is already protected by a BailOutNumberOnly bailout (or at least it will be after the
// dead-store phase). Since 'requiresBailOnNotNumber' is not flow-based, change the value to definitely float.
if(valueInfo)
{
if(!valueInfo->IsNumber())
{
valueInfo = valueInfo->SpecializeToFloat64(alloc);
ChangeValueInfo(block, val, valueInfo);
opnd->SetValueType(valueInfo->Type());
}
}
else
{
val = NewGenericValue(ValueType::Float);
valueInfo = val->GetValueInfo();
block->globOptData.SetValue(val, varSym);
opnd->SetValueType(valueInfo->Type());
}
}
if(bailOutKind == IR::BailOutNumberOnly)
{
if(!IsLoopPrePass())
{
// Ensure that all bailout FromVars that generate a value for this type-specialized sym will bail out on any
// non-number value, even ones that have already been generated before. The dead-store pass will update the
// bailout kind on already-generated FromVars based on this bit.
typeSpecSym->m_requiresBailOnNotNumber = true;
}
}
else if(typeSpecSym->m_requiresBailOnNotNumber)
{
Assert(bailOutKind == IR::BailOutPrimitiveButString);
bailOutKind = IR::BailOutNumberOnly;
}
livenessBv = block->globOptData.liveFloat64Syms;
isLive = livenessBv->Test(varSym->m_id);
if (this->IsLoopPrePass())
{
if(!isLive)
{
livenessBv->Set(varSym->m_id);
}
if (this->OptIsInvariant(opnd, block, this->prePassLoop, val, false, true))
{
this->prePassLoop->forceFloat64SymsOnEntry->Set(varSym->m_id);
}
else
{
Sym *symStore = (valueInfo ? valueInfo->GetSymStore() : NULL);
if (symStore && symStore != varSym
&& this->OptIsInvariant(symStore, block, this->prePassLoop, block->globOptData.FindValue(symStore), false, true))
{
                        // If symStore is assigned to sym and we want sym to be type-specialized, force symStore to be
                        // specialized outside the loop as well.
this->prePassLoop->forceFloat64SymsOnEntry->Set(symStore->m_id);
}
}
if(bailOutKind == IR::BailOutNumberOnly)
{
if(valueInfo)
{
valueInfo = valueInfo->SpecializeToFloat64(alloc);
ChangeValueInfo(block, val, valueInfo);
}
else
{
val = NewGenericValue(ValueType::Float);
valueInfo = val->GetValueInfo();
block->globOptData.SetValue(val, varSym);
}
if(needReplaceSrc)
{
opnd->SetValueType(valueInfo->Type());
}
}
return instr;
}
if (!isLive && regSrc->IsVar())
{
if (!block->globOptData.liveVarSyms->Test(varSym->m_id) ||
(
block->globOptData.liveInt32Syms->Test(varSym->m_id) &&
!block->globOptData.liveLossyInt32Syms->Test(varSym->m_id) &&
valueInfo &&
valueInfo->IsLikelyInt()
))
{
Assert(block->globOptData.liveInt32Syms->Test(varSym->m_id));
Assert(!block->globOptData.liveLossyInt32Syms->Test(varSym->m_id)); // Shouldn't try to convert a lossy int32 to anything
regSrc->SetType(TyInt32);
regSrc->m_sym = varSym->GetInt32EquivSym(this->func);
opcode = Js::OpCode::Conv_Prim;
}
}
GOPT_TRACE_OPND(regSrc, _u("Converting to float64\n"));
}
#ifdef ENABLE_SIMDJS
else
{
// SIMD_JS
Assert(IRType_IsSimd128(toType));
// Get or create type-spec sym
typeSpecSym = varSym->GetSimd128EquivSym(toType, this->func);
if (!IsLoopPrePass() && block->globOptData.IsSimd128TypeSpecialized(toType, varSym))
{
                // Consider: Is this needed? Shouldn't this have been done at the previous FromVar, since the simd128 sym is alive?
if (valueInfo)
{
if (!valueInfo->IsSimd128(toType))
{
valueInfo = valueInfo->SpecializeToSimd128(toType, alloc);
ChangeValueInfo(block, val, valueInfo);
opnd->SetValueType(valueInfo->Type());
}
}
else
{
val = NewGenericValue(GetValueTypeFromIRType(toType));
valueInfo = val->GetValueInfo();
block->globOptData.SetValue(val, varSym);
opnd->SetValueType(valueInfo->Type());
}
}
livenessBv = block->globOptData.GetSimd128LivenessBV(toType);
isLive = livenessBv->Test(varSym->m_id);
if (this->IsLoopPrePass())
{
// FromVar Hoisting
                BVSparse<Memory::JitArenaAllocator> *forceSimd128SymsOnEntry =
                    toType == TySimd128F4 ? this->prePassLoop->forceSimd128F4SymsOnEntry : this->prePassLoop->forceSimd128I4SymsOnEntry;
if (!isLive)
{
livenessBv->Set(varSym->m_id);
}
                // Be aggressive with hoisting only if the value is always initialized to a SIMD type before entering the loop.
                // This reduces the chance that the FromVar gets executed while the specialized instruction in the loop is not,
                // leading to unnecessary, excessive bailouts.
if (val && !val->GetValueInfo()->HasBeenUndefined() && !val->GetValueInfo()->HasBeenNull() &&
this->OptIsInvariant(opnd, block, this->prePassLoop, val, false, true))
{
forceSimd128SymsOnEntry->Set(varSym->m_id);
}
else
{
Sym *symStore = (valueInfo ? valueInfo->GetSymStore() : NULL);
Value * value = symStore ? block->globOptData.FindValue(symStore) : nullptr;
if (symStore && symStore != varSym
&& value
&& !value->GetValueInfo()->HasBeenUndefined() && !value->GetValueInfo()->HasBeenNull()
&& this->OptIsInvariant(symStore, block, this->prePassLoop, value, true, true))
{
                        // If symStore is assigned to sym and we want sym to be type-specialized, force symStore to be
                        // specialized outside the loop as well.
forceSimd128SymsOnEntry->Set(symStore->m_id);
}
}
Assert(bailOutKind == IR::BailOutSimd128F4Only || bailOutKind == IR::BailOutSimd128I4Only);
                // We are in the loop prepass and haven't propagated the value info to the src yet. Do it now.
if (valueInfo)
{
valueInfo = valueInfo->SpecializeToSimd128(toType, alloc);
ChangeValueInfo(block, val, valueInfo);
}
else
{
val = NewGenericValue(GetValueTypeFromIRType(toType));
valueInfo = val->GetValueInfo();
block->globOptData.SetValue(val, varSym);
}
if (needReplaceSrc)
{
opnd->SetValueType(valueInfo->Type());
}
return instr;
}
GOPT_TRACE_OPND(regSrc, _u("Converting to Simd128\n"));
}
#endif
bool needLoad = false;
if (needReplaceSrc)
{
bool wasDead = regSrc->GetIsDead();
// needReplaceSrc means we are type specializing a use, and need to replace the src on the instr
if (!isLive)
{
needLoad = true;
// ReplaceSrc will delete it.
regSrc = regSrc->Copy(instr->m_func)->AsRegOpnd();
}
IR::RegOpnd * regNew = IR::RegOpnd::New(typeSpecSym, toType, instr->m_func);
if(valueInfo)
{
regNew->SetValueType(valueInfo->Type());
regNew->m_wasNegativeZeroPreventedByBailout = valueInfo->WasNegativeZeroPreventedByBailout();
}
regNew->SetIsDead(wasDead);
regNew->SetIsJITOptimizedReg(true);
this->CaptureByteCodeSymUses(instr);
if (indir == nullptr)
{
instr->ReplaceSrc(opnd, regNew);
}
else
{
indir->ReplaceIndexOpnd(regNew);
}
opnd = regNew;
if (!needLoad)
{
Assert(isLive);
return instr;
}
}
else
{
// We just need to insert a load of a type spec sym
if(isLive)
{
return instr;
}
// Insert it before the specified instruction
instr = insertBeforeInstr;
}
IR::RegOpnd *regDst = IR::RegOpnd::New(typeSpecSym, toType, instr->m_func);
bool isBailout = false;
bool isHoisted = false;
bool isInLandingPad = (block->next && !block->next->isDeleted && block->next->isLoopHeader);
if (isInLandingPad)
{
Loop *loop = block->next->loop;
Assert(loop && loop->landingPad == block);
Assert(loop->bailOutInfo);
}
if (opcode == Js::OpCode::FromVar)
{
if (toType == TyInt32)
{
Assert(valueInfo);
if (lossy)
{
if (!valueInfo->IsPrimitive() && !block->globOptData.IsTypeSpecialized(varSym))
{
// Lossy conversions to int32 on non-primitive values may have implicit calls to toString or valueOf, which
// may be overridden to have a side effect. The side effect needs to happen every time the conversion is
// supposed to happen, so the resulting lossy int32 value cannot be reused. Bail out on implicit calls.
Assert(DoLossyIntTypeSpec());
bailOutKind = IR::BailOutOnNotPrimitive;
isBailout = true;
}
}
else if (!valueInfo->IsInt())
{
// The operand is likely an int (hence the request to convert to int), so bail out if it's not an int. Only
// bail out if a lossless conversion to int is requested. Lossy conversions to int such as in (a | 0) don't
// need to bail out.
if (bailOutKind == IR::BailOutExpectingInteger)
{
Assert(IsSwitchOptEnabledForIntTypeSpec());
}
else
{
Assert(DoAggressiveIntTypeSpec());
}
isBailout = true;
}
}
else if (toType == TyFloat64 &&
(!valueInfo || !valueInfo->IsNumber()))
{
            // Bail out when converting vars to float if we can't prove they are numbers:
            //     x = str + float;  -> need to bail out if str is a string
            //
            //     x = obj * 0.1;
            //     y = obj * 0.2;    -> if obj has a valueOf, we'll only call valueOf once, on the FromVar conversion...
Assert(bailOutKind != IR::BailOutInvalid);
isBailout = true;
}
#ifdef ENABLE_SIMDJS
else if (IRType_IsSimd128(toType) &&
(!valueInfo || !valueInfo->IsSimd128(toType)))
{
            Assert((toType == TySimd128F4 && bailOutKind == IR::BailOutSimd128F4Only)
                || (toType == TySimd128I4 && bailOutKind == IR::BailOutSimd128I4Only));
isBailout = true;
}
#endif
}
if (isBailout)
{
if (isInLandingPad)
{
Loop *loop = block->next->loop;
this->EnsureBailTarget(loop);
instr = loop->bailOutInfo->bailOutInstr;
updateBlockLastInstr = false;
newInstr = IR::BailOutInstr::New(opcode, bailOutKind, loop->bailOutInfo, instr->m_func);
newInstr->SetDst(regDst);
newInstr->SetSrc1(regSrc);
}
else
{
newInstr = IR::BailOutInstr::New(opcode, regDst, regSrc, bailOutKind, instr, instr->m_func);
}
}
else
{
newInstr = IR::Instr::New(opcode, regDst, regSrc, instr->m_func);
}
newInstr->SetByteCodeOffset(instr);
instr->InsertBefore(newInstr);
if (updateBlockLastInstr)
{
block->SetLastInstr(newInstr);
}
regDst->SetIsJITOptimizedReg(true);
newInstr->GetSrc1()->AsRegOpnd()->SetIsJITOptimizedReg(true);
ValueInfo *const oldValueInfo = valueInfo;
if(valueInfo)
{
newInstr->GetSrc1()->SetValueType(valueInfo->Type());
}
if(isBailout)
{
Assert(opcode == Js::OpCode::FromVar);
if(toType == TyInt32)
{
Assert(valueInfo);
if(!lossy)
{
Assert(bailOutKind == IR::BailOutIntOnly || bailOutKind == IR::BailOutExpectingInteger);
valueInfo = valueInfo->SpecializeToInt32(alloc, isPerformingLoopBackEdgeCompensation);
ChangeValueInfo(nullptr, val, valueInfo);
int32 intConstantValue;
if(indir && needReplaceSrc && valueInfo->TryGetIntConstantValue(&intConstantValue))
{
// A likely-int value can have constant bounds due to conditional branches narrowing its range. Now that
// the sym has been proven to be an int, the likely-int value, after specialization, will be constant.
// Replace the index opnd in the indir with an offset.
Assert(opnd == indir->GetIndexOpnd());
Assert(indir->GetScale() == 0);
indir->UnlinkIndexOpnd()->Free(instr->m_func);
opnd = nullptr;
indir->SetOffset(intConstantValue);
}
}
}
else if (toType == TyFloat64)
{
if(bailOutKind == IR::BailOutNumberOnly)
{
if(valueInfo)
{
valueInfo = valueInfo->SpecializeToFloat64(alloc);
ChangeValueInfo(block, val, valueInfo);
}
else
{
val = NewGenericValue(ValueType::Float);
valueInfo = val->GetValueInfo();
block->globOptData.SetValue(val, varSym);
}
}
}
else
{
Assert(IRType_IsSimd128(toType));
if (valueInfo)
{
valueInfo = valueInfo->SpecializeToSimd128(toType, alloc);
ChangeValueInfo(block, val, valueInfo);
}
else
{
val = NewGenericValue(GetValueTypeFromIRType(toType));
valueInfo = val->GetValueInfo();
block->globOptData.SetValue(val, varSym);
}
}
}
if(valueInfo)
{
newInstr->GetDst()->SetValueType(valueInfo->Type());
if(needReplaceSrc && opnd)
{
opnd->SetValueType(valueInfo->Type());
}
}
if (block->loop)
{
Assert(!this->IsLoopPrePass());
isHoisted = this->TryHoistInvariant(newInstr, block, val, val, nullptr, false, lossy, false, bailOutKind);
}
if (isBailout)
{
if (!isHoisted && !isInLandingPad)
{
if(valueInfo)
{
// Since this is a pre-op bailout, the old value info should be used for the purposes of bailout. For
// instance, the value info could be LikelyInt but with a constant range. Once specialized to int, the value
// info would be an int constant. However, the int constant is only guaranteed if the value is actually an
// int, which this conversion is verifying, so bailout cannot assume the constant value.
if(oldValueInfo)
{
val->SetValueInfo(oldValueInfo);
}
else
{
block->globOptData.ClearSymValue(varSym);
}
}
// Fill in bail out info if the FromVar is a bailout instr, and it wasn't hoisted as invariant.
// If it was hoisted, the invariant code will fill out the bailout info with the loop landing pad bailout info.
this->FillBailOutInfo(block, newInstr->GetBailOutInfo());
if(valueInfo)
{
// Restore the new value info after filling the bailout info
if(oldValueInfo)
{
val->SetValueInfo(valueInfo);
}
else
{
block->globOptData.SetValue(val, varSym);
}
}
}
}
// Now that we've captured the liveness in the bailout info, we can mark this as live.
// This type specialized sym isn't live if the FromVar bails out.
livenessBv->Set(varSym->m_id);
if(toType == TyInt32)
{
if(lossy)
{
block->globOptData.liveLossyInt32Syms->Set(varSym->m_id);
}
else
{
block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id);
}
}
}
else
{
Assert(valueInfo);
if(opnd->IsRegOpnd() && valueInfo->IsInt())
{
// If two syms have the same value, one is lossy-int-specialized, and then the other is int-specialized, the value
// would have been updated to definitely int. Upon using the lossy-int-specialized sym later, it would be flagged as
// lossy while the value is definitely int. Since the bit-vectors are based on the sym and not the value, update the
// lossy state.
block->globOptData.liveLossyInt32Syms->Clear(opnd->AsRegOpnd()->m_sym->m_id);
if(toType == TyInt32)
{
lossy = false;
}
}
if (this->IsLoopPrePass())
{
if(opnd->IsRegOpnd())
{
StackSym *const sym = opnd->AsRegOpnd()->m_sym;
if(toType == TyInt32)
{
Assert(!sym->IsTypeSpec());
block->globOptData.liveInt32Syms->Set(sym->m_id);
if(lossy)
{
block->globOptData.liveLossyInt32Syms->Set(sym->m_id);
}
else
{
block->globOptData.liveLossyInt32Syms->Clear(sym->m_id);
}
}
else
{
Assert(toType == TyFloat64);
AnalysisAssert(instr);
StackSym *const varSym = sym->IsTypeSpec() ? sym->GetVarEquivSym(instr->m_func) : sym;
block->globOptData.liveFloat64Syms->Set(varSym->m_id);
}
}
return instr;
}
if (!needReplaceSrc)
{
instr = insertBeforeInstr;
}
IR::Opnd *constOpnd;
int32 intConstantValue;
if(valueInfo->TryGetIntConstantValue(&intConstantValue))
{
if(toType == TyInt32)
{
constOpnd = IR::IntConstOpnd::New(intConstantValue, TyInt32, instr->m_func);
}
else
{
Assert(toType == TyFloat64);
constOpnd = IR::FloatConstOpnd::New(static_cast<FloatConstType>(intConstantValue), TyFloat64, instr->m_func);
}
}
else if(valueInfo->IsFloatConstant())
{
const FloatConstType floatValue = valueInfo->AsFloatConstant()->FloatValue();
if(toType == TyInt32)
{
Assert(lossy);
constOpnd =
IR::IntConstOpnd::New(
Js::JavascriptMath::ToInt32(floatValue),
TyInt32,
instr->m_func);
}
else
{
Assert(toType == TyFloat64);
constOpnd = IR::FloatConstOpnd::New(floatValue, TyFloat64, instr->m_func);
}
}
else
{
Assert(opnd->IsVar());
Assert(opnd->IsAddrOpnd());
AssertMsg(opnd->AsAddrOpnd()->IsVar(), "We only expect to see addr that are var before lower.");
// Don't need to capture uses, we are only replacing an addr opnd
if(toType == TyInt32)
{
constOpnd = IR::IntConstOpnd::New(Js::TaggedInt::ToInt32(opnd->AsAddrOpnd()->m_address), TyInt32, instr->m_func);
}
else
{
Assert(toType == TyFloat64);
constOpnd = IR::FloatConstOpnd::New(Js::TaggedInt::ToDouble(opnd->AsAddrOpnd()->m_address), TyFloat64, instr->m_func);
}
}
if (toType == TyInt32)
{
if (needReplaceSrc)
{
CaptureByteCodeSymUses(instr);
if(indir)
{
Assert(opnd == indir->GetIndexOpnd());
Assert(indir->GetScale() == 0);
indir->UnlinkIndexOpnd()->Free(instr->m_func);
indir->SetOffset(constOpnd->AsIntConstOpnd()->AsInt32());
}
else
{
instr->ReplaceSrc(opnd, constOpnd);
}
}
else
{
StackSym *varSym = opnd->AsRegOpnd()->m_sym;
if(varSym->IsTypeSpec())
{
varSym = varSym->GetVarEquivSym(nullptr);
Assert(varSym);
}
if(block->globOptData.liveInt32Syms->TestAndSet(varSym->m_id))
{
Assert(!!block->globOptData.liveLossyInt32Syms->Test(varSym->m_id) == lossy);
}
else
{
if(lossy)
{
block->globOptData.liveLossyInt32Syms->Set(varSym->m_id);
}
StackSym *int32Sym = varSym->GetInt32EquivSym(instr->m_func);
IR::RegOpnd *int32Reg = IR::RegOpnd::New(int32Sym, TyInt32, instr->m_func);
int32Reg->SetIsJITOptimizedReg(true);
newInstr = IR::Instr::New(Js::OpCode::Ld_I4, int32Reg, constOpnd, instr->m_func);
newInstr->SetByteCodeOffset(instr);
instr->InsertBefore(newInstr);
if (updateBlockLastInstr)
{
block->SetLastInstr(newInstr);
}
}
}
}
else
{
StackSym *floatSym;
bool newFloatSym = false;
StackSym* varSym;
if (opnd->IsRegOpnd())
{
varSym = opnd->AsRegOpnd()->m_sym;
if (varSym->IsTypeSpec())
{
varSym = varSym->GetVarEquivSym(nullptr);
Assert(varSym);
}
floatSym = varSym->GetFloat64EquivSym(instr->m_func);
}
else
{
varSym = block->globOptData.GetCopyPropSym(nullptr, val);
if(!varSym)
{
// Clear the symstore to ensure it's set below to this new symbol
this->SetSymStoreDirect(val->GetValueInfo(), nullptr);
varSym = StackSym::New(TyVar, instr->m_func);
newFloatSym = true;
}
floatSym = varSym->GetFloat64EquivSym(instr->m_func);
}
IR::RegOpnd *floatReg = IR::RegOpnd::New(floatSym, TyFloat64, instr->m_func);
floatReg->SetIsJITOptimizedReg(true);
        // If the value is not live, load it.
if(!block->globOptData.liveFloat64Syms->TestAndSet(varSym->m_id))
{
newInstr = IR::Instr::New(Js::OpCode::LdC_F8_R8, floatReg, constOpnd, instr->m_func);
newInstr->SetByteCodeOffset(instr);
instr->InsertBefore(newInstr);
if (updateBlockLastInstr)
{
block->SetLastInstr(newInstr);
}
if(newFloatSym)
{
block->globOptData.SetValue(val, varSym);
}
// Src is always invariant, but check if the dst is, and then hoist.
if (block->loop &&
(
(newFloatSym && block->loop->CanHoistInvariants()) ||
this->OptIsInvariant(floatReg, block, block->loop, val, false, false)
))
{
Assert(!this->IsLoopPrePass());
this->OptHoistInvariant(newInstr, block, block->loop, val, val, nullptr, false);
}
}
if (needReplaceSrc)
{
CaptureByteCodeSymUses(instr);
instr->ReplaceSrc(opnd, floatReg);
}
}
return instr;
}
return newInstr;
}
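
// The *Dst helpers below retarget an instruction's destination to the appropriate
// type-spec equiv sym (outside the loop prepass) and update the liveness bit-vectors so
// that exactly one representation (var, int32, float64, or simd128) is live for the sym
// after the def.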
void
GlobOpt::ToVarRegOpnd(IR::RegOpnd *dst, BasicBlock *block)
{
ToVarStackSym(dst->m_sym, block);
}
void
GlobOpt::ToVarStackSym(StackSym *varSym, BasicBlock *block)
{
    // For asm.js there are mostly no var syms, so this also serves to check that we have the primary sym (not a type-spec sym).
Assert(!varSym->IsTypeSpec());
block->globOptData.liveVarSyms->Set(varSym->m_id);
block->globOptData.liveInt32Syms->Clear(varSym->m_id);
block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id);
block->globOptData.liveFloat64Syms->Clear(varSym->m_id);
#ifdef ENABLE_SIMDJS
// SIMD_JS
block->globOptData.liveSimd128F4Syms->Clear(varSym->m_id);
block->globOptData.liveSimd128I4Syms->Clear(varSym->m_id);
#endif
}
void
GlobOpt::ToInt32Dst(IR::Instr *instr, IR::RegOpnd *dst, BasicBlock *block)
{
StackSym *varSym = dst->m_sym;
Assert(!varSym->IsTypeSpec());
if (!this->IsLoopPrePass() && varSym->IsVar())
{
StackSym *int32Sym = varSym->GetInt32EquivSym(instr->m_func);
// Use UnlinkDst / SetDst to make sure isSingleDef is tracked properly,
// since we'll just be hammering the symbol.
dst = instr->UnlinkDst()->AsRegOpnd();
dst->m_sym = int32Sym;
dst->SetType(TyInt32);
instr->SetDst(dst);
}
block->globOptData.liveInt32Syms->Set(varSym->m_id);
block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id); // The store makes it lossless
block->globOptData.liveVarSyms->Clear(varSym->m_id);
block->globOptData.liveFloat64Syms->Clear(varSym->m_id);
#ifdef ENABLE_SIMDJS
// SIMD_JS
block->globOptData.liveSimd128F4Syms->Clear(varSym->m_id);
block->globOptData.liveSimd128I4Syms->Clear(varSym->m_id);
#endif
}
void
GlobOpt::ToUInt32Dst(IR::Instr *instr, IR::RegOpnd *dst, BasicBlock *block)
{
// We should be calling only for asmjs function
Assert(GetIsAsmJSFunc());
StackSym *varSym = dst->m_sym;
Assert(!varSym->IsTypeSpec());
block->globOptData.liveInt32Syms->Set(varSym->m_id);
block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id); // The store makes it lossless
block->globOptData.liveVarSyms->Clear(varSym->m_id);
block->globOptData.liveFloat64Syms->Clear(varSym->m_id);
#ifdef ENABLE_SIMDJS
// SIMD_JS
block->globOptData.liveSimd128F4Syms->Clear(varSym->m_id);
block->globOptData.liveSimd128I4Syms->Clear(varSym->m_id);
#endif
}
void
GlobOpt::ToFloat64Dst(IR::Instr *instr, IR::RegOpnd *dst, BasicBlock *block)
{
StackSym *varSym = dst->m_sym;
Assert(!varSym->IsTypeSpec());
if (!this->IsLoopPrePass() && varSym->IsVar())
{
StackSym *float64Sym = varSym->GetFloat64EquivSym(this->func);
// Use UnlinkDst / SetDst to make sure isSingleDef is tracked properly,
// since we'll just be hammering the symbol.
dst = instr->UnlinkDst()->AsRegOpnd();
dst->m_sym = float64Sym;
dst->SetType(TyFloat64);
instr->SetDst(dst);
}
block->globOptData.liveFloat64Syms->Set(varSym->m_id);
block->globOptData.liveVarSyms->Clear(varSym->m_id);
block->globOptData.liveInt32Syms->Clear(varSym->m_id);
block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id);
#ifdef ENABLE_SIMDJS
// SIMD_JS
block->globOptData.liveSimd128F4Syms->Clear(varSym->m_id);
block->globOptData.liveSimd128I4Syms->Clear(varSym->m_id);
#endif
}
#ifdef ENABLE_SIMDJS
// SIMD_JS
void
GlobOpt::ToSimd128Dst(IRType toType, IR::Instr *instr, IR::RegOpnd *dst, BasicBlock *block)
{
StackSym *varSym = dst->m_sym;
Assert(!varSym->IsTypeSpec());
BVSparse<JitArenaAllocator> * livenessBV = block->globOptData.GetSimd128LivenessBV(toType);
Assert(livenessBV);
if (!this->IsLoopPrePass() && varSym->IsVar())
{
StackSym *simd128Sym = varSym->GetSimd128EquivSym(toType, this->func);
// Use UnlinkDst / SetDst to make sure isSingleDef is tracked properly,
// since we'll just be hammering the symbol.
dst = instr->UnlinkDst()->AsRegOpnd();
dst->m_sym = simd128Sym;
dst->SetType(toType);
instr->SetDst(dst);
}
block->globOptData.liveFloat64Syms->Clear(varSym->m_id);
block->globOptData.liveVarSyms->Clear(varSym->m_id);
block->globOptData.liveInt32Syms->Clear(varSym->m_id);
block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id);
// SIMD_JS
block->globOptData.liveSimd128F4Syms->Clear(varSym->m_id);
block->globOptData.liveSimd128I4Syms->Clear(varSym->m_id);
livenessBV->Set(varSym->m_id);
}
#endif
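
// Constant-folding helpers. The SetIsConstFlag / CreateIntConstOpnd overloads let the
// templated Wasm fold routine below treat int32 and int64 operands uniformly; for unsigned
// int32 destinations the constant is zero-extended.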
static void SetIsConstFlag(StackSym* dstSym, int64 value)
{
Assert(dstSym);
dstSym->SetIsInt64Const();
}
static void SetIsConstFlag(StackSym* dstSym, int value)
{
Assert(dstSym);
dstSym->SetIsIntConst(value);
}
static IR::Opnd* CreateIntConstOpnd(IR::Instr* instr, int64 value)
{
return (IR::Opnd*)IR::Int64ConstOpnd::New(value, instr->GetDst()->GetType(), instr->m_func);
}
static IR::Opnd* CreateIntConstOpnd(IR::Instr* instr, int value)
{
IntConstType constVal;
if (instr->GetDst()->IsUnsigned())
{
// we should zero extend in case of uint
constVal = (uint32)value;
}
else
{
constVal = value;
}
return (IR::Opnd*)IR::IntConstOpnd::New(constVal, instr->GetDst()->GetType(), instr->m_func);
}
template <typename T>
IR::Opnd* GlobOpt::ReplaceWConst(IR::Instr **pInstr, T value, Value **pDstVal)
{
IR::Instr * &instr = *pInstr;
IR::Opnd * constOpnd = CreateIntConstOpnd(instr, value);
instr->ReplaceSrc1(constOpnd);
instr->FreeSrc2();
this->OptSrc(constOpnd, &instr);
IR::Opnd *dst = instr->GetDst();
StackSym *dstSym = dst->AsRegOpnd()->m_sym;
if (dstSym->IsSingleDef())
{
SetIsConstFlag(dstSym, value);
}
GOPT_TRACE_INSTR(instr, _u("Constant folding to %d: \n"), value);
*pDstVal = GetIntConstantValue(value, instr, dst);
return dst;
}
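
// OptConstFoldBinaryWasm: fold a binary op whose sources are both known constants into a
// direct constant load (used for Wasm/asm.js-style int32 and int64 ops).
//
// Illustrative example (not from the original source): for Wasm equivalent to
// (i64.add (i64.const 2) (i64.const 3)), the add is replaced by a load of the constant 5,
// and the single-def dst sym is flagged as an int64 constant.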
template <typename T>
bool GlobOpt::OptConstFoldBinaryWasm(
IR::Instr** pInstr,
const Value* src1,
const Value* src2,
Value **pDstVal)
{
IR::Instr* &instr = *pInstr;
if (!DoConstFold())
{
return false;
}
T src1IntConstantValue, src2IntConstantValue;
    if (!src1 || !src1->GetValueInfo()->TryGetIntConstantValue(&src1IntConstantValue, false) || // a bit sketchy: 'false' means likelyInt = false for int32,
        !src2 || !src2->GetValueInfo()->TryGetIntConstantValue(&src2IntConstantValue, false)    // and unsigned = false for int64
)
{
return false;
}
int64 tmpValueOut;
if (!instr->BinaryCalculatorT<T>(src1IntConstantValue, src2IntConstantValue, &tmpValueOut, func->GetJITFunctionBody()->IsWasmFunction()))
{
return false;
}
this->CaptureByteCodeSymUses(instr);
    IR::Opnd *dst = (instr->GetDst()->IsInt64()) ? // dst can be int32 for int64 comparison operators
ReplaceWConst(pInstr, tmpValueOut, pDstVal) :
ReplaceWConst(pInstr, (int)tmpValueOut, pDstVal);
instr->m_opcode = Js::OpCode::Ld_I4;
this->ToInt32Dst(instr, dst->AsRegOpnd(), this->currentBlock);
return true;
}
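
// OptConstFoldBinary: the int32 counterpart for JS code. For branches, the constant bounds
// of both sources are consulted; for non-branches, both sources must be single constants,
// and the folded result must fit in 32 bits (Math::FitsInDWord) or the fold is abandoned.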
bool
GlobOpt::OptConstFoldBinary(
IR::Instr * *pInstr,
const IntConstantBounds &src1IntConstantBounds,
const IntConstantBounds &src2IntConstantBounds,
Value **pDstVal)
{
IR::Instr * &instr = *pInstr;
int32 value;
IR::IntConstOpnd *constOpnd;
if (!DoConstFold())
{
return false;
}
int32 src1IntConstantValue = -1;
int32 src2IntConstantValue = -1;
int32 src1MaxIntConstantValue = -1;
int32 src2MaxIntConstantValue = -1;
int32 src1MinIntConstantValue = -1;
int32 src2MinIntConstantValue = -1;
if (instr->IsBranchInstr())
{
src1MinIntConstantValue = src1IntConstantBounds.LowerBound();
src1MaxIntConstantValue = src1IntConstantBounds.UpperBound();
src2MinIntConstantValue = src2IntConstantBounds.LowerBound();
src2MaxIntConstantValue = src2IntConstantBounds.UpperBound();
}
else if (src1IntConstantBounds.IsConstant() && src2IntConstantBounds.IsConstant())
{
src1IntConstantValue = src1IntConstantBounds.LowerBound();
src2IntConstantValue = src2IntConstantBounds.LowerBound();
}
else
{
return false;
}
IntConstType tmpValueOut;
if (!instr->BinaryCalculator(src1IntConstantValue, src2IntConstantValue, &tmpValueOut, TyInt32)
|| !Math::FitsInDWord(tmpValueOut))
{
return false;
}
value = (int32)tmpValueOut;
this->CaptureByteCodeSymUses(instr);
constOpnd = IR::IntConstOpnd::New(value, TyInt32, instr->m_func);
instr->ReplaceSrc1(constOpnd);
instr->FreeSrc2();
this->OptSrc(constOpnd, &instr);
IR::Opnd *dst = instr->GetDst();
Assert(dst->IsRegOpnd());
StackSym *dstSym = dst->AsRegOpnd()->m_sym;
if (dstSym->IsSingleDef())
{
dstSym->SetIsIntConst(value);
}
GOPT_TRACE_INSTR(instr, _u("Constant folding to %d: \n"), value);
*pDstVal = GetIntConstantValue(value, instr, dst);
if (IsTypeSpecPhaseOff(this->func))
{
instr->m_opcode = Js::OpCode::LdC_A_I4;
this->ToVarRegOpnd(dst->AsRegOpnd(), this->currentBlock);
}
else
{
instr->m_opcode = Js::OpCode::Ld_I4;
this->ToInt32Dst(instr, dst->AsRegOpnd(), this->currentBlock);
}
// If this is an induction variable, then treat it the way the prepass would have if it had seen
// the assignment and the resulting change to the value number, and mark it as indeterminate.
for (Loop * loop = this->currentBlock->loop; loop; loop = loop->parent)
{
InductionVariable *iv = nullptr;
if (loop->inductionVariables && loop->inductionVariables->TryGetReference(dstSym->m_id, &iv))
{
iv->SetChangeIsIndeterminate();
}
}
return true;
}
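
// OptConstFoldBr: a branch whose condition has been folded to a constant becomes either an
// unconditional Br (condition known true) or a Nop (known false), and the edge to the dead
// successor is removed from the flow graph; e.g., a comparison of two constants folds the
// conditional branch so the untaken side loses this predecessor edge and may become
// unreachable.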
void
GlobOpt::OptConstFoldBr(bool test, IR::Instr *instr, Value * src1Val, Value * src2Val)
{
GOPT_TRACE_INSTR(instr, _u("Constant folding to branch: "));
BasicBlock *deadBlock;
if (src1Val)
{
this->ToInt32(instr, instr->GetSrc1(), this->currentBlock, src1Val, nullptr, false);
}
if (src2Val)
{
this->ToInt32(instr, instr->GetSrc2(), this->currentBlock, src2Val, nullptr, false);
}
this->CaptureByteCodeSymUses(instr);
if (test)
{
instr->m_opcode = Js::OpCode::Br;
instr->FreeSrc1();
if(instr->GetSrc2())
{
instr->FreeSrc2();
}
deadBlock = instr->m_next->AsLabelInstr()->GetBasicBlock();
}
else
{
AssertMsg(instr->m_next->IsLabelInstr(), "Next instr of branch should be a label...");
if(instr->AsBranchInstr()->IsMultiBranch())
{
return;
}
deadBlock = instr->AsBranchInstr()->GetTarget()->GetBasicBlock();
instr->FreeSrc1();
if(instr->GetSrc2())
{
instr->FreeSrc2();
}
instr->m_opcode = Js::OpCode::Nop;
}
// Loop back edge: we would have already decremented data use count for the tail block when we processed the loop header.
if (!(this->currentBlock->loop && this->currentBlock->loop->GetHeadBlock() == deadBlock))
{
this->currentBlock->DecrementDataUseCount();
}
this->currentBlock->RemoveDeadSucc(deadBlock, this->func->m_fg);
if (deadBlock->GetPredList()->Count() == 0)
{
deadBlock->SetDataUseCount(0);
}
}
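
// Value-type maintenance: ChangeValueType / ChangeValueInfo swap in a narrower or wider
// type on a value while keeping the call-kill tracking (valuesToKillOnCalls) consistent
// via TrackValueInfoChangeForKills.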
void
GlobOpt::ChangeValueType(
BasicBlock *const block,
Value *const value,
const ValueType newValueType,
const bool preserveSubclassInfo,
const bool allowIncompatibleType) const
{
Assert(value);
// Why are we trying to change the value type of the type sym value? Asserting here to make sure we don't deep copy the type sym's value info.
Assert(!value->GetValueInfo()->IsJsType());
ValueInfo *const valueInfo = value->GetValueInfo();
const ValueType valueType(valueInfo->Type());
if(valueType == newValueType && (preserveSubclassInfo || valueInfo->IsGeneric()))
{
return;
}
// ArrayValueInfo has information specific to the array type, so make sure that doesn't change
    Assert(
        !preserveSubclassInfo ||
        !valueInfo->IsArrayValueInfo() ||
        (newValueType.IsObject() && newValueType.GetObjectType() == valueInfo->GetObjectType()));
Assert(!valueInfo->GetSymStore() || !valueInfo->GetSymStore()->IsStackSym() || !valueInfo->GetSymStore()->AsStackSym()->IsFromByteCodeConstantTable());
ValueInfo *const newValueInfo =
preserveSubclassInfo
? valueInfo->Copy(alloc)
: valueInfo->CopyWithGenericStructureKind(alloc);
newValueInfo->Type() = newValueType;
ChangeValueInfo(block, value, newValueInfo, allowIncompatibleType);
}
void
GlobOpt::ChangeValueInfo(BasicBlock *const block, Value *const value, ValueInfo *const newValueInfo, const bool allowIncompatibleType, const bool compensated) const
{
Assert(value);
Assert(newValueInfo);
// The value type must be changed to something more specific or something more generic. For instance, it would be changed to
// something more specific if the current value type is LikelyArray and checks have been done to ensure that it's an array,
// and it would be changed to something more generic if a call kills the Array value type and it must be treated as
// LikelyArray going forward.
    // There are cases where we change the type because of differing profile information, and because of rejit, this
    // profile information may conflict. We need to allow incompatible types in those cases; however, the old type
    // should be indefinite.
Assert((allowIncompatibleType && !value->GetValueInfo()->IsDefinite()) ||
AreValueInfosCompatible(newValueInfo, value->GetValueInfo()));
// ArrayValueInfo has information specific to the array type, so make sure that doesn't change
Assert(
!value->GetValueInfo()->IsArrayValueInfo() ||
!newValueInfo->IsArrayValueInfo() ||
newValueInfo->GetObjectType() == value->GetValueInfo()->GetObjectType());
if(block)
{
TrackValueInfoChangeForKills(block, value, newValueInfo, compensated);
}
value->SetValueInfo(newValueInfo);
}
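
// AreValueInfosCompatible: the compatibility relation used by the assert in
// ChangeValueInfo. Two value infos are compatible when one definite type is a subset of
// the other, with carve-outs for the aggressive assumptions that int/float/SIMD type spec
// and the loop prepass are allowed to make.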
bool
GlobOpt::AreValueInfosCompatible(const ValueInfo *const v0, const ValueInfo *const v1) const
{
Assert(v0);
Assert(v1);
if(v0->IsUninitialized() || v1->IsUninitialized())
{
return true;
}
const bool doAggressiveIntTypeSpec = DoAggressiveIntTypeSpec();
if(doAggressiveIntTypeSpec && (v0->IsInt() || v1->IsInt()))
{
        // Int specialization in some uncommon loop cases involving dependencies needs to allow specializing values of
        // arbitrary types, even values that are definitely not int, to compensate for aggressive assumptions made by a
        // loop prepass.
return true;
}
    if (v0->Type().IsMixedTypedArrayPair(v1->Type()) || v1->Type().IsMixedTypedArrayPair(v0->Type()))
{
return true;
}
const bool doFloatTypeSpec = DoFloatTypeSpec();
if(doFloatTypeSpec && (v0->IsFloat() || v1->IsFloat()))
{
// Float specialization allows specializing values of arbitrary types, even values that are definitely not float
return true;
}
#ifdef ENABLE_SIMDJS
// SIMD_JS
if (SIMD128_TYPE_SPEC_FLAG && v0->Type().IsSimd128())
{
// We only type-spec Undefined values, Objects (possibly merged SIMD values), or actual SIMD values.
if (v1->Type().IsLikelyUndefined() || v1->Type().IsLikelyNull())
{
return true;
}
if (v1->Type().IsLikelyObject() && v1->Type().GetObjectType() == ObjectType::Object)
{
return true;
}
if (v1->Type().IsSimd128())
{
return v0->Type().GetObjectType() == v1->Type().GetObjectType();
}
}
#endif
const bool doArrayMissingValueCheckHoist = DoArrayMissingValueCheckHoist();
const bool doNativeArrayTypeSpec = DoNativeArrayTypeSpec();
const auto AreValueTypesCompatible = [=](const ValueType t0, const ValueType t1)
{
return
t0.IsSubsetOf(t1, doAggressiveIntTypeSpec, doFloatTypeSpec, doArrayMissingValueCheckHoist, doNativeArrayTypeSpec) ||
t1.IsSubsetOf(t0, doAggressiveIntTypeSpec, doFloatTypeSpec, doArrayMissingValueCheckHoist, doNativeArrayTypeSpec);
};
const ValueType t0(v0->Type().ToDefinite()), t1(v1->Type().ToDefinite());
if(t0.IsLikelyObject() && t1.IsLikelyObject())
{
// Check compatibility for the primitive portions and the object portions of the value types separately
if(AreValueTypesCompatible(t0.ToDefiniteObject(), t1.ToDefiniteObject()) &&
(
!t0.HasBeenPrimitive() ||
!t1.HasBeenPrimitive() ||
AreValueTypesCompatible(t0.ToDefinitePrimitiveSubset(), t1.ToDefinitePrimitiveSubset())
))
{
return true;
}
}
else if(AreValueTypesCompatible(t0, t1))
{
return true;
}
const FloatConstantValueInfo *floatConstantValueInfo;
const ValueInfo *likelyIntValueinfo;
if(v0->IsFloatConstant() && v1->IsLikelyInt())
{
floatConstantValueInfo = v0->AsFloatConstant();
likelyIntValueinfo = v1;
}
else if(v0->IsLikelyInt() && v1->IsFloatConstant())
{
floatConstantValueInfo = v1->AsFloatConstant();
likelyIntValueinfo = v0;
}
else
{
return false;
}
    // A float constant value whose value is actually an int is a subset of a likely-int value.
    // Ideally, we should create an int constant value for this up front, such that IsInt() also returns true. There
    // were other issues with that; we should see whether it can be done.
int32 int32Value;
return
Js::JavascriptNumber::TryGetInt32Value(floatConstantValueInfo->FloatValue(), &int32Value) &&
(!likelyIntValueinfo->IsLikelyTaggedInt() || !Js::TaggedInt::IsOverflow(int32Value));
}
#if DBG
void
GlobOpt::VerifyArrayValueInfoForTracking(
const ValueInfo *const valueInfo,
const bool isJsArray,
const BasicBlock *const block,
const bool ignoreKnownImplicitCalls) const
{
Assert(valueInfo);
Assert(valueInfo->IsAnyOptimizedArray());
Assert(isJsArray == valueInfo->IsArrayOrObjectWithArray());
Assert(!isJsArray == valueInfo->IsOptimizedTypedArray());
Assert(block);
Loop *implicitCallsLoop;
if(block->next && !block->next->isDeleted && block->next->isLoopHeader)
{
// Since a loop's landing pad does not have user code, determine whether disabling implicit calls is allowed in the
// landing pad based on the loop for which this block is the landing pad.
implicitCallsLoop = block->next->loop;
Assert(implicitCallsLoop);
Assert(implicitCallsLoop->landingPad == block);
}
else
{
implicitCallsLoop = block->loop;
}
Assert(
!isJsArray ||
DoArrayCheckHoist(valueInfo->Type(), implicitCallsLoop) ||
(
ignoreKnownImplicitCalls &&
!(implicitCallsLoop ? ImplicitCallFlagsAllowOpts(implicitCallsLoop) : ImplicitCallFlagsAllowOpts(func))
));
Assert(!(isJsArray && valueInfo->HasNoMissingValues() && !DoArrayMissingValueCheckHoist()));
Assert(
!(
valueInfo->IsArrayValueInfo() &&
(
valueInfo->AsArrayValueInfo()->HeadSegmentSym() ||
valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym()
) &&
!DoArraySegmentHoist(valueInfo->Type())
));
#if 0
// We can't assert here that there is only a head segment length sym if hoisting is allowed in the current block,
// because we may have propagated the sym forward out of a loop, and hoisting may be allowed inside but not
// outside the loop.
Assert(
isJsArray ||
!valueInfo->IsArrayValueInfo() ||
!valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym() ||
DoTypedArraySegmentLengthHoist(implicitCallsLoop) ||
ignoreKnownImplicitCalls ||
(implicitCallsLoop ? ImplicitCallFlagsAllowOpts(implicitCallsLoop) : ImplicitCallFlagsAllowOpts(func))
);
#endif
Assert(
!(
isJsArray &&
valueInfo->IsArrayValueInfo() &&
valueInfo->AsArrayValueInfo()->LengthSym() &&
!DoArrayLengthHoist()
));
}
#endif
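
// Kill tracking: a call can transform a JS array (e.g. into an ES5 array) or detach a
// typed array's buffer, so values with definite array types (or tracked head segment
// lengths) are recorded in valuesToKillOnCalls and demoted to "likely" types when a
// killing instruction is encountered.
//
// Illustrative example (not from the original source):
//   a = [1, 2, 3];  // value type: definite Array
//   f();            // f may convert 'a' to an ES5 array -> demoted to LikelyArray
//   a[0] = 1;       // the array type must be re-checked (or bail out) after the call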
void
GlobOpt::TrackNewValueForKills(Value *const value)
{
Assert(value);
if(!value->GetValueInfo()->IsAnyOptimizedArray())
{
return;
}
DoTrackNewValueForKills(value);
}
void
GlobOpt::DoTrackNewValueForKills(Value *const value)
{
Assert(value);
ValueInfo *const valueInfo = value->GetValueInfo();
Assert(valueInfo->IsAnyOptimizedArray());
Assert(!valueInfo->IsArrayValueInfo());
// The value and value info here are new, so it's okay to modify the value info in-place
Assert(!valueInfo->GetSymStore());
const bool isJsArray = valueInfo->IsArrayOrObjectWithArray();
Assert(!isJsArray == valueInfo->IsOptimizedTypedArray());
Loop *implicitCallsLoop;
if(currentBlock->next && !currentBlock->next->isDeleted && currentBlock->next->isLoopHeader)
{
// Since a loop's landing pad does not have user code, determine whether disabling implicit calls is allowed in the
// landing pad based on the loop for which this block is the landing pad.
implicitCallsLoop = currentBlock->next->loop;
Assert(implicitCallsLoop);
Assert(implicitCallsLoop->landingPad == currentBlock);
}
else
{
implicitCallsLoop = currentBlock->loop;
}
if(isJsArray)
{
if(!DoArrayCheckHoist(valueInfo->Type(), implicitCallsLoop))
{
// Array opts are disabled for this value type, so treat it as an indefinite value type going forward
valueInfo->Type() = valueInfo->Type().ToLikely();
return;
}
if(valueInfo->HasNoMissingValues() && !DoArrayMissingValueCheckHoist())
{
valueInfo->Type() = valueInfo->Type().SetHasNoMissingValues(false);
}
}
#if DBG
VerifyArrayValueInfoForTracking(valueInfo, isJsArray, currentBlock);
#endif
if(!isJsArray)
{
return;
}
// Can't assume going forward that it will definitely be an array without disabling implicit calls, because the
// array may be transformed into an ES5 array. Since array opts are enabled, implicit calls can be disabled, and we can
// treat it as a definite value type going forward, but the value needs to be tracked so that something like a call can
// revert the value type to a likely version.
CurrentBlockData()->valuesToKillOnCalls->Add(value);
}
void
GlobOpt::TrackCopiedValueForKills(Value *const value)
{
Assert(value);
if(!value->GetValueInfo()->IsAnyOptimizedArray())
{
return;
}
DoTrackCopiedValueForKills(value);
}
void
GlobOpt::DoTrackCopiedValueForKills(Value *const value)
{
Assert(value);
ValueInfo *const valueInfo = value->GetValueInfo();
Assert(valueInfo->IsAnyOptimizedArray());
const bool isJsArray = valueInfo->IsArrayOrObjectWithArray();
Assert(!isJsArray == valueInfo->IsOptimizedTypedArray());
#if DBG
VerifyArrayValueInfoForTracking(valueInfo, isJsArray, currentBlock);
#endif
if(!isJsArray && !(valueInfo->IsArrayValueInfo() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym()))
{
return;
}
// Can't assume going forward that it will definitely be an array without disabling implicit calls, because the
// array may be transformed into an ES5 array. Since array opts are enabled, implicit calls can be disabled, and we can
// treat it as a definite value type going forward, but the value needs to be tracked so that something like a call can
// revert the value type to a likely version.
CurrentBlockData()->valuesToKillOnCalls->Add(value);
}
void
GlobOpt::TrackMergedValueForKills(
Value *const value,
GlobOptBlockData *const blockData,
BVSparse<JitArenaAllocator> *const mergedValueTypesTrackedForKills) const
{
Assert(value);
if(!value->GetValueInfo()->IsAnyOptimizedArray())
{
return;
}
DoTrackMergedValueForKills(value, blockData, mergedValueTypesTrackedForKills);
}
void
GlobOpt::DoTrackMergedValueForKills(
Value *const value,
GlobOptBlockData *const blockData,
BVSparse<JitArenaAllocator> *const mergedValueTypesTrackedForKills) const
{
Assert(value);
Assert(blockData);
ValueInfo *valueInfo = value->GetValueInfo();
Assert(valueInfo->IsAnyOptimizedArray());
const bool isJsArray = valueInfo->IsArrayOrObjectWithArray();
Assert(!isJsArray == valueInfo->IsOptimizedTypedArray());
#if DBG
VerifyArrayValueInfoForTracking(valueInfo, isJsArray, currentBlock, true);
#endif
if(!isJsArray && !(valueInfo->IsArrayValueInfo() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym()))
{
return;
}
// Can't assume going forward that it will definitely be an array without disabling implicit calls, because the
// array may be transformed into an ES5 array. Since array opts are enabled, implicit calls can be disabled, and we can
// treat it as a definite value type going forward, but the value needs to be tracked so that something like a call can
// revert the value type to a likely version.
if(!mergedValueTypesTrackedForKills || !mergedValueTypesTrackedForKills->TestAndSet(value->GetValueNumber()))
{
blockData->valuesToKillOnCalls->Add(value);
}
}
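// Keeps the block's valuesToKillOnCalls set in sync when a value's ValueInfo is changed: the value is added when the
// new info becomes trackable for kills (a JS array, or a typed array with a head segment length sym) and removed when
// it no longer is.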
void
GlobOpt::TrackValueInfoChangeForKills(BasicBlock *const block, Value *const value, ValueInfo *const newValueInfo, const bool compensated) const
{
Assert(block);
Assert(value);
Assert(newValueInfo);
ValueInfo *const oldValueInfo = value->GetValueInfo();
#if DBG
if(oldValueInfo->IsAnyOptimizedArray())
{
VerifyArrayValueInfoForTracking(oldValueInfo, oldValueInfo->IsArrayOrObjectWithArray(), block, compensated);
}
#endif
const bool trackOldValueInfo =
oldValueInfo->IsArrayOrObjectWithArray() ||
(
oldValueInfo->IsOptimizedTypedArray() &&
oldValueInfo->IsArrayValueInfo() &&
oldValueInfo->AsArrayValueInfo()->HeadSegmentLengthSym()
);
Assert(trackOldValueInfo == block->globOptData.valuesToKillOnCalls->ContainsKey(value));
#if DBG
if(newValueInfo->IsAnyOptimizedArray())
{
VerifyArrayValueInfoForTracking(newValueInfo, newValueInfo->IsArrayOrObjectWithArray(), block, compensated);
}
#endif
const bool trackNewValueInfo =
newValueInfo->IsArrayOrObjectWithArray() ||
(
newValueInfo->IsOptimizedTypedArray() &&
newValueInfo->IsArrayValueInfo() &&
newValueInfo->AsArrayValueInfo()->HeadSegmentLengthSym()
);
if(trackOldValueInfo == trackNewValueInfo)
{
return;
}
if(trackNewValueInfo)
{
block->globOptData.valuesToKillOnCalls->Add(value);
}
else
{
block->globOptData.valuesToKillOnCalls->Remove(value);
}
}
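// Processes the array kills implied by a single instruction against the values tracked for kills on calls. In a loop
// prepass the kills are also merged into the root loop's jsArrayKills. Killed value types are reverted to likely
// versions, and killed head segment / head segment length / length syms are stripped from array value infos, so that
// the corresponding checks and loads are redone.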
void
GlobOpt::ProcessValueKills(IR::Instr *const instr)
{
Assert(instr);
ValueSet *const valuesToKillOnCalls = CurrentBlockData()->valuesToKillOnCalls;
if(!IsLoopPrePass() && valuesToKillOnCalls->Count() == 0)
{
return;
}
const JsArrayKills kills = CheckJsArrayKills(instr);
Assert(!kills.KillsArrayHeadSegments() || kills.KillsArrayHeadSegmentLengths());
if(IsLoopPrePass())
{
rootLoopPrePass->jsArrayKills = rootLoopPrePass->jsArrayKills.Merge(kills);
Assert(
!rootLoopPrePass->parent ||
rootLoopPrePass->jsArrayKills.AreSubsetOf(rootLoopPrePass->parent->jsArrayKills));
if(kills.KillsAllArrays())
{
rootLoopPrePass->needImplicitCallBailoutChecksForJsArrayCheckHoist = false;
}
if(valuesToKillOnCalls->Count() == 0)
{
return;
}
}
if(kills.KillsAllArrays())
{
Assert(kills.KillsTypedArrayHeadSegmentLengths());
// - Calls need to kill the value types of values in the following list. For instance, calls can transform a JS array
// into an ES5 array, so any definitely-array value types need to be killed. Also, VirtualTypeArrays do not have
// bounds checks; this can be problematic if the array is detached, so check to ensure that it is a virtual array.
// Update the value types to likely versions to ensure that a bailout asserting the array type is generated.
// - Calls also need to kill typed array head segment lengths. A typed array's array buffer may be transferred to a web
// worker, in which case the typed array's length is set to zero.
for(auto it = valuesToKillOnCalls->GetIterator(); it.IsValid(); it.MoveNext())
{
Value *const value = it.CurrentValue();
ValueInfo *const valueInfo = value->GetValueInfo();
Assert(
valueInfo->IsArrayOrObjectWithArray() ||
valueInfo->IsOptimizedTypedArray() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym());
if (valueInfo->IsArrayOrObjectWithArray() || valueInfo->IsOptimizedVirtualTypedArray())
{
ChangeValueType(nullptr, value, valueInfo->Type().ToLikely(), false);
continue;
}
ChangeValueInfo(
nullptr,
value,
valueInfo->AsArrayValueInfo()->Copy(alloc, true, false /* copyHeadSegmentLength */, true));
}
valuesToKillOnCalls->Clear();
return;
}
if(kills.KillsArraysWithNoMissingValues())
{
// Some operations may kill arrays with no missing values in unlikely circumstances. Convert their value types to likely
// versions so that the checks have to be redone.
for(auto it = valuesToKillOnCalls->GetIteratorWithRemovalSupport(); it.IsValid(); it.MoveNext())
{
Value *const value = it.CurrentValue();
ValueInfo *const valueInfo = value->GetValueInfo();
Assert(
valueInfo->IsArrayOrObjectWithArray() ||
valueInfo->IsOptimizedTypedArray() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym());
if(!valueInfo->IsArrayOrObjectWithArray() || !valueInfo->HasNoMissingValues())
{
continue;
}
ChangeValueType(nullptr, value, valueInfo->Type().ToLikely(), false);
it.RemoveCurrent();
}
}
if(kills.KillsNativeArrays())
{
// Some operations may kill native arrays in (what should be) unlikely circumstances. Convert their value types to
// likely versions so that the checks have to be redone.
for(auto it = valuesToKillOnCalls->GetIteratorWithRemovalSupport(); it.IsValid(); it.MoveNext())
{
Value *const value = it.CurrentValue();
ValueInfo *const valueInfo = value->GetValueInfo();
Assert(
valueInfo->IsArrayOrObjectWithArray() ||
valueInfo->IsOptimizedTypedArray() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym());
if(!valueInfo->IsArrayOrObjectWithArray() || valueInfo->HasVarElements())
{
continue;
}
ChangeValueType(nullptr, value, valueInfo->Type().ToLikely(), false);
it.RemoveCurrent();
}
}
const bool likelyKillsJsArraysWithNoMissingValues = IsOperationThatLikelyKillsJsArraysWithNoMissingValues(instr);
if(!kills.KillsArrayHeadSegmentLengths())
{
Assert(!kills.KillsArrayHeadSegments());
if(!likelyKillsJsArraysWithNoMissingValues && !kills.KillsArrayLengths())
{
return;
}
}
for(auto it = valuesToKillOnCalls->GetIterator(); it.IsValid(); it.MoveNext())
{
Value *const value = it.CurrentValue();
ValueInfo *valueInfo = value->GetValueInfo();
Assert(
valueInfo->IsArrayOrObjectWithArray() ||
valueInfo->IsOptimizedTypedArray() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym());
if(!valueInfo->IsArrayOrObjectWithArray())
{
continue;
}
if(likelyKillsJsArraysWithNoMissingValues && valueInfo->HasNoMissingValues())
{
ChangeValueType(nullptr, value, valueInfo->Type().SetHasNoMissingValues(false), true);
valueInfo = value->GetValueInfo();
}
if(!valueInfo->IsArrayValueInfo())
{
continue;
}
ArrayValueInfo *const arrayValueInfo = valueInfo->AsArrayValueInfo();
const bool removeHeadSegment = kills.KillsArrayHeadSegments() && arrayValueInfo->HeadSegmentSym();
const bool removeHeadSegmentLength = kills.KillsArrayHeadSegmentLengths() && arrayValueInfo->HeadSegmentLengthSym();
const bool removeLength = kills.KillsArrayLengths() && arrayValueInfo->LengthSym();
if(removeHeadSegment || removeHeadSegmentLength || removeLength)
{
ChangeValueInfo(
nullptr,
value,
arrayValueInfo->Copy(alloc, !removeHeadSegment, !removeHeadSegmentLength, !removeLength));
valueInfo = value->GetValueInfo();
}
}
}
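// Block-level counterpart of ProcessValueKills: if implicit calls cannot be disabled for the block's loop (or for the
// function), all tracked array values are demoted - JS array value types become likely versions and typed arrays lose
// their head segment length syms - and the tracking set is cleared. In a root-loop prepass this also marks the loop
// chain as killing all arrays.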
void
GlobOpt::ProcessValueKills(BasicBlock *const block, GlobOptBlockData *const blockData)
{
Assert(block);
Assert(blockData);
ValueSet *const valuesToKillOnCalls = blockData->valuesToKillOnCalls;
if(!IsLoopPrePass() && valuesToKillOnCalls->Count() == 0)
{
return;
}
// If the current block or loop has implicit calls, kill all definitely-array value types, as using that info will cause
// implicit calls to be disabled, resulting in unnecessary bailouts
const bool killValuesOnImplicitCalls =
(block->loop ? !this->ImplicitCallFlagsAllowOpts(block->loop) : !this->ImplicitCallFlagsAllowOpts(func));
if (!killValuesOnImplicitCalls)
{
return;
}
if(IsLoopPrePass() && block->loop == rootLoopPrePass)
{
AnalysisAssert(rootLoopPrePass);
for (Loop * loop = rootLoopPrePass; loop != nullptr; loop = loop->parent)
{
loop->jsArrayKills.SetKillsAllArrays();
}
Assert(!rootLoopPrePass->parent || rootLoopPrePass->jsArrayKills.AreSubsetOf(rootLoopPrePass->parent->jsArrayKills));
if(valuesToKillOnCalls->Count() == 0)
{
return;
}
}
for(auto it = valuesToKillOnCalls->GetIterator(); it.IsValid(); it.MoveNext())
{
Value *const value = it.CurrentValue();
ValueInfo *const valueInfo = value->GetValueInfo();
Assert(
valueInfo->IsArrayOrObjectWithArray() ||
valueInfo->IsOptimizedTypedArray() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym());
if(valueInfo->IsArrayOrObjectWithArray())
{
ChangeValueType(nullptr, value, valueInfo->Type().ToLikely(), false);
continue;
}
ChangeValueInfo(
nullptr,
value,
valueInfo->AsArrayValueInfo()->Copy(alloc, true, false /* copyHeadSegmentLength */, true));
}
valuesToKillOnCalls->Clear();
}
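// After back-edge data is merged into a loop header, demotes tracked array values that the loop kills, since
// compensation code is not added on back-edges: killed JS array value types revert to likely versions, typed arrays
// lose their head segment length syms, and killed segment/length syms are stripped from JS array value infos.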
void
GlobOpt::ProcessValueKillsForLoopHeaderAfterBackEdgeMerge(BasicBlock *const block, GlobOptBlockData *const blockData)
{
Assert(block);
Assert(block->isLoopHeader);
Assert(blockData);
ValueSet *const valuesToKillOnCalls = blockData->valuesToKillOnCalls;
if(valuesToKillOnCalls->Count() == 0)
{
return;
}
const JsArrayKills loopKills(block->loop->jsArrayKills);
for(auto it = valuesToKillOnCalls->GetIteratorWithRemovalSupport(); it.IsValid(); it.MoveNext())
{
Value *const value = it.CurrentValue();
ValueInfo *valueInfo = value->GetValueInfo();
Assert(
valueInfo->IsArrayOrObjectWithArray() ||
valueInfo->IsOptimizedTypedArray() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym());
const bool isJsArray = valueInfo->IsArrayOrObjectWithArray();
Assert(!isJsArray == valueInfo->IsOptimizedTypedArray());
if(isJsArray ? loopKills.KillsValueType(valueInfo->Type()) : loopKills.KillsTypedArrayHeadSegmentLengths())
{
// Hoisting array checks and other related things for this type is disabled for the loop due to the kill, as
// compensation code is currently not added on back-edges. When merging values from a back-edge, the array value
// type cannot be definite, as that may require adding compensation code on the back-edge if the optimization pass
// chooses to not optimize the array.
if(isJsArray)
{
ChangeValueType(nullptr, value, valueInfo->Type().ToLikely(), false);
}
else
{
ChangeValueInfo(
nullptr,
value,
valueInfo->AsArrayValueInfo()->Copy(alloc, true, false /* copyHeadSegmentLength */, true));
}
it.RemoveCurrent();
continue;
}
if(!isJsArray || !valueInfo->IsArrayValueInfo())
{
continue;
}
// Similarly, if the loop contains an operation that kills JS array segments, don't make the segment or other related
// syms available initially inside the loop
ArrayValueInfo *const arrayValueInfo = valueInfo->AsArrayValueInfo();
const bool removeHeadSegment = loopKills.KillsArrayHeadSegments() && arrayValueInfo->HeadSegmentSym();
const bool removeHeadSegmentLength = loopKills.KillsArrayHeadSegmentLengths() && arrayValueInfo->HeadSegmentLengthSym();
const bool removeLength = loopKills.KillsArrayLengths() && arrayValueInfo->LengthSym();
if(removeHeadSegment || removeHeadSegmentLength || removeLength)
{
ChangeValueInfo(
nullptr,
value,
arrayValueInfo->Copy(alloc, !removeHeadSegment, !removeHeadSegmentLength, !removeLength));
valueInfo = value->GetValueInfo();
}
}
}
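// Returns whether the liveness info in the block requires implicit calls to be disabled: in the forward pass, when
// optimized array values are being tracked for kills on calls; in the backward pass, when there are uses that are only
// valid if implicit calls do not occur.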
bool
GlobOpt::NeedBailOnImplicitCallForLiveValues(BasicBlock const * const block, const bool isForwardPass) const
{
if(isForwardPass)
{
return block->globOptData.valuesToKillOnCalls->Count() != 0;
}
if(block->noImplicitCallUses->IsEmpty())
{
Assert(block->noImplicitCallNoMissingValuesUses->IsEmpty());
Assert(block->noImplicitCallNativeArrayUses->IsEmpty());
Assert(block->noImplicitCallJsArrayHeadSegmentSymUses->IsEmpty());
Assert(block->noImplicitCallArrayLengthSymUses->IsEmpty());
return false;
}
return true;
}
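// Creates a BoundCheck instruction (without bailout info) verifying lowerBound <= upperBound + offset.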
IR::Instr*
GlobOpt::CreateBoundsCheckInstr(IR::Opnd* lowerBound, IR::Opnd* upperBound, int offset, Func* func)
{
IR::Instr* instr = IR::Instr::New(Js::OpCode::BoundCheck, func);
return AttachBoundsCheckData(instr, lowerBound, upperBound, offset);
}
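// Creates a BoundCheck instruction that bails out with the given bailout kind and info when the check fails.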
IR::Instr*
GlobOpt::CreateBoundsCheckInstr(IR::Opnd* lowerBound, IR::Opnd* upperBound, int offset, IR::BailOutKind bailoutkind, BailOutInfo* bailoutInfo, Func * func)
{
IR::Instr* instr = IR::BailOutInstr::New(Js::OpCode::BoundCheck, bailoutkind, bailoutInfo, func);
return AttachBoundsCheckData(instr, lowerBound, upperBound, offset);
}
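// Encodes a bound check's operands onto the instruction: src1 is the lower bound, src2 is the upper bound, and a
// nonzero constant offset is stored in the dst, giving the check the semantics src1 <= src2 + dst. For example, an
// upper bound check 'index < headSegmentLength' is encoded as index <= headSegmentLength + (-1).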
IR::Instr*
GlobOpt::AttachBoundsCheckData(IR::Instr* instr, IR::Opnd* lowerBound, IR::Opnd* upperBound, int offset)
{
instr->SetSrc1(lowerBound);
instr->SetSrc2(upperBound);
if (offset != 0)
{
instr->SetDst(IR::IntConstOpnd::New(offset, TyInt32, instr->m_func));
}
return instr;
}
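// Entry point for array access optimization on an instruction: element loads/stores, inline array push/pop, LdLen, and
// SIMD loads/stores. Based on the base operand's value type and the enclosing loops' kill information, this separates
// array checks, head segment / head segment length / length loads, and lower/upper bound checks from the instruction,
// hoists them out of loops where possible, and attaches bailout info so that failed checks bail out instead of falling
// back to a helper call.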
void
GlobOpt::OptArraySrc(IR::Instr * *const instrRef)
{
Assert(instrRef);
IR::Instr *&instr = *instrRef;
Assert(instr);
IR::Instr *baseOwnerInstr;
IR::IndirOpnd *baseOwnerIndir;
IR::RegOpnd *baseOpnd;
bool isProfilableLdElem, isProfilableStElem;
bool isLoad, isStore;
bool needsHeadSegment, needsHeadSegmentLength, needsLength, needsBoundChecks;
switch(instr->m_opcode)
{
// SIMD_JS
case Js::OpCode::Simd128_LdArr_F4:
case Js::OpCode::Simd128_LdArr_I4:
// no type-spec for Asm.js
if (this->GetIsAsmJSFunc())
{
return;
}
// fall through
case Js::OpCode::LdElemI_A:
case Js::OpCode::LdMethodElem:
if(!instr->GetSrc1()->IsIndirOpnd())
{
return;
}
baseOwnerInstr = nullptr;
baseOwnerIndir = instr->GetSrc1()->AsIndirOpnd();
baseOpnd = baseOwnerIndir->GetBaseOpnd();
isProfilableLdElem = instr->m_opcode == Js::OpCode::LdElemI_A; // LdMethodElem is currently not profiled
isProfilableLdElem |= Js::IsSimd128Load(instr->m_opcode);
needsBoundChecks = needsHeadSegmentLength = needsHeadSegment = isLoad = true;
needsLength = isStore = isProfilableStElem = false;
break;
// SIMD_JS
case Js::OpCode::Simd128_StArr_F4:
case Js::OpCode::Simd128_StArr_I4:
if (this->GetIsAsmJSFunc())
{
return;
}
// fall through
case Js::OpCode::StElemI_A:
case Js::OpCode::StElemI_A_Strict:
case Js::OpCode::StElemC:
if(!instr->GetDst()->IsIndirOpnd())
{
return;
}
baseOwnerInstr = nullptr;
baseOwnerIndir = instr->GetDst()->AsIndirOpnd();
baseOpnd = baseOwnerIndir->GetBaseOpnd();
needsBoundChecks = isProfilableStElem = instr->m_opcode != Js::OpCode::StElemC;
isProfilableStElem |= Js::IsSimd128Store(instr->m_opcode);
needsHeadSegmentLength = needsHeadSegment = isStore = true;
needsLength = isLoad = isProfilableLdElem = false;
break;
case Js::OpCode::InlineArrayPush:
case Js::OpCode::InlineArrayPop:
{
baseOwnerInstr = instr;
baseOwnerIndir = nullptr;
IR::Opnd * thisOpnd = instr->GetSrc1();
// Return if it is not a likely Array or ObjectWithArray - no point in doing array check elimination.
if(!thisOpnd->IsRegOpnd() || !thisOpnd->GetValueType().IsLikelyArrayOrObjectWithArray())
{
return;
}
baseOpnd = thisOpnd->AsRegOpnd();
isLoad = instr->m_opcode == Js::OpCode::InlineArrayPop;
isStore = instr->m_opcode == Js::OpCode::InlineArrayPush;
needsLength = needsHeadSegmentLength = needsHeadSegment = true;
needsBoundChecks = isProfilableLdElem = isProfilableStElem = false;
break;
}
case Js::OpCode::LdLen_A:
if(!instr->GetSrc1()->IsRegOpnd())
{
return;
}
baseOwnerInstr = instr;
baseOwnerIndir = nullptr;
baseOpnd = instr->GetSrc1()->AsRegOpnd();
if(baseOpnd->GetValueType().IsLikelyObject() &&
baseOpnd->GetValueType().GetObjectType() == ObjectType::ObjectWithArray)
{
return;
}
needsLength = true;
needsBoundChecks =
needsHeadSegmentLength =
needsHeadSegment =
isStore =
isLoad =
isProfilableStElem =
isProfilableLdElem = false;
break;
default:
return;
}
Assert(!(baseOwnerInstr && baseOwnerIndir));
Assert(!needsHeadSegmentLength || needsHeadSegment);
if(baseOwnerIndir && !IsLoopPrePass())
{
// Since this happens before type specialization, make sure that any necessary conversions are done, and that the index
// is int-specialized if possible such that the const flags are correct.
ToVarUses(instr, baseOwnerIndir, baseOwnerIndir == instr->GetDst(), nullptr);
}
if(isProfilableStElem && !IsLoopPrePass())
{
// If the dead-store pass decides to add the bailout kind IR::BailOutInvalidatedArrayHeadSegment, and the fast path is
// generated, it may bail out before the operation is done, so this would need to be a pre-op bailout.
if(instr->HasBailOutInfo())
{
Assert(
instr->GetByteCodeOffset() != Js::Constants::NoByteCodeOffset &&
instr->GetBailOutInfo()->bailOutOffset <= instr->GetByteCodeOffset());
const IR::BailOutKind bailOutKind = instr->GetBailOutKind();
Assert(
!(bailOutKind & ~IR::BailOutKindBits) ||
(bailOutKind & ~IR::BailOutKindBits) == IR::BailOutOnImplicitCallsPreOp);
if(!(bailOutKind & ~IR::BailOutKindBits))
{
instr->SetBailOutKind(bailOutKind + IR::BailOutOnImplicitCallsPreOp);
}
}
else
{
GenerateBailAtOperation(&instr, IR::BailOutOnImplicitCallsPreOp);
}
}
Value *const baseValue = CurrentBlockData()->FindValue(baseOpnd->m_sym);
if(!baseValue)
{
return;
}
ValueInfo *baseValueInfo = baseValue->GetValueInfo();
ValueType baseValueType(baseValueInfo->Type());
baseOpnd->SetValueType(baseValueType);
if(!baseValueType.IsLikelyAnyOptimizedArray() ||
!DoArrayCheckHoist(baseValueType, currentBlock->loop, instr) ||
(baseOwnerIndir && !ShouldExpectConventionalArrayIndexValue(baseOwnerIndir)))
{
return;
}
const bool isLikelyJsArray = !baseValueType.IsLikelyTypedArray();
Assert(isLikelyJsArray == baseValueType.IsLikelyArrayOrObjectWithArray());
Assert(!isLikelyJsArray == baseValueType.IsLikelyOptimizedTypedArray());
if(!isLikelyJsArray && instr->m_opcode == Js::OpCode::LdMethodElem)
{
// Fast path is not generated in this case since the subsequent call will throw
return;
}
ValueType newBaseValueType(baseValueType.ToDefiniteObject());
if(isLikelyJsArray && newBaseValueType.HasNoMissingValues() && !DoArrayMissingValueCheckHoist())
{
newBaseValueType = newBaseValueType.SetHasNoMissingValues(false);
}
Assert((newBaseValueType == baseValueType) == baseValueType.IsObject());
ArrayValueInfo *baseArrayValueInfo = nullptr;
const auto UpdateValue = [&](StackSym *newHeadSegmentSym, StackSym *newHeadSegmentLengthSym, StackSym *newLengthSym)
{
Assert(baseValueType.GetObjectType() == newBaseValueType.GetObjectType());
Assert(newBaseValueType.IsObject());
Assert(baseValueType.IsLikelyArray() || !newLengthSym);
if(!(newHeadSegmentSym || newHeadSegmentLengthSym || newLengthSym))
{
// We're not adding new information to the value other than changing the value type. Preserve any existing
// information and just change the value type.
ChangeValueType(currentBlock, baseValue, newBaseValueType, true);
return;
}
// Merge the new syms into the value while preserving any existing information, and change the value type
if(baseArrayValueInfo)
{
if(!newHeadSegmentSym)
{
newHeadSegmentSym = baseArrayValueInfo->HeadSegmentSym();
}
if(!newHeadSegmentLengthSym)
{
newHeadSegmentLengthSym = baseArrayValueInfo->HeadSegmentLengthSym();
}
if(!newLengthSym)
{
newLengthSym = baseArrayValueInfo->LengthSym();
}
Assert(
!baseArrayValueInfo->HeadSegmentSym() ||
newHeadSegmentSym == baseArrayValueInfo->HeadSegmentSym());
Assert(
!baseArrayValueInfo->HeadSegmentLengthSym() ||
newHeadSegmentLengthSym == baseArrayValueInfo->HeadSegmentLengthSym());
Assert(!baseArrayValueInfo->LengthSym() || newLengthSym == baseArrayValueInfo->LengthSym());
}
ArrayValueInfo *const newBaseArrayValueInfo =
ArrayValueInfo::New(
alloc,
newBaseValueType,
newHeadSegmentSym,
newHeadSegmentLengthSym,
newLengthSym,
baseValueInfo->GetSymStore());
ChangeValueInfo(currentBlock, baseValue, newBaseArrayValueInfo);
};
if(IsLoopPrePass())
{
if(newBaseValueType != baseValueType)
{
UpdateValue(nullptr, nullptr, nullptr);
}
// For javascript arrays and objects with javascript arrays:
// - Implicit calls need to be disabled and calls cannot be allowed in the loop since the array vtable may be changed
// into an ES5 array.
// For typed arrays:
// - A typed array's array buffer may be transferred to a web worker as part of an implicit call, in which case the
// typed array's length is set to zero. Implicit calls need to be disabled if the typed array's head segment length
// is going to be loaded and used later.
// Since we don't know if the loop has kills after this instruction, the kill information may not be complete. If a kill
// is found later, this information will be updated to not require disabling implicit calls.
if(!(
isLikelyJsArray
? rootLoopPrePass->jsArrayKills.KillsValueType(newBaseValueType)
: rootLoopPrePass->jsArrayKills.KillsTypedArrayHeadSegmentLengths()
))
{
rootLoopPrePass->needImplicitCallBailoutChecksForJsArrayCheckHoist = true;
}
return;
}
if(baseValueInfo->IsArrayValueInfo())
{
baseArrayValueInfo = baseValueInfo->AsArrayValueInfo();
}
const bool doArrayChecks = !baseValueType.IsObject();
const bool doArraySegmentHoist = DoArraySegmentHoist(baseValueType) && instr->m_opcode != Js::OpCode::StElemC;
const bool headSegmentIsAvailable = baseArrayValueInfo && baseArrayValueInfo->HeadSegmentSym();
const bool doHeadSegmentLoad = doArraySegmentHoist && needsHeadSegment && !headSegmentIsAvailable;
const bool doArraySegmentLengthHoist =
doArraySegmentHoist && (isLikelyJsArray || DoTypedArraySegmentLengthHoist(currentBlock->loop));
const bool headSegmentLengthIsAvailable = baseArrayValueInfo && baseArrayValueInfo->HeadSegmentLengthSym();
const bool doHeadSegmentLengthLoad =
doArraySegmentLengthHoist &&
(needsHeadSegmentLength || (!isLikelyJsArray && needsLength)) &&
!headSegmentLengthIsAvailable;
const bool lengthIsAvailable = baseArrayValueInfo && baseArrayValueInfo->LengthSym();
const bool doLengthLoad =
DoArrayLengthHoist() &&
needsLength &&
!lengthIsAvailable &&
baseValueType.IsLikelyArray() &&
DoLdLenIntSpec(instr->m_opcode == Js::OpCode::LdLen_A ? instr : nullptr, baseValueType);
StackSym *const newHeadSegmentSym = doHeadSegmentLoad ? StackSym::New(TyMachPtr, instr->m_func) : nullptr;
StackSym *const newHeadSegmentLengthSym = doHeadSegmentLengthLoad ? StackSym::New(TyUint32, instr->m_func) : nullptr;
StackSym *const newLengthSym = doLengthLoad ? StackSym::New(TyUint32, instr->m_func) : nullptr;
bool canBailOutOnArrayAccessHelperCall;
if (Js::IsSimd128LoadStore(instr->m_opcode))
{
// SIMD_JS
// simd load/store never call helper
canBailOutOnArrayAccessHelperCall = true;
}
else
{
canBailOutOnArrayAccessHelperCall = (isProfilableLdElem || isProfilableStElem) &&
DoEliminateArrayAccessHelperCall() &&
!(
instr->IsProfiledInstr() &&
(
isProfilableLdElem
? instr->AsProfiledInstr()->u.ldElemInfo->LikelyNeedsHelperCall()
: instr->AsProfiledInstr()->u.stElemInfo->LikelyNeedsHelperCall()
)
);
}
bool doExtractBoundChecks = false, eliminatedLowerBoundCheck = false, eliminatedUpperBoundCheck = false;
StackSym *indexVarSym = nullptr;
Value *indexValue = nullptr;
IntConstantBounds indexConstantBounds;
Value *headSegmentLengthValue = nullptr;
IntConstantBounds headSegmentLengthConstantBounds;
#if ENABLE_FAST_ARRAYBUFFER
if (baseValueType.IsLikelyOptimizedVirtualTypedArray() && !Js::IsSimd128LoadStore(instr->m_opcode) /*Always extract bounds for SIMD */)
{
if (isProfilableStElem ||
!instr->IsDstNotAlwaysConvertedToInt32() ||
( (baseValueType.GetObjectType() == ObjectType::Float32VirtualArray ||
baseValueType.GetObjectType() == ObjectType::Float64VirtualArray) &&
!instr->IsDstNotAlwaysConvertedToNumber()
)
)
{
// Unless we're in asm.js (where it is guaranteed that virtual typed array accesses cannot read/write beyond 4GB),
// check the range of the index to make sure we won't access beyond the reserved memory before eliminating bounds
// checks in jitted code.
if (!GetIsAsmJSFunc() && baseOwnerIndir)
{
IR::RegOpnd * idxOpnd = baseOwnerIndir->GetIndexOpnd();
if (idxOpnd)
{
StackSym * idxSym = idxOpnd->m_sym->IsTypeSpec() ? idxOpnd->m_sym->GetVarEquivSym(nullptr) : idxOpnd->m_sym;
Value * idxValue = CurrentBlockData()->FindValue(idxSym);
IntConstantBounds idxConstantBounds;
if (idxValue && idxValue->GetValueInfo()->TryGetIntConstantBounds(&idxConstantBounds))
{
BYTE indirScale = Lowerer::GetArrayIndirScale(baseValueType);
int32 upperBound = idxConstantBounds.UpperBound();
int32 lowerBound = idxConstantBounds.LowerBound();
if (lowerBound >= 0 && ((static_cast<uint64>(upperBound) << indirScale) < MAX_ASMJS_ARRAYBUFFER_LENGTH))
{
eliminatedLowerBoundCheck = true;
eliminatedUpperBoundCheck = true;
canBailOutOnArrayAccessHelperCall = false;
}
}
}
}
else
{
if (!baseOwnerIndir)
{
Assert(instr->m_opcode == Js::OpCode::InlineArrayPush ||
instr->m_opcode == Js::OpCode::InlineArrayPop ||
instr->m_opcode == Js::OpCode::LdLen_A);
}
eliminatedLowerBoundCheck = true;
eliminatedUpperBoundCheck = true;
canBailOutOnArrayAccessHelperCall = false;
}
}
}
#endif
if(needsBoundChecks && DoBoundCheckElimination())
{
AnalysisAssert(baseOwnerIndir);
Assert(needsHeadSegmentLength);
// Bound checks can be separated from the instruction only if it can bail out instead of making a helper call when a
// bound check fails. Only if it would bail out can we use a bound check to eliminate redundant bound checks later
// on that path.
doExtractBoundChecks = (headSegmentLengthIsAvailable || doHeadSegmentLengthLoad) && canBailOutOnArrayAccessHelperCall;
do
{
// Get the index value
IR::RegOpnd *const indexOpnd = baseOwnerIndir->GetIndexOpnd();
if(indexOpnd)
{
StackSym *const indexSym = indexOpnd->m_sym;
if(indexSym->IsTypeSpec())
{
Assert(indexSym->IsInt32());
indexVarSym = indexSym->GetVarEquivSym(nullptr);
Assert(indexVarSym);
indexValue = CurrentBlockData()->FindValue(indexVarSym);
Assert(indexValue);
AssertVerify(indexValue->GetValueInfo()->TryGetIntConstantBounds(&indexConstantBounds));
Assert(indexOpnd->GetType() == TyInt32 || indexOpnd->GetType() == TyUint32);
Assert(
(indexOpnd->GetType() == TyUint32) ==
ValueInfo::IsGreaterThanOrEqualTo(
indexValue,
indexConstantBounds.LowerBound(),
indexConstantBounds.UpperBound(),
nullptr,
0,
0));
if(indexOpnd->GetType() == TyUint32)
{
eliminatedLowerBoundCheck = true;
}
}
else
{
doExtractBoundChecks = false; // Bound check instruction operates only on int-specialized operands
indexValue = CurrentBlockData()->FindValue(indexSym);
if(!indexValue || !indexValue->GetValueInfo()->TryGetIntConstantBounds(&indexConstantBounds))
{
break;
}
if(ValueInfo::IsGreaterThanOrEqualTo(
indexValue,
indexConstantBounds.LowerBound(),
indexConstantBounds.UpperBound(),
nullptr,
0,
0))
{
eliminatedLowerBoundCheck = true;
}
}
if(!eliminatedLowerBoundCheck &&
ValueInfo::IsLessThan(
indexValue,
indexConstantBounds.LowerBound(),
indexConstantBounds.UpperBound(),
nullptr,
0,
0))
{
eliminatedUpperBoundCheck = true;
doExtractBoundChecks = false;
break;
}
}
else
{
const int32 indexConstantValue = baseOwnerIndir->GetOffset();
if(indexConstantValue < 0)
{
eliminatedUpperBoundCheck = true;
doExtractBoundChecks = false;
break;
}
if(indexConstantValue == INT32_MAX)
{
eliminatedLowerBoundCheck = true;
doExtractBoundChecks = false;
break;
}
indexConstantBounds = IntConstantBounds(indexConstantValue, indexConstantValue);
eliminatedLowerBoundCheck = true;
}
if(!headSegmentLengthIsAvailable)
{
break;
}
headSegmentLengthValue = CurrentBlockData()->FindValue(baseArrayValueInfo->HeadSegmentLengthSym());
if(!headSegmentLengthValue)
{
if(doExtractBoundChecks)
{
headSegmentLengthConstantBounds = IntConstantBounds(0, Js::SparseArraySegmentBase::MaxLength);
}
break;
}
AssertVerify(headSegmentLengthValue->GetValueInfo()->TryGetIntConstantBounds(&headSegmentLengthConstantBounds));
if (ValueInfo::IsLessThanOrEqualTo(
indexValue,
indexConstantBounds.LowerBound(),
indexConstantBounds.UpperBound(),
headSegmentLengthValue,
headSegmentLengthConstantBounds.LowerBound(),
headSegmentLengthConstantBounds.UpperBound(),
GetBoundCheckOffsetForSimd(newBaseValueType, instr, -1)
))
{
eliminatedUpperBoundCheck = true;
if(eliminatedLowerBoundCheck)
{
doExtractBoundChecks = false;
}
}
} while(false);
}
if(doArrayChecks || doHeadSegmentLoad || doHeadSegmentLengthLoad || doLengthLoad || doExtractBoundChecks)
{
// Find the loops out of which array checks and head segment loads need to be hoisted
Loop *hoistChecksOutOfLoop = nullptr;
Loop *hoistHeadSegmentLoadOutOfLoop = nullptr;
Loop *hoistHeadSegmentLengthLoadOutOfLoop = nullptr;
Loop *hoistLengthLoadOutOfLoop = nullptr;
if(doArrayChecks || doHeadSegmentLoad || doHeadSegmentLengthLoad || doLengthLoad)
{
for(Loop *loop = currentBlock->loop; loop; loop = loop->parent)
{
const JsArrayKills loopKills(loop->jsArrayKills);
Value *baseValueInLoopLandingPad = nullptr;
if((isLikelyJsArray && loopKills.KillsValueType(newBaseValueType)) ||
!OptIsInvariant(baseOpnd->m_sym, currentBlock, loop, baseValue, true, true, &baseValueInLoopLandingPad) ||
!(doArrayChecks || baseValueInLoopLandingPad->GetValueInfo()->IsObject()))
{
break;
}
// The value types should be the same, except:
// - The value type in the landing pad is a type that can merge to a specific object type. Typically, these
// cases will use BailOnNoProfile, but that can be disabled due to excessive bailouts. Those value types
// merge aggressively to the other side's object type, so the value type may have started off as
// Uninitialized, [Likely]Undefined|Null, [Likely]UninitializedObject, etc., and changed in the loop to an
// array type during a prepass.
// - StElems in the loop can kill the no-missing-values info.
// - The native array type may be made more conservative based on profile data by an instruction in the loop.
#if DBG
if (!baseValueInLoopLandingPad->GetValueInfo()->CanMergeToSpecificObjectType())
{
ValueType landingPadValueType = baseValueInLoopLandingPad->GetValueInfo()->Type();
Assert(landingPadValueType.IsSimilar(baseValueType) ||
(
landingPadValueType.IsLikelyNativeArray() &&
landingPadValueType.Merge(baseValueType).IsSimilar(baseValueType)
)
);
}
#endif
if(doArrayChecks)
{
hoistChecksOutOfLoop = loop;
}
if(isLikelyJsArray && loopKills.KillsArrayHeadSegments())
{
Assert(loopKills.KillsArrayHeadSegmentLengths());
if(!(doArrayChecks || doLengthLoad))
{
break;
}
}
else
{
if(doHeadSegmentLoad || headSegmentIsAvailable)
{
// If the head segment is already available, we may need to rehoist the value including other
// information. So, we need to track the loop out of which the head segment can be hoisted even if
// the head segment is not being loaded here.
hoistHeadSegmentLoadOutOfLoop = loop;
}
if(isLikelyJsArray
? loopKills.KillsArrayHeadSegmentLengths()
: loopKills.KillsTypedArrayHeadSegmentLengths())
{
if(!(doArrayChecks || doHeadSegmentLoad || doLengthLoad))
{
break;
}
}
else if(doHeadSegmentLengthLoad || headSegmentLengthIsAvailable)
{
// If the head segment length is already available, we may need to rehoist the value including other
// information. So, need to track the loop out of which the head segment length can be hoisted even if
// the head segment length is not being loaded here.
hoistHeadSegmentLengthLoadOutOfLoop = loop;
}
}
if(isLikelyJsArray && loopKills.KillsArrayLengths())
{
if(!(doArrayChecks || doHeadSegmentLoad || doHeadSegmentLengthLoad))
{
break;
}
}
else if(doLengthLoad || lengthIsAvailable)
{
// If the length is already available, we may need to rehoist the value including other information. So,
// we need to track the loop out of which the length can be hoisted even if the length is not
// being loaded here.
hoistLengthLoadOutOfLoop = loop;
}
}
}
IR::Instr *insertBeforeInstr = instr->GetInsertBeforeByteCodeUsesInstr();
const auto InsertInstrInLandingPad = [&](IR::Instr *const instr, Loop *const hoistOutOfLoop)
{
if(hoistOutOfLoop->bailOutInfo->bailOutInstr)
{
instr->SetByteCodeOffset(hoistOutOfLoop->bailOutInfo->bailOutInstr);
hoistOutOfLoop->bailOutInfo->bailOutInstr->InsertBefore(instr);
}
else
{
instr->SetByteCodeOffset(hoistOutOfLoop->landingPad->GetLastInstr());
hoistOutOfLoop->landingPad->InsertAfter(instr);
}
};
BailOutInfo *shareableBailOutInfo = nullptr;
IR::Instr *shareableBailOutInfoOriginalOwner = nullptr;
const auto ShareBailOut = [&]()
{
Assert(shareableBailOutInfo);
if(shareableBailOutInfo->bailOutInstr != shareableBailOutInfoOriginalOwner)
{
return;
}
Assert(shareableBailOutInfoOriginalOwner->GetBailOutInfo() == shareableBailOutInfo);
IR::Instr *const sharedBailOut = shareableBailOutInfoOriginalOwner->ShareBailOut();
Assert(sharedBailOut->GetBailOutInfo() == shareableBailOutInfo);
shareableBailOutInfoOriginalOwner = nullptr;
sharedBailOut->Unlink();
insertBeforeInstr->InsertBefore(sharedBailOut);
insertBeforeInstr = sharedBailOut;
};
if(doArrayChecks)
{
TRACE_TESTTRACE_PHASE_INSTR(Js::ArrayCheckHoistPhase, instr, _u("Separating array checks with bailout\n"));
IR::Instr *bailOnNotArray = IR::Instr::New(Js::OpCode::BailOnNotArray, instr->m_func);
bailOnNotArray->SetSrc1(baseOpnd);
bailOnNotArray->GetSrc1()->SetIsJITOptimizedReg(true);
const IR::BailOutKind bailOutKind =
newBaseValueType.IsLikelyNativeArray() ? IR::BailOutOnNotNativeArray : IR::BailOutOnNotArray;
if(hoistChecksOutOfLoop)
{
Assert(!(isLikelyJsArray && hoistChecksOutOfLoop->jsArrayKills.KillsValueType(newBaseValueType)));
TRACE_PHASE_INSTR(
Js::ArrayCheckHoistPhase,
instr,
_u("Hoisting array checks with bailout out of loop %u to landing pad block %u\n"),
hoistChecksOutOfLoop->GetLoopNumber(),
hoistChecksOutOfLoop->landingPad->GetBlockNum());
TESTTRACE_PHASE_INSTR(Js::ArrayCheckHoistPhase, instr, _u("Hoisting array checks with bailout out of loop\n"));
Assert(hoistChecksOutOfLoop->bailOutInfo);
EnsureBailTarget(hoistChecksOutOfLoop);
InsertInstrInLandingPad(bailOnNotArray, hoistChecksOutOfLoop);
bailOnNotArray = bailOnNotArray->ConvertToBailOutInstr(hoistChecksOutOfLoop->bailOutInfo, bailOutKind);
}
else
{
bailOnNotArray->SetByteCodeOffset(instr);
insertBeforeInstr->InsertBefore(bailOnNotArray);
GenerateBailAtOperation(&bailOnNotArray, bailOutKind);
shareableBailOutInfo = bailOnNotArray->GetBailOutInfo();
shareableBailOutInfoOriginalOwner = bailOnNotArray;
}
baseValueType = newBaseValueType;
baseOpnd->SetValueType(newBaseValueType);
}
if(doLengthLoad)
{
Assert(baseValueType.IsArray());
Assert(newLengthSym);
TRACE_TESTTRACE_PHASE_INSTR(Js::Phase::ArrayLengthHoistPhase, instr, _u("Separating array length load\n"));
// Create an initial value for the length
CurrentBlockData()->liveVarSyms->Set(newLengthSym->m_id);
Value *const lengthValue = NewIntRangeValue(0, INT32_MAX, false);
CurrentBlockData()->SetValue(lengthValue, newLengthSym);
// SetValue above would have set the sym store to newLengthSym. This sym won't be used for copy-prop though, so
// remove it as the sym store.
this->SetSymStoreDirect(lengthValue->GetValueInfo(), nullptr);
// length = [array + offsetOf(length)]
IR::Instr *const loadLength =
IR::Instr::New(
Js::OpCode::LdIndir,
IR::RegOpnd::New(newLengthSym, newLengthSym->GetType(), instr->m_func),
IR::IndirOpnd::New(
baseOpnd,
Js::JavascriptArray::GetOffsetOfLength(),
newLengthSym->GetType(),
instr->m_func),
instr->m_func);
loadLength->GetDst()->SetIsJITOptimizedReg(true);
loadLength->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->SetIsJITOptimizedReg(true);
// BailOnNegative length (BailOutOnIrregularLength)
IR::Instr *bailOnIrregularLength = IR::Instr::New(Js::OpCode::BailOnNegative, instr->m_func);
bailOnIrregularLength->SetSrc1(loadLength->GetDst());
const IR::BailOutKind bailOutKind = IR::BailOutOnIrregularLength;
if(hoistLengthLoadOutOfLoop)
{
Assert(!hoistLengthLoadOutOfLoop->jsArrayKills.KillsArrayLengths());
TRACE_PHASE_INSTR(
Js::Phase::ArrayLengthHoistPhase,
instr,
_u("Hoisting array length load out of loop %u to landing pad block %u\n"),
hoistLengthLoadOutOfLoop->GetLoopNumber(),
hoistLengthLoadOutOfLoop->landingPad->GetBlockNum());
TESTTRACE_PHASE_INSTR(Js::Phase::ArrayLengthHoistPhase, instr, _u("Hoisting array length load out of loop\n"));
Assert(hoistLengthLoadOutOfLoop->bailOutInfo);
EnsureBailTarget(hoistLengthLoadOutOfLoop);
InsertInstrInLandingPad(loadLength, hoistLengthLoadOutOfLoop);
InsertInstrInLandingPad(bailOnIrregularLength, hoistLengthLoadOutOfLoop);
bailOnIrregularLength =
bailOnIrregularLength->ConvertToBailOutInstr(hoistLengthLoadOutOfLoop->bailOutInfo, bailOutKind);
// Hoist the length value
for(InvariantBlockBackwardIterator it(
this,
currentBlock,
hoistLengthLoadOutOfLoop->landingPad,
baseOpnd->m_sym,
baseValue->GetValueNumber());
it.IsValid();
it.MoveNext())
{
BasicBlock *const block = it.Block();
block->globOptData.liveVarSyms->Set(newLengthSym->m_id);
Assert(!block->globOptData.FindValue(newLengthSym));
Value *const lengthValueCopy = CopyValue(lengthValue, lengthValue->GetValueNumber());
block->globOptData.SetValue(lengthValueCopy, newLengthSym);
this->SetSymStoreDirect(lengthValueCopy->GetValueInfo(), nullptr);
}
}
else
{
loadLength->SetByteCodeOffset(instr);
insertBeforeInstr->InsertBefore(loadLength);
bailOnIrregularLength->SetByteCodeOffset(instr);
insertBeforeInstr->InsertBefore(bailOnIrregularLength);
if(shareableBailOutInfo)
{
ShareBailOut();
bailOnIrregularLength = bailOnIrregularLength->ConvertToBailOutInstr(shareableBailOutInfo, bailOutKind);
}
else
{
GenerateBailAtOperation(&bailOnIrregularLength, bailOutKind);
shareableBailOutInfo = bailOnIrregularLength->GetBailOutInfo();
shareableBailOutInfoOriginalOwner = bailOnIrregularLength;
}
}
}
const auto InsertHeadSegmentLoad = [&]()
{
TRACE_TESTTRACE_PHASE_INSTR(Js::ArraySegmentHoistPhase, instr, _u("Separating array segment load\n"));
Assert(newHeadSegmentSym);
IR::RegOpnd *const headSegmentOpnd =
IR::RegOpnd::New(newHeadSegmentSym, newHeadSegmentSym->GetType(), instr->m_func);
headSegmentOpnd->SetIsJITOptimizedReg(true);
IR::RegOpnd *const jitOptimizedBaseOpnd = baseOpnd->Copy(instr->m_func)->AsRegOpnd();
jitOptimizedBaseOpnd->SetIsJITOptimizedReg(true);
IR::Instr *loadObjectArray;
if(baseValueType.GetObjectType() == ObjectType::ObjectWithArray)
{
loadObjectArray =
IR::Instr::New(
Js::OpCode::LdIndir,
headSegmentOpnd,
IR::IndirOpnd::New(
jitOptimizedBaseOpnd,
Js::DynamicObject::GetOffsetOfObjectArray(),
jitOptimizedBaseOpnd->GetType(),
instr->m_func),
instr->m_func);
}
else
{
loadObjectArray = nullptr;
}
IR::Instr *const loadHeadSegment =
IR::Instr::New(
Js::OpCode::LdIndir,
headSegmentOpnd,
IR::IndirOpnd::New(
loadObjectArray ? headSegmentOpnd : jitOptimizedBaseOpnd,
Lowerer::GetArrayOffsetOfHeadSegment(baseValueType),
headSegmentOpnd->GetType(),
instr->m_func),
instr->m_func);
if(hoistHeadSegmentLoadOutOfLoop)
{
Assert(!(isLikelyJsArray && hoistHeadSegmentLoadOutOfLoop->jsArrayKills.KillsArrayHeadSegments()));
TRACE_PHASE_INSTR(
Js::ArraySegmentHoistPhase,
instr,
_u("Hoisting array segment load out of loop %u to landing pad block %u\n"),
hoistHeadSegmentLoadOutOfLoop->GetLoopNumber(),
hoistHeadSegmentLoadOutOfLoop->landingPad->GetBlockNum());
TESTTRACE_PHASE_INSTR(Js::ArraySegmentHoistPhase, instr, _u("Hoisting array segment load out of loop\n"));
if(loadObjectArray)
{
InsertInstrInLandingPad(loadObjectArray, hoistHeadSegmentLoadOutOfLoop);
}
InsertInstrInLandingPad(loadHeadSegment, hoistHeadSegmentLoadOutOfLoop);
}
else
{
if(loadObjectArray)
{
loadObjectArray->SetByteCodeOffset(instr);
insertBeforeInstr->InsertBefore(loadObjectArray);
}
loadHeadSegment->SetByteCodeOffset(instr);
insertBeforeInstr->InsertBefore(loadHeadSegment);
instr->loadedArrayHeadSegment = true;
}
};
if(doHeadSegmentLoad && isLikelyJsArray)
{
// For javascript arrays, the head segment is required to load the head segment length
InsertHeadSegmentLoad();
}
if(doHeadSegmentLengthLoad)
{
Assert(!isLikelyJsArray || newHeadSegmentSym || baseArrayValueInfo && baseArrayValueInfo->HeadSegmentSym());
Assert(newHeadSegmentLengthSym);
Assert(!headSegmentLengthValue);
TRACE_TESTTRACE_PHASE_INSTR(Js::ArraySegmentHoistPhase, instr, _u("Separating array segment length load\n"));
// Create an initial value for the head segment length
CurrentBlockData()->liveVarSyms->Set(newHeadSegmentLengthSym->m_id);
headSegmentLengthValue = NewIntRangeValue(0, Js::SparseArraySegmentBase::MaxLength, false);
headSegmentLengthConstantBounds = IntConstantBounds(0, Js::SparseArraySegmentBase::MaxLength);
CurrentBlockData()->SetValue(headSegmentLengthValue, newHeadSegmentLengthSym);
// SetValue above would have set the sym store to newHeadSegmentLengthSym. This sym won't be used for copy-prop
// though, so remove it as the sym store.
this->SetSymStoreDirect(headSegmentLengthValue->GetValueInfo(), nullptr);
StackSym *const headSegmentSym =
isLikelyJsArray
? newHeadSegmentSym ? newHeadSegmentSym : baseArrayValueInfo->HeadSegmentSym()
: nullptr;
IR::Instr *const loadHeadSegmentLength =
IR::Instr::New(
Js::OpCode::LdIndir,
IR::RegOpnd::New(newHeadSegmentLengthSym, newHeadSegmentLengthSym->GetType(), instr->m_func),
IR::IndirOpnd::New(
isLikelyJsArray ? IR::RegOpnd::New(headSegmentSym, headSegmentSym->GetType(), instr->m_func) : baseOpnd,
isLikelyJsArray
? Js::SparseArraySegmentBase::GetOffsetOfLength()
: Lowerer::GetArrayOffsetOfLength(baseValueType),
newHeadSegmentLengthSym->GetType(),
instr->m_func),
instr->m_func);
loadHeadSegmentLength->GetDst()->SetIsJITOptimizedReg(true);
loadHeadSegmentLength->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->SetIsJITOptimizedReg(true);
// We don't check the head segment length for negative (very large uint32) values. For JS arrays, the bound checks
// cover that. For typed arrays, we currently don't allocate array buffers with more than 1 GB of elements.
if(hoistHeadSegmentLengthLoadOutOfLoop)
{
Assert(
!(
isLikelyJsArray
? hoistHeadSegmentLengthLoadOutOfLoop->jsArrayKills.KillsArrayHeadSegmentLengths()
: hoistHeadSegmentLengthLoadOutOfLoop->jsArrayKills.KillsTypedArrayHeadSegmentLengths()
));
TRACE_PHASE_INSTR(
Js::ArraySegmentHoistPhase,
instr,
_u("Hoisting array segment length load out of loop %u to landing pad block %u\n"),
hoistHeadSegmentLengthLoadOutOfLoop->GetLoopNumber(),
hoistHeadSegmentLengthLoadOutOfLoop->landingPad->GetBlockNum());
TESTTRACE_PHASE_INSTR(Js::ArraySegmentHoistPhase, instr, _u("Hoisting array segment length load out of loop\n"));
InsertInstrInLandingPad(loadHeadSegmentLength, hoistHeadSegmentLengthLoadOutOfLoop);
// Hoist the head segment length value
for(InvariantBlockBackwardIterator it(
this,
currentBlock,
hoistHeadSegmentLengthLoadOutOfLoop->landingPad,
baseOpnd->m_sym,
baseValue->GetValueNumber());
it.IsValid();
it.MoveNext())
{
BasicBlock *const block = it.Block();
block->globOptData.liveVarSyms->Set(newHeadSegmentLengthSym->m_id);
Assert(!block->globOptData.FindValue(newHeadSegmentLengthSym));
Value *const headSegmentLengthValueCopy =
CopyValue(headSegmentLengthValue, headSegmentLengthValue->GetValueNumber());
block->globOptData.SetValue(headSegmentLengthValueCopy, newHeadSegmentLengthSym);
this->SetSymStoreDirect(headSegmentLengthValueCopy->GetValueInfo(), nullptr);
}
}
else
{
loadHeadSegmentLength->SetByteCodeOffset(instr);
insertBeforeInstr->InsertBefore(loadHeadSegmentLength);
instr->loadedArrayHeadSegmentLength = true;
}
}
if(doExtractBoundChecks)
{
Assert(!(eliminatedLowerBoundCheck && eliminatedUpperBoundCheck));
Assert(baseOwnerIndir);
Assert(!baseOwnerIndir->GetIndexOpnd() || baseOwnerIndir->GetIndexOpnd()->m_sym->IsTypeSpec());
Assert(doHeadSegmentLengthLoad || headSegmentLengthIsAvailable);
Assert(canBailOutOnArrayAccessHelperCall);
Assert(!isStore || instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict || Js::IsSimd128LoadStore(instr->m_opcode));
StackSym *const headSegmentLengthSym =
headSegmentLengthIsAvailable ? baseArrayValueInfo->HeadSegmentLengthSym() : newHeadSegmentLengthSym;
Assert(headSegmentLengthSym);
Assert(headSegmentLengthValue);
ArrayLowerBoundCheckHoistInfo lowerBoundCheckHoistInfo;
ArrayUpperBoundCheckHoistInfo upperBoundCheckHoistInfo;
bool failedToUpdateCompatibleLowerBoundCheck = false, failedToUpdateCompatibleUpperBoundCheck = false;
if(DoBoundCheckHoist())
{
if(indexVarSym)
{
TRACE_PHASE_INSTR_VERBOSE(
Js::Phase::BoundCheckHoistPhase,
instr,
_u("Determining array bound check hoistability for index s%u\n"),
indexVarSym->m_id);
}
else
{
TRACE_PHASE_INSTR_VERBOSE(
Js::Phase::BoundCheckHoistPhase,
instr,
_u("Determining array bound check hoistability for index %d\n"),
indexConstantBounds.LowerBound());
}
DetermineArrayBoundCheckHoistability(
!eliminatedLowerBoundCheck,
!eliminatedUpperBoundCheck,
lowerBoundCheckHoistInfo,
upperBoundCheckHoistInfo,
isLikelyJsArray,
indexVarSym,
indexValue,
indexConstantBounds,
headSegmentLengthSym,
headSegmentLengthValue,
headSegmentLengthConstantBounds,
hoistHeadSegmentLengthLoadOutOfLoop,
failedToUpdateCompatibleLowerBoundCheck,
failedToUpdateCompatibleUpperBoundCheck);
#ifdef ENABLE_SIMDJS
// SIMD_JS
UpdateBoundCheckHoistInfoForSimd(upperBoundCheckHoistInfo, newBaseValueType, instr);
#endif
}
if(!eliminatedLowerBoundCheck)
{
eliminatedLowerBoundCheck = true;
Assert(indexVarSym);
Assert(baseOwnerIndir->GetIndexOpnd());
Assert(indexValue);
ArrayLowerBoundCheckHoistInfo &hoistInfo = lowerBoundCheckHoistInfo;
if(hoistInfo.HasAnyInfo())
{
BasicBlock *hoistBlock;
if(hoistInfo.CompatibleBoundCheckBlock())
{
hoistBlock = hoistInfo.CompatibleBoundCheckBlock();
TRACE_PHASE_INSTR(
Js::Phase::BoundCheckHoistPhase,
instr,
_u("Hoisting array lower bound check into existing bound check instruction in block %u\n"),
hoistBlock->GetBlockNum());
TESTTRACE_PHASE_INSTR(
Js::Phase::BoundCheckHoistPhase,
instr,
_u("Hoisting array lower bound check into existing bound check instruction\n"));
}
else
{
Assert(hoistInfo.Loop());
BasicBlock *const landingPad = hoistInfo.Loop()->landingPad;
hoistBlock = landingPad;
StackSym *indexIntSym;
if(hoistInfo.IndexSym() && hoistInfo.IndexSym()->IsVar())
{
if(!landingPad->globOptData.IsInt32TypeSpecialized(hoistInfo.IndexSym()))
{
// Int-specialize the index sym, as the BoundCheck instruction requires int operands. Specialize
// it in this block if it is invariant, as the conversion will be hoisted along with value
// updates.
BasicBlock *specializationBlock = hoistInfo.Loop()->landingPad;
IR::Instr *specializeBeforeInstr = nullptr;
if(!CurrentBlockData()->IsInt32TypeSpecialized(hoistInfo.IndexSym()) &&
OptIsInvariant(
hoistInfo.IndexSym(),
currentBlock,
hoistInfo.Loop(),
CurrentBlockData()->FindValue(hoistInfo.IndexSym()),
false,
true))
{
specializationBlock = currentBlock;
specializeBeforeInstr = insertBeforeInstr;
}
Assert(tempBv->IsEmpty());
tempBv->Set(hoistInfo.IndexSym()->m_id);
ToInt32(tempBv, specializationBlock, false, specializeBeforeInstr);
tempBv->ClearAll();
Assert(landingPad->globOptData.IsInt32TypeSpecialized(hoistInfo.IndexSym()));
}
indexIntSym = hoistInfo.IndexSym()->GetInt32EquivSym(nullptr);
Assert(indexIntSym);
}
else
{
indexIntSym = hoistInfo.IndexSym();
Assert(!indexIntSym || indexIntSym->GetType() == TyInt32 || indexIntSym->GetType() == TyUint32);
}
// The info in the landing pad may be better than the info in the current block due to changes made to
// the index sym inside the loop. Check if the bound check we intend to hoist is unnecessary in the
// landing pad.
if(!ValueInfo::IsLessThanOrEqualTo(
nullptr,
0,
0,
hoistInfo.IndexValue(),
hoistInfo.IndexConstantBounds().LowerBound(),
hoistInfo.IndexConstantBounds().UpperBound(),
hoistInfo.Offset()))
{
Assert(hoistInfo.IndexSym());
Assert(hoistInfo.Loop()->bailOutInfo);
EnsureBailTarget(hoistInfo.Loop());
if(hoistInfo.LoopCount())
{
// Generate the loop count and loop count based bound that will be used for the bound check
if(!hoistInfo.LoopCount()->HasBeenGenerated())
{
GenerateLoopCount(hoistInfo.Loop(), hoistInfo.LoopCount());
}
GenerateSecondaryInductionVariableBound(
hoistInfo.Loop(),
indexVarSym->GetInt32EquivSym(nullptr),
hoistInfo.LoopCount(),
hoistInfo.MaxMagnitudeChange(),
hoistInfo.IndexSym());
}
IR::Opnd* lowerBound = IR::IntConstOpnd::New(0, TyInt32, instr->m_func, true);
IR::Opnd* upperBound = IR::RegOpnd::New(indexIntSym, TyInt32, instr->m_func);
upperBound->SetIsJITOptimizedReg(true);
// 0 <= indexSym + offset (src1 <= src2 + dst)
IR::Instr *const boundCheck = CreateBoundsCheckInstr(
lowerBound,
upperBound,
hoistInfo.Offset(),
hoistInfo.IsLoopCountBasedBound()
? IR::BailOutOnFailedHoistedLoopCountBasedBoundCheck
: IR::BailOutOnFailedHoistedBoundCheck,
hoistInfo.Loop()->bailOutInfo,
hoistInfo.Loop()->bailOutInfo->bailOutFunc);
InsertInstrInLandingPad(boundCheck, hoistInfo.Loop());
TRACE_PHASE_INSTR(
Js::Phase::BoundCheckHoistPhase,
instr,
_u("Hoisting array lower bound check out of loop %u to landing pad block %u, as (0 <= s%u + %d)\n"),
hoistInfo.Loop()->GetLoopNumber(),
landingPad->GetBlockNum(),
hoistInfo.IndexSym()->m_id,
hoistInfo.Offset());
TESTTRACE_PHASE_INSTR(
Js::Phase::BoundCheckHoistPhase,
instr,
_u("Hoisting array lower bound check out of loop\n"));
// Record the bound check instruction as available
const IntBoundCheck boundCheckInfo(
ZeroValueNumber,
hoistInfo.IndexValueNumber(),
boundCheck,
landingPad);
{
const bool added = CurrentBlockData()->availableIntBoundChecks->AddNew(boundCheckInfo) >= 0;
Assert(added || failedToUpdateCompatibleLowerBoundCheck);
}
for(InvariantBlockBackwardIterator it(this, currentBlock, landingPad, nullptr);
it.IsValid();
it.MoveNext())
{
const bool added = it.Block()->globOptData.availableIntBoundChecks->AddNew(boundCheckInfo) >= 0;
Assert(added || failedToUpdateCompatibleLowerBoundCheck);
}
}
}
// Update values of the syms involved in the bound check to reflect the bound check
if(hoistBlock != currentBlock && hoistInfo.IndexSym() && hoistInfo.Offset() != INT32_MIN)
{
for(InvariantBlockBackwardIterator it(
this,
currentBlock->next,
hoistBlock,
hoistInfo.IndexSym(),
hoistInfo.IndexValueNumber());
it.IsValid();
it.MoveNext())
{
Value *const value = it.InvariantSymValue();
IntConstantBounds constantBounds;
AssertVerify(value->GetValueInfo()->TryGetIntConstantBounds(&constantBounds, true));
ValueInfo *const newValueInfo =
UpdateIntBoundsForGreaterThanOrEqual(
value,
constantBounds,
nullptr,
IntConstantBounds(-hoistInfo.Offset(), -hoistInfo.Offset()),
false);
if(newValueInfo)
{
ChangeValueInfo(nullptr, value, newValueInfo);
if(it.Block() == currentBlock && value == indexValue)
{
AssertVerify(newValueInfo->TryGetIntConstantBounds(&indexConstantBounds));
}
}
}
}
}
else
{
IR::Opnd* lowerBound = IR::IntConstOpnd::New(0, TyInt32, instr->m_func, true);
IR::Opnd* upperBound = baseOwnerIndir->GetIndexOpnd();
upperBound->SetIsJITOptimizedReg(true);
const int offset = 0;
IR::Instr *boundCheck;
if(shareableBailOutInfo)
{
ShareBailOut();
boundCheck = CreateBoundsCheckInstr(
lowerBound,
upperBound,
offset,
IR::BailOutOnArrayAccessHelperCall,
shareableBailOutInfo,
shareableBailOutInfo->bailOutFunc);
}
else
{
boundCheck = CreateBoundsCheckInstr(
lowerBound,
upperBound,
offset,
instr->m_func);
}
boundCheck->SetByteCodeOffset(instr);
insertBeforeInstr->InsertBefore(boundCheck);
if(!shareableBailOutInfo)
{
GenerateBailAtOperation(&boundCheck, IR::BailOutOnArrayAccessHelperCall);
shareableBailOutInfo = boundCheck->GetBailOutInfo();
shareableBailOutInfoOriginalOwner = boundCheck;
}
TRACE_PHASE_INSTR(
Js::Phase::BoundCheckEliminationPhase,
instr,
_u("Separating array lower bound check, as (0 <= s%u)\n"),
indexVarSym->m_id);
TESTTRACE_PHASE_INSTR(
Js::Phase::BoundCheckEliminationPhase,
instr,
_u("Separating array lower bound check\n"));
if(DoBoundCheckHoist())
{
// Record the bound check instruction as available
const bool added =
CurrentBlockData()->availableIntBoundChecks->AddNew(
IntBoundCheck(ZeroValueNumber, indexValue->GetValueNumber(), boundCheck, currentBlock)) >= 0;
Assert(added || failedToUpdateCompatibleLowerBoundCheck);
}
}
// Update the index value to reflect the bound check
ValueInfo *const newValueInfo =
UpdateIntBoundsForGreaterThanOrEqual(
indexValue,
indexConstantBounds,
nullptr,
IntConstantBounds(0, 0),
false);
if(newValueInfo)
{
ChangeValueInfo(nullptr, indexValue, newValueInfo);
AssertVerify(newValueInfo->TryGetIntConstantBounds(&indexConstantBounds));
}
}
if(!eliminatedUpperBoundCheck)
{
eliminatedUpperBoundCheck = true;
ArrayUpperBoundCheckHoistInfo &hoistInfo = upperBoundCheckHoistInfo;
if(hoistInfo.HasAnyInfo())
{
BasicBlock *hoistBlock;
if(hoistInfo.CompatibleBoundCheckBlock())
{
hoistBlock = hoistInfo.CompatibleBoundCheckBlock();
TRACE_PHASE_INSTR(
Js::Phase::BoundCheckHoistPhase,
instr,
_u("Hoisting array upper bound check into existing bound check instruction in block %u\n"),
hoistBlock->GetBlockNum());
TESTTRACE_PHASE_INSTR(
Js::Phase::BoundCheckHoistPhase,
instr,
_u("Hoisting array upper bound check into existing bound check instruction\n"));
}
else
{
Assert(hoistInfo.Loop());
BasicBlock *const landingPad = hoistInfo.Loop()->landingPad;
hoistBlock = landingPad;
StackSym *indexIntSym;
if(hoistInfo.IndexSym() && hoistInfo.IndexSym()->IsVar())
{
if(!landingPad->globOptData.IsInt32TypeSpecialized(hoistInfo.IndexSym()))
{
// Int-specialize the index sym, as the BoundCheck instruction requires int operands. Specialize it
// in this block if it is invariant, as the conversion will be hoisted along with value updates.
BasicBlock *specializationBlock = hoistInfo.Loop()->landingPad;
IR::Instr *specializeBeforeInstr = nullptr;
if(!CurrentBlockData()->IsInt32TypeSpecialized(hoistInfo.IndexSym()) &&
OptIsInvariant(
hoistInfo.IndexSym(),
currentBlock,
hoistInfo.Loop(),
CurrentBlockData()->FindValue(hoistInfo.IndexSym()),
false,
true))
{
specializationBlock = currentBlock;
specializeBeforeInstr = insertBeforeInstr;
}
Assert(tempBv->IsEmpty());
tempBv->Set(hoistInfo.IndexSym()->m_id);
ToInt32(tempBv, specializationBlock, false, specializeBeforeInstr);
tempBv->ClearAll();
Assert(landingPad->globOptData.IsInt32TypeSpecialized(hoistInfo.IndexSym()));
}
indexIntSym = hoistInfo.IndexSym()->GetInt32EquivSym(nullptr);
Assert(indexIntSym);
}
else
{
indexIntSym = hoistInfo.IndexSym();
Assert(!indexIntSym || indexIntSym->GetType() == TyInt32 || indexIntSym->GetType() == TyUint32);
}
// The info in the landing pad may be better than the info in the current block due to changes made to the
// index sym inside the loop. Check if the bound check we intend to hoist is unnecessary in the landing pad.
if(!ValueInfo::IsLessThanOrEqualTo(
hoistInfo.IndexValue(),
hoistInfo.IndexConstantBounds().LowerBound(),
hoistInfo.IndexConstantBounds().UpperBound(),
hoistInfo.HeadSegmentLengthValue(),
hoistInfo.HeadSegmentLengthConstantBounds().LowerBound(),
hoistInfo.HeadSegmentLengthConstantBounds().UpperBound(),
hoistInfo.Offset()))
{
Assert(hoistInfo.Loop()->bailOutInfo);
EnsureBailTarget(hoistInfo.Loop());
if(hoistInfo.LoopCount())
{
// Generate the loop count and loop count based bound that will be used for the bound check
if(!hoistInfo.LoopCount()->HasBeenGenerated())
{
GenerateLoopCount(hoistInfo.Loop(), hoistInfo.LoopCount());
}
GenerateSecondaryInductionVariableBound(
hoistInfo.Loop(),
indexVarSym->GetInt32EquivSym(nullptr),
hoistInfo.LoopCount(),
hoistInfo.MaxMagnitudeChange(),
hoistInfo.IndexSym());
}
IR::Opnd* lowerBound = indexIntSym
? static_cast<IR::Opnd *>(IR::RegOpnd::New(indexIntSym, TyInt32, instr->m_func))
: IR::IntConstOpnd::New(
hoistInfo.IndexConstantBounds().LowerBound(),
TyInt32,
instr->m_func);
lowerBound->SetIsJITOptimizedReg(true);
IR::Opnd* upperBound = IR::RegOpnd::New(headSegmentLengthSym, headSegmentLengthSym->GetType(), instr->m_func);
upperBound->SetIsJITOptimizedReg(true);
// indexSym <= headSegmentLength + offset (src1 <= src2 + dst)
IR::Instr *const boundCheck = CreateBoundsCheckInstr(
lowerBound,
upperBound,
hoistInfo.Offset(),
hoistInfo.IsLoopCountBasedBound()
? IR::BailOutOnFailedHoistedLoopCountBasedBoundCheck
: IR::BailOutOnFailedHoistedBoundCheck,
hoistInfo.Loop()->bailOutInfo,
hoistInfo.Loop()->bailOutInfo->bailOutFunc);
InsertInstrInLandingPad(boundCheck, hoistInfo.Loop());
if(indexIntSym)
{
TRACE_PHASE_INSTR(
Js::Phase::BoundCheckHoistPhase,
instr,
_u("Hoisting array upper bound check out of loop %u to landing pad block %u, as (s%u <= s%u + %d)\n"),
hoistInfo.Loop()->GetLoopNumber(),
landingPad->GetBlockNum(),
hoistInfo.IndexSym()->m_id,
headSegmentLengthSym->m_id,
hoistInfo.Offset());
}
else
{
TRACE_PHASE_INSTR(
Js::Phase::BoundCheckHoistPhase,
instr,
_u("Hoisting array upper bound check out of loop %u to landing pad block %u, as (%d <= s%u + %d)\n"),
hoistInfo.Loop()->GetLoopNumber(),
landingPad->GetBlockNum(),
hoistInfo.IndexConstantBounds().LowerBound(),
headSegmentLengthSym->m_id,
hoistInfo.Offset());
}
TESTTRACE_PHASE_INSTR(
Js::Phase::BoundCheckHoistPhase,
instr,
_u("Hoisting array upper bound check out of loop\n"));
// Record the bound check instruction as available
const IntBoundCheck boundCheckInfo(
hoistInfo.IndexValue() ? hoistInfo.IndexValueNumber() : ZeroValueNumber,
hoistInfo.HeadSegmentLengthValue()->GetValueNumber(),
boundCheck,
landingPad);
{
const bool added = CurrentBlockData()->availableIntBoundChecks->AddNew(boundCheckInfo) >= 0;
Assert(added || failedToUpdateCompatibleUpperBoundCheck);
}
for(InvariantBlockBackwardIterator it(this, currentBlock, landingPad, nullptr);
it.IsValid();
it.MoveNext())
{
const bool added = it.Block()->globOptData.availableIntBoundChecks->AddNew(boundCheckInfo) >= 0;
Assert(added || failedToUpdateCompatibleUpperBoundCheck);
}
}
}
// Update values of the syms involved in the bound check to reflect the bound check
Assert(!hoistInfo.Loop() || hoistBlock != currentBlock);
if(hoistBlock != currentBlock)
{
for(InvariantBlockBackwardIterator it(this, currentBlock->next, hoistBlock, nullptr);
it.IsValid();
it.MoveNext())
{
BasicBlock *const block = it.Block();
Value *leftValue;
IntConstantBounds leftConstantBounds;
if(hoistInfo.IndexSym())
{
leftValue = block->globOptData.FindValue(hoistInfo.IndexSym());
if(!leftValue || leftValue->GetValueNumber() != hoistInfo.IndexValueNumber())
{
continue;
}
AssertVerify(leftValue->GetValueInfo()->TryGetIntConstantBounds(&leftConstantBounds, true));
}
else
{
leftValue = nullptr;
leftConstantBounds = hoistInfo.IndexConstantBounds();
}
Value *const rightValue = block->globOptData.FindValue(headSegmentLengthSym);
if(!rightValue)
{
continue;
}
Assert(rightValue->GetValueNumber() == headSegmentLengthValue->GetValueNumber());
IntConstantBounds rightConstantBounds;
AssertVerify(rightValue->GetValueInfo()->TryGetIntConstantBounds(&rightConstantBounds));
ValueInfo *const newValueInfoForLessThanOrEqual =
UpdateIntBoundsForLessThanOrEqual(
leftValue,
leftConstantBounds,
rightValue,
rightConstantBounds,
hoistInfo.Offset(),
false);
if (newValueInfoForLessThanOrEqual)
{
ChangeValueInfo(nullptr, leftValue, newValueInfoForLessThanOrEqual);
AssertVerify(newValueInfoForLessThanOrEqual->TryGetIntConstantBounds(&leftConstantBounds, true));
if(block == currentBlock && leftValue == indexValue)
{
Assert(newValueInfoForLessThanOrEqual->IsInt());
indexConstantBounds = leftConstantBounds;
}
}
if(hoistInfo.Offset() != INT32_MIN)
{
ValueInfo *const newValueInfoForGreaterThanOrEqual =
UpdateIntBoundsForGreaterThanOrEqual(
rightValue,
rightConstantBounds,
leftValue,
leftConstantBounds,
-hoistInfo.Offset(),
false);
if (newValueInfoForGreaterThanOrEqual)
{
ChangeValueInfo(nullptr, rightValue, newValueInfoForGreaterThanOrEqual);
if(block == currentBlock)
{
Assert(rightValue == headSegmentLengthValue);
AssertVerify(newValueInfoForGreaterThanOrEqual->TryGetIntConstantBounds(&headSegmentLengthConstantBounds));
}
}
}
}
}
}
else
{
IR::Opnd* lowerBound = baseOwnerIndir->GetIndexOpnd()
? static_cast<IR::Opnd *>(baseOwnerIndir->GetIndexOpnd())
: IR::IntConstOpnd::New(baseOwnerIndir->GetOffset(), TyInt32, instr->m_func);
lowerBound->SetIsJITOptimizedReg(true);
IR::Opnd* upperBound = IR::RegOpnd::New(headSegmentLengthSym, headSegmentLengthSym->GetType(), instr->m_func);
upperBound->SetIsJITOptimizedReg(true);
const int offset = GetBoundCheckOffsetForSimd(newBaseValueType, instr, -1);
IR::Instr *boundCheck;
// index <= headSegmentLength - 1 (src1 <= src2 + dst)
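            // For example (illustrative): for a typed array access ta[i], the check below with offset -1 is
            //     i <= ta.headSegmentLength - 1
            // which is equivalent to i < ta.headSegmentLength.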
if (shareableBailOutInfo)
{
ShareBailOut();
boundCheck = CreateBoundsCheckInstr(
lowerBound,
upperBound,
offset,
IR::BailOutOnArrayAccessHelperCall,
shareableBailOutInfo,
shareableBailOutInfo->bailOutFunc);
}
else
{
boundCheck = CreateBoundsCheckInstr(
lowerBound,
upperBound,
offset,
instr->m_func);
}
boundCheck->SetByteCodeOffset(instr);
insertBeforeInstr->InsertBefore(boundCheck);
if(!shareableBailOutInfo)
{
GenerateBailAtOperation(&boundCheck, IR::BailOutOnArrayAccessHelperCall);
shareableBailOutInfo = boundCheck->GetBailOutInfo();
shareableBailOutInfoOriginalOwner = boundCheck;
}
instr->extractedUpperBoundCheckWithoutHoisting = true;
if(baseOwnerIndir->GetIndexOpnd())
{
TRACE_PHASE_INSTR(
Js::Phase::BoundCheckEliminationPhase,
instr,
_u("Separating array upper bound check, as (s%u < s%u)\n"),
indexVarSym->m_id,
headSegmentLengthSym->m_id);
}
else
{
TRACE_PHASE_INSTR(
Js::Phase::BoundCheckEliminationPhase,
instr,
_u("Separating array upper bound check, as (%d < s%u)\n"),
baseOwnerIndir->GetOffset(),
headSegmentLengthSym->m_id);
}
TESTTRACE_PHASE_INSTR(
Js::Phase::BoundCheckEliminationPhase,
instr,
_u("Separating array upper bound check\n"));
if(DoBoundCheckHoist())
{
// Record the bound check instruction as available
const bool added =
CurrentBlockData()->availableIntBoundChecks->AddNew(
IntBoundCheck(
indexValue ? indexValue->GetValueNumber() : ZeroValueNumber,
headSegmentLengthValue->GetValueNumber(),
boundCheck,
currentBlock)) >= 0;
Assert(added || failedToUpdateCompatibleUpperBoundCheck);
}
}
// Update the index and head segment length values to reflect the bound check
ValueInfo *newValueInfo =
UpdateIntBoundsForLessThan(
indexValue,
indexConstantBounds,
headSegmentLengthValue,
headSegmentLengthConstantBounds,
false);
if(newValueInfo)
{
ChangeValueInfo(nullptr, indexValue, newValueInfo);
AssertVerify(newValueInfo->TryGetIntConstantBounds(&indexConstantBounds));
}
newValueInfo =
UpdateIntBoundsForGreaterThan(
headSegmentLengthValue,
headSegmentLengthConstantBounds,
indexValue,
indexConstantBounds,
false);
if(newValueInfo)
{
ChangeValueInfo(nullptr, headSegmentLengthValue, newValueInfo);
}
}
}
if(doHeadSegmentLoad && !isLikelyJsArray)
{
// For typed arrays, load the length first, followed by the bound checks, and then load the head segment. This
// allows the length sym to become dead by the time of the head segment load, freeing up the register for use by the
// head segment sym.
InsertHeadSegmentLoad();
}
if(doArrayChecks || doHeadSegmentLoad || doHeadSegmentLengthLoad || doLengthLoad)
{
UpdateValue(newHeadSegmentSym, newHeadSegmentLengthSym, newLengthSym);
baseValueInfo = baseValue->GetValueInfo();
baseArrayValueInfo = baseValueInfo->IsArrayValueInfo() ? baseValueInfo->AsArrayValueInfo() : nullptr;
// Iterate up to the root loop's landing pad until all necessary value info is updated
uint hoistItemCount =
static_cast<uint>(!!hoistChecksOutOfLoop) +
!!hoistHeadSegmentLoadOutOfLoop +
!!hoistHeadSegmentLengthLoadOutOfLoop +
!!hoistLengthLoadOutOfLoop;
if(hoistItemCount != 0)
{
Loop *rootLoop = nullptr;
for(Loop *loop = currentBlock->loop; loop; loop = loop->parent)
{
rootLoop = loop;
}
Assert(rootLoop);
ValueInfo *valueInfoToHoist = baseValueInfo;
bool removeHeadSegment, removeHeadSegmentLength, removeLength;
if(baseArrayValueInfo)
{
removeHeadSegment = baseArrayValueInfo->HeadSegmentSym() && !hoistHeadSegmentLoadOutOfLoop;
removeHeadSegmentLength =
baseArrayValueInfo->HeadSegmentLengthSym() && !hoistHeadSegmentLengthLoadOutOfLoop;
removeLength = baseArrayValueInfo->LengthSym() && !hoistLengthLoadOutOfLoop;
}
else
{
removeLength = removeHeadSegmentLength = removeHeadSegment = false;
}
for(InvariantBlockBackwardIterator it(
this,
currentBlock,
rootLoop->landingPad,
baseOpnd->m_sym,
baseValue->GetValueNumber());
it.IsValid();
it.MoveNext())
{
if(removeHeadSegment || removeHeadSegmentLength || removeLength)
{
                        // Remove information that shouldn't be there anymore from the value info
valueInfoToHoist =
valueInfoToHoist->AsArrayValueInfo()->Copy(
alloc,
!removeHeadSegment,
!removeHeadSegmentLength,
!removeLength);
removeLength = removeHeadSegmentLength = removeHeadSegment = false;
}
BasicBlock *const block = it.Block();
Value *const blockBaseValue = it.InvariantSymValue();
HoistInvariantValueInfo(valueInfoToHoist, blockBaseValue, block);
// See if we have completed hoisting value info for one of the items
if(hoistChecksOutOfLoop && block == hoistChecksOutOfLoop->landingPad)
{
// All other items depend on array checks, so we can just stop here
hoistChecksOutOfLoop = nullptr;
break;
}
if(hoistHeadSegmentLoadOutOfLoop && block == hoistHeadSegmentLoadOutOfLoop->landingPad)
{
hoistHeadSegmentLoadOutOfLoop = nullptr;
if(--hoistItemCount == 0)
break;
if(valueInfoToHoist->IsArrayValueInfo() && valueInfoToHoist->AsArrayValueInfo()->HeadSegmentSym())
removeHeadSegment = true;
}
if(hoistHeadSegmentLengthLoadOutOfLoop && block == hoistHeadSegmentLengthLoadOutOfLoop->landingPad)
{
hoistHeadSegmentLengthLoadOutOfLoop = nullptr;
if(--hoistItemCount == 0)
break;
if(valueInfoToHoist->IsArrayValueInfo() && valueInfoToHoist->AsArrayValueInfo()->HeadSegmentLengthSym())
removeHeadSegmentLength = true;
}
if(hoistLengthLoadOutOfLoop && block == hoistLengthLoadOutOfLoop->landingPad)
{
hoistLengthLoadOutOfLoop = nullptr;
if(--hoistItemCount == 0)
break;
if(valueInfoToHoist->IsArrayValueInfo() && valueInfoToHoist->AsArrayValueInfo()->LengthSym())
removeLength = true;
}
}
}
}
}
IR::ArrayRegOpnd *baseArrayOpnd;
if(baseArrayValueInfo)
{
// Update the opnd to include the associated syms
baseArrayOpnd =
baseArrayValueInfo->CreateOpnd(
baseOpnd,
needsHeadSegment,
needsHeadSegmentLength || (!isLikelyJsArray && needsLength),
needsLength,
eliminatedLowerBoundCheck,
eliminatedUpperBoundCheck,
instr->m_func);
if(baseOwnerInstr)
{
Assert(baseOwnerInstr->GetSrc1() == baseOpnd);
baseOwnerInstr->ReplaceSrc1(baseArrayOpnd);
}
else
{
Assert(baseOwnerIndir);
Assert(baseOwnerIndir->GetBaseOpnd() == baseOpnd);
baseOwnerIndir->ReplaceBaseOpnd(baseArrayOpnd);
}
baseOpnd = baseArrayOpnd;
}
else
{
baseArrayOpnd = nullptr;
}
if(isLikelyJsArray)
{
// Insert an instruction to indicate to the dead-store pass that implicit calls need to be kept disabled until this
// instruction. Operations other than LdElem and StElem don't benefit much from arrays having no missing values, so
// no need to ensure that the array still has no missing values. For a particular array, if none of the accesses
// benefit much from the no-missing-values information, it may be beneficial to avoid checking for no missing
// values, especially in the case for a single array access, where the cost of the check could be relatively
// significant. An StElem has to do additional checks in the common path if the array may have missing values, and
// a StElem that operates on an array that has no missing values is more likely to keep the no-missing-values info
// on the array more precise, so it still benefits a little from the no-missing-values info.
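        // For example (illustrative JS): with a = [0], the store a[2] = 2 leaves a hole at index 1, so the
        // array no longer has the no-missing-values property.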
CaptureNoImplicitCallUses(baseOpnd, isLoad || isStore);
}
else if(baseArrayOpnd && baseArrayOpnd->HeadSegmentLengthSym())
{
// A typed array's array buffer may be transferred to a web worker as part of an implicit call, in which case the typed
// array's length is set to zero. Insert an instruction to indicate to the dead-store pass that implicit calls need to
// be disabled until this instruction.
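        // For example (illustrative JS): worker.postMessage(buf, [buf]) transfers (detaches) the ArrayBuffer
        // buf, after which a typed array over buf reports a length of zero.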
IR::RegOpnd *const headSegmentLengthOpnd =
IR::RegOpnd::New(
baseArrayOpnd->HeadSegmentLengthSym(),
baseArrayOpnd->HeadSegmentLengthSym()->GetType(),
instr->m_func);
const IR::AutoReuseOpnd autoReuseHeadSegmentLengthOpnd(headSegmentLengthOpnd, instr->m_func);
CaptureNoImplicitCallUses(headSegmentLengthOpnd, false);
}
const auto OnEliminated = [&](const Js::Phase phase, const char *const eliminatedLoad)
{
TRACE_TESTTRACE_PHASE_INSTR(phase, instr, _u("Eliminating array %S\n"), eliminatedLoad);
};
OnEliminated(Js::Phase::ArrayCheckHoistPhase, "checks");
if(baseArrayOpnd)
{
if(baseArrayOpnd->HeadSegmentSym())
{
OnEliminated(Js::Phase::ArraySegmentHoistPhase, "head segment load");
}
if(baseArrayOpnd->HeadSegmentLengthSym())
{
OnEliminated(Js::Phase::ArraySegmentHoistPhase, "head segment length load");
}
if(baseArrayOpnd->LengthSym())
{
OnEliminated(Js::Phase::ArrayLengthHoistPhase, "length load");
}
if(baseArrayOpnd->EliminatedLowerBoundCheck())
{
OnEliminated(Js::Phase::BoundCheckEliminationPhase, "lower bound check");
}
if(baseArrayOpnd->EliminatedUpperBoundCheck())
{
OnEliminated(Js::Phase::BoundCheckEliminationPhase, "upper bound check");
}
}
if(!canBailOutOnArrayAccessHelperCall)
{
return;
}
// Bail out instead of generating a helper call. This helps to remove the array reference when the head segment and head
// segment length are available, reduces code size, and allows bound checks to be separated.
if(instr->HasBailOutInfo())
{
const IR::BailOutKind bailOutKind = instr->GetBailOutKind();
Assert(
!(bailOutKind & ~IR::BailOutKindBits) ||
(bailOutKind & ~IR::BailOutKindBits) == IR::BailOutOnImplicitCallsPreOp);
        instr->SetBailOutKind((bailOutKind & IR::BailOutKindBits) | IR::BailOutOnArrayAccessHelperCall);
}
else
{
GenerateBailAtOperation(&instr, IR::BailOutOnArrayAccessHelperCall);
}
}
void
GlobOpt::CaptureNoImplicitCallUses(
IR::Opnd *opnd,
const bool usesNoMissingValuesInfo,
IR::Instr *const includeCurrentInstr)
{
Assert(!IsLoopPrePass());
Assert(noImplicitCallUsesToInsert);
Assert(opnd);
// The opnd may be deleted later, so make a copy to ensure it is alive for inserting NoImplicitCallUses later
opnd = opnd->Copy(func);
if(!usesNoMissingValuesInfo)
{
const ValueType valueType(opnd->GetValueType());
if(valueType.IsArrayOrObjectWithArray() && valueType.HasNoMissingValues())
{
// Inserting NoImplicitCallUses for an opnd with a definitely-array-with-no-missing-values value type means that the
// instruction following it uses the information that the array has no missing values in some way, for instance, it
// may omit missing value checks. Based on that, the dead-store phase in turn ensures that the necessary bailouts
// are inserted to ensure that the array still has no missing values until the following instruction. Since
// 'usesNoMissingValuesInfo' is false, change the value type to indicate to the dead-store phase that the following
// instruction does not use the no-missing-values information.
opnd->SetValueType(valueType.SetHasNoMissingValues(false));
}
}
if(includeCurrentInstr)
{
IR::Instr *const noImplicitCallUses =
IR::PragmaInstr::New(Js::OpCode::NoImplicitCallUses, 0, includeCurrentInstr->m_func);
noImplicitCallUses->SetSrc1(opnd);
noImplicitCallUses->GetSrc1()->SetIsJITOptimizedReg(true);
includeCurrentInstr->InsertAfter(noImplicitCallUses);
return;
}
noImplicitCallUsesToInsert->Add(opnd);
}
void
GlobOpt::InsertNoImplicitCallUses(IR::Instr *const instr)
{
Assert(noImplicitCallUsesToInsert);
const int n = noImplicitCallUsesToInsert->Count();
if(n == 0)
{
return;
}
IR::Instr *const insertBeforeInstr = instr->GetInsertBeforeByteCodeUsesInstr();
for(int i = 0; i < n;)
{
IR::Instr *const noImplicitCallUses = IR::PragmaInstr::New(Js::OpCode::NoImplicitCallUses, 0, instr->m_func);
noImplicitCallUses->SetSrc1(noImplicitCallUsesToInsert->Item(i));
noImplicitCallUses->GetSrc1()->SetIsJITOptimizedReg(true);
++i;
if(i < n)
{
noImplicitCallUses->SetSrc2(noImplicitCallUsesToInsert->Item(i));
noImplicitCallUses->GetSrc2()->SetIsJITOptimizedReg(true);
++i;
}
noImplicitCallUses->SetByteCodeOffset(instr);
insertBeforeInstr->InsertBefore(noImplicitCallUses);
}
noImplicitCallUsesToInsert->Clear();
}
void
GlobOpt::PrepareLoopArrayCheckHoist()
{
if(IsLoopPrePass() || !currentBlock->loop || !currentBlock->isLoopHeader || !currentBlock->loop->parent)
{
return;
}
if(currentBlock->loop->parent->needImplicitCallBailoutChecksForJsArrayCheckHoist)
{
// If the parent loop is an array check elimination candidate, so is the current loop. Even though the current loop may
// not have array accesses, if the parent loop hoists array checks, the current loop also needs implicit call checks.
currentBlock->loop->needImplicitCallBailoutChecksForJsArrayCheckHoist = true;
}
}
JsArrayKills
GlobOpt::CheckJsArrayKills(IR::Instr *const instr)
{
Assert(instr);
JsArrayKills kills;
if(instr->UsesAllFields())
{
// Calls can (but are unlikely to) change a javascript array into an ES5 array, which may have different behavior for
// index properties.
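        // For example (illustrative JS): Object.defineProperty(a, '0', { get: f }) gives the array an accessor
        // index property, which changes the behavior of indexed loads and stores on it.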
kills.SetKillsAllArrays();
return kills;
}
const bool doArrayMissingValueCheckHoist = DoArrayMissingValueCheckHoist();
const bool doNativeArrayTypeSpec = DoNativeArrayTypeSpec();
const bool doArraySegmentHoist = DoArraySegmentHoist(ValueType::GetObject(ObjectType::Array));
Assert(doArraySegmentHoist == DoArraySegmentHoist(ValueType::GetObject(ObjectType::ObjectWithArray)));
const bool doArrayLengthHoist = DoArrayLengthHoist();
if(!doArrayMissingValueCheckHoist && !doNativeArrayTypeSpec && !doArraySegmentHoist && !doArrayLengthHoist)
{
return kills;
}
// The following operations may create missing values in an array in an unlikely circumstance. Even though they don't kill
// the fact that the 'this' parameter is an array (when implicit calls are disabled), we don't have a way to say the value
// type is definitely array but it likely has no missing values. So, these will kill the definite value type as well, making
// it likely array, such that the array checks will have to be redone.
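    // For example (illustrative JS): delete a[0] leaves a hole at index 0, so an array that definitely had no
    // missing values may now have them, even though it is still an array.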
const bool useValueTypes = !IsLoopPrePass(); // Source value types are not guaranteed to be correct in a loop prepass
switch(instr->m_opcode)
{
case Js::OpCode::StElemI_A:
case Js::OpCode::StElemI_A_Strict:
{
Assert(instr->GetDst());
if(!instr->GetDst()->IsIndirOpnd())
{
break;
}
const ValueType baseValueType =
useValueTypes ? instr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->GetValueType() : ValueType::Uninitialized;
if(useValueTypes && baseValueType.IsNotArrayOrObjectWithArray())
{
break;
}
if(instr->IsProfiledInstr())
{
const Js::StElemInfo *const stElemInfo = instr->AsProfiledInstr()->u.stElemInfo;
if(doArraySegmentHoist && stElemInfo->LikelyStoresOutsideHeadSegmentBounds())
{
kills.SetKillsArrayHeadSegments();
kills.SetKillsArrayHeadSegmentLengths();
}
if(doArrayLengthHoist &&
!(useValueTypes && baseValueType.IsNotArray()) &&
stElemInfo->LikelyStoresOutsideArrayBounds())
{
kills.SetKillsArrayLengths();
}
}
break;
}
case Js::OpCode::DeleteElemI_A:
case Js::OpCode::DeleteElemIStrict_A:
Assert(instr->GetSrc1());
if(!instr->GetSrc1()->IsIndirOpnd() ||
(useValueTypes && instr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->GetValueType().IsNotArrayOrObjectWithArray()))
{
break;
}
if(doArrayMissingValueCheckHoist)
{
kills.SetKillsArraysWithNoMissingValues();
}
if(doArraySegmentHoist)
{
kills.SetKillsArrayHeadSegmentLengths();
}
break;
case Js::OpCode::StFld:
case Js::OpCode::StFldStrict:
{
Assert(instr->GetDst());
if(!doArraySegmentHoist && !doArrayLengthHoist)
{
break;
}
IR::SymOpnd *const symDst = instr->GetDst()->AsSymOpnd();
if(!symDst->IsPropertySymOpnd())
{
break;
}
IR::PropertySymOpnd *const dst = symDst->AsPropertySymOpnd();
if(dst->m_sym->AsPropertySym()->m_propertyId != Js::PropertyIds::length)
{
break;
}
if(useValueTypes && dst->GetPropertyOwnerValueType().IsNotArray())
{
// Setting the 'length' property of an object that is not an array, even if it has an internal array, does
// not kill the head segment or head segment length of any arrays.
break;
}
if(doArraySegmentHoist)
{
kills.SetKillsArrayHeadSegmentLengths();
}
if(doArrayLengthHoist)
{
kills.SetKillsArrayLengths();
}
break;
}
case Js::OpCode::InlineArrayPush:
{
Assert(instr->GetSrc2());
IR::Opnd *const arrayOpnd = instr->GetSrc1();
Assert(arrayOpnd);
const ValueType arrayValueType(arrayOpnd->GetValueType());
if(!arrayOpnd->IsRegOpnd() || (useValueTypes && arrayValueType.IsNotArrayOrObjectWithArray()))
{
break;
}
if(doArrayMissingValueCheckHoist)
{
kills.SetKillsArraysWithNoMissingValues();
}
if(doArraySegmentHoist)
{
kills.SetKillsArrayHeadSegments();
kills.SetKillsArrayHeadSegmentLengths();
}
if(doArrayLengthHoist && !(useValueTypes && arrayValueType.IsNotArray()))
{
kills.SetKillsArrayLengths();
}
        // Don't kill the native array value type if the element being pushed matches the array's element type.
if(doNativeArrayTypeSpec &&
!(useValueTypes && arrayValueType.IsNativeArray() &&
((arrayValueType.IsLikelyNativeIntArray() && instr->GetSrc2()->IsInt32()) ||
(arrayValueType.IsLikelyNativeFloatArray() && instr->GetSrc2()->IsFloat()))
) &&
!(useValueTypes && arrayValueType.IsNotNativeArray()))
{
kills.SetKillsNativeArrays();
}
break;
}
case Js::OpCode::InlineArrayPop:
{
IR::Opnd *const arrayOpnd = instr->GetSrc1();
Assert(arrayOpnd);
const ValueType arrayValueType(arrayOpnd->GetValueType());
if(!arrayOpnd->IsRegOpnd() || (useValueTypes && arrayValueType.IsNotArrayOrObjectWithArray()))
{
break;
}
if(doArraySegmentHoist)
{
kills.SetKillsArrayHeadSegmentLengths();
}
if(doArrayLengthHoist && !(useValueTypes && arrayValueType.IsNotArray()))
{
kills.SetKillsArrayLengths();
}
break;
}
case Js::OpCode::CallDirect:
{
Assert(instr->GetSrc1());
// Find the 'this' parameter and check if it's possible for it to be an array
IR::Opnd *const arrayOpnd = instr->FindCallArgumentOpnd(1);
Assert(arrayOpnd);
const ValueType arrayValueType(arrayOpnd->GetValueType());
if(!arrayOpnd->IsRegOpnd() || (useValueTypes && arrayValueType.IsNotArrayOrObjectWithArray()))
{
break;
}
const IR::JnHelperMethod helperMethod = instr->GetSrc1()->AsHelperCallOpnd()->m_fnHelper;
if(doArrayMissingValueCheckHoist)
{
switch(helperMethod)
{
case IR::HelperArray_Reverse:
case IR::HelperArray_Shift:
case IR::HelperArray_Splice:
case IR::HelperArray_Unshift:
kills.SetKillsArraysWithNoMissingValues();
break;
}
}
if(doArraySegmentHoist)
{
switch(helperMethod)
{
case IR::HelperArray_Reverse:
case IR::HelperArray_Shift:
case IR::HelperArray_Splice:
case IR::HelperArray_Unshift:
kills.SetKillsArrayHeadSegments();
kills.SetKillsArrayHeadSegmentLengths();
break;
}
}
if(doArrayLengthHoist && !(useValueTypes && arrayValueType.IsNotArray()))
{
switch(helperMethod)
{
case IR::HelperArray_Shift:
case IR::HelperArray_Splice:
case IR::HelperArray_Unshift:
kills.SetKillsArrayLengths();
break;
}
}
if(doNativeArrayTypeSpec && !(useValueTypes && arrayValueType.IsNotNativeArray()))
{
switch(helperMethod)
{
case IR::HelperArray_Reverse:
case IR::HelperArray_Shift:
case IR::HelperArray_Slice:
// Currently not inlined.
//case IR::HelperArray_Sort:
case IR::HelperArray_Splice:
case IR::HelperArray_Unshift:
kills.SetKillsNativeArrays();
break;
}
}
break;
}
}
return kills;
}
GlobOptBlockData const * GlobOpt::CurrentBlockData() const
{
return &this->currentBlock->globOptData;
}
GlobOptBlockData * GlobOpt::CurrentBlockData()
{
return &this->currentBlock->globOptData;
}
void GlobOpt::CommitCapturedValuesCandidate()
{
GlobOptBlockData * globOptData = CurrentBlockData();
globOptData->changedSyms->ClearAll();
if (!this->changedSymsAfterIncBailoutCandidate->IsEmpty())
{
//
        // Some symbols were changed after the values for the current bailout were
        // captured (GlobOpt::CapturedValues). Restore those symbols as changed for
        // the subsequent incremental bailout construction; otherwise we would miss
        // capturing their values for a later bailout.
        //
        // Swap changedSyms and changedSymsAfterIncBailoutCandidate; this is safe
        // because both are allocated from this->alloc.
BVSparse<JitArenaAllocator> * tempBvSwap = globOptData->changedSyms;
globOptData->changedSyms = this->changedSymsAfterIncBailoutCandidate;
this->changedSymsAfterIncBailoutCandidate = tempBvSwap;
}
if (globOptData->capturedValues)
{
globOptData->capturedValues->DecrementRefCount();
}
globOptData->capturedValues = globOptData->capturedValuesCandidate;
    // Null out capturedValuesCandidate to stop tracking symbol changes for it
globOptData->capturedValuesCandidate = nullptr;
}
bool
GlobOpt::IsOperationThatLikelyKillsJsArraysWithNoMissingValues(IR::Instr *const instr)
{
// StElem is profiled with information indicating whether it will likely create a missing value in the array. In that case,
// we prefer to kill the no-missing-values information in the value so that we don't bail out in a likely circumstance.
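    // For example (illustrative JS): with a = [0], the store a[5] = 1 writes past the current length and
    // leaves holes at indices 1 through 4, which the profile data reports as likely creating missing values.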
return
(instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict) &&
DoArrayMissingValueCheckHoist() &&
instr->IsProfiledInstr() &&
instr->AsProfiledInstr()->u.stElemInfo->LikelyCreatesMissingValue();
}
bool
GlobOpt::NeedBailOnImplicitCallForArrayCheckHoist(BasicBlock const * const block, const bool isForwardPass) const
{
Assert(block);
return isForwardPass && block->loop && block->loop->needImplicitCallBailoutChecksForJsArrayCheckHoist;
}
bool
GlobOpt::PrepareForIgnoringIntOverflow(IR::Instr *const instr)
{
Assert(instr);
const bool isBoundary = instr->m_opcode == Js::OpCode::NoIntOverflowBoundary;
// Update the instruction's "int overflow matters" flag based on whether we are currently allowing ignoring int overflows.
    // Some operations convert their srcs to int32s; those can still ignore int overflow.
if(instr->ignoreIntOverflowInRange)
{
instr->ignoreIntOverflowInRange = !intOverflowCurrentlyMattersInRange || OpCodeAttr::IsInt32(instr->m_opcode);
}
if(!intOverflowDoesNotMatterRange)
{
Assert(intOverflowCurrentlyMattersInRange);
// There are no more ranges of instructions where int overflow does not matter, in this block.
return isBoundary;
}
if(instr == intOverflowDoesNotMatterRange->LastInstr())
{
Assert(isBoundary);
// Reached the last instruction in the range
intOverflowCurrentlyMattersInRange = true;
intOverflowDoesNotMatterRange = intOverflowDoesNotMatterRange->Next();
return isBoundary;
}
if(!intOverflowCurrentlyMattersInRange)
{
return isBoundary;
}
if(instr != intOverflowDoesNotMatterRange->FirstInstr())
{
// Have not reached the next range
return isBoundary;
}
Assert(isBoundary);
// This is the first instruction in a range of instructions where int overflow does not matter. There can be many inputs to
// instructions in the range, some of which are inputs to the range itself (that is, the values are not defined in the
// range). Ignoring int overflow is only valid for int operations, so we need to ensure that all inputs to the range are
// int (not "likely int") before ignoring any overflows in the range. Ensuring that a sym with a "likely int" value is an
    // int requires a bail-out. These bail-out checks need to happen before any overflows are ignored; otherwise it's too late.
// The backward pass tracked all inputs into the range. Iterate over them and verify the values, and insert lossless
// conversions to int as necessary, before the first instruction in the range. If for any reason all values cannot be
// guaranteed to be ints, the optimization will be disabled for this range.
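    // For example (illustrative): in a sequence like t = a + b; r = (t + c) | 0; the additions may ignore
    // 32-bit overflow, because the final truncation to int32 makes any intermediate overflow unobservable,
    // provided a, b, and c are guaranteed to be ints on entry to the range.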
intOverflowCurrentlyMattersInRange = false;
{
BVSparse<JitArenaAllocator> tempBv1(tempAlloc);
BVSparse<JitArenaAllocator> tempBv2(tempAlloc);
{
// Just renaming the temp BVs for this section to indicate how they're used so that it makes sense
BVSparse<JitArenaAllocator> &symsToExclude = tempBv1;
BVSparse<JitArenaAllocator> &symsToInclude = tempBv2;
#if DBG_DUMP
SymID couldNotConvertSymId = 0;
#endif
FOREACH_BITSET_IN_SPARSEBV(id, intOverflowDoesNotMatterRange->SymsRequiredToBeInt())
{
Sym *const sym = func->m_symTable->Find(id);
Assert(sym);
// Some instructions with property syms are also tracked by the backward pass, and may be included in the range
// (LdSlot for instance). These property syms don't get their values until either copy-prop resolves a value for
// them, or a new value is created once the use of the property sym is reached. In either case, we're not that
// far yet, so we need to find the future value of the property sym by evaluating copy-prop in reverse.
Value *const value = sym->IsStackSym() ? CurrentBlockData()->FindValue(sym) : CurrentBlockData()->FindFuturePropertyValue(sym->AsPropertySym());
if(!value)
{
#if DBG_DUMP
couldNotConvertSymId = id;
#endif
intOverflowCurrentlyMattersInRange = true;
BREAK_BITSET_IN_SPARSEBV;
}
const bool isInt32OrUInt32Float =
value->GetValueInfo()->IsFloatConstant() &&
Js::JavascriptNumber::IsInt32OrUInt32(value->GetValueInfo()->AsFloatConstant()->FloatValue());
if(value->GetValueInfo()->IsInt() || isInt32OrUInt32Float)
{
if(!IsLoopPrePass())
{
// Input values that are already int can be excluded from int-specialization. We can treat unsigned
// int32 values as int32 values (ignoring the overflow), since the values will only be used inside the
// range where overflow does not matter.
symsToExclude.Set(sym->m_id);
}
continue;
}
if(!DoAggressiveIntTypeSpec() || !value->GetValueInfo()->IsLikelyInt())
{
// When aggressive int specialization is off, syms with "likely int" values cannot be forced to int since
// int bail-out checks are not allowed in that mode. Similarly, with aggressive int specialization on, it
// wouldn't make sense to force non-"likely int" values to int since it would almost guarantee a bail-out at
// runtime. In both cases, just disable ignoring overflow for this range.
#if DBG_DUMP
couldNotConvertSymId = id;
#endif
intOverflowCurrentlyMattersInRange = true;
BREAK_BITSET_IN_SPARSEBV;
}
if(IsLoopPrePass())
{
// The loop prepass does not modify bit-vectors. Since it doesn't add bail-out checks, it also does not need
// to specialize anything up-front. It only needs to be consistent in how it determines whether to allow
// ignoring overflow for a range, based on the values of inputs into the range.
continue;
}
// Since input syms are tracked in the backward pass, where there is no value tracking, it will not be aware of
// copy-prop. If a copy-prop sym is available, it will be used instead, so exclude the original sym and include
// the copy-prop sym for specialization.
StackSym *const copyPropSym = CurrentBlockData()->GetCopyPropSym(sym, value);
if(copyPropSym)
{
symsToExclude.Set(sym->m_id);
Assert(!symsToExclude.Test(copyPropSym->m_id));
const bool needsToBeLossless =
!intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt()->Test(sym->m_id);
if(intOverflowDoesNotMatterRange->SymsRequiredToBeInt()->Test(copyPropSym->m_id) ||
symsToInclude.TestAndSet(copyPropSym->m_id))
{
// The copy-prop sym is already included
if(needsToBeLossless)
{
// The original sym needs to be lossless, so make the copy-prop sym lossless as well.
intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt()->Clear(copyPropSym->m_id);
}
}
else if(!needsToBeLossless)
{
// The copy-prop sym was not included before, and the original sym can be lossy, so make it lossy.
intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt()->Set(copyPropSym->m_id);
}
}
else if(!sym->IsStackSym())
{
// Only stack syms can be converted to int, and copy-prop syms are stack syms. If a copy-prop sym was not
// found for the property sym, we can't ignore overflows in this range.
#if DBG_DUMP
couldNotConvertSymId = id;
#endif
intOverflowCurrentlyMattersInRange = true;
BREAK_BITSET_IN_SPARSEBV;
}
} NEXT_BITSET_IN_SPARSEBV;
if(intOverflowCurrentlyMattersInRange)
{
#if DBG_DUMP
if(PHASE_TRACE(Js::TrackCompoundedIntOverflowPhase, func) && !IsLoopPrePass())
{
char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
Output::Print(
_u("TrackCompoundedIntOverflow - Top function: %s (%s), Phase: %s, Block: %u, Disabled ignoring overflows\n"),
func->GetJITFunctionBody()->GetDisplayName(),
func->GetDebugNumberSet(debugStringBuffer),
Js::PhaseNames[Js::ForwardPhase],
currentBlock->GetBlockNum());
Output::Print(_u(" Input sym could not be turned into an int: %u\n"), couldNotConvertSymId);
Output::Print(_u(" First instr: "));
instr->m_next->Dump();
Output::Flush();
}
#endif
intOverflowDoesNotMatterRange = intOverflowDoesNotMatterRange->Next();
return isBoundary;
}
if(IsLoopPrePass())
{
return isBoundary;
}
// Update the syms to specialize after enumeration
intOverflowDoesNotMatterRange->SymsRequiredToBeInt()->Minus(&symsToExclude);
intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt()->Minus(&symsToExclude);
intOverflowDoesNotMatterRange->SymsRequiredToBeInt()->Or(&symsToInclude);
}
{
// Exclude syms that are already live as lossless int32, and exclude lossy conversions of syms that are already live
// as lossy int32.
// symsToExclude = liveInt32Syms - liveLossyInt32Syms // syms live as lossless int
// lossySymsToExclude = symsRequiredToBeLossyInt & liveLossyInt32Syms; // syms we want as lossy int that are already live as lossy int
// symsToExclude |= lossySymsToExclude
// symsRequiredToBeInt -= symsToExclude
// symsRequiredToBeLossyInt -= symsToExclude
BVSparse<JitArenaAllocator> &symsToExclude = tempBv1;
BVSparse<JitArenaAllocator> &lossySymsToExclude = tempBv2;
symsToExclude.Minus(CurrentBlockData()->liveInt32Syms, CurrentBlockData()->liveLossyInt32Syms);
lossySymsToExclude.And(
intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt(),
CurrentBlockData()->liveLossyInt32Syms);
symsToExclude.Or(&lossySymsToExclude);
intOverflowDoesNotMatterRange->SymsRequiredToBeInt()->Minus(&symsToExclude);
intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt()->Minus(&symsToExclude);
}
#if DBG
{
// Verify that the syms to be converted are live
// liveSyms = liveInt32Syms | liveFloat64Syms | liveVarSyms
// deadSymsRequiredToBeInt = symsRequiredToBeInt - liveSyms
BVSparse<JitArenaAllocator> &liveSyms = tempBv1;
BVSparse<JitArenaAllocator> &deadSymsRequiredToBeInt = tempBv2;
liveSyms.Or(CurrentBlockData()->liveInt32Syms, CurrentBlockData()->liveFloat64Syms);
liveSyms.Or(CurrentBlockData()->liveVarSyms);
deadSymsRequiredToBeInt.Minus(intOverflowDoesNotMatterRange->SymsRequiredToBeInt(), &liveSyms);
Assert(deadSymsRequiredToBeInt.IsEmpty());
}
#endif
}
// Int-specialize the syms before the first instruction of the range (the current instruction)
intOverflowDoesNotMatterRange->SymsRequiredToBeInt()->Minus(intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt());
#if DBG_DUMP
if(PHASE_TRACE(Js::TrackCompoundedIntOverflowPhase, func))
{
char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
Output::Print(
_u("TrackCompoundedIntOverflow - Top function: %s (%s), Phase: %s, Block: %u\n"),
func->GetJITFunctionBody()->GetDisplayName(),
func->GetDebugNumberSet(debugStringBuffer),
Js::PhaseNames[Js::ForwardPhase],
currentBlock->GetBlockNum());
Output::Print(_u(" Input syms to be int-specialized (lossless): "));
intOverflowDoesNotMatterRange->SymsRequiredToBeInt()->Dump();
Output::Print(_u(" Input syms to be converted to int (lossy): "));
intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt()->Dump();
Output::Print(_u(" First instr: "));
instr->m_next->Dump();
Output::Flush();
}
#endif
ToInt32(intOverflowDoesNotMatterRange->SymsRequiredToBeInt(), currentBlock, false /* lossy */, instr);
ToInt32(intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt(), currentBlock, true /* lossy */, instr);
return isBoundary;
}
void
GlobOpt::VerifyIntSpecForIgnoringIntOverflow(IR::Instr *const instr)
{
if(intOverflowCurrentlyMattersInRange || IsLoopPrePass())
{
return;
}
    Assert(instr->m_opcode != Js::OpCode::Mul_I4 ||
        (!instr->ShouldCheckFor32BitOverflow() && instr->ShouldCheckForNon32BitOverflow()));
// Instructions that are marked as "overflow doesn't matter" in the range must guarantee that they operate on int values and
// result in int values, for ignoring overflow to be valid. So, int-specialization is required for such instructions in the
// range. Ld_A is an exception because it only specializes if the src sym is available as a required specialized sym, and it
// doesn't generate bailouts or cause ignoring int overflow to be invalid.
// MULs are allowed to start a region and have BailOutInfo since they will bailout on non-32 bit overflow.
if(instr->m_opcode == Js::OpCode::Ld_A ||
((!instr->HasBailOutInfo() || instr->m_opcode == Js::OpCode::Mul_I4) &&
(!instr->GetDst() || instr->GetDst()->IsInt32()) &&
(!instr->GetSrc1() || instr->GetSrc1()->IsInt32()) &&
(!instr->GetSrc2() || instr->GetSrc2()->IsInt32())))
{
return;
}
if (!instr->HasBailOutInfo() && !instr->HasAnySideEffects())
{
return;
}
// This can happen for Neg_A if it needs to bail out on negative zero, and perhaps other cases as well. It's too late to fix
// the problem (overflows may already be ignored), so handle it by bailing out at compile-time and disabling tracking int
// overflow.
Assert(!func->IsTrackCompoundedIntOverflowDisabled());
if(PHASE_TRACE(Js::BailOutPhase, this->func))
{
char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
Output::Print(
_u("BailOut (compile-time): function: %s (%s) instr: "),
func->GetJITFunctionBody()->GetDisplayName(),
func->GetDebugNumberSet(debugStringBuffer));
#if DBG_DUMP
instr->Dump();
#else
Output::Print(_u("%s "), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode));
#endif
Output::Print(_u("(overflow does not matter but could not int-spec or needed bailout)\n"));
Output::Flush();
}
if(func->IsTrackCompoundedIntOverflowDisabled())
{
// Tracking int overflows is already off for some reason. Prevent trying to rejit again because it won't help and the
// same thing will happen again and cause an infinite loop. Just abort jitting this function.
if(PHASE_TRACE(Js::BailOutPhase, this->func))
{
Output::Print(_u(" Aborting JIT because TrackIntOverflow is already off\n"));
Output::Flush();
}
throw Js::OperationAbortedException();
}
throw Js::RejitException(RejitReason::TrackIntOverflowDisabled);
}
// It makes lowering easier if it can assume that the first src is never a constant,
// at least for commutative operators. For non-commutative, just hoist the constant.
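// For example (illustrative): Add_I4 dst, 5, s2 would be swapped to Add_I4 dst, s2, 5; a branch such as
// BrGe_A with a constant first source becomes BrLe_A with the sources swapped, preserving semantics.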
void
GlobOpt::PreLowerCanonicalize(IR::Instr *instr, Value **pSrc1Val, Value **pSrc2Val)
{
IR::Opnd *dst = instr->GetDst();
IR::Opnd *src1 = instr->GetSrc1();
IR::Opnd *src2 = instr->GetSrc2();
if (src1->IsImmediateOpnd())
{
// Swap for dst, src
}
else if (src2 && dst && src2->IsRegOpnd())
{
if (src2->GetIsDead() && !src1->GetIsDead() && !src1->IsEqual(dst))
{
            // Swap if src2 is dead, as the reg can be reused for the dst for opEqs like ADD r1, r2 on x86
}
else if (src2->IsEqual(dst))
{
// Helps lowering of opEqs
}
else
{
return;
}
// Make sure we don't swap 2 srcs with valueOf calls.
if (OpCodeAttr::OpndHasImplicitCall(instr->m_opcode))
{
if (instr->IsBranchInstr())
{
if (!src1->GetValueType().IsPrimitive() || !src2->GetValueType().IsPrimitive())
{
return;
}
}
else if (!src1->GetValueType().IsPrimitive() && !src2->GetValueType().IsPrimitive())
{
return;
}
}
}
else
{
return;
}
Js::OpCode opcode = instr->m_opcode;
switch (opcode)
{
case Js::OpCode::And_A:
case Js::OpCode::Mul_A:
case Js::OpCode::Or_A:
case Js::OpCode::Xor_A:
case Js::OpCode::And_I4:
case Js::OpCode::Mul_I4:
case Js::OpCode::Or_I4:
case Js::OpCode::Xor_I4:
case Js::OpCode::Add_I4:
swap_srcs:
if (!instr->GetSrc2()->IsImmediateOpnd())
{
instr->m_opcode = opcode;
instr->SwapOpnds();
Value *tempVal = *pSrc1Val;
*pSrc1Val = *pSrc2Val;
*pSrc2Val = tempVal;
return;
}
break;
case Js::OpCode::BrSrEq_A:
case Js::OpCode::BrSrNotNeq_A:
case Js::OpCode::BrEq_I4:
goto swap_srcs;
case Js::OpCode::BrSrNeq_A:
case Js::OpCode::BrNeq_A:
case Js::OpCode::BrSrNotEq_A:
case Js::OpCode::BrNotEq_A:
case Js::OpCode::BrNeq_I4:
goto swap_srcs;
case Js::OpCode::BrGe_A:
opcode = Js::OpCode::BrLe_A;
goto swap_srcs;
case Js::OpCode::BrNotGe_A:
opcode = Js::OpCode::BrNotLe_A;
goto swap_srcs;
case Js::OpCode::BrGe_I4:
opcode = Js::OpCode::BrLe_I4;
goto swap_srcs;
case Js::OpCode::BrGt_A:
opcode = Js::OpCode::BrLt_A;
goto swap_srcs;
case Js::OpCode::BrNotGt_A:
opcode = Js::OpCode::BrNotLt_A;
goto swap_srcs;
case Js::OpCode::BrGt_I4:
opcode = Js::OpCode::BrLt_I4;
goto swap_srcs;
case Js::OpCode::BrLe_A:
opcode = Js::OpCode::BrGe_A;
goto swap_srcs;
case Js::OpCode::BrNotLe_A:
opcode = Js::OpCode::BrNotGe_A;
goto swap_srcs;
case Js::OpCode::BrLe_I4:
opcode = Js::OpCode::BrGe_I4;
goto swap_srcs;
case Js::OpCode::BrLt_A:
opcode = Js::OpCode::BrGt_A;
goto swap_srcs;
case Js::OpCode::BrNotLt_A:
opcode = Js::OpCode::BrNotGt_A;
goto swap_srcs;
case Js::OpCode::BrLt_I4:
opcode = Js::OpCode::BrGt_I4;
goto swap_srcs;
case Js::OpCode::BrEq_A:
case Js::OpCode::BrNotNeq_A:
case Js::OpCode::CmEq_A:
case Js::OpCode::CmNeq_A:
// this == "" not the same as "" == this...
if (!src1->IsImmediateOpnd() && (!src1->GetValueType().IsPrimitive() || !src2->GetValueType().IsPrimitive()))
{
return;
}
goto swap_srcs;
case Js::OpCode::CmGe_A:
if (!src1->IsImmediateOpnd() && (!src1->GetValueType().IsPrimitive() || !src2->GetValueType().IsPrimitive()))
{
return;
}
opcode = Js::OpCode::CmLe_A;
goto swap_srcs;
case Js::OpCode::CmGt_A:
if (!src1->IsImmediateOpnd() && (!src1->GetValueType().IsPrimitive() || !src2->GetValueType().IsPrimitive()))
{
return;
}
opcode = Js::OpCode::CmLt_A;
goto swap_srcs;
case Js::OpCode::CmLe_A:
if (!src1->IsImmediateOpnd() && (!src1->GetValueType().IsPrimitive() || !src2->GetValueType().IsPrimitive()))
{
return;
}
opcode = Js::OpCode::CmGe_A;
goto swap_srcs;
case Js::OpCode::CmLt_A:
if (!src1->IsImmediateOpnd() && (!src1->GetValueType().IsPrimitive() || !src2->GetValueType().IsPrimitive()))
{
return;
}
opcode = Js::OpCode::CmGt_A;
goto swap_srcs;
case Js::OpCode::CallI:
case Js::OpCode::CallIFixed:
case Js::OpCode::NewScObject:
case Js::OpCode::NewScObjectSpread:
case Js::OpCode::NewScObjArray:
case Js::OpCode::NewScObjArraySpread:
case Js::OpCode::NewScObjectNoCtor:
        // Don't insert a load to a register if the function operand is a fixed function.
if (instr->HasFixedFunctionAddressTarget())
{
return;
}
break;
// Can't do add because <32 + "Hello"> isn't equal to <"Hello" + 32>
// Lower can do the swap. Other op-codes listed below don't need immediate source hoisting, as the fast paths handle it,
// or the lowering handles the hoisting.
case Js::OpCode::Add_A:
if (src1->IsFloat())
{
goto swap_srcs;
}
return;
case Js::OpCode::Sub_I4:
case Js::OpCode::Neg_I4:
case Js::OpCode::Not_I4:
case Js::OpCode::NewScFunc:
case Js::OpCode::NewScGenFunc:
case Js::OpCode::NewScArray:
case Js::OpCode::NewScIntArray:
case Js::OpCode::NewScFltArray:
case Js::OpCode::NewScArrayWithMissingValues:
case Js::OpCode::NewRegEx:
case Js::OpCode::Ld_A:
case Js::OpCode::Ld_I4:
case Js::OpCode::ThrowRuntimeError:
case Js::OpCode::TrapIfMinIntOverNegOne:
case Js::OpCode::TrapIfTruncOverflow:
case Js::OpCode::TrapIfZero:
case Js::OpCode::FromVar:
case Js::OpCode::Conv_Prim:
case Js::OpCode::LdC_A_I4:
case Js::OpCode::LdStr:
case Js::OpCode::InitFld:
case Js::OpCode::InitRootFld:
case Js::OpCode::StartCall:
case Js::OpCode::ArgOut_A:
case Js::OpCode::ArgOut_A_Inline:
case Js::OpCode::ArgOut_A_Dynamic:
case Js::OpCode::ArgOut_A_FromStackArgs:
case Js::OpCode::ArgOut_A_InlineBuiltIn:
case Js::OpCode::ArgOut_A_InlineSpecialized:
case Js::OpCode::ArgOut_A_SpreadArg:
case Js::OpCode::InlineeEnd:
case Js::OpCode::EndCallForPolymorphicInlinee:
case Js::OpCode::InlineeMetaArg:
case Js::OpCode::InlineBuiltInEnd:
case Js::OpCode::InlineNonTrackingBuiltInEnd:
case Js::OpCode::CallHelper:
case Js::OpCode::LdElemUndef:
case Js::OpCode::LdElemUndefScoped:
case Js::OpCode::RuntimeTypeError:
case Js::OpCode::RuntimeReferenceError:
case Js::OpCode::Ret:
case Js::OpCode::NewScObjectSimple:
case Js::OpCode::NewScObjectLiteral:
case Js::OpCode::StFld:
case Js::OpCode::StRootFld:
case Js::OpCode::StSlot:
case Js::OpCode::StSlotChkUndecl:
case Js::OpCode::StElemC:
case Js::OpCode::StArrSegElemC:
case Js::OpCode::StElemI_A:
case Js::OpCode::StElemI_A_Strict:
case Js::OpCode::CallDirect:
case Js::OpCode::BrNotHasSideEffects:
case Js::OpCode::NewConcatStrMulti:
case Js::OpCode::NewConcatStrMultiBE:
case Js::OpCode::ExtendArg_A:
#ifdef ENABLE_DOM_FAST_PATH
case Js::OpCode::DOMFastPathGetter:
case Js::OpCode::DOMFastPathSetter:
#endif
case Js::OpCode::NewScopeSlots:
case Js::OpCode::NewScopeSlotsWithoutPropIds:
case Js::OpCode::NewStackScopeSlots:
case Js::OpCode::IsInst:
case Js::OpCode::BailOnEqual:
case Js::OpCode::BailOnNotEqual:
case Js::OpCode::StArrViewElem:
return;
}
if (!src1->IsImmediateOpnd())
{
return;
}
// The fast paths or lowering of the remaining instructions may not support handling immediate opnds for the first src. The
// immediate src1 is hoisted here into a separate instruction.
if (src1->IsIntConstOpnd())
{
IR::Instr *newInstr = instr->HoistSrc1(Js::OpCode::Ld_I4);
ToInt32Dst(newInstr, newInstr->GetDst()->AsRegOpnd(), this->currentBlock);
}
else if (src1->IsInt64ConstOpnd())
{
instr->HoistSrc1(Js::OpCode::Ld_I4);
}
else
{
instr->HoistSrc1(Js::OpCode::Ld_A);
}
src1 = instr->GetSrc1();
src1->AsRegOpnd()->m_sym->SetIsConst();
}
// Clear the ValueMap of the values invalidated by this instr.
void
GlobOpt::ProcessKills(IR::Instr *instr)
{
this->ProcessFieldKills(instr);
this->ProcessValueKills(instr);
this->ProcessArrayValueKills(instr);
}
bool
GlobOpt::OptIsInvariant(IR::Opnd *src, BasicBlock *block, Loop *loop, Value *srcVal, bool isNotTypeSpecConv, bool allowNonPrimitives)
{
if(!loop->CanHoistInvariants())
{
return false;
}
Sym *sym;
switch(src->GetKind())
{
case IR::OpndKindAddr:
case IR::OpndKindFloatConst:
case IR::OpndKindIntConst:
return true;
case IR::OpndKindReg:
sym = src->AsRegOpnd()->m_sym;
break;
case IR::OpndKindSym:
sym = src->AsSymOpnd()->m_sym;
if (src->AsSymOpnd()->IsPropertySymOpnd())
{
if (src->AsSymOpnd()->AsPropertySymOpnd()->IsTypeChecked())
{
                // We do not handle hoisting these yet. We might be hoisting this across the instr whose type check protects
                // this one, and the dead-store pass could then remove the type check on that instr later on.
                // For CheckFixedFld, there is no benefit to hoisting these if they don't have a type check, as they won't generate code.
return false;
}
}
break;
case IR::OpndKindHelperCall:
// Helper calls, like the private slot getter, can be invariant.
        // Consider moving more math builtins to invariant?
return HelperMethodAttributes::IsInVariant(src->AsHelperCallOpnd()->m_fnHelper);
default:
return false;
}
return OptIsInvariant(sym, block, loop, srcVal, isNotTypeSpecConv, allowNonPrimitives);
}
bool
GlobOpt::OptIsInvariant(Sym *sym, BasicBlock *block, Loop *loop, Value *srcVal, bool isNotTypeSpecConv, bool allowNonPrimitives, Value **loopHeadValRef)
{
Value *localLoopHeadVal;
if(!loopHeadValRef)
{
loopHeadValRef = &localLoopHeadVal;
}
Value *&loopHeadVal = *loopHeadValRef;
loopHeadVal = nullptr;
if(!loop->CanHoistInvariants())
{
return false;
}
if (sym->IsStackSym())
{
if (sym->AsStackSym()->IsTypeSpec())
{
StackSym *varSym = sym->AsStackSym()->GetVarEquivSym(this->func);
// Make sure the int32/float64 version of this is available.
// Note: We could handle this by converting the src, but usually the
// conversion is hoistable if this is hoistable anyway.
            // In some weird cases it may not be, however, so we'll bail out.
if (sym->AsStackSym()->IsInt32())
{
Assert(block->globOptData.liveInt32Syms->Test(varSym->m_id));
if (!loop->landingPad->globOptData.liveInt32Syms->Test(varSym->m_id) ||
(loop->landingPad->globOptData.liveLossyInt32Syms->Test(varSym->m_id) &&
!block->globOptData.liveLossyInt32Syms->Test(varSym->m_id)))
{
// Either the int32 sym is not live in the landing pad, or it's lossy in the landing pad and the
// instruction's block is using the lossless version. In either case, the instruction cannot be hoisted
// without doing a conversion of this operand.
return false;
}
}
else if (sym->AsStackSym()->IsFloat64())
{
if (!loop->landingPad->globOptData.liveFloat64Syms->Test(varSym->m_id))
{
return false;
}
}
#ifdef ENABLE_SIMDJS
else
{
Assert(sym->AsStackSym()->IsSimd128());
if (!loop->landingPad->globOptData.liveSimd128F4Syms->Test(varSym->m_id) && !loop->landingPad->globOptData.liveSimd128I4Syms->Test(varSym->m_id))
{
return false;
}
}
#endif
sym = sym->AsStackSym()->GetVarEquivSym(this->func);
}
else
{
// Make sure the var version of this is available.
// Note: We could handle this by converting the src, but usually the
// conversion is hoistable if this is hoistable anyway.
            // In some weird cases it may not be, however, so we'll bail out.
if (!loop->landingPad->globOptData.liveVarSyms->Test(sym->m_id))
{
return false;
}
}
}
else if (sym->IsPropertySym())
{
if (!loop->landingPad->globOptData.liveFields->Test(sym->m_id))
{
return false;
}
}
else
{
return false;
}
// We rely on having a value.
if (srcVal == NULL)
{
return false;
}
// A symbol is invariant if its current value is the same as it was upon entering the loop.
loopHeadVal = loop->landingPad->globOptData.FindValue(sym);
if (loopHeadVal == NULL || loopHeadVal->GetValueNumber() != srcVal->GetValueNumber())
{
return false;
}
    // Can't hoist non-primitives unless we have safeguards against valueOf/toString. Additionally, we need to consider
// the value annotations on the source *before* the loop: if we hoist this instruction outside the loop, we can't
// necessarily rely on type annotations added (and enforced) earlier in the loop's body.
//
// It might look as though !loopHeadVal->GetValueInfo()->IsPrimitive() implies
// !loop->landingPad->globOptData.IsTypeSpecialized(sym), but it turns out that this is not always the case. We
// encountered a test case in which we had previously hoisted a FromVar (to float 64) instruction, but its bailout code was
// BailoutPrimitiveButString, rather than BailoutNumberOnly, which would have allowed us to conclude that the dest was
// definitely a float64. Instead, it was only *likely* a float64, causing IsPrimitive to return false.
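    // For example (illustrative JS): hoisting o + 1 where o is { valueOf: function () { g(); return 1; } }
    // would move the call to g() out of the loop, changing how many times its side effect runs.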
if (!allowNonPrimitives && !loopHeadVal->GetValueInfo()->IsPrimitive() && !loop->landingPad->globOptData.IsTypeSpecialized(sym))
{
return false;
}
if(!isNotTypeSpecConv && loop->symsDefInLoop->Test(sym->m_id))
{
// Typically, a sym is considered invariant if it has the same value in the current block and in the loop landing pad.
// The sym may have had a different value earlier in the loop or on the back-edge, but as long as it's reassigned to its
// value outside the loop, it would be considered invariant in this block. Consider that case:
// s1 = s2[invariant]
// <loop start>
// s1 = s2[invariant]
// // s1 now has the same value as in the landing pad, and is considered invariant
// s1 += s3
// // s1 is not invariant here, or on the back-edge
// ++s3 // s3 is not invariant, so the add above cannot be hoisted
// <loop end>
//
// A problem occurs at the point of (s1 += s3) when:
// - At (s1 = s2) inside the loop, s1 was made to be the sym store of that value. This by itself is legal, because
// after that transfer, s1 and s2 have the same value.
// - (s1 += s3) is type-specialized but s1 is not specialized in the loop header. This happens when s1 is not
// specialized entering the loop, and since s1 is not used before it's defined in the loop, it's not specialized
// on back-edges.
//
// With that, at (s1 += s3), the conversion of s1 to the type-specialized version would be hoisted because s1 is
// invariant just before that instruction. Since this add is specialized, the specialized version of the sym is modified
// in the loop without a reassignment at (s1 = s2) inside the loop, and (s1 += s3) would then use an incorrect value of
// s1 (it would use the value of s1 from the previous loop iteration, instead of using the value of s2).
//
// The problem here, is that we cannot hoist the conversion of s1 into its specialized version across the assignment
// (s1 = s2) inside the loop. So for the purposes of type specialization, don't consider a sym invariant if it has a def
// inside the loop.
return false;
}
// For values with an int range, require additionally that the range is the same as in the landing pad, as the range may
// have been changed on this path based on branches, and int specialization and invariant hoisting may rely on the range
// being the same. For type spec conversions, only require that if the value is an int constant in the current block, that
// it is also an int constant with the same value in the landing pad. Other range differences don't matter for type spec.
IntConstantBounds srcIntConstantBounds, loopHeadIntConstantBounds;
if(srcVal->GetValueInfo()->TryGetIntConstantBounds(&srcIntConstantBounds) &&
(isNotTypeSpecConv || srcIntConstantBounds.IsConstant()) &&
(
!loopHeadVal->GetValueInfo()->TryGetIntConstantBounds(&loopHeadIntConstantBounds) ||
loopHeadIntConstantBounds.LowerBound() != srcIntConstantBounds.LowerBound() ||
loopHeadIntConstantBounds.UpperBound() != srcIntConstantBounds.UpperBound()
))
{
return false;
}
// If the loopHeadVal is primitive, the current value should be as well. This really should be
// srcVal->GetValueInfo()->IsPrimitive() instead of IsLikelyPrimitive, but this stronger assertion
    // doesn't hold in some cases when this method is called from the array code.
Assert((!loopHeadVal->GetValueInfo()->IsPrimitive()) || srcVal->GetValueInfo()->IsLikelyPrimitive());
return true;
}
bool
GlobOpt::OptIsInvariant(
IR::Instr *instr,
BasicBlock *block,
Loop *loop,
Value *src1Val,
Value *src2Val,
bool isNotTypeSpecConv,
const bool forceInvariantHoisting)
{
if (!loop->CanHoistInvariants())
{
return false;
}
if (!OpCodeAttr::CanCSE(instr->m_opcode))
{
return false;
}
bool allowNonPrimitives = !OpCodeAttr::OpndHasImplicitCall(instr->m_opcode);
switch(instr->m_opcode)
{
// Can't legally hoist these
case Js::OpCode::LdLen_A:
return false;
    // Can't hoist BailOnNotStackArgs: InlineArgsOptimization relies on this opcode
    // to decide whether or not to throw a rejit exception.
case Js::OpCode::BailOnNotStackArgs:
return false;
// Usually not worth hoisting these
case Js::OpCode::LdStr:
case Js::OpCode::Ld_A:
case Js::OpCode::Ld_I4:
case Js::OpCode::LdC_A_I4:
if(!forceInvariantHoisting)
{
return false;
}
break;
// Can't hoist these outside the function it's for. The LdArgumentsFromFrame for an inlinee depends on the inlinee meta arg
// that holds the arguments object, which is only initialized at the start of the inlinee. So, can't hoist this outside the
// inlinee.
case Js::OpCode::LdArgumentsFromFrame:
if(instr->m_func != loop->GetFunc())
{
return false;
}
break;
case Js::OpCode::FromVar:
if (instr->HasBailOutInfo())
{
allowNonPrimitives = true;
}
break;
case Js::OpCode::CheckObjType:
// Bug 11712101: If the operand is a field, ensure that its containing object type is invariant
// before hoisting -- that is, don't hoist a CheckObjType over a DeleteFld on that object.
// (CheckObjType only checks the operand and its immediate parent, so we don't need to go
// any farther up the object graph.)
Assert(instr->GetSrc1());
PropertySym *propertySym = instr->GetSrc1()->AsPropertySymOpnd()->GetPropertySym();
if (propertySym->HasObjectTypeSym()) {
StackSym *objectTypeSym = propertySym->GetObjectTypeSym();
if (!this->OptIsInvariant(objectTypeSym, block, loop, this->CurrentBlockData()->FindValue(objectTypeSym), true, true)) {
return false;
}
}
break;
}
IR::Opnd *dst = instr->GetDst();
if (dst && !dst->IsRegOpnd())
{
return false;
}
IR::Opnd *src1 = instr->GetSrc1();
if (src1)
{
if (!this->OptIsInvariant(src1, block, loop, src1Val, isNotTypeSpecConv, allowNonPrimitives))
{
return false;
}
IR::Opnd *src2 = instr->GetSrc2();
if (src2)
{
if (!this->OptIsInvariant(src2, block, loop, src2Val, isNotTypeSpecConv, allowNonPrimitives))
{
return false;
}
}
}
return true;
}
bool
GlobOpt::OptDstIsInvariant(IR::RegOpnd *dst)
{
StackSym *dstSym = dst->m_sym;
if (dstSym->IsTypeSpec())
{
// The type-specialized sym may be single def, but not the original...
dstSym = dstSym->GetVarEquivSym(this->func);
}
return (dstSym->m_isSingleDef);
}
void
GlobOpt::OptHoistUpdateValueType(
Loop* loop,
IR::Instr* instr,
IR::Opnd* srcOpnd,
Value* opndVal)
{
if (opndVal == nullptr || instr->m_opcode == Js::OpCode::FromVar)
{
return;
}
    Sym* opndSym = srcOpnd->GetSym();
if (opndSym)
{
BasicBlock* landingPad = loop->landingPad;
Value* opndValueInLandingPad = landingPad->globOptData.FindValue(opndSym);
Assert(opndVal->GetValueNumber() == opndValueInLandingPad->GetValueNumber());
ValueType opndValueTypeInLandingPad = opndValueInLandingPad->GetValueInfo()->Type();
if (srcOpnd->GetValueType() != opndValueTypeInLandingPad)
{
if (instr->m_opcode == Js::OpCode::SetConcatStrMultiItemBE)
{
Assert(!opndValueTypeInLandingPad.IsString());
Assert(instr->GetDst());
IR::RegOpnd* strOpnd = IR::RegOpnd::New(TyVar, instr->m_func);
strOpnd->SetValueType(ValueType::String);
strOpnd->SetValueTypeFixed();
IR::Instr* convPrimStrInstr =
IR::Instr::New(Js::OpCode::Conv_PrimStr, strOpnd, srcOpnd->Use(instr->m_func), instr->m_func);
instr->ReplaceSrc(srcOpnd, strOpnd);
if (loop->bailOutInfo->bailOutInstr)
{
loop->bailOutInfo->bailOutInstr->InsertBefore(convPrimStrInstr);
}
else
{
landingPad->InsertAfter(convPrimStrInstr);
}
}
srcOpnd->SetValueType(opndValueTypeInLandingPad);
}
if (opndSym->IsPropertySym())
{
// Also fix valueInfo on objPtr
StackSym* opndObjPtrSym = opndSym->AsPropertySym()->m_stackSym;
Value* opndObjPtrSymValInLandingPad = landingPad->globOptData.FindValue(opndObjPtrSym);
ValueInfo* opndObjPtrSymValueInfoInLandingPad = opndObjPtrSymValInLandingPad->GetValueInfo();
srcOpnd->AsSymOpnd()->SetPropertyOwnerValueType(opndObjPtrSymValueInfoInLandingPad->Type());
}
}
}
void
GlobOpt::OptHoistInvariant(
IR::Instr *instr,
BasicBlock *block,
Loop *loop,
Value *dstVal,
Value *const src1Val,
Value *const src2Val,
bool isNotTypeSpecConv,
bool lossy,
IR::BailOutKind bailoutKind)
{
BasicBlock *landingPad = loop->landingPad;
IR::Opnd* src1 = instr->GetSrc1();
if (src1)
{
// We are hoisting this instruction possibly past other uses, which might invalidate the last use info. Clear it.
OptHoistUpdateValueType(loop, instr, src1, src1Val);
if (src1->IsRegOpnd())
{
src1->AsRegOpnd()->m_isTempLastUse = false;
}
IR::Opnd* src2 = instr->GetSrc2();
if (src2)
{
OptHoistUpdateValueType(loop, instr, src2, src2Val);
if (src2->IsRegOpnd())
{
src2->AsRegOpnd()->m_isTempLastUse = false;
}
}
}
IR::RegOpnd *dst = instr->GetDst() ? instr->GetDst()->AsRegOpnd() : nullptr;
if(dst)
{
switch (instr->m_opcode)
{
case Js::OpCode::CmEq_I4:
case Js::OpCode::CmNeq_I4:
case Js::OpCode::CmLt_I4:
case Js::OpCode::CmLe_I4:
case Js::OpCode::CmGt_I4:
case Js::OpCode::CmGe_I4:
case Js::OpCode::CmUnLt_I4:
case Js::OpCode::CmUnLe_I4:
case Js::OpCode::CmUnGt_I4:
case Js::OpCode::CmUnGe_I4:
// These operations are a special case. They generate a lossy int value, and the var sym is initialized using
            // Conv_Bool. A sym cannot be live only as a lossy int sym; the var needs to be live as well, since the lossy int
            // sym cannot be used to convert to var. We don't know, however, whether the Conv_Bool will be hoisted. The idea
// currently is that the sym is only used on the path in which it is initialized inside the loop. So, don't
// hoist any liveness info for the dst.
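            // For example (illustrative): for b = (i < j), CmLt_I4 produces the lossy int 0/1 value, and the
            // var version of b is materialized separately by Conv_Bool.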
if (!this->GetIsAsmJSFunc())
{
lossy = true;
}
break;
case Js::OpCode::FromVar:
{
StackSym* src1StackSym = IR::RegOpnd::TryGetStackSym(instr->GetSrc1());
if (instr->HasBailOutInfo())
{
IR::BailOutKind instrBailoutKind = instr->GetBailOutKind();
#ifdef ENABLE_SIMDJS
Assert(instrBailoutKind == IR::BailOutIntOnly ||
instrBailoutKind == IR::BailOutExpectingInteger ||
instrBailoutKind == IR::BailOutOnNotPrimitive ||
instrBailoutKind == IR::BailOutNumberOnly ||
instrBailoutKind == IR::BailOutPrimitiveButString ||
instrBailoutKind == IR::BailOutSimd128F4Only ||
instrBailoutKind == IR::BailOutSimd128I4Only);
#else
Assert(instrBailoutKind == IR::BailOutIntOnly ||
instrBailoutKind == IR::BailOutExpectingInteger ||
instrBailoutKind == IR::BailOutOnNotPrimitive ||
instrBailoutKind == IR::BailOutNumberOnly ||
instrBailoutKind == IR::BailOutPrimitiveButString);
#endif
}
else if (src1StackSym && bailoutKind != IR::BailOutInvalid)
{
// We may be hoisting FromVar from a region where it didn't need a bailout (src1 had a definite value type) to a region
// where it would. In such cases, the FromVar needs a bailout based on the value type of src1 in its new position.
Assert(!src1StackSym->IsTypeSpec());
Value* landingPadSrc1val = landingPad->globOptData.FindValue(src1StackSym);
Assert(src1Val->GetValueNumber() == landingPadSrc1val->GetValueNumber());
ValueInfo *src1ValueInfo = src1Val->GetValueInfo();
ValueInfo *landingPadSrc1ValueInfo = landingPadSrc1val->GetValueInfo();
IRType dstType = dst->GetType();
const auto AddBailOutToFromVar = [&]()
{
instr->GetSrc1()->SetValueType(landingPadSrc1val->GetValueInfo()->Type());
EnsureBailTarget(loop);
if (block->IsLandingPad())
{
instr = instr->ConvertToBailOutInstr(instr, bailoutKind, loop->bailOutInfo->bailOutOffset);
}
else
{
instr = instr->ConvertToBailOutInstr(instr, bailoutKind);
}
};
                // If the type is definite at the source position but not in the destination (landing pad),
                // and the instruction has no bailout, we should put a bailout on the hoisted instruction.
if (dstType == TyInt32)
{
if (lossy)
{
if ((src1ValueInfo->IsPrimitive() || block->globOptData.IsTypeSpecialized(src1StackSym)) && // didn't need a lossy type spec bailout in the source block
(!landingPadSrc1ValueInfo->IsPrimitive() && !landingPad->globOptData.IsTypeSpecialized(src1StackSym))) // needs a lossy type spec bailout in the landing pad
{
bailoutKind = IR::BailOutOnNotPrimitive;
AddBailOutToFromVar();
}
}
else if (src1ValueInfo->IsInt() && !landingPadSrc1ValueInfo->IsInt())
{
AddBailOutToFromVar();
}
}
else if ((dstType == TyFloat64 && src1ValueInfo->IsNumber() && !landingPadSrc1ValueInfo->IsNumber()) ||
(IRType_IsSimd128(dstType) && src1ValueInfo->IsSimd128() && !landingPadSrc1ValueInfo->IsSimd128()))
{
AddBailOutToFromVar();
}
}
break;
}
}
if (dstVal == NULL)
{
dstVal = this->NewGenericValue(ValueType::Uninitialized, dst);
}
    // ToVar/FromVar don't need a new dst because their dst has to be invariant if their src is invariant.
bool dstDoesntNeedLoad = (!isNotTypeSpecConv && instr->m_opcode != Js::OpCode::LdC_A_I4);
StackSym *varSym = dst->m_sym;
if (varSym->IsTypeSpec())
{
varSym = varSym->GetVarEquivSym(this->func);
}
Value *const landingPadDstVal = loop->landingPad->globOptData.FindValue(varSym);
if(landingPadDstVal
? dstVal->GetValueNumber() != landingPadDstVal->GetValueNumber()
: loop->symsDefInLoop->Test(varSym->m_id))
{
// We need a temp for FromVar/ToVar if dst changes in the loop.
dstDoesntNeedLoad = false;
}
if (!dstDoesntNeedLoad && this->OptDstIsInvariant(dst) == false)
{
// Keep dst in place, hoist instr using a new dst.
instr->UnlinkDst();
// Set type specialization info correctly for this new sym
StackSym *copyVarSym;
IR::RegOpnd *copyReg;
if (dst->m_sym->IsTypeSpec())
{
copyVarSym = StackSym::New(TyVar, instr->m_func);
StackSym *copySym = copyVarSym;
if (dst->m_sym->IsInt32())
{
if(lossy)
{
// The new sym would only be live as a lossy int since we're only hoisting the store to the int version
// of the sym, and cannot be converted to var. It is not legal to have a sym only live as a lossy int,
// so don't update liveness info for this sym.
}
else
{
block->globOptData.liveInt32Syms->Set(copyVarSym->m_id);
}
copySym = copySym->GetInt32EquivSym(instr->m_func);
}
else if (dst->m_sym->IsFloat64())
{
block->globOptData.liveFloat64Syms->Set(copyVarSym->m_id);
copySym = copySym->GetFloat64EquivSym(instr->m_func);
}
#ifdef ENABLE_SIMDJS
else if (dst->IsSimd128())
{
// SIMD_JS
if (dst->IsSimd128F4())
{
block->globOptData.liveSimd128F4Syms->Set(copyVarSym->m_id);
copySym = copySym->GetSimd128F4EquivSym(instr->m_func);
}
else
{
Assert(dst->IsSimd128I4());
block->globOptData.liveSimd128I4Syms->Set(copyVarSym->m_id);
copySym = copySym->GetSimd128I4EquivSym(instr->m_func);
}
}
#endif
copyReg = IR::RegOpnd::New(copySym, copySym->GetType(), instr->m_func);
}
else
{
copyReg = IR::RegOpnd::New(dst->GetType(), instr->m_func);
copyVarSym = copyReg->m_sym;
block->globOptData.liveVarSyms->Set(copyVarSym->m_id);
}
copyReg->SetValueType(dst->GetValueType());
IR::Instr *copyInstr = IR::Instr::New(Js::OpCode::Ld_A, dst, copyReg, instr->m_func);
copyInstr->SetByteCodeOffset(instr);
instr->SetDst(copyReg);
instr->InsertBefore(copyInstr);
dst->m_sym->m_mayNotBeTempLastUse = true;
if (instr->GetSrc1() && instr->GetSrc1()->IsImmediateOpnd())
{
// Propagate IsIntConst if appropriate
switch(instr->m_opcode)
{
case Js::OpCode::Ld_A:
case Js::OpCode::Ld_I4:
case Js::OpCode::LdC_A_I4:
copyReg->m_sym->SetIsConst();
break;
}
}
ValueInfo *dstValueInfo = dstVal->GetValueInfo();
if((!dstValueInfo->GetSymStore() || dstValueInfo->GetSymStore() == varSym) && !lossy)
{
// The destination's value may have been transferred from one of the invariant sources, in which case we should
// keep the sym store intact, as that sym will likely have a better lifetime than this new copy sym. For
            // instance, if we're inside a conditionally executed block, because we don't make the copy sym live and set its value in
// all preceding blocks, this sym would not be live after exiting this block, causing this value to not
// participate in copy-prop after this block.
this->SetSymStoreDirect(dstValueInfo, copyVarSym);
}
block->globOptData.InsertNewValue(dstVal, copyReg);
dst = copyReg;
}
}
// Move to landing pad
block->UnlinkInstr(instr);
if (loop->bailOutInfo->bailOutInstr)
{
loop->bailOutInfo->bailOutInstr->InsertBefore(instr);
}
else
{
landingPad->InsertAfter(instr);
}
GlobOpt::MarkNonByteCodeUsed(instr);
if (instr->HasBailOutInfo() || instr->HasAuxBailOut())
{
Assert(loop->bailOutInfo);
EnsureBailTarget(loop);
// Copy bailout info of loop top.
instr->ReplaceBailOutInfo(loop->bailOutInfo);
}
if(!dst)
{
return;
}
// The bailout info's liveness for the dst sym is not updated in loop landing pads because bailout instructions previously
// hoisted into the loop's landing pad may bail out before the current type of the dst sym became live (perhaps due to this
// instruction). Since the landing pad will have a shared bailout point, the bailout info cannot assume that the current
// type of the dst sym was live during every bailout hoisted into the landing pad.
StackSym *const dstSym = dst->m_sym;
StackSym *const dstVarSym = dstSym->IsTypeSpec() ? dstSym->GetVarEquivSym(nullptr) : dstSym;
Assert(dstVarSym);
if(isNotTypeSpecConv || !loop->landingPad->globOptData.IsLive(dstVarSym))
{
// A new dst is being hoisted, or the same single-def dst that would not be live before this block. So, make it live and
// update the value info with the same value info in this block.
if(lossy)
{
// This is a lossy conversion to int. The instruction was given a new dst specifically for hoisting, so this new dst
// will not be live as a var before this block. A sym cannot be live only as a lossy int sym, the var needs to be
// live as well since the lossy int sym cannot be used to convert to var. Since the var version of the sym is not
// going to be initialized, don't hoist any liveness info for the dst. The sym is only going to be used on the path
// in which it is initialized inside the loop.
Assert(dstSym->IsTypeSpec());
Assert(dstSym->IsInt32());
return;
}
// Check if the dst value was transferred from the src. If so, the value transfer needs to be replicated.
bool isTransfer = dstVal == src1Val;
StackSym *transferValueOfSym = nullptr;
if(isTransfer)
{
Assert(instr->GetSrc1());
if(instr->GetSrc1()->IsRegOpnd())
{
StackSym *src1Sym = instr->GetSrc1()->AsRegOpnd()->m_sym;
if(src1Sym->IsTypeSpec())
{
src1Sym = src1Sym->GetVarEquivSym(nullptr);
Assert(src1Sym);
}
if(dstVal == block->globOptData.FindValue(src1Sym))
{
transferValueOfSym = src1Sym;
}
}
}
// SIMD_JS
if (instr->m_opcode == Js::OpCode::ExtendArg_A)
{
// Check if we should have CSE'ed this EA
Assert(instr->GetSrc1());
// If the dstVal symstore is not the dst itself, then we copied the Value from another expression.
if (dstVal->GetValueInfo()->GetSymStore() != instr->GetDst()->GetStackSym())
{
isTransfer = true;
transferValueOfSym = dstVal->GetValueInfo()->GetSymStore()->AsStackSym();
}
}
const ValueNumber dstValueNumber = dstVal->GetValueNumber();
ValueNumber dstNewValueNumber = InvalidValueNumber;
for(InvariantBlockBackwardIterator it(this, block, loop->landingPad, nullptr); it.IsValid(); it.MoveNext())
{
BasicBlock *const hoistBlock = it.Block();
GlobOptBlockData &hoistBlockData = hoistBlock->globOptData;
Assert(!hoistBlockData.IsLive(dstVarSym));
hoistBlockData.MakeLive(dstSym, lossy);
Value *newDstValue;
do
{
if(isTransfer)
{
if(transferValueOfSym)
{
newDstValue = hoistBlockData.FindValue(transferValueOfSym);
if(newDstValue && newDstValue->GetValueNumber() == dstValueNumber)
{
break;
}
}
// It's a transfer, but we don't have a sym whose value number matches in the target block. Use a new value
// number since we don't know if there is already a value with the current number for the target block.
if(dstNewValueNumber == InvalidValueNumber)
{
dstNewValueNumber = NewValueNumber();
}
newDstValue = CopyValue(dstVal, dstNewValueNumber);
break;
}
newDstValue = CopyValue(dstVal, dstValueNumber);
} while(false);
hoistBlockData.SetValue(newDstValue, dstVarSym);
}
return;
}
#if DBG
if(instr->GetSrc1()->IsRegOpnd()) // Type spec conversion may load a constant into a dst sym
{
StackSym *const srcSym = instr->GetSrc1()->AsRegOpnd()->m_sym;
Assert(srcSym != dstSym); // Type spec conversion must be changing the type, so the syms must be different
StackSym *const srcVarSym = srcSym->IsTypeSpec() ? srcSym->GetVarEquivSym(nullptr) : srcSym;
Assert(srcVarSym == dstVarSym); // Type spec conversion must be between variants of the same var sym
}
#endif
bool changeValueType = false, changeValueTypeToInt = false;
if(dstSym->IsTypeSpec())
{
if(dst->IsInt32())
{
if(!lossy)
{
Assert(
!instr->HasBailOutInfo() ||
instr->GetBailOutKind() == IR::BailOutIntOnly ||
instr->GetBailOutKind() == IR::BailOutExpectingInteger);
changeValueType = changeValueTypeToInt = true;
}
}
else if (dst->IsFloat64())
{
if(instr->HasBailOutInfo() && instr->GetBailOutKind() == IR::BailOutNumberOnly)
{
changeValueType = true;
}
}
#ifdef ENABLE_SIMDJS
else
{
// SIMD_JS
Assert(dst->IsSimd128());
if (instr->HasBailOutInfo() &&
(instr->GetBailOutKind() == IR::BailOutSimd128F4Only || instr->GetBailOutKind() == IR::BailOutSimd128I4Only))
{
changeValueType = true;
}
}
#endif
}
ValueInfo *previousValueInfoBeforeUpdate = nullptr, *previousValueInfoAfterUpdate = nullptr;
for(InvariantBlockBackwardIterator it(
this,
block,
loop->landingPad,
dstVarSym,
dstVal->GetValueNumber());
it.IsValid();
it.MoveNext())
{
BasicBlock *const hoistBlock = it.Block();
GlobOptBlockData &hoistBlockData = hoistBlock->globOptData;
#if DBG
// TODO: There are some odd cases with field hoisting where the sym is invariant in only part of the loop and the info
// does not flow through all blocks. Un-comment the verification below after PRE replaces field hoisting.
//// Verify that the src sym is live as the required type, and that the conversion is valid
//Assert(IsLive(dstVarSym, &hoistBlockData));
//if(instr->GetSrc1()->IsRegOpnd())
//{
// IR::RegOpnd *const src = instr->GetSrc1()->AsRegOpnd();
// StackSym *const srcSym = instr->GetSrc1()->AsRegOpnd()->m_sym;
// if(srcSym->IsTypeSpec())
// {
// if(src->IsInt32())
// {
// Assert(hoistBlockData.liveInt32Syms->Test(dstVarSym->m_id));
// Assert(!hoistBlockData.liveLossyInt32Syms->Test(dstVarSym->m_id)); // shouldn't try to convert a lossy int32 to anything
// }
// else
// {
// Assert(src->IsFloat64());
// Assert(hoistBlockData.liveFloat64Syms->Test(dstVarSym->m_id));
// if(dstSym->IsTypeSpec() && dst->IsInt32())
// {
// Assert(lossy); // shouldn't try to do a lossless conversion from float64 to int32
// }
// }
// }
// else
// {
// Assert(hoistBlockData.liveVarSyms->Test(dstVarSym->m_id));
// }
//}
//if(dstSym->IsTypeSpec() && dst->IsInt32())
//{
// // If the sym is already specialized as required in the block to which we are attempting to hoist the conversion,
// // that info should have flowed into this block
// if(lossy)
// {
// Assert(!hoistBlockData.liveInt32Syms->Test(dstVarSym->m_id));
// }
// else
// {
// Assert(!IsInt32TypeSpecialized(dstVarSym, hoistBlock));
// }
//}
#endif
hoistBlockData.MakeLive(dstSym, lossy);
if(!changeValueType)
{
continue;
}
Value *const hoistBlockValue = it.InvariantSymValue();
ValueInfo *const hoistBlockValueInfo = hoistBlockValue->GetValueInfo();
if(hoistBlockValueInfo == previousValueInfoBeforeUpdate)
{
if(hoistBlockValueInfo != previousValueInfoAfterUpdate)
{
HoistInvariantValueInfo(previousValueInfoAfterUpdate, hoistBlockValue, hoistBlock);
}
}
else
{
previousValueInfoBeforeUpdate = hoistBlockValueInfo;
ValueInfo *const newValueInfo =
changeValueTypeToInt
? hoistBlockValueInfo->SpecializeToInt32(alloc)
: hoistBlockValueInfo->SpecializeToFloat64(alloc);
previousValueInfoAfterUpdate = newValueInfo;
ChangeValueInfo(changeValueTypeToInt ? nullptr : hoistBlock, hoistBlockValue, newValueInfo);
}
}
}
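// TryHoistInvariant: If the instruction is invariant in this loop (per OptIsInvariant), hoist it to
// the outermost enclosing loop in which it is still invariant. Returns true if it was hoisted.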
bool
GlobOpt::TryHoistInvariant(
IR::Instr *instr,
BasicBlock *block,
Value *dstVal,
Value *src1Val,
Value *src2Val,
bool isNotTypeSpecConv,
const bool lossy,
const bool forceInvariantHoisting,
IR::BailOutKind bailoutKind)
{
Assert(!this->IsLoopPrePass());
if (OptIsInvariant(instr, block, block->loop, src1Val, src2Val, isNotTypeSpecConv, forceInvariantHoisting))
{
#if DBG
if (Js::Configuration::Global.flags.Trace.IsEnabled(Js::InvariantsPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId()))
{
Output::Print(_u(" **** INVARIANT *** "));
instr->Dump();
}
#endif
#if ENABLE_DEBUG_CONFIG_OPTIONS
if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::InvariantsPhase))
{
Output::Print(_u(" **** INVARIANT *** "));
Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode));
}
#endif
Loop *loop = block->loop;
        // Try hoisting to the outermost enclosing loop
while (loop->parent && OptIsInvariant(instr, block, loop->parent, src1Val, src2Val, isNotTypeSpecConv, forceInvariantHoisting))
{
loop = loop->parent;
}
// Record the byte code use here since we are going to move this instruction up
if (isNotTypeSpecConv)
{
InsertNoImplicitCallUses(instr);
this->CaptureByteCodeSymUses(instr);
this->InsertByteCodeUses(instr, true);
}
#if DBG
else
{
PropertySym *propertySymUse = NULL;
NoRecoverMemoryJitArenaAllocator tempAllocator(_u("BE-GlobOpt-Temp"), this->alloc->GetPageAllocator(), Js::Throw::OutOfMemory);
BVSparse<JitArenaAllocator> * tempByteCodeUse = JitAnew(&tempAllocator, BVSparse<JitArenaAllocator>, &tempAllocator);
GlobOpt::TrackByteCodeSymUsed(instr, tempByteCodeUse, &propertySymUse);
Assert(tempByteCodeUse->Count() == 0 && propertySymUse == NULL);
}
#endif
OptHoistInvariant(instr, block, loop, dstVal, src1Val, src2Val, isNotTypeSpecConv, lossy, bailoutKind);
return true;
}
return false;
}
InvariantBlockBackwardIterator::InvariantBlockBackwardIterator(
GlobOpt *const globOpt,
BasicBlock *const exclusiveBeginBlock,
BasicBlock *const inclusiveEndBlock,
StackSym *const invariantSym,
const ValueNumber invariantSymValueNumber)
: globOpt(globOpt),
exclusiveEndBlock(inclusiveEndBlock->prev),
invariantSym(invariantSym),
invariantSymValueNumber(invariantSymValueNumber),
block(exclusiveBeginBlock)
#if DBG
,
inclusiveEndBlock(inclusiveEndBlock)
#endif
{
Assert(exclusiveBeginBlock);
Assert(inclusiveEndBlock);
Assert(!inclusiveEndBlock->isDeleted);
Assert(exclusiveBeginBlock != inclusiveEndBlock);
Assert(!invariantSym == (invariantSymValueNumber == InvalidValueNumber));
MoveNext();
}
bool
InvariantBlockBackwardIterator::IsValid() const
{
return block != exclusiveEndBlock;
}
void
InvariantBlockBackwardIterator::MoveNext()
{
Assert(IsValid());
while(true)
{
#if DBG
BasicBlock *const previouslyIteratedBlock = block;
#endif
block = block->prev;
if(!IsValid())
{
Assert(previouslyIteratedBlock == inclusiveEndBlock);
break;
}
if(block->isDeleted)
{
continue;
}
if(!block->globOptData.HasData())
{
// This block's info has already been merged with all of its successors
continue;
}
if(!invariantSym)
{
break;
}
invariantSymValue = block->globOptData.FindValue(invariantSym);
if(!invariantSymValue || invariantSymValue->GetValueNumber() != invariantSymValueNumber)
{
// BailOnNoProfile and throw blocks are not moved outside loops. A sym table cleanup on these paths may delete the
// values. Field hoisting also has some odd cases where the hoisted stack sym is invariant in only part of the loop.
continue;
}
break;
}
}
BasicBlock *
InvariantBlockBackwardIterator::Block() const
{
Assert(IsValid());
return block;
}
Value *
InvariantBlockBackwardIterator::InvariantSymValue() const
{
Assert(IsValid());
Assert(invariantSym);
return invariantSymValue;
}
void
GlobOpt::HoistInvariantValueInfo(
ValueInfo *const invariantValueInfoToHoist,
Value *const valueToUpdate,
BasicBlock *const targetBlock)
{
Assert(invariantValueInfoToHoist);
Assert(valueToUpdate);
Assert(targetBlock);
// Why are we trying to change the value type of the type sym value? Asserting here to make sure we don't deep copy the type sym's value info.
Assert(!invariantValueInfoToHoist->IsJsType());
Sym *const symStore = valueToUpdate->GetValueInfo()->GetSymStore();
ValueInfo *newValueInfo;
if(invariantValueInfoToHoist->GetSymStore() == symStore)
{
newValueInfo = invariantValueInfoToHoist;
}
else
{
newValueInfo = invariantValueInfoToHoist->Copy(alloc);
this->SetSymStoreDirect(newValueInfo, symStore);
}
ChangeValueInfo(targetBlock, valueToUpdate, newValueInfo, true);
}
// static
bool
GlobOpt::DoInlineArgsOpt(Func const * func)
{
Func const * topFunc = func->GetTopFunc();
Assert(topFunc != func);
bool doInlineArgsOpt =
!PHASE_OFF(Js::InlineArgsOptPhase, topFunc) &&
!func->GetHasCalls() &&
!func->GetHasUnoptimizedArgumentsAccess() &&
func->m_canDoInlineArgsOpt;
return doInlineArgsOpt;
}
bool
GlobOpt::IsSwitchOptEnabled(Func const * func)
{
Assert(func->IsTopFunc());
return !PHASE_OFF(Js::SwitchOptPhase, func) && !func->IsSwitchOptDisabled() && func->DoGlobOpt();
}
bool
GlobOpt::IsSwitchOptEnabledForIntTypeSpec(Func const * func)
{
return IsSwitchOptEnabled(func) && !IsTypeSpecPhaseOff(func) && DoAggressiveIntTypeSpec(func);
}
bool
GlobOpt::DoConstFold() const
{
return !PHASE_OFF(Js::ConstFoldPhase, func);
}
bool
GlobOpt::IsTypeSpecPhaseOff(Func const *func)
{
return PHASE_OFF(Js::TypeSpecPhase, func) || func->IsJitInDebugMode() || !func->DoGlobOptsForGeneratorFunc();
}
bool
GlobOpt::DoTypeSpec() const
{
return doTypeSpec;
}
bool
GlobOpt::DoAggressiveIntTypeSpec(Func const * func)
{
return
!PHASE_OFF(Js::AggressiveIntTypeSpecPhase, func) &&
!IsTypeSpecPhaseOff(func) &&
!func->IsAggressiveIntTypeSpecDisabled();
}
bool
GlobOpt::DoAggressiveIntTypeSpec() const
{
return doAggressiveIntTypeSpec;
}
bool
GlobOpt::DoAggressiveMulIntTypeSpec() const
{
return doAggressiveMulIntTypeSpec;
}
bool
GlobOpt::DoDivIntTypeSpec() const
{
return doDivIntTypeSpec;
}
// static
bool
GlobOpt::DoLossyIntTypeSpec(Func const * func)
{
return
!PHASE_OFF(Js::LossyIntTypeSpecPhase, func) &&
!IsTypeSpecPhaseOff(func) &&
(!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsLossyIntTypeSpecDisabled());
}
bool
GlobOpt::DoLossyIntTypeSpec() const
{
return doLossyIntTypeSpec;
}
// static
bool
GlobOpt::DoFloatTypeSpec(Func const * func)
{
return
!PHASE_OFF(Js::FloatTypeSpecPhase, func) &&
!IsTypeSpecPhaseOff(func) &&
(!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsFloatTypeSpecDisabled()) &&
AutoSystemInfo::Data.SSE2Available();
}
bool
GlobOpt::DoFloatTypeSpec() const
{
return doFloatTypeSpec;
}
bool
GlobOpt::DoStringTypeSpec(Func const * func)
{
return !PHASE_OFF(Js::StringTypeSpecPhase, func) && !IsTypeSpecPhaseOff(func);
}
// static
bool
GlobOpt::DoTypedArrayTypeSpec(Func const * func)
{
return !PHASE_OFF(Js::TypedArrayTypeSpecPhase, func) &&
!IsTypeSpecPhaseOff(func) &&
(!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsTypedArrayTypeSpecDisabled(func->IsLoopBody()))
#if defined(_M_IX86)
&& AutoSystemInfo::Data.SSE2Available()
#endif
;
}
// static
bool
GlobOpt::DoNativeArrayTypeSpec(Func const * func)
{
return !PHASE_OFF(Js::NativeArrayPhase, func) &&
!IsTypeSpecPhaseOff(func)
#if defined(_M_IX86)
&& AutoSystemInfo::Data.SSE2Available()
#endif
;
}
bool
GlobOpt::DoArrayCheckHoist(Func const * const func)
{
Assert(func->IsTopFunc());
return
!PHASE_OFF(Js::ArrayCheckHoistPhase, func) &&
!func->IsArrayCheckHoistDisabled() &&
!func->IsJitInDebugMode() && // StElemI fast path is not allowed when in debug mode, so it cannot have bailout
func->DoGlobOptsForGeneratorFunc();
}
bool
GlobOpt::DoArrayCheckHoist() const
{
return doArrayCheckHoist;
}
bool
GlobOpt::DoArrayCheckHoist(const ValueType baseValueType, Loop* loop, IR::Instr const * const instr) const
{
if(!DoArrayCheckHoist() || (instr && !IsLoopPrePass() && instr->DoStackArgsOpt(func)))
{
return false;
}
if(!baseValueType.IsLikelyArrayOrObjectWithArray() ||
(loop ? ImplicitCallFlagsAllowOpts(loop) : ImplicitCallFlagsAllowOpts(func)))
{
return true;
}
// The function or loop does not allow disabling implicit calls, which is required to eliminate redundant JS array checks
#if DBG_DUMP
if((((loop ? loop->GetImplicitCallFlags() : func->m_fg->implicitCallFlags) & ~Js::ImplicitCall_External) == 0) &&
Js::Configuration::Global.flags.Trace.IsEnabled(Js::HostOptPhase))
{
Output::Print(_u("DoArrayCheckHoist disabled for JS arrays because of external: "));
func->DumpFullFunctionName();
Output::Print(_u("\n"));
Output::Flush();
}
#endif
return false;
}
bool
GlobOpt::DoArrayMissingValueCheckHoist(Func const * const func)
{
return
DoArrayCheckHoist(func) &&
!PHASE_OFF(Js::ArrayMissingValueCheckHoistPhase, func) &&
(!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsArrayMissingValueCheckHoistDisabled(func->IsLoopBody()));
}
bool
GlobOpt::DoArrayMissingValueCheckHoist() const
{
return doArrayMissingValueCheckHoist;
}
bool
GlobOpt::DoArraySegmentHoist(const ValueType baseValueType, Func const * const func)
{
Assert(baseValueType.IsLikelyAnyOptimizedArray());
if(!DoArrayCheckHoist(func) || PHASE_OFF(Js::ArraySegmentHoistPhase, func))
{
return false;
}
if(!baseValueType.IsLikelyArrayOrObjectWithArray())
{
return true;
}
return
!PHASE_OFF(Js::JsArraySegmentHoistPhase, func) &&
(!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsJsArraySegmentHoistDisabled(func->IsLoopBody()));
}
bool
GlobOpt::DoArraySegmentHoist(const ValueType baseValueType) const
{
Assert(baseValueType.IsLikelyAnyOptimizedArray());
return baseValueType.IsLikelyArrayOrObjectWithArray() ? doJsArraySegmentHoist : doArraySegmentHoist;
}
bool
GlobOpt::DoTypedArraySegmentLengthHoist(Loop *const loop) const
{
if(!DoArraySegmentHoist(ValueType::GetObject(ObjectType::Int32Array)))
{
return false;
}
if(loop ? ImplicitCallFlagsAllowOpts(loop) : ImplicitCallFlagsAllowOpts(func))
{
return true;
}
// The function or loop does not allow disabling implicit calls, which is required to eliminate redundant typed array
// segment length loads.
#if DBG_DUMP
if((((loop ? loop->GetImplicitCallFlags() : func->m_fg->implicitCallFlags) & ~Js::ImplicitCall_External) == 0) &&
Js::Configuration::Global.flags.Trace.IsEnabled(Js::HostOptPhase))
{
Output::Print(_u("DoArraySegmentLengthHoist disabled for typed arrays because of external: "));
func->DumpFullFunctionName();
Output::Print(_u("\n"));
Output::Flush();
}
#endif
return false;
}
bool
GlobOpt::DoArrayLengthHoist(Func const * const func)
{
return
DoArrayCheckHoist(func) &&
!PHASE_OFF(Js::Phase::ArrayLengthHoistPhase, func) &&
(!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsArrayLengthHoistDisabled(func->IsLoopBody()));
}
bool
GlobOpt::DoArrayLengthHoist() const
{
return doArrayLengthHoist;
}
bool
GlobOpt::DoEliminateArrayAccessHelperCall(Func *const func)
{
return DoArrayCheckHoist(func);
}
bool
GlobOpt::DoEliminateArrayAccessHelperCall() const
{
return doEliminateArrayAccessHelperCall;
}
bool
GlobOpt::DoLdLenIntSpec(IR::Instr * const instr, const ValueType baseValueType)
{
Assert(!instr || instr->m_opcode == Js::OpCode::LdLen_A);
Assert(!instr || instr->GetDst());
Assert(!instr || instr->GetSrc1());
if(PHASE_OFF(Js::LdLenIntSpecPhase, func) ||
IsTypeSpecPhaseOff(func) ||
(func->HasProfileInfo() && func->GetReadOnlyProfileInfo()->IsLdLenIntSpecDisabled()) ||
(instr && !IsLoopPrePass() && instr->DoStackArgsOpt(func)))
{
return false;
}
if(instr &&
instr->IsProfiledInstr() &&
(
!instr->AsProfiledInstr()->u.ldElemInfo->GetElementType().IsLikelyInt() ||
instr->GetDst()->AsRegOpnd()->m_sym->m_isNotInt
))
{
return false;
}
Assert(!instr || baseValueType == instr->GetSrc1()->GetValueType());
return
baseValueType.HasBeenString() ||
(baseValueType.IsLikelyAnyOptimizedArray() && baseValueType.GetObjectType() != ObjectType::ObjectWithArray);
}
bool
GlobOpt::DoPathDependentValues() const
{
return !PHASE_OFF(Js::Phase::PathDependentValuesPhase, func);
}
bool
GlobOpt::DoTrackRelativeIntBounds() const
{
return doTrackRelativeIntBounds;
}
bool
GlobOpt::DoBoundCheckElimination() const
{
return doBoundCheckElimination;
}
bool
GlobOpt::DoBoundCheckHoist() const
{
return doBoundCheckHoist;
}
bool
GlobOpt::DoLoopCountBasedBoundCheckHoist() const
{
return doLoopCountBasedBoundCheckHoist;
}
bool
GlobOpt::DoPowIntIntTypeSpec() const
{
return doPowIntIntTypeSpec;
}
bool
GlobOpt::DoTagChecks() const
{
return doTagChecks;
}
bool
GlobOpt::TrackArgumentsObject()
{
if (PHASE_OFF(Js::StackArgOptPhase, this->func))
{
this->CannotAllocateArgumentsObjectOnStack();
return false;
}
return func->GetHasStackArgs();
}
void
GlobOpt::CannotAllocateArgumentsObjectOnStack()
{
func->SetHasStackArgs(false);
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
if (PHASE_TESTTRACE(Js::StackArgOptPhase, this->func))
{
char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
Output::Print(_u("Stack args disabled for function %s(%s)\n"), func->GetJITFunctionBody()->GetDisplayName(), func->GetDebugNumberSet(debugStringBuffer));
Output::Flush();
}
#endif
}
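// PreOptPeep: Peephole for instructions with dead fall-through. Converts BailOnNoProfile into a
// bailout instruction (attributed to the next real bytecode offset), verifies the expected shapes of
// BailOnException/BailOnEarlyExit, and removes the unreachable code and dead successor edges that
// follow such instructions.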
IR::Instr *
GlobOpt::PreOptPeep(IR::Instr *instr)
{
if (OpCodeAttr::HasDeadFallThrough(instr->m_opcode))
{
switch (instr->m_opcode)
{
case Js::OpCode::BailOnNoProfile:
{
// Handle BailOnNoProfile
if (instr->HasBailOutInfo())
{
if (!this->prePassLoop)
{
FillBailOutInfo(this->currentBlock, instr->GetBailOutInfo());
}
// Already processed.
return instr;
}
// Convert to bailout instr
IR::Instr *nextBytecodeOffsetInstr = instr->GetNextRealInstrOrLabel();
while(nextBytecodeOffsetInstr->GetByteCodeOffset() == Js::Constants::NoByteCodeOffset)
{
nextBytecodeOffsetInstr = nextBytecodeOffsetInstr->GetNextRealInstrOrLabel();
Assert(!nextBytecodeOffsetInstr->IsLabelInstr());
}
instr = instr->ConvertToBailOutInstr(nextBytecodeOffsetInstr, IR::BailOutOnNoProfile);
instr->ClearByteCodeOffset();
instr->SetByteCodeOffset(nextBytecodeOffsetInstr);
if (!this->currentBlock->loop)
{
FillBailOutInfo(this->currentBlock, instr->GetBailOutInfo());
}
else
{
Assert(this->prePassLoop);
}
break;
}
case Js::OpCode::BailOnException:
{
Assert(
(
this->func->HasTry() && this->func->DoOptimizeTry() &&
instr->m_prev->m_opcode == Js::OpCode::Catch &&
instr->m_prev->m_prev->IsLabelInstr() &&
instr->m_prev->m_prev->AsLabelInstr()->GetRegion()->GetType() == RegionType::RegionTypeCatch
)
||
(
this->func->HasFinally() && this->func->DoOptimizeTry() &&
instr->m_prev->AsLabelInstr() &&
instr->m_prev->AsLabelInstr()->GetRegion()->GetType() == RegionType::RegionTypeFinally
)
);
break;
}
case Js::OpCode::BailOnEarlyExit:
{
Assert(this->func->HasFinally() && this->func->DoOptimizeTry());
break;
}
default:
{
if(this->currentBlock->loop && !this->IsLoopPrePass())
{
return instr;
}
break;
}
}
RemoveCodeAfterNoFallthroughInstr(instr);
}
return instr;
}
void
GlobOpt::RemoveCodeAfterNoFallthroughInstr(IR::Instr *instr)
{
if (instr != this->currentBlock->GetLastInstr())
{
// Remove dead code after bailout
IR::Instr *instrDead = instr->m_next;
IR::Instr *instrNext;
for (; instrDead != this->currentBlock->GetLastInstr(); instrDead = instrNext)
{
instrNext = instrDead->m_next;
if (instrNext->m_opcode == Js::OpCode::FunctionExit)
{
break;
}
this->func->m_fg->RemoveInstr(instrDead, this);
}
IR::Instr *instrNextBlock = instrDead->m_next;
this->func->m_fg->RemoveInstr(instrDead, this);
this->currentBlock->SetLastInstr(instrNextBlock->m_prev);
}
// Cleanup dead successors
FOREACH_SUCCESSOR_BLOCK_EDITING(deadBlock, this->currentBlock, iter)
{
this->currentBlock->RemoveDeadSucc(deadBlock, this->func->m_fg);
if (this->currentBlock->GetDataUseCount() > 0)
{
this->currentBlock->DecrementDataUseCount();
}
} NEXT_SUCCESSOR_BLOCK_EDITING;
}
void
GlobOpt::ProcessTryHandler(IR::Instr* instr)
{
Assert(instr->m_next->IsLabelInstr() && instr->m_next->AsLabelInstr()->GetRegion()->GetType() == RegionType::RegionTypeTry);
Region* tryRegion = instr->m_next->AsLabelInstr()->GetRegion();
BVSparse<JitArenaAllocator> * writeThroughSymbolsSet = tryRegion->writeThroughSymbolsSet;
ToVar(writeThroughSymbolsSet, this->currentBlock);
}
bool
GlobOpt::ProcessExceptionHandlingEdges(IR::Instr* instr)
{
Assert(instr->m_opcode == Js::OpCode::BrOnException || instr->m_opcode == Js::OpCode::BrOnNoException);
if (instr->m_opcode == Js::OpCode::BrOnException)
{
if (instr->AsBranchInstr()->GetTarget()->GetRegion()->GetType() == RegionType::RegionTypeCatch)
{
// BrOnException was added to model flow from try region to the catch region to assist
// the backward pass in propagating bytecode upward exposed info from the catch block
// to the try, and to handle break blocks. Removing it here as it has served its purpose
// and keeping it around might also have unintended effects while merging block data for
// the catch block's predecessors.
// Note that the Deadstore pass will still be able to propagate bytecode upward exposed info
// because it doesn't skip dead blocks for that.
this->RemoveFlowEdgeToCatchBlock(instr);
this->currentBlock->RemoveInstr(instr);
return true;
}
else
{
            // We add BrOnException from a finally region to early exit; remove it since it has served its purpose
return this->RemoveFlowEdgeToFinallyOnExceptionBlock(instr);
}
}
else if (instr->m_opcode == Js::OpCode::BrOnNoException)
{
if (instr->AsBranchInstr()->GetTarget()->GetRegion()->GetType() == RegionType::RegionTypeCatch)
{
this->RemoveFlowEdgeToCatchBlock(instr);
}
else
{
this->RemoveFlowEdgeToFinallyOnExceptionBlock(instr);
}
}
return false;
}
void
GlobOpt::InsertToVarAtDefInTryRegion(IR::Instr * instr, IR::Opnd * dstOpnd)
{
if ((this->currentRegion->GetType() == RegionTypeTry || this->currentRegion->GetType() == RegionTypeFinally) &&
dstOpnd->IsRegOpnd() && dstOpnd->AsRegOpnd()->m_sym->HasByteCodeRegSlot())
{
StackSym * sym = dstOpnd->AsRegOpnd()->m_sym;
if (sym->IsVar())
{
return;
}
StackSym * varSym = sym->GetVarEquivSym(nullptr);
if ((this->currentRegion->GetType() == RegionTypeTry && this->currentRegion->writeThroughSymbolsSet->Test(varSym->m_id)) ||
((this->currentRegion->GetType() == RegionTypeFinally && this->currentRegion->GetMatchingTryRegion()->writeThroughSymbolsSet->Test(varSym->m_id))))
{
IR::RegOpnd * regOpnd = IR::RegOpnd::New(varSym, IRType::TyVar, instr->m_func);
this->ToVar(instr->m_next, regOpnd, this->currentBlock, NULL, false);
}
}
}
void
GlobOpt::RemoveFlowEdgeToCatchBlock(IR::Instr * instr)
{
Assert(instr->IsBranchInstr());
BasicBlock * catchBlock = nullptr;
BasicBlock * predBlock = nullptr;
if (instr->m_opcode == Js::OpCode::BrOnException)
{
catchBlock = instr->AsBranchInstr()->GetTarget()->GetBasicBlock();
predBlock = this->currentBlock;
}
else
{
Assert(instr->m_opcode == Js::OpCode::BrOnNoException);
IR::Instr * nextInstr = instr->GetNextRealInstrOrLabel();
Assert(nextInstr->IsLabelInstr());
IR::LabelInstr * nextLabel = nextInstr->AsLabelInstr();
if (nextLabel->GetRegion() && nextLabel->GetRegion()->GetType() == RegionTypeCatch)
{
catchBlock = nextLabel->GetBasicBlock();
predBlock = this->currentBlock;
}
else
{
Assert(nextLabel->m_next->IsBranchInstr() && nextLabel->m_next->AsBranchInstr()->IsUnconditional());
BasicBlock * nextBlock = nextLabel->GetBasicBlock();
IR::BranchInstr * branchToCatchBlock = nextLabel->m_next->AsBranchInstr();
IR::LabelInstr * catchBlockLabel = branchToCatchBlock->GetTarget();
Assert(catchBlockLabel->GetRegion()->GetType() == RegionTypeCatch);
catchBlock = catchBlockLabel->GetBasicBlock();
predBlock = nextBlock;
}
}
Assert(catchBlock);
Assert(predBlock);
if (this->func->m_fg->FindEdge(predBlock, catchBlock))
{
predBlock->RemoveDeadSucc(catchBlock, this->func->m_fg);
if (predBlock == this->currentBlock)
{
predBlock->DecrementDataUseCount();
}
}
}
bool
GlobOpt::RemoveFlowEdgeToFinallyOnExceptionBlock(IR::Instr * instr)
{
Assert(instr->IsBranchInstr());
if (instr->m_opcode == Js::OpCode::BrOnNoException && instr->AsBranchInstr()->m_brFinallyToEarlyExit)
{
        // We add an edge from the finally to the early exit block.
        // We should not remove this edge.
        // If a loop has a continue, and we add an edge in the finally to that continue,
        // break block removal can move all continues inside the loop to branch to the continue added within the finally.
        // If we get rid of this edge, the loop may lose all its backedges.
        // Ideally, doing tail duplication before globopt would enable us to remove these edges, but since we do it after globopt, keep it this way for now.
        // See test1() in core/test/tryfinallytests.js
return false;
}
BasicBlock * finallyBlock = nullptr;
BasicBlock * predBlock = nullptr;
if (instr->m_opcode == Js::OpCode::BrOnException)
{
finallyBlock = instr->AsBranchInstr()->GetTarget()->GetBasicBlock();
predBlock = this->currentBlock;
}
else
{
Assert(instr->m_opcode == Js::OpCode::BrOnNoException);
IR::Instr * nextInstr = instr->GetNextRealInstrOrLabel();
Assert(nextInstr->IsLabelInstr());
IR::LabelInstr * nextLabel = nextInstr->AsLabelInstr();
if (nextLabel->GetRegion() && nextLabel->GetRegion()->GetType() == RegionTypeFinally)
{
finallyBlock = nextLabel->GetBasicBlock();
predBlock = this->currentBlock;
}
else
{
if (!(nextLabel->m_next->IsBranchInstr() && nextLabel->m_next->AsBranchInstr()->IsUnconditional()))
{
return false;
}
BasicBlock * nextBlock = nextLabel->GetBasicBlock();
IR::BranchInstr * branchTofinallyBlockOrEarlyExit = nextLabel->m_next->AsBranchInstr();
IR::LabelInstr * finallyBlockLabelOrEarlyExitLabel = branchTofinallyBlockOrEarlyExit->GetTarget();
finallyBlock = finallyBlockLabelOrEarlyExitLabel->GetBasicBlock();
predBlock = nextBlock;
}
}
Assert(finallyBlock && predBlock);
if (this->func->m_fg->FindEdge(predBlock, finallyBlock))
{
predBlock->RemoveDeadSucc(finallyBlock, this->func->m_fg);
if (instr->m_opcode == Js::OpCode::BrOnException)
{
this->currentBlock->RemoveInstr(instr);
}
if (finallyBlock->GetFirstInstr()->AsLabelInstr()->IsUnreferenced())
{
            // Traverse the pred blocks of finallyBlock; if any of the preds is in a different region, set m_hasNonBranchRef to true.
            // Otherwise, this label can get eliminated and an incorrect region from the predecessor can get propagated in the lowered code.
// See test3() in tryfinallytests.js
Region * finallyRegion = finallyBlock->GetFirstInstr()->AsLabelInstr()->GetRegion();
FOREACH_PREDECESSOR_BLOCK(pred, finallyBlock)
{
Region * predRegion = pred->GetFirstInstr()->AsLabelInstr()->GetRegion();
if (predRegion != finallyRegion)
{
finallyBlock->GetFirstInstr()->AsLabelInstr()->m_hasNonBranchRef = true;
}
} NEXT_PREDECESSOR_BLOCK;
}
if (predBlock == this->currentBlock)
{
predBlock->DecrementDataUseCount();
}
}
return true;
}
IR::Instr *
GlobOpt::OptPeep(IR::Instr *instr, Value *src1Val, Value *src2Val)
{
IR::Opnd *dst, *src1, *src2;
if (this->IsLoopPrePass())
{
return instr;
}
switch (instr->m_opcode)
{
case Js::OpCode::DeadBrEqual:
case Js::OpCode::DeadBrRelational:
case Js::OpCode::DeadBrSrEqual:
src1 = instr->GetSrc1();
src2 = instr->GetSrc2();
        // These branches were turned into dead branches because they were unnecessary (branch to next, ...).
        // The DeadBr instructions are kept in case the evaluation of the sources has side effects.
        // If we know for sure the srcs are primitive or have been type specialized, we don't need these instructions.
if (((src1Val && src1Val->GetValueInfo()->IsPrimitive()) || (src1->IsRegOpnd() && CurrentBlockData()->IsTypeSpecialized(src1->AsRegOpnd()->m_sym))) &&
((src2Val && src2Val->GetValueInfo()->IsPrimitive()) || (src2->IsRegOpnd() && CurrentBlockData()->IsTypeSpecialized(src2->AsRegOpnd()->m_sym))))
{
this->CaptureByteCodeSymUses(instr);
instr->m_opcode = Js::OpCode::Nop;
}
break;
case Js::OpCode::DeadBrOnHasProperty:
src1 = instr->GetSrc1();
if (((src1Val && src1Val->GetValueInfo()->IsPrimitive()) || (src1->IsRegOpnd() && CurrentBlockData()->IsTypeSpecialized(src1->AsRegOpnd()->m_sym))))
{
this->CaptureByteCodeSymUses(instr);
instr->m_opcode = Js::OpCode::Nop;
}
break;
case Js::OpCode::Ld_A:
case Js::OpCode::Ld_I4:
src1 = instr->GetSrc1();
dst = instr->GetDst();
if (dst->IsRegOpnd() && dst->IsEqual(src1))
{
dst = instr->UnlinkDst();
if (!dst->GetIsJITOptimizedReg())
{
IR::ByteCodeUsesInstr *bytecodeUse = IR::ByteCodeUsesInstr::New(instr);
bytecodeUse->SetDst(dst);
instr->InsertAfter(bytecodeUse);
}
instr->FreeSrc1();
instr->m_opcode = Js::OpCode::Nop;
}
break;
}
return instr;
}
void
GlobOpt::OptimizeIndirUses(IR::IndirOpnd *indirOpnd, IR::Instr * *pInstr, Value **indirIndexValRef)
{
IR::Instr * &instr = *pInstr;
Assert(!indirIndexValRef || !*indirIndexValRef);
// Update value types and copy-prop the base
OptSrc(indirOpnd->GetBaseOpnd(), &instr, nullptr, indirOpnd);
IR::RegOpnd *indexOpnd = indirOpnd->GetIndexOpnd();
if (!indexOpnd)
{
return;
}
// Update value types and copy-prop the index
Value *indexVal = OptSrc(indexOpnd, &instr, nullptr, indirOpnd);
if(indirIndexValRef)
{
*indirIndexValRef = indexVal;
}
}
bool
GlobOpt::IsPREInstrCandidateLoad(Js::OpCode opcode)
{
switch (opcode)
{
case Js::OpCode::LdFld:
case Js::OpCode::LdFldForTypeOf:
case Js::OpCode::LdRootFld:
case Js::OpCode::LdRootFldForTypeOf:
case Js::OpCode::LdMethodFld:
case Js::OpCode::LdRootMethodFld:
case Js::OpCode::LdSlot:
case Js::OpCode::LdSlotArr:
return true;
}
return false;
}
bool
GlobOpt::IsPREInstrCandidateStore(Js::OpCode opcode)
{
switch (opcode)
{
case Js::OpCode::StFld:
case Js::OpCode::StRootFld:
case Js::OpCode::StSlot:
return true;
}
return false;
}
bool
GlobOpt::ImplicitCallFlagsAllowOpts(Loop *loop)
{
return loop->GetImplicitCallFlags() != Js::ImplicitCall_HasNoInfo &&
(((loop->GetImplicitCallFlags() & ~Js::ImplicitCall_Accessor) | Js::ImplicitCall_None) == Js::ImplicitCall_None);
}
bool
GlobOpt::ImplicitCallFlagsAllowOpts(Func const *func)
{
return func->m_fg->implicitCallFlags != Js::ImplicitCall_HasNoInfo &&
(((func->m_fg->implicitCallFlags & ~Js::ImplicitCall_Accessor) | Js::ImplicitCall_None) == Js::ImplicitCall_None);
}
#if DBG_DUMP
void
GlobOpt::Dump() const
{
this->DumpSymToValueMap();
}
void
GlobOpt::DumpSymToValueMap(BasicBlock const * block) const
{
Output::Print(_u("\n*** SymToValueMap ***\n"));
block->globOptData.DumpSymToValueMap();
}
void
GlobOpt::DumpSymToValueMap() const
{
DumpSymToValueMap(this->currentBlock);
}
void
GlobOpt::DumpSymVal(int index)
{
SymID id = index;
extern Func *CurrentFunc;
Sym *sym = this->func->m_symTable->Find(id);
AssertMsg(sym, "Sym not found!!!");
Output::Print(_u("Sym: "));
sym->Dump();
Output::Print(_u("\t\tValueNumber: "));
Value * pValue = CurrentBlockData()->FindValueFromMapDirect(sym->m_id);
pValue->Dump();
Output::Print(_u("\n"));
}
void
GlobOpt::Trace(BasicBlock * block, bool before) const
{
bool globOptTrace = Js::Configuration::Global.flags.Trace.IsEnabled(Js::GlobOptPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId());
bool typeSpecTrace = Js::Configuration::Global.flags.Trace.IsEnabled(Js::TypeSpecPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId());
bool floatTypeSpecTrace = Js::Configuration::Global.flags.Trace.IsEnabled(Js::FloatTypeSpecPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId());
bool fieldHoistTrace = Js::Configuration::Global.flags.Trace.IsEnabled(Js::FieldHoistPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId());
bool fieldCopyPropTrace = fieldHoistTrace || Js::Configuration::Global.flags.Trace.IsEnabled(Js::FieldCopyPropPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId());
bool objTypeSpecTrace = Js::Configuration::Global.flags.Trace.IsEnabled(Js::ObjTypeSpecPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId());
bool valueTableTrace = Js::Configuration::Global.flags.Trace.IsEnabled(Js::ValueTablePhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId());
bool fieldPRETrace = Js::Configuration::Global.flags.Trace.IsEnabled(Js::FieldPREPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId());
bool anyTrace = globOptTrace || typeSpecTrace || floatTypeSpecTrace || fieldCopyPropTrace || fieldHoistTrace || objTypeSpecTrace || valueTableTrace || fieldPRETrace;
if (!anyTrace)
{
return;
}
if (fieldPRETrace && this->IsLoopPrePass())
{
if (block->isLoopHeader && before)
{
Output::Print(_u("==== Loop Prepass block header #%-3d, Visiting Loop block head #%-3d\n"),
this->prePassLoop->GetHeadBlock()->GetBlockNum(), block->GetBlockNum());
}
}
if (!typeSpecTrace && !floatTypeSpecTrace && !valueTableTrace && !Js::Configuration::Global.flags.Verbose)
{
return;
}
if (before)
{
Output::Print(_u("========================================================================\n"));
Output::Print(_u("Begin OptBlock: Block #%-3d"), block->GetBlockNum());
if (block->loop)
{
Output::Print(_u(" Loop block header:%-3d currentLoop block head:%-3d %s"),
block->loop->GetHeadBlock()->GetBlockNum(),
this->prePassLoop ? this->prePassLoop->GetHeadBlock()->GetBlockNum() : 0,
this->IsLoopPrePass() ? _u("PrePass") : _u(""));
}
Output::Print(_u("\n"));
}
else
{
Output::Print(_u("-----------------------------------------------------------------------\n"));
Output::Print(_u("After OptBlock: Block #%-3d\n"), block->GetBlockNum());
}
if ((typeSpecTrace || floatTypeSpecTrace) && !block->globOptData.liveVarSyms->IsEmpty())
{
Output::Print(_u(" Live var syms: "));
block->globOptData.liveVarSyms->Dump();
}
if (typeSpecTrace && !block->globOptData.liveInt32Syms->IsEmpty())
{
Assert(this->tempBv->IsEmpty());
this->tempBv->Minus(block->globOptData.liveInt32Syms, block->globOptData.liveLossyInt32Syms);
if(!this->tempBv->IsEmpty())
{
Output::Print(_u(" Int32 type specialized (lossless) syms: "));
this->tempBv->Dump();
}
this->tempBv->ClearAll();
if(!block->globOptData.liveLossyInt32Syms->IsEmpty())
{
Output::Print(_u(" Int32 converted (lossy) syms: "));
block->globOptData.liveLossyInt32Syms->Dump();
}
}
if (floatTypeSpecTrace && !block->globOptData.liveFloat64Syms->IsEmpty())
{
Output::Print(_u(" Float64 type specialized syms: "));
block->globOptData.liveFloat64Syms->Dump();
}
if ((fieldCopyPropTrace || objTypeSpecTrace) && this->DoFieldCopyProp(block->loop) && !block->globOptData.liveFields->IsEmpty())
{
Output::Print(_u(" Live field syms: "));
block->globOptData.liveFields->Dump();
}
if ((fieldHoistTrace || objTypeSpecTrace) && this->DoFieldHoisting(block->loop) && HasHoistableFields(block))
{
Output::Print(_u(" Hoistable field sym: "));
block->globOptData.hoistableFields->Dump();
}
if (objTypeSpecTrace || valueTableTrace)
{
Output::Print(_u(" Value table:\n"));
block->globOptData.DumpSymToValueMap();
}
if (before)
{
Output::Print(_u("-----------------------------------------------------------------------\n")); \
}
Output::Flush();
}
void
GlobOpt::TraceSettings() const
{
Output::Print(_u("GlobOpt Settings:\r\n"));
Output::Print(_u(" FloatTypeSpec: %s\r\n"), this->DoFloatTypeSpec() ? _u("enabled") : _u("disabled"));
Output::Print(_u(" AggressiveIntTypeSpec: %s\r\n"), this->DoAggressiveIntTypeSpec() ? _u("enabled") : _u("disabled"));
Output::Print(_u(" LossyIntTypeSpec: %s\r\n"), this->DoLossyIntTypeSpec() ? _u("enabled") : _u("disabled"));
Output::Print(_u(" ArrayCheckHoist: %s\r\n"), this->func->IsArrayCheckHoistDisabled() ? _u("disabled") : _u("enabled"));
Output::Print(_u(" ImplicitCallFlags: %s\r\n"), Js::DynamicProfileInfo::GetImplicitCallFlagsString(this->func->m_fg->implicitCallFlags));
for (Loop * loop = this->func->m_fg->loopList; loop != NULL; loop = loop->next)
{
Output::Print(_u(" loop: %d, ImplicitCallFlags: %s\r\n"), loop->GetLoopNumber(),
Js::DynamicProfileInfo::GetImplicitCallFlagsString(loop->GetImplicitCallFlags()));
}
Output::Flush();
}
#endif // DBG_DUMP
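// TrackMarkTempObject: Walk the given instruction range, tracking which syms may refer to mark-temp
// (stack-allocated) objects and adding BailOutMarkTempObject to instructions with implicit calls whose
// operands may reference such objects. Returns the last instruction processed (which may have been
// replaced by a bailout version).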
IR::Instr *
GlobOpt::TrackMarkTempObject(IR::Instr * instrStart, IR::Instr * instrLast)
{
if (!this->func->GetHasMarkTempObjects())
{
return instrLast;
}
IR::Instr * instr = instrStart;
IR::Instr * instrEnd = instrLast->m_next;
IR::Instr * lastInstr = nullptr;
GlobOptBlockData& globOptData = *CurrentBlockData();
do
{
bool mayNeedBailOnImplicitCallsPreOp = !this->IsLoopPrePass()
&& instr->HasAnyImplicitCalls()
&& globOptData.maybeTempObjectSyms != nullptr;
if (mayNeedBailOnImplicitCallsPreOp)
{
IR::Opnd * src1 = instr->GetSrc1();
if (src1)
{
instr = GenerateBailOutMarkTempObjectIfNeeded(instr, src1, false);
IR::Opnd * src2 = instr->GetSrc2();
if (src2)
{
instr = GenerateBailOutMarkTempObjectIfNeeded(instr, src2, false);
}
}
}
IR::Opnd *dst = instr->GetDst();
if (dst)
{
if (dst->IsRegOpnd())
{
TrackTempObjectSyms(instr, dst->AsRegOpnd());
}
else if (mayNeedBailOnImplicitCallsPreOp)
{
instr = GenerateBailOutMarkTempObjectIfNeeded(instr, dst, true);
}
}
lastInstr = instr;
instr = instr->m_next;
}
while (instr != instrEnd);
return lastInstr;
}
void
GlobOpt::TrackTempObjectSyms(IR::Instr * instr, IR::RegOpnd * opnd)
{
    // If it is marked as dstIsTempObject, we should have mark-temped it, or type-specialized it to Ld_I4.
Assert(!instr->dstIsTempObject || ObjectTempVerify::CanMarkTemp(instr, nullptr));
GlobOptBlockData& globOptData = *CurrentBlockData();
bool canStoreTemp = false;
bool maybeTemp = false;
if (OpCodeAttr::TempObjectProducing(instr->m_opcode))
{
maybeTemp = instr->dstIsTempObject;
// We have to make sure that lower will always generate code to do stack allocation
// before we can store any other stack instance onto it. Otherwise, we would not
        // walk the object to box the stack properties.
canStoreTemp = instr->dstIsTempObject && ObjectTemp::CanStoreTemp(instr);
}
else if (OpCodeAttr::TempObjectTransfer(instr->m_opcode))
{
// Need to check both sources, GetNewScObject has two srcs for transfer.
// No need to get var equiv sym here as transfer of type spec value does not transfer a mark temp object.
maybeTemp = globOptData.maybeTempObjectSyms && (
(instr->GetSrc1()->IsRegOpnd() && globOptData.maybeTempObjectSyms->Test(instr->GetSrc1()->AsRegOpnd()->m_sym->m_id))
|| (instr->GetSrc2() && instr->GetSrc2()->IsRegOpnd() && globOptData.maybeTempObjectSyms->Test(instr->GetSrc2()->AsRegOpnd()->m_sym->m_id)));
canStoreTemp = globOptData.canStoreTempObjectSyms && (
(instr->GetSrc1()->IsRegOpnd() && globOptData.canStoreTempObjectSyms->Test(instr->GetSrc1()->AsRegOpnd()->m_sym->m_id))
&& (!instr->GetSrc2() || (instr->GetSrc2()->IsRegOpnd() && globOptData.canStoreTempObjectSyms->Test(instr->GetSrc2()->AsRegOpnd()->m_sym->m_id))));
AssertOrFailFast(!canStoreTemp || instr->dstIsTempObject);
AssertOrFailFast(!maybeTemp || instr->dstIsTempObject);
}
    // Need to get the var equiv sym, as assignment to a type specialized sym kills the var sym value anyway.
StackSym * sym = opnd->m_sym;
if (!sym->IsVar())
{
sym = sym->GetVarEquivSym(nullptr);
if (sym == nullptr)
{
return;
}
}
SymID symId = sym->m_id;
if (maybeTemp)
{
        // Only var syms should be temp objects
Assert(opnd->m_sym == sym);
if (globOptData.maybeTempObjectSyms == nullptr)
{
globOptData.maybeTempObjectSyms = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc);
}
globOptData.maybeTempObjectSyms->Set(symId);
if (canStoreTemp)
{
if (instr->m_opcode == Js::OpCode::NewScObjectLiteral && !this->IsLoopPrePass())
{
                // For an object literal, we install the final type up front.
                // If there is a bailout before we finish initializing all the fields, we need to
                // zero out the rest if we stack allocate the literal, so that the boxing would not
                // try to box a trash pointer in the properties.
                // Although object literal initialization can be done lexically, BailOnNoProfile may cause some paths
                // to disappear. Doing it flow-based makes it easier to stop propagating those entries.
IR::IntConstOpnd * propertyArrayIdOpnd = instr->GetSrc1()->AsIntConstOpnd();
const Js::PropertyIdArray * propIds = instr->m_func->GetJITFunctionBody()->ReadPropertyIdArrayFromAuxData(propertyArrayIdOpnd->AsUint32());
// Duplicates are removed by parser
Assert(!propIds->hadDuplicates);
if (globOptData.stackLiteralInitFldDataMap == nullptr)
{
globOptData.stackLiteralInitFldDataMap = JitAnew(alloc, StackLiteralInitFldDataMap, alloc);
}
else
{
Assert(!globOptData.stackLiteralInitFldDataMap->ContainsKey(sym));
}
StackLiteralInitFldData data = { propIds, 0};
globOptData.stackLiteralInitFldDataMap->AddNew(sym, data);
}
if (globOptData.canStoreTempObjectSyms == nullptr)
{
globOptData.canStoreTempObjectSyms = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc);
}
globOptData.canStoreTempObjectSyms->Set(symId);
}
else if (globOptData.canStoreTempObjectSyms)
{
globOptData.canStoreTempObjectSyms->Clear(symId);
}
}
else
{
Assert(!canStoreTemp);
if (globOptData.maybeTempObjectSyms)
{
if (globOptData.canStoreTempObjectSyms)
{
globOptData.canStoreTempObjectSyms->Clear(symId);
}
globOptData.maybeTempObjectSyms->Clear(symId);
}
else
{
Assert(!globOptData.canStoreTempObjectSyms);
}
        // The symbol is being assigned to; it shouldn't still be in the stackLiteralInitFldDataMap
Assert(this->IsLoopPrePass() ||
globOptData.stackLiteralInitFldDataMap == nullptr
|| globOptData.stackLiteralInitFldDataMap->Count() == 0
|| !globOptData.stackLiteralInitFldDataMap->ContainsKey(sym));
}
}
IR::Instr *
GlobOpt::GenerateBailOutMarkTempObjectIfNeeded(IR::Instr * instr, IR::Opnd * opnd, bool isDst)
{
Assert(opnd);
Assert(isDst == (opnd == instr->GetDst()));
Assert(opnd != instr->GetDst() || !opnd->IsRegOpnd());
Assert(!this->IsLoopPrePass());
Assert(instr->HasAnyImplicitCalls());
    // Only opcodes whose dst is a reg opnd, or ArgOut_A, should have dstIsTempObject marked
Assert(!isDst || !instr->dstIsTempObject || instr->m_opcode == Js::OpCode::ArgOut_A);
    // The post-op implicit call bailout shouldn't have been installed yet
Assert(!instr->HasBailOutInfo() || (instr->GetBailOutKind() & IR::BailOutKindBits) != IR::BailOutOnImplicitCalls);
GlobOptBlockData& globOptData = *CurrentBlockData();
Assert(globOptData.maybeTempObjectSyms != nullptr);
IR::PropertySymOpnd * propertySymOpnd = nullptr;
StackSym * stackSym = ObjectTemp::GetStackSym(opnd, &propertySymOpnd);
    // It is okay not to get the var equiv sym here, as use of a type specialized sym is not use of the temp object,
    // so there is no need to add the mark temp bailout.
    // TempObjectSyms doesn't contain any type spec syms, so we will get false here for all type spec syms.
if (stackSym && globOptData.maybeTempObjectSyms->Test(stackSym->m_id))
{
if (instr->HasBailOutInfo())
{
instr->SetBailOutKind(instr->GetBailOutKind() | IR::BailOutMarkTempObject);
}
else
{
            // Only insert the pre-op bailout if it is not direct field access; don't check the dst yet.
            // SetTypeCheckBailout will clear this out if it is direct field access.
if (isDst
|| (instr->m_opcode == Js::OpCode::FromVar && !opnd->GetValueType().IsPrimitive())
|| propertySymOpnd == nullptr
|| !propertySymOpnd->IsTypeCheckProtected())
{
this->GenerateBailAtOperation(&instr, IR::BailOutMarkTempObject);
}
}
if (!opnd->IsRegOpnd() && (!isDst || (globOptData.canStoreTempObjectSyms && globOptData.canStoreTempObjectSyms->Test(stackSym->m_id))))
{
// If this opnd is a dst, that means that the object pointer is a stack object,
// and we can store temp object/number on it.
// If the opnd is a src, that means that the object pointer may be a stack object
// so the load may be a temp object/number and we need to track its use.
            // Don't mark the dst of an indir as can-store-temp, because we don't actually know
// what it is assigning to.
if (!isDst || !opnd->IsIndirOpnd())
{
opnd->SetCanStoreTemp();
}
if (propertySymOpnd)
{
// Track initfld of stack literals
if (isDst && instr->m_opcode == Js::OpCode::InitFld)
{
const Js::PropertyId propertyId = propertySymOpnd->m_sym->AsPropertySym()->m_propertyId;
// We don't need to track numeric properties init
if (!this->func->GetThreadContextInfo()->IsNumericProperty(propertyId))
{
DebugOnly(bool found = false);
globOptData.stackLiteralInitFldDataMap->RemoveIf(stackSym,
[&](StackSym * key, StackLiteralInitFldData & data)
{
DebugOnly(found = true);
Assert(key == stackSym);
Assert(data.currentInitFldCount < data.propIds->count);
if (data.propIds->elements[data.currentInitFldCount] != propertyId)
{
#if DBG
bool duplicate = false;
for (uint i = 0; i < data.currentInitFldCount; i++)
{
if (data.propIds->elements[i] == propertyId)
{
duplicate = true;
break;
}
}
Assert(duplicate);
#endif
// duplicate initialization
return false;
}
bool finished = (++data.currentInitFldCount == data.propIds->count);
#if DBG
if (finished)
{
// We can still track the finished stack literal InitFld lexically.
this->finishedStackLiteralInitFld->Set(stackSym->m_id);
}
#endif
return finished;
});
                        // We might still see InitFld even after we have finished with all the property Ids because
                        // of duplicate entries at the end.
Assert(found || finishedStackLiteralInitFld->Test(stackSym->m_id));
}
}
}
}
}
return instr;
}
LoopCount *
GlobOpt::GetOrGenerateLoopCountForMemOp(Loop *loop)
{
LoopCount *loopCount = loop->loopCount;
if (loopCount && !loopCount->HasGeneratedLoopCountSym())
{
Assert(loop->bailOutInfo);
EnsureBailTarget(loop);
GenerateLoopCountPlusOne(loop, loopCount);
}
return loopCount;
}
IR::Opnd *
GlobOpt::GenerateInductionVariableChangeForMemOp(Loop *loop, byte unroll, IR::Instr *insertBeforeInstr)
{
LoopCount *loopCount = loop->loopCount;
IR::Opnd *sizeOpnd = nullptr;
Assert(loopCount);
Assert(loop->memOpInfo->inductionVariableOpndPerUnrollMap);
if (loop->memOpInfo->inductionVariableOpndPerUnrollMap->TryGetValue(unroll, &sizeOpnd))
{
return sizeOpnd;
}
Func *localFunc = loop->GetFunc();
const auto InsertInstr = [&](IR::Instr *instr)
{
if (insertBeforeInstr == nullptr)
{
loop->landingPad->InsertAfter(instr);
}
else
{
insertBeforeInstr->InsertBefore(instr);
}
};
if (loopCount->LoopCountMinusOneSym())
{
IRType type = loopCount->LoopCountSym()->GetType();
// Loop count is off by one, so add one
IR::RegOpnd *loopCountOpnd = IR::RegOpnd::New(loopCount->LoopCountSym(), type, localFunc);
sizeOpnd = loopCountOpnd;
if (unroll != 1)
{
sizeOpnd = IR::RegOpnd::New(TyUint32, this->func);
IR::Opnd *unrollOpnd = IR::IntConstOpnd::New(unroll, type, localFunc);
InsertInstr(IR::Instr::New(Js::OpCode::Mul_I4,
sizeOpnd,
loopCountOpnd,
unrollOpnd,
localFunc));
}
}
else
{
uint size = (loopCount->LoopCountMinusOneConstantValue() + 1) * unroll;
sizeOpnd = IR::IntConstOpnd::New(size, IRType::TyUint32, localFunc);
}
loop->memOpInfo->inductionVariableOpndPerUnrollMap->Add(unroll, sizeOpnd);
return sizeOpnd;
}
IR::RegOpnd*
GlobOpt::GenerateStartIndexOpndForMemop(Loop *loop, IR::Opnd *indexOpnd, IR::Opnd *sizeOpnd, bool isInductionVariableChangeIncremental, bool bIndexAlreadyChanged, IR::Instr *insertBeforeInstr)
{
IR::RegOpnd *startIndexOpnd = nullptr;
Func *localFunc = loop->GetFunc();
IRType type = indexOpnd->GetType();
const int cacheIndex = ((int)isInductionVariableChangeIncremental << 1) | (int)bIndexAlreadyChanged;
if (loop->memOpInfo->startIndexOpndCache[cacheIndex])
{
return loop->memOpInfo->startIndexOpndCache[cacheIndex];
}
const auto InsertInstr = [&](IR::Instr *instr)
{
if (insertBeforeInstr == nullptr)
{
loop->landingPad->InsertAfter(instr);
}
else
{
insertBeforeInstr->InsertBefore(instr);
}
};
startIndexOpnd = IR::RegOpnd::New(type, localFunc);
    // If the two flags differ, we can simply use indexOpnd
if (isInductionVariableChangeIncremental != bIndexAlreadyChanged)
{
InsertInstr(IR::Instr::New(Js::OpCode::Ld_A,
startIndexOpnd,
indexOpnd,
localFunc));
}
else
{
// Otherwise add 1 to it
InsertInstr(IR::Instr::New(Js::OpCode::Add_I4,
startIndexOpnd,
indexOpnd,
IR::IntConstOpnd::New(1, type, localFunc, true),
localFunc));
}
if (!isInductionVariableChangeIncremental)
{
InsertInstr(IR::Instr::New(Js::OpCode::Sub_I4,
startIndexOpnd,
startIndexOpnd,
sizeOpnd,
localFunc));
}
loop->memOpInfo->startIndexOpndCache[cacheIndex] = startIndexOpnd;
return startIndexOpnd;
}
IR::Instr*
GlobOpt::FindUpperBoundsCheckInstr(IR::Instr* fromInstr)
{
IR::Instr *upperBoundCheck = fromInstr;
do
{
upperBoundCheck = upperBoundCheck->m_prev;
Assert(upperBoundCheck);
Assert(!upperBoundCheck->IsLabelInstr());
} while (upperBoundCheck->m_opcode != Js::OpCode::BoundCheck);
return upperBoundCheck;
}
IR::Instr*
GlobOpt::FindArraySegmentLoadInstr(IR::Instr* fromInstr)
{
IR::Instr *headSegmentLengthLoad = fromInstr;
do
{
headSegmentLengthLoad = headSegmentLengthLoad->m_prev;
Assert(headSegmentLengthLoad);
Assert(!headSegmentLengthLoad->IsLabelInstr());
} while (headSegmentLengthLoad->m_opcode != Js::OpCode::LdIndir);
return headSegmentLengthLoad;
}
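// Removes a LdElemI_A/StElemI_A that has been subsumed by a memop, together
// with the helper instructions emitted for it (bound check, head segment and
// head segment length loads, BailOnNotArray/NoImplicitCallUses), and converts
// the element access itself into ByteCodeUses.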
void
GlobOpt::RemoveMemOpSrcInstr(IR::Instr* memopInstr, IR::Instr* srcInstr, BasicBlock* block)
{
Assert(srcInstr && (srcInstr->m_opcode == Js::OpCode::LdElemI_A || srcInstr->m_opcode == Js::OpCode::StElemI_A || srcInstr->m_opcode == Js::OpCode::StElemI_A_Strict));
Assert(memopInstr && (memopInstr->m_opcode == Js::OpCode::Memcopy || memopInstr->m_opcode == Js::OpCode::Memset));
Assert(block);
const bool isDst = srcInstr->m_opcode == Js::OpCode::StElemI_A || srcInstr->m_opcode == Js::OpCode::StElemI_A_Strict;
IR::RegOpnd* opnd = (isDst ? memopInstr->GetDst() : memopInstr->GetSrc1())->AsIndirOpnd()->GetBaseOpnd();
IR::ArrayRegOpnd* arrayOpnd = opnd->IsArrayRegOpnd() ? opnd->AsArrayRegOpnd() : nullptr;
IR::Instr* topInstr = srcInstr;
if (srcInstr->extractedUpperBoundCheckWithoutHoisting)
{
IR::Instr *upperBoundCheck = FindUpperBoundsCheckInstr(srcInstr);
Assert(upperBoundCheck && upperBoundCheck != srcInstr);
topInstr = upperBoundCheck;
}
if (srcInstr->loadedArrayHeadSegmentLength && arrayOpnd && arrayOpnd->HeadSegmentLengthSym())
{
IR::Instr *arrayLoadSegmentHeadLength = FindArraySegmentLoadInstr(topInstr);
Assert(arrayLoadSegmentHeadLength);
topInstr = arrayLoadSegmentHeadLength;
arrayOpnd->RemoveHeadSegmentLengthSym();
}
if (srcInstr->loadedArrayHeadSegment && arrayOpnd && arrayOpnd->HeadSegmentSym())
{
IR::Instr *arrayLoadSegmentHead = FindArraySegmentLoadInstr(topInstr);
Assert(arrayLoadSegmentHead);
topInstr = arrayLoadSegmentHead;
arrayOpnd->RemoveHeadSegmentSym();
}
    // If no bounds checks are present, simply look for the instructions added for instrumentation
if(topInstr == srcInstr)
{
bool checkPrev = true;
while (checkPrev)
{
switch (topInstr->m_prev->m_opcode)
{
case Js::OpCode::BailOnNotArray:
case Js::OpCode::NoImplicitCallUses:
case Js::OpCode::ByteCodeUses:
topInstr = topInstr->m_prev;
checkPrev = !!topInstr->m_prev;
break;
default:
checkPrev = false;
break;
}
}
}
while (topInstr != srcInstr)
{
IR::Instr* removeInstr = topInstr;
topInstr = topInstr->m_next;
Assert(
removeInstr->m_opcode == Js::OpCode::BailOnNotArray ||
removeInstr->m_opcode == Js::OpCode::NoImplicitCallUses ||
removeInstr->m_opcode == Js::OpCode::ByteCodeUses ||
removeInstr->m_opcode == Js::OpCode::LdIndir ||
removeInstr->m_opcode == Js::OpCode::BoundCheck
);
if (removeInstr->m_opcode != Js::OpCode::ByteCodeUses)
{
block->RemoveInstr(removeInstr);
}
}
this->ConvertToByteCodeUses(srcInstr);
}
void
GlobOpt::GetMemOpSrcInfo(Loop* loop, IR::Instr* instr, IR::RegOpnd*& base, IR::RegOpnd*& index, IRType& arrayType)
{
Assert(instr && (instr->m_opcode == Js::OpCode::LdElemI_A || instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict));
IR::Opnd* arrayOpnd = instr->m_opcode == Js::OpCode::LdElemI_A ? instr->GetSrc1() : instr->GetDst();
Assert(arrayOpnd->IsIndirOpnd());
IR::IndirOpnd* indirArrayOpnd = arrayOpnd->AsIndirOpnd();
IR::RegOpnd* baseOpnd = (IR::RegOpnd*)indirArrayOpnd->GetBaseOpnd();
IR::RegOpnd* indexOpnd = (IR::RegOpnd*)indirArrayOpnd->GetIndexOpnd();
Assert(baseOpnd);
Assert(indexOpnd);
// Process Out Params
base = baseOpnd;
index = indexOpnd;
arrayType = indirArrayOpnd->GetType();
}
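// Emits the actual Memset/Memcopy instruction in the loop's landing pad:
// computes the element count and start index, builds the dst (and src) indir
// opnds, attaches the memop bailout info, and finally removes the original
// element accesses from the loop body.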
void
GlobOpt::EmitMemop(Loop * loop, LoopCount *loopCount, const MemOpEmitData* emitData)
{
Assert(emitData);
Assert(emitData->candidate);
Assert(emitData->stElemInstr);
Assert(emitData->stElemInstr->m_opcode == Js::OpCode::StElemI_A || emitData->stElemInstr->m_opcode == Js::OpCode::StElemI_A_Strict);
IR::BailOutKind bailOutKind = emitData->bailOutKind;
const byte unroll = emitData->inductionVar.unroll;
Assert(unroll == 1);
const bool isInductionVariableChangeIncremental = emitData->inductionVar.isIncremental;
const bool bIndexAlreadyChanged = emitData->candidate->bIndexAlreadyChanged;
IR::RegOpnd *baseOpnd = nullptr;
IR::RegOpnd *indexOpnd = nullptr;
IRType dstType;
GetMemOpSrcInfo(loop, emitData->stElemInstr, baseOpnd, indexOpnd, dstType);
Func *localFunc = loop->GetFunc();
// Handle bailout info
EnsureBailTarget(loop);
Assert(bailOutKind != IR::BailOutInvalid);
    // Keep only the array-bits bailouts. Consider handling the discarded bailouts instead of simply ignoring them
bailOutKind &= IR::BailOutForArrayBits;
// Add our custom bailout to handle Op_MemCopy return value.
bailOutKind |= IR::BailOutOnMemOpError;
BailOutInfo *const bailOutInfo = loop->bailOutInfo;
Assert(bailOutInfo);
IR::Instr *insertBeforeInstr = bailOutInfo->bailOutInstr;
Assert(insertBeforeInstr);
IR::Opnd *sizeOpnd = GenerateInductionVariableChangeForMemOp(loop, unroll, insertBeforeInstr);
IR::RegOpnd *startIndexOpnd = GenerateStartIndexOpndForMemop(loop, indexOpnd, sizeOpnd, isInductionVariableChangeIncremental, bIndexAlreadyChanged, insertBeforeInstr);
IR::IndirOpnd* dstOpnd = IR::IndirOpnd::New(baseOpnd, startIndexOpnd, dstType, localFunc);
IR::Opnd *src1;
const bool isMemset = emitData->candidate->IsMemSet();
// Get the source according to the memop type
if (isMemset)
{
MemSetEmitData* data = (MemSetEmitData*)emitData;
const Loop::MemSetCandidate* candidate = data->candidate->AsMemSet();
if (candidate->srcSym)
{
IR::RegOpnd* regSrc = IR::RegOpnd::New(candidate->srcSym, candidate->srcSym->GetType(), func);
regSrc->SetIsJITOptimizedReg(true);
src1 = regSrc;
}
else
{
src1 = IR::AddrOpnd::New(candidate->constant.ToVar(localFunc), IR::AddrOpndKindConstantAddress, localFunc);
}
}
else
{
Assert(emitData->candidate->IsMemCopy());
MemCopyEmitData* data = (MemCopyEmitData*)emitData;
Assert(data->ldElemInstr);
Assert(data->ldElemInstr->m_opcode == Js::OpCode::LdElemI_A);
IR::RegOpnd *srcBaseOpnd = nullptr;
IR::RegOpnd *srcIndexOpnd = nullptr;
IRType srcType;
GetMemOpSrcInfo(loop, data->ldElemInstr, srcBaseOpnd, srcIndexOpnd, srcType);
Assert(GetVarSymID(srcIndexOpnd->GetStackSym()) == GetVarSymID(indexOpnd->GetStackSym()));
src1 = IR::IndirOpnd::New(srcBaseOpnd, startIndexOpnd, srcType, localFunc);
}
    // Generate the memop instruction (Memset or Memcopy)
IR::Instr* memopInstr = IR::BailOutInstr::New(isMemset ? Js::OpCode::Memset : Js::OpCode::Memcopy, bailOutKind, bailOutInfo, localFunc);
memopInstr->SetDst(dstOpnd);
memopInstr->SetSrc1(src1);
memopInstr->SetSrc2(sizeOpnd);
insertBeforeInstr->InsertBefore(memopInstr);
#if DBG_DUMP
if (DO_MEMOP_TRACE())
{
char valueTypeStr[VALUE_TYPE_MAX_STRING_SIZE];
baseOpnd->GetValueType().ToString(valueTypeStr);
const int loopCountBufSize = 16;
char16 loopCountBuf[loopCountBufSize];
if (loopCount->LoopCountMinusOneSym())
{
swprintf_s(loopCountBuf, _u("s%u"), loopCount->LoopCountMinusOneSym()->m_id);
}
else
{
swprintf_s(loopCountBuf, _u("%u"), loopCount->LoopCountMinusOneConstantValue() + 1);
}
if (isMemset)
{
const Loop::MemSetCandidate* candidate = emitData->candidate->AsMemSet();
const int constBufSize = 32;
char16 constBuf[constBufSize];
if (candidate->srcSym)
{
swprintf_s(constBuf, _u("s%u"), candidate->srcSym->m_id);
}
else
{
switch (candidate->constant.type)
{
case TyInt8:
case TyInt16:
case TyInt32:
case TyInt64:
swprintf_s(constBuf, sizeof(IntConstType) == 8 ? _u("%lld") : _u("%d"), candidate->constant.u.intConst.value);
break;
case TyFloat32:
case TyFloat64:
swprintf_s(constBuf, _u("%.4f"), candidate->constant.u.floatConst.value);
break;
case TyVar:
swprintf_s(constBuf, sizeof(Js::Var) == 8 ? _u("0x%.16llX") : _u("0x%.8X"), candidate->constant.u.varConst.value);
break;
default:
AssertMsg(false, "Unsupported constant type");
swprintf_s(constBuf, _u("Unknown"));
break;
}
}
TRACE_MEMOP_PHASE(MemSet, loop, emitData->stElemInstr,
_u("ValueType: %S, Base: s%u, Index: s%u, Constant: %s, LoopCount: %s, IsIndexChangedBeforeUse: %d"),
valueTypeStr,
candidate->base,
candidate->index,
constBuf,
loopCountBuf,
bIndexAlreadyChanged);
}
else
{
const Loop::MemCopyCandidate* candidate = emitData->candidate->AsMemCopy();
TRACE_MEMOP_PHASE(MemCopy, loop, emitData->stElemInstr,
_u("ValueType: %S, StBase: s%u, Index: s%u, LdBase: s%u, LoopCount: %s, IsIndexChangedBeforeUse: %d"),
valueTypeStr,
candidate->base,
candidate->index,
candidate->ldBase,
loopCountBuf,
bIndexAlreadyChanged);
}
}
#endif
RemoveMemOpSrcInstr(memopInstr, emitData->stElemInstr, emitData->block);
if (!isMemset)
{
RemoveMemOpSrcInstr(memopInstr, ((MemCopyEmitData*)emitData)->ldElemInstr, emitData->block);
}
}
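// Checks whether instr is the StElemI_A belonging to this memset candidate.
// Returns true once the store has been found; any other element access seen
// in the loop is an orphan and flags errorInInstr.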
bool
GlobOpt::InspectInstrForMemSetCandidate(Loop* loop, IR::Instr* instr, MemSetEmitData* emitData, bool& errorInInstr)
{
Assert(emitData && emitData->candidate && emitData->candidate->IsMemSet());
Loop::MemSetCandidate* candidate = (Loop::MemSetCandidate*)emitData->candidate;
if (instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict)
{
if (instr->GetDst()->IsIndirOpnd()
&& (GetVarSymID(instr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->GetStackSym()) == candidate->base)
&& (GetVarSymID(instr->GetDst()->AsIndirOpnd()->GetIndexOpnd()->GetStackSym()) == candidate->index)
)
{
Assert(instr->IsProfiledInstr());
emitData->stElemInstr = instr;
emitData->bailOutKind = instr->GetBailOutKind();
return true;
}
TRACE_MEMOP_PHASE_VERBOSE(MemSet, loop, instr, _u("Orphan StElemI_A detected"));
errorInInstr = true;
}
else if (instr->m_opcode == Js::OpCode::LdElemI_A)
{
TRACE_MEMOP_PHASE_VERBOSE(MemSet, loop, instr, _u("Orphan LdElemI_A detected"));
errorInInstr = true;
}
return false;
}
bool
GlobOpt::InspectInstrForMemCopyCandidate(Loop* loop, IR::Instr* instr, MemCopyEmitData* emitData, bool& errorInInstr)
{
Assert(emitData && emitData->candidate && emitData->candidate->IsMemCopy());
Loop::MemCopyCandidate* candidate = (Loop::MemCopyCandidate*)emitData->candidate;
if (instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict)
{
if (
instr->GetDst()->IsIndirOpnd() &&
(GetVarSymID(instr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->GetStackSym()) == candidate->base) &&
(GetVarSymID(instr->GetDst()->AsIndirOpnd()->GetIndexOpnd()->GetStackSym()) == candidate->index)
)
{
Assert(instr->IsProfiledInstr());
emitData->stElemInstr = instr;
emitData->bailOutKind = instr->GetBailOutKind();
// Still need to find the LdElem
return false;
}
TRACE_MEMOP_PHASE_VERBOSE(MemCopy, loop, instr, _u("Orphan StElemI_A detected"));
errorInInstr = true;
}
else if (instr->m_opcode == Js::OpCode::LdElemI_A)
{
if (
emitData->stElemInstr &&
instr->GetSrc1()->IsIndirOpnd() &&
(GetVarSymID(instr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->GetStackSym()) == candidate->ldBase) &&
(GetVarSymID(instr->GetSrc1()->AsIndirOpnd()->GetIndexOpnd()->GetStackSym()) == candidate->index)
)
{
Assert(instr->IsProfiledInstr());
emitData->ldElemInstr = instr;
ValueType stValueType = emitData->stElemInstr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->GetValueType();
ValueType ldValueType = emitData->ldElemInstr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->GetValueType();
if (stValueType != ldValueType)
{
#if DBG_DUMP
char16 stValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE];
stValueType.ToString(stValueTypeStr);
char16 ldValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE];
ldValueType.ToString(ldValueTypeStr);
TRACE_MEMOP_PHASE_VERBOSE(MemCopy, loop, instr, _u("for mismatch in Load(%s) and Store(%s) value type"), ldValueTypeStr, stValueTypeStr);
#endif
errorInInstr = true;
return false;
}
            // We found both instructions for this candidate
return true;
}
TRACE_MEMOP_PHASE_VERBOSE(MemCopy, loop, instr, _u("Orphan LdElemI_A detected"));
errorInInstr = true;
}
return false;
}
// The caller is responsible for freeing the memory allocated in inOrderEmitData[iEmitData .. end]
bool
GlobOpt::ValidateMemOpCandidates(Loop * loop, _Out_writes_(iEmitData) MemOpEmitData** inOrderEmitData, int& iEmitData)
{
AnalysisAssert(iEmitData == (int)loop->memOpInfo->candidates->Count());
    // We iterate over the second block of the loop only. MemOp works only if the loop has exactly 2 blocks
Assert(loop->blockList.HasTwo());
Loop::MemOpList::Iterator iter(loop->memOpInfo->candidates);
BasicBlock* bblock = loop->blockList.Head()->next;
Loop::MemOpCandidate* candidate = nullptr;
MemOpEmitData* emitData = nullptr;
    // Iterate backward because the list of candidates is reversed
FOREACH_INSTR_BACKWARD_IN_BLOCK(instr, bblock)
{
if (!candidate)
{
// Time to check next candidate
if (!iter.Next())
{
// We have been through the whole list of candidates, finish
break;
}
candidate = iter.Data();
if (!candidate)
{
continue;
}
// Common check for memset and memcopy
Loop::InductionVariableChangeInfo inductionVariableChangeInfo = { 0, 0 };
// Get the inductionVariable changeInfo
if (!loop->memOpInfo->inductionVariableChangeInfoMap->TryGetValue(candidate->index, &inductionVariableChangeInfo))
{
TRACE_MEMOP_VERBOSE(loop, nullptr, _u("MemOp skipped (s%d): no induction variable"), candidate->base);
return false;
}
if (inductionVariableChangeInfo.unroll != candidate->count)
{
TRACE_MEMOP_VERBOSE(loop, nullptr, _u("MemOp skipped (s%d): not matching unroll count"), candidate->base);
return false;
}
if (candidate->IsMemSet())
{
Assert(!PHASE_OFF(Js::MemSetPhase, this->func));
emitData = JitAnew(this->alloc, MemSetEmitData);
}
else
{
Assert(!PHASE_OFF(Js::MemCopyPhase, this->func));
// Specific check for memcopy
Assert(candidate->IsMemCopy());
Loop::MemCopyCandidate* memcopyCandidate = candidate->AsMemCopy();
if (memcopyCandidate->base == Js::Constants::InvalidSymID
|| memcopyCandidate->ldBase == Js::Constants::InvalidSymID
|| (memcopyCandidate->ldCount != memcopyCandidate->count))
{
TRACE_MEMOP_PHASE(MemCopy, loop, nullptr, _u("(s%d): not matching ldElem and stElem"), candidate->base);
return false;
}
emitData = JitAnew(this->alloc, MemCopyEmitData);
}
Assert(emitData);
emitData->block = bblock;
emitData->inductionVar = inductionVariableChangeInfo;
emitData->candidate = candidate;
}
bool errorInInstr = false;
bool candidateFound = candidate->IsMemSet() ?
InspectInstrForMemSetCandidate(loop, instr, (MemSetEmitData*)emitData, errorInInstr)
: InspectInstrForMemCopyCandidate(loop, instr, (MemCopyEmitData*)emitData, errorInInstr);
if (errorInInstr)
{
JitAdelete(this->alloc, emitData);
return false;
}
if (candidateFound)
{
AnalysisAssert(iEmitData > 0);
if (iEmitData == 0)
{
// Explicit for OACR
break;
}
inOrderEmitData[--iEmitData] = emitData;
candidate = nullptr;
emitData = nullptr;
}
} NEXT_INSTR_BACKWARD_IN_BLOCK;
if (iter.IsValid())
{
TRACE_MEMOP(loop, nullptr, _u("Candidates not found in loop while validating"));
return false;
}
return true;
}
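// Driver for the memop transformation: for each loop with memop candidates,
// ensures a loop count is available, validates the candidates against the
// loop body, and emits a Memset/Memcopy for each candidate that validates.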
void
GlobOpt::ProcessMemOp()
{
FOREACH_LOOP_IN_FUNC_EDITING(loop, this->func)
{
if (HasMemOp(loop))
{
const int candidateCount = loop->memOpInfo->candidates->Count();
Assert(candidateCount > 0);
LoopCount * loopCount = GetOrGenerateLoopCountForMemOp(loop);
            // If loopCount is not available we cannot continue with memop
if (!loopCount || !(loopCount->LoopCountMinusOneSym() || loopCount->LoopCountMinusOneConstantValue()))
{
TRACE_MEMOP(loop, nullptr, _u("MemOp skipped for no loop count"));
loop->doMemOp = false;
loop->memOpInfo->candidates->Clear();
continue;
}
            // The candidate list is reversed; validate the candidates and place them in order in the following array
MemOpEmitData** inOrderCandidates = JitAnewArray(this->alloc, MemOpEmitData*, candidateCount);
int i = candidateCount;
if (ValidateMemOpCandidates(loop, inOrderCandidates, i))
{
Assert(i == 0);
                // Process the valid MemOp candidates in order.
for (; i < candidateCount; ++i)
{
// Emit
EmitMemop(loop, loopCount, inOrderCandidates[i]);
JitAdelete(this->alloc, inOrderCandidates[i]);
}
}
else
{
Assert(i != 0);
for (; i < candidateCount; ++i)
{
JitAdelete(this->alloc, inOrderCandidates[i]);
}
// One of the memop candidates did not validate. Do not emit for this loop.
loop->doMemOp = false;
loop->memOpInfo->candidates->Clear();
}
// Free memory
JitAdeleteArray(this->alloc, candidateCount, inOrderCandidates);
}
} NEXT_LOOP_EDITING;
}
| 38.806877 | 80 | 0.569813 | makepaddev |
8b19920335abcfa8c4c1746aef97e948f21828ec | 1,000 | cpp | C++ | Flick/src/Flick/Renderer/Buffer.cpp | firo1738/FLICK | a6ccb0f23c212d0f1b97f71520beb3a89be57f2d | ["Apache-2.0"] | null | null | null | Flick/src/Flick/Renderer/Buffer.cpp | firo1738/FLICK | a6ccb0f23c212d0f1b97f71520beb3a89be57f2d | ["Apache-2.0"] | null | null | null | Flick/src/Flick/Renderer/Buffer.cpp | firo1738/FLICK | a6ccb0f23c212d0f1b97f71520beb3a89be57f2d | ["Apache-2.0"] | null | null | null |
#include "fipch.h"
#include "Buffer.h"
#include "Renderer.h"
#include "Platform/OpenGL/OpenGLBuffer.h"
namespace Flick
{
	///////////////////Vertex Buffer///////////////////
	VertexBuffer* VertexBuffer::Create(float* vertices, uint32_t size)
{
switch (Renderer::GetAPI())
{
			case RendererAPI::API::None: FI_CORE_ASSERT(false, "RendererAPI::None is not yet supported by Flick!"); return nullptr;
			case RendererAPI::API::OpenGL: return new OpenGLVertexBuffer(vertices, size);
}
FI_CORE_ASSERT(false, "Unknown RendererAPI!");
return nullptr;
}
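	// Example usage (sketch; assumes an OpenGL renderer has been selected and
	// "verts" is caller-provided data):
	//   float verts[] = { 0.0f, 0.5f, -0.5f };
	//   VertexBuffer* vb = VertexBuffer::Create(verts, sizeof(verts));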
///////////////////Index Buffer///////////////////
	IndexBuffer* IndexBuffer::Create(uint32_t* indices, uint32_t count)
{
switch (Renderer::GetAPI())
{
			case RendererAPI::API::None: FI_CORE_ASSERT(false, "RendererAPI::None is not yet supported by Flick!"); return nullptr;
			case RendererAPI::API::OpenGL: return new OpenGLIndexBuffer(indices, count);
}
FI_CORE_ASSERT(false, "Unknown RendererAPI!");
return nullptr;
}
}
| 27.027027 | 121 | 0.679 | firo1738 |
8b1ba8c9a1f015dfe538c5ca55526f5b1addd0bd | 27,437 | cpp | C++ | tess-two/jni/com_googlecode_tesseract_android/src/ccstruct/statistc.cpp | leejoo71/tess-two | 2f3e2eec1d1cc61b0f27c31deb845fc460c78b00 | ["Apache-2.0"] | 3,479 | 2015-01-05T10:07:00.000Z | 2022-03-31T06:00:43.000Z | tess-two/jni/com_googlecode_tesseract_android/src/ccstruct/statistc.cpp | skumailraza/Tess4OCR | 26001172d5d856b0553cb36e3ae9eb64f1631cb0 | ["Apache-2.0"] | 207 | 2015-01-05T11:45:56.000Z | 2019-10-20T00:53:21.000Z | tess-two/jni/com_googlecode_tesseract_android/src/ccstruct/statistc.cpp | skumailraza/Tess4OCR | 26001172d5d856b0553cb36e3ae9eb64f1631cb0 | ["Apache-2.0"] | 1,287 | 2015-01-05T11:51:28.000Z | 2022-03-29T03:36:11.000Z |
/**********************************************************************
* File: statistc.c (Formerly stats.c)
* Description: Simple statistical package for integer values.
* Author: Ray Smith
* Created: Mon Feb 04 16:56:05 GMT 1991
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#include "statistc.h"
#include <string.h>
#include <math.h>
#include <stdlib.h>
#include "helpers.h"
#include "scrollview.h"
#include "tprintf.h"
using tesseract::KDPairInc;
/**********************************************************************
* STATS::STATS
*
* Construct a new stats element by allocating and zeroing the memory.
**********************************************************************/
STATS::STATS(inT32 min_bucket_value, inT32 max_bucket_value_plus_1) {
if (max_bucket_value_plus_1 <= min_bucket_value) {
min_bucket_value = 0;
max_bucket_value_plus_1 = 1;
}
rangemin_ = min_bucket_value; // setup
rangemax_ = max_bucket_value_plus_1;
buckets_ = new inT32[rangemax_ - rangemin_];
clear();
}
STATS::STATS() {
rangemax_ = 0;
rangemin_ = 0;
buckets_ = NULL;
}
/**********************************************************************
* STATS::set_range
*
* Alter the range on an existing stats element.
**********************************************************************/
bool STATS::set_range(inT32 min_bucket_value, inT32 max_bucket_value_plus_1) {
if (max_bucket_value_plus_1 <= min_bucket_value) {
return false;
}
if (rangemax_ - rangemin_ != max_bucket_value_plus_1 - min_bucket_value) {
delete [] buckets_;
buckets_ = new inT32[max_bucket_value_plus_1 - min_bucket_value];
}
rangemin_ = min_bucket_value; // setup
rangemax_ = max_bucket_value_plus_1;
clear(); // zero it
return true;
}
/**********************************************************************
* STATS::clear
*
* Clear out the STATS class by zeroing all the buckets.
**********************************************************************/
void STATS::clear() { // clear out buckets
total_count_ = 0;
if (buckets_ != NULL)
memset(buckets_, 0, (rangemax_ - rangemin_) * sizeof(buckets_[0]));
}
/**********************************************************************
* STATS::~STATS
*
* Destructor for a stats class.
**********************************************************************/
STATS::~STATS () {
if (buckets_ != NULL) {
delete [] buckets_;
buckets_ = NULL;
}
}
/**********************************************************************
* STATS::add
*
* Add a set of samples to (or delete from) a pile.
**********************************************************************/
void STATS::add(inT32 value, inT32 count) {
if (buckets_ == NULL) {
return;
}
value = ClipToRange(value, rangemin_, rangemax_ - 1);
buckets_[value - rangemin_] += count;
total_count_ += count; // keep count of total
}
/**********************************************************************
* STATS::mode
*
* Find the mode of a stats class.
**********************************************************************/
inT32 STATS::mode() const { // get mode of samples
if (buckets_ == NULL) {
return rangemin_;
}
inT32 max = buckets_[0]; // max cell count
inT32 maxindex = 0; // index of max
for (int index = rangemax_ - rangemin_ - 1; index > 0; --index) {
if (buckets_[index] > max) {
max = buckets_[index]; // find biggest
maxindex = index;
}
}
return maxindex + rangemin_; // index of biggest
}
/**********************************************************************
* STATS::mean
*
* Find the mean of a stats class.
**********************************************************************/
double STATS::mean() const { //get mean of samples
if (buckets_ == NULL || total_count_ <= 0) {
return static_cast<double>(rangemin_);
}
inT64 sum = 0;
for (int index = rangemax_ - rangemin_ - 1; index >= 0; --index) {
sum += static_cast<inT64>(index) * buckets_[index];
}
return static_cast<double>(sum) / total_count_ + rangemin_;
}
/**********************************************************************
* STATS::sd
*
* Find the standard deviation of a stats class.
**********************************************************************/
double STATS::sd() const { //standard deviation
if (buckets_ == NULL || total_count_ <= 0) {
return 0.0;
}
inT64 sum = 0;
double sqsum = 0.0;
for (int index = rangemax_ - rangemin_ - 1; index >= 0; --index) {
sum += static_cast<inT64>(index) * buckets_[index];
sqsum += static_cast<double>(index) * index * buckets_[index];
}
double variance = static_cast<double>(sum) / total_count_;
variance = sqsum / total_count_ - variance * variance;
if (variance > 0.0)
return sqrt(variance);
return 0.0;
}
/**********************************************************************
* STATS::ile
*
* Returns the fractile value such that frac fraction (in [0,1]) of samples
* has a value less than the return value.
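 * For example, ile(0.5) estimates the median; median() below builds on it.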
**********************************************************************/
double STATS::ile(double frac) const {
if (buckets_ == NULL || total_count_ == 0) {
return static_cast<double>(rangemin_);
}
#if 0
// TODO(rays) The existing code doesn't seem to be doing the right thing
  // with target as a double, but this substitute crashes the code that uses it.
// Investigate and fix properly.
int target = IntCastRounded(frac * total_count_);
target = ClipToRange(target, 1, total_count_);
#else
double target = frac * total_count_;
target = ClipToRange(target, 1.0, static_cast<double>(total_count_));
#endif
int sum = 0;
int index = 0;
for (index = 0; index < rangemax_ - rangemin_ && sum < target;
sum += buckets_[index++]);
if (index > 0) {
ASSERT_HOST(buckets_[index - 1] > 0);
return rangemin_ + index -
static_cast<double>(sum - target) / buckets_[index - 1];
} else {
return static_cast<double>(rangemin_);
}
}
/**********************************************************************
* STATS::min_bucket
*
* Find REAL minimum bucket - ile(0.0) isn't necessarily correct
**********************************************************************/
inT32 STATS::min_bucket() const { // Find min
if (buckets_ == NULL || total_count_ == 0) {
return rangemin_;
}
inT32 min = 0;
for (min = 0; (min < rangemax_ - rangemin_) && (buckets_[min] == 0); min++);
return rangemin_ + min;
}
/**********************************************************************
* STATS::max_bucket
*
* Find REAL maximum bucket - ile(1.0) isn't necessarily correct
**********************************************************************/
inT32 STATS::max_bucket() const { // Find max
if (buckets_ == NULL || total_count_ == 0) {
return rangemin_;
}
inT32 max;
for (max = rangemax_ - rangemin_ - 1; max > 0 && buckets_[max] == 0; max--);
return rangemin_ + max;
}
/**********************************************************************
* STATS::median
*
* Finds a more useful estimate of median than ile(0.5).
*
* Overcomes a problem with ile() - if the samples are, for example,
 * 6,6,13,14 ile(0.5) returns 7.0 - when a more useful value would be midway
* between 6 and 13 = 9.5
**********************************************************************/
double STATS::median() const { //get median
if (buckets_ == NULL) {
return static_cast<double>(rangemin_);
}
double median = ile(0.5);
int median_pile = static_cast<int>(floor(median));
if ((total_count_ > 1) && (pile_count(median_pile) == 0)) {
inT32 min_pile;
inT32 max_pile;
/* Find preceding non zero pile */
for (min_pile = median_pile; pile_count(min_pile) == 0; min_pile--);
/* Find following non zero pile */
for (max_pile = median_pile; pile_count(max_pile) == 0; max_pile++);
median = (min_pile + max_pile) / 2.0;
}
return median;
}
/**********************************************************************
* STATS::local_min
*
* Return TRUE if this point is a local min.
**********************************************************************/
bool STATS::local_min(inT32 x) const {
if (buckets_ == NULL) {
return false;
}
x = ClipToRange(x, rangemin_, rangemax_ - 1) - rangemin_;
if (buckets_[x] == 0)
return true;
inT32 index; // table index
for (index = x - 1; index >= 0 && buckets_[index] == buckets_[x]; --index);
if (index >= 0 && buckets_[index] < buckets_[x])
return false;
for (index = x + 1; index < rangemax_ - rangemin_ &&
buckets_[index] == buckets_[x]; ++index);
if (index < rangemax_ - rangemin_ && buckets_[index] < buckets_[x])
return false;
else
return true;
}
/**********************************************************************
* STATS::smooth
*
* Apply a triangular smoothing filter to the stats.
* This makes the modes a bit more useful.
* The factor gives the height of the triangle, i.e. the weight of the
* centre.
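 * A neighbour at distance d (d < factor) contributes with weight (factor - d).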
**********************************************************************/
void STATS::smooth(inT32 factor) {
if (buckets_ == NULL || factor < 2) {
return;
}
STATS result(rangemin_, rangemax_);
int entrycount = rangemax_ - rangemin_;
for (int entry = 0; entry < entrycount; entry++) {
//centre weight
int count = buckets_[entry] * factor;
for (int offset = 1; offset < factor; offset++) {
if (entry - offset >= 0)
count += buckets_[entry - offset] * (factor - offset);
if (entry + offset < entrycount)
count += buckets_[entry + offset] * (factor - offset);
}
result.add(entry + rangemin_, count);
}
total_count_ = result.total_count_;
memcpy(buckets_, result.buckets_, entrycount * sizeof(buckets_[0]));
}
/**********************************************************************
* STATS::cluster
*
* Cluster the samples into max_cluster clusters.
* Each call runs one iteration. The array of clusters must be
* max_clusters+1 in size as cluster 0 is used to indicate which samples
* have been used.
* The return value is the current number of clusters.
**********************************************************************/
inT32 STATS::cluster(float lower, // thresholds
float upper,
float multiple, // distance threshold
inT32 max_clusters, // max no to make
STATS *clusters) { // array of clusters
BOOL8 new_cluster; // added one
float *centres; // cluster centres
inT32 entry; // bucket index
inT32 cluster; // cluster index
inT32 best_cluster; // one to assign to
inT32 new_centre = 0; // residual mode
inT32 new_mode; // pile count of new_centre
inT32 count; // pile to place
float dist; // from cluster
float min_dist; // from best_cluster
inT32 cluster_count; // no of clusters
if (buckets_ == NULL || max_clusters < 1)
return 0;
centres = new float[max_clusters + 1];
for (cluster_count = 1; cluster_count <= max_clusters
&& clusters[cluster_count].buckets_ != NULL
&& clusters[cluster_count].total_count_ > 0;
cluster_count++) {
centres[cluster_count] =
static_cast<float>(clusters[cluster_count].ile(0.5));
new_centre = clusters[cluster_count].mode();
for (entry = new_centre - 1; centres[cluster_count] - entry < lower
&& entry >= rangemin_
&& pile_count(entry) <= pile_count(entry + 1);
entry--) {
count = pile_count(entry) - clusters[0].pile_count(entry);
if (count > 0) {
clusters[cluster_count].add(entry, count);
clusters[0].add (entry, count);
}
}
for (entry = new_centre + 1; entry - centres[cluster_count] < lower
&& entry < rangemax_
&& pile_count(entry) <= pile_count(entry - 1);
entry++) {
count = pile_count(entry) - clusters[0].pile_count(entry);
if (count > 0) {
clusters[cluster_count].add(entry, count);
clusters[0].add(entry, count);
}
}
}
cluster_count--;
if (cluster_count == 0) {
clusters[0].set_range(rangemin_, rangemax_);
}
do {
new_cluster = FALSE;
new_mode = 0;
for (entry = 0; entry < rangemax_ - rangemin_; entry++) {
count = buckets_[entry] - clusters[0].buckets_[entry];
//remaining pile
if (count > 0) { //any to handle
min_dist = static_cast<float>(MAX_INT32);
best_cluster = 0;
for (cluster = 1; cluster <= cluster_count; cluster++) {
dist = entry + rangemin_ - centres[cluster];
//find distance
if (dist < 0)
dist = -dist;
if (dist < min_dist) {
min_dist = dist; //find least
best_cluster = cluster;
}
}
if (min_dist > upper //far enough for new
&& (best_cluster == 0
|| entry + rangemin_ > centres[best_cluster] * multiple
|| entry + rangemin_ < centres[best_cluster] / multiple)) {
if (count > new_mode) {
new_mode = count;
new_centre = entry + rangemin_;
}
}
}
}
// need new and room
if (new_mode > 0 && cluster_count < max_clusters) {
cluster_count++;
new_cluster = TRUE;
if (!clusters[cluster_count].set_range(rangemin_, rangemax_)) {
delete [] centres;
return 0;
}
centres[cluster_count] = static_cast<float>(new_centre);
clusters[cluster_count].add(new_centre, new_mode);
clusters[0].add(new_centre, new_mode);
for (entry = new_centre - 1; centres[cluster_count] - entry < lower
&& entry >= rangemin_
&& pile_count (entry) <= pile_count(entry + 1); entry--) {
count = pile_count(entry) - clusters[0].pile_count(entry);
if (count > 0) {
clusters[cluster_count].add(entry, count);
clusters[0].add(entry, count);
}
}
for (entry = new_centre + 1; entry - centres[cluster_count] < lower
&& entry < rangemax_
&& pile_count (entry) <= pile_count(entry - 1); entry++) {
count = pile_count(entry) - clusters[0].pile_count(entry);
if (count > 0) {
clusters[cluster_count].add(entry, count);
clusters[0].add (entry, count);
}
}
centres[cluster_count] =
static_cast<float>(clusters[cluster_count].ile(0.5));
}
} while (new_cluster && cluster_count < max_clusters);
delete [] centres;
return cluster_count;
}
// Helper tests that the current index is still part of the peak and gathers
// the data into the peak, returning false when the peak is ended.
// src_buckets[index] - used_buckets[index] is the unused part of the histogram.
// prev_count is the histogram count of the previous index on entry and is
// updated to the current index on return.
// total_count and total_value are accumulating the mean of the peak.
static bool GatherPeak(int index, const int* src_buckets, int* used_buckets,
int* prev_count, int* total_count, double* total_value) {
int pile_count = src_buckets[index] - used_buckets[index];
if (pile_count <= *prev_count && pile_count > 0) {
// Accumulate count and index.count product.
*total_count += pile_count;
*total_value += index * pile_count;
// Mark this index as used
used_buckets[index] = src_buckets[index];
*prev_count = pile_count;
return true;
} else {
return false;
}
}
// Finds (at most) the top max_modes modes, well actually the whole peak around
// each mode, returning them in the given modes vector as a <mean of peak,
// total count of peak> pair in order of decreasing total count.
// Since the mean is the key and the count the data in the pair, a single call
// to sort on the output will re-sort by increasing mean of peak if that is
// more useful than decreasing total count.
// Returns the actual number of modes found.
int STATS::top_n_modes(int max_modes,
GenericVector<KDPairInc<float, int> >* modes) const {
if (max_modes <= 0) return 0;
int src_count = rangemax_ - rangemin_;
// Used copies the counts in buckets_ as they get used.
STATS used(rangemin_, rangemax_);
modes->truncate(0);
// Total count of the smallest peak found so far.
int least_count = 1;
// Mode that is used as a seed for each peak
int max_count = 0;
do {
// Find an unused mode.
max_count = 0;
int max_index = 0;
for (int src_index = 0; src_index < src_count; src_index++) {
int pile_count = buckets_[src_index] - used.buckets_[src_index];
if (pile_count > max_count) {
max_count = pile_count;
max_index = src_index;
}
}
if (max_count > 0) {
// Copy the bucket count to used so it doesn't get found again.
used.buckets_[max_index] = max_count;
// Get the entire peak.
double total_value = max_index * max_count;
int total_count = max_count;
int prev_pile = max_count;
for (int offset = 1; max_index + offset < src_count; ++offset) {
if (!GatherPeak(max_index + offset, buckets_, used.buckets_,
&prev_pile, &total_count, &total_value))
break;
}
prev_pile = buckets_[max_index];
for (int offset = 1; max_index - offset >= 0; ++offset) {
if (!GatherPeak(max_index - offset, buckets_, used.buckets_,
&prev_pile, &total_count, &total_value))
break;
}
if (total_count > least_count || modes->size() < max_modes) {
        // We definitely want this mode, so if we have enough, discard the least.
if (modes->size() == max_modes)
modes->truncate(max_modes - 1);
int target_index = 0;
// Linear search for the target insertion point.
while (target_index < modes->size() &&
(*modes)[target_index].data >= total_count)
++target_index;
float peak_mean =
static_cast<float>(total_value / total_count + rangemin_);
modes->insert(KDPairInc<float, int>(peak_mean, total_count),
target_index);
least_count = modes->back().data;
}
}
} while (max_count > 0);
return modes->size();
}
/**********************************************************************
* STATS::print
*
* Prints a summary and table of the histogram.
**********************************************************************/
void STATS::print() const {
if (buckets_ == NULL) {
return;
}
inT32 min = min_bucket() - rangemin_;
inT32 max = max_bucket() - rangemin_;
int num_printed = 0;
for (int index = min; index <= max; index++) {
if (buckets_[index] != 0) {
tprintf("%4d:%-3d ", rangemin_ + index, buckets_[index]);
if (++num_printed % 8 == 0)
tprintf ("\n");
}
}
tprintf ("\n");
print_summary();
}
/**********************************************************************
* STATS::print_summary
*
* Print a summary of the stats.
**********************************************************************/
void STATS::print_summary() const {
if (buckets_ == NULL) {
return;
}
inT32 min = min_bucket();
inT32 max = max_bucket();
tprintf("Total count=%d\n", total_count_);
tprintf("Min=%.2f Really=%d\n", ile(0.0), min);
tprintf("Lower quartile=%.2f\n", ile(0.25));
tprintf("Median=%.2f, ile(0.5)=%.2f\n", median(), ile(0.5));
tprintf("Upper quartile=%.2f\n", ile(0.75));
tprintf("Max=%.2f Really=%d\n", ile(1.0), max);
tprintf("Range=%d\n", max + 1 - min);
tprintf("Mean= %.2f\n", mean());
tprintf("SD= %.2f\n", sd());
}
/**********************************************************************
* STATS::plot
*
* Draw a histogram of the stats table.
**********************************************************************/
#ifndef GRAPHICS_DISABLED
void STATS::plot(ScrollView* window, // to draw in
float xorigin, // bottom left
float yorigin,
float xscale, // one x unit
float yscale, // one y unit
ScrollView::Color colour) const { // colour to draw in
if (buckets_ == NULL) {
return;
}
window->Pen(colour);
for (int index = 0; index < rangemax_ - rangemin_; index++) {
window->Rectangle( xorigin + xscale * index, yorigin,
xorigin + xscale * (index + 1),
yorigin + yscale * buckets_[index]);
}
}
#endif
/**********************************************************************
* STATS::plotline
*
* Draw a histogram of the stats table. (Line only)
**********************************************************************/
#ifndef GRAPHICS_DISABLED
void STATS::plotline(ScrollView* window, // to draw in
float xorigin, // bottom left
float yorigin,
float xscale, // one x unit
float yscale, // one y unit
ScrollView::Color colour) const { // colour to draw in
if (buckets_ == NULL) {
return;
}
window->Pen(colour);
window->SetCursor(xorigin, yorigin + yscale * buckets_[0]);
for (int index = 0; index < rangemax_ - rangemin_; index++) {
window->DrawTo(xorigin + xscale * index,
yorigin + yscale * buckets_[index]);
}
}
#endif
/**********************************************************************
* choose_nth_item
*
 * Returns the index of what would be the nth item in the array
* if the members were sorted, without actually sorting.
**********************************************************************/
inT32 choose_nth_item(inT32 index, float *array, inT32 count) {
inT32 next_sample; // next one to do
inT32 next_lesser; // space for new
inT32 prev_greater; // last one saved
inT32 equal_count; // no of equal ones
float pivot; // proposed median
float sample; // current sample
if (count <= 1)
return 0;
if (count == 2) {
if (array[0] < array[1]) {
return index >= 1 ? 1 : 0;
}
else {
return index >= 1 ? 0 : 1;
}
}
else {
if (index < 0)
index = 0; // ensure legal
else if (index >= count)
index = count - 1;
equal_count = (inT32) (rand() % count);
pivot = array[equal_count];
// fill gap
array[equal_count] = array[0];
next_lesser = 0;
prev_greater = count;
equal_count = 1;
for (next_sample = 1; next_sample < prev_greater;) {
sample = array[next_sample];
if (sample < pivot) {
// shuffle
array[next_lesser++] = sample;
next_sample++;
}
else if (sample > pivot) {
prev_greater--;
// juggle
array[next_sample] = array[prev_greater];
array[prev_greater] = sample;
}
else {
equal_count++;
next_sample++;
}
}
for (next_sample = next_lesser; next_sample < prev_greater;)
array[next_sample++] = pivot;
if (index < next_lesser)
return choose_nth_item (index, array, next_lesser);
else if (index < prev_greater)
return next_lesser; // in equal bracket
else
return choose_nth_item (index - prev_greater,
array + prev_greater,
count - prev_greater) + prev_greater;
}
}
/**********************************************************************
* choose_nth_item
*
* Returns the index of what would be the nth item in the array
* if the members were sorted, without actually sorting.
**********************************************************************/
inT32 choose_nth_item(inT32 index, void *array, inT32 count, size_t size,
int (*compar)(const void*, const void*)) {
int result; // of compar
inT32 next_sample; // next one to do
inT32 next_lesser; // space for new
inT32 prev_greater; // last one saved
inT32 equal_count; // no of equal ones
inT32 pivot; // proposed median
if (count <= 1)
return 0;
if (count == 2) {
if (compar (array, (char *) array + size) < 0) {
return index >= 1 ? 1 : 0;
}
else {
return index >= 1 ? 0 : 1;
}
}
if (index < 0)
index = 0; // ensure legal
else if (index >= count)
index = count - 1;
pivot = (inT32) (rand () % count);
swap_entries (array, size, pivot, 0);
next_lesser = 0;
prev_greater = count;
equal_count = 1;
for (next_sample = 1; next_sample < prev_greater;) {
result =
compar ((char *) array + size * next_sample,
(char *) array + size * next_lesser);
if (result < 0) {
swap_entries (array, size, next_lesser++, next_sample++);
// shuffle
}
else if (result > 0) {
prev_greater--;
swap_entries(array, size, prev_greater, next_sample);
}
else {
equal_count++;
next_sample++;
}
}
if (index < next_lesser)
return choose_nth_item (index, array, next_lesser, size, compar);
else if (index < prev_greater)
return next_lesser; // in equal bracket
else
return choose_nth_item (index - prev_greater,
(char *) array + size * prev_greater,
count - prev_greater, size,
compar) + prev_greater;
}
/**********************************************************************
* swap_entries
*
* Swap 2 entries of arbitrary size in-place in a table.
**********************************************************************/
void swap_entries(void *array, // array of entries
size_t size, // size of entry
inT32 index1, // entries to swap
inT32 index2) {
char tmp;
char *ptr1; // to entries
char *ptr2;
size_t count; // of bytes
ptr1 = reinterpret_cast<char*>(array) + index1 * size;
ptr2 = reinterpret_cast<char*>(array) + index2 * size;
for (count = 0; count < size; count++) {
tmp = *ptr1;
*ptr1++ = *ptr2;
*ptr2++ = tmp; // tedious!
}
}
| 34.907125 | 80 | 0.528884 | leejoo71 |
8b1ffff653407c7e8895c8aeb20113097d30754b | 32,159 | cpp | C++ | src/ZNCString.cpp | md-5/znc | 39c741fcd2307d707a0d1bebbed3d80be9b1899b | ["Apache-2.0"] | 1 | 2021-11-11T04:49:01.000Z | 2021-11-11T04:49:01.000Z | src/ZNCString.cpp | md-5/znc | 39c741fcd2307d707a0d1bebbed3d80be9b1899b | ["Apache-2.0"] | null | null | null | src/ZNCString.cpp | md-5/znc | 39c741fcd2307d707a0d1bebbed3d80be9b1899b | ["Apache-2.0"] | 1 | 2021-11-11T04:48:51.000Z | 2021-11-11T04:48:51.000Z |
/*
* Copyright (C) 2004-2015 ZNC, see the NOTICE file for details.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <znc/FileUtils.h>
#include <znc/Utils.h>
#include <znc/MD5.h>
#include <znc/SHA256.h>
#include <sstream>
using std::stringstream;
CString::CString(char c) : string() { stringstream s; s << c; *this = s.str(); }
CString::CString(unsigned char c) : string() { stringstream s; s << c; *this = s.str(); }
CString::CString(short i) : string() { stringstream s; s << i; *this = s.str(); }
CString::CString(unsigned short i) : string() { stringstream s; s << i; *this = s.str(); }
CString::CString(int i) : string() { stringstream s; s << i; *this = s.str(); }
CString::CString(unsigned int i) : string() { stringstream s; s << i; *this = s.str(); }
CString::CString(long i) : string() { stringstream s; s << i; *this = s.str(); }
CString::CString(unsigned long i) : string() { stringstream s; s << i; *this = s.str(); }
CString::CString(long long i) : string() { stringstream s; s << i; *this = s.str(); }
CString::CString(unsigned long long i) : string() { stringstream s; s << i; *this = s.str(); }
CString::CString(double i, int precision) : string() { stringstream s; s.precision(precision); s << std::fixed << i; *this = s.str(); }
CString::CString(float i, int precision) : string() { stringstream s; s.precision(precision); s << std::fixed << i; *this = s.str(); }
unsigned char* CString::strnchr(const unsigned char* src, unsigned char c, unsigned int iMaxBytes, unsigned char* pFill, unsigned int* piCount) const {
for (unsigned int a = 0; a < iMaxBytes && *src; a++, src++) {
if (pFill) {
pFill[a] = *src;
}
if (*src == c) {
if (pFill) {
pFill[a +1] = 0;
}
if (piCount) {
*piCount = a;
}
return (unsigned char*) src;
}
}
if (pFill) {
*pFill = 0;
}
if (piCount) {
*piCount = 0;
}
return NULL;
}
int CString::CaseCmp(const CString& s, CString::size_type uLen) const {
if (uLen != CString::npos) {
return strncasecmp(c_str(), s.c_str(), uLen);
}
return strcasecmp(c_str(), s.c_str());
}
int CString::StrCmp(const CString& s, CString::size_type uLen) const {
if (uLen != CString::npos) {
return strncmp(c_str(), s.c_str(), uLen);
}
return strcmp(c_str(), s.c_str());
}
bool CString::Equals(const CString& s, CaseSensitivity cs) const {
if (cs == CaseSensitive) {
return (StrCmp(s) == 0);
} else {
return (CaseCmp(s) == 0);
}
}
bool CString::Equals(const CString& s, bool bCaseSensitive, CString::size_type uLen) const {
if (bCaseSensitive) {
return (StrCmp(s, uLen) == 0);
} else {
return (CaseCmp(s, uLen) == 0);
}
}
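// Wildcard match where '*' matches any (possibly empty) sequence of
// characters and '?' matches exactly one character,
// e.g. WildCmp("*.cpp", "ZNCString.cpp") returns true.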
bool CString::WildCmp(const CString& sWild, const CString& sString) {
// Written by Jack Handy - [email protected]
const char *wild = sWild.c_str(), *CString = sString.c_str();
const char *cp = NULL, *mp = NULL;
while ((*CString) && (*wild != '*')) {
if ((*wild != *CString) && (*wild != '?')) {
return false;
}
wild++;
CString++;
}
while (*CString) {
if (*wild == '*') {
if (!*++wild) {
return true;
}
mp = wild;
cp = CString+1;
} else if ((*wild == *CString) || (*wild == '?')) {
wild++;
CString++;
} else {
wild = mp;
CString = cp++;
}
}
while (*wild == '*') {
wild++;
}
return (*wild == 0);
}
bool CString::WildCmp(const CString& sWild) const {
return CString::WildCmp(sWild, *this);
}
CString& CString::MakeUpper() {
for (size_type a = 0; a < length(); a++) {
char& c = (*this)[a];
//TODO use unicode
c = (char)toupper(c);
}
return *this;
}
CString& CString::MakeLower() {
for (size_type a = 0; a < length(); a++) {
char& c = (*this)[a];
//TODO use unicode
c = (char)tolower(c);
}
return *this;
}
CString CString::AsUpper() const {
CString sRet = *this;
sRet.MakeUpper();
return sRet;
}
CString CString::AsLower() const {
CString sRet = *this;
sRet.MakeLower();
return sRet;
}
CString::EEscape CString::ToEscape(const CString& sEsc) {
if (sEsc.Equals("ASCII")) {
return EASCII;
} else if (sEsc.Equals("HTML")) {
return EHTML;
} else if (sEsc.Equals("URL")) {
return EURL;
} else if (sEsc.Equals("SQL")) {
return ESQL;
} else if (sEsc.Equals("NAMEDFMT")) {
return ENAMEDFMT;
} else if (sEsc.Equals("DEBUG")) {
return EDEBUG;
} else if (sEsc.Equals("MSGTAG")) {
return EMSGTAG;
} else if (sEsc.Equals("HEXCOLON")) {
return EHEXCOLON;
}
return EASCII;
}
CString CString::Escape_n(EEscape eFrom, EEscape eTo) const {
CString sRet;
const char szHex[] = "0123456789ABCDEF";
const unsigned char *pStart = (const unsigned char*) data();
const unsigned char *p = (const unsigned char*) data();
size_type iLength = length();
sRet.reserve(iLength *3);
unsigned char pTmp[21];
unsigned int iCounted = 0;
for (unsigned int a = 0; a < iLength; a++, p = pStart + a) {
unsigned char ch = 0;
switch (eFrom) {
case EHTML:
if ((*p == '&') && (strnchr((unsigned char*) p, ';', sizeof(pTmp) - 1, pTmp, &iCounted))) {
// please note that we do not have any Unicode or UTF-8 support here at all.
				if ((iCounted >= 3) && (pTmp[1] == '#')) { // do XML and HTML numeric escapes like &#97; or &#x3c;
int base = 10;
if ((pTmp[2] & 0xDF) == 'X') {
base = 16;
}
char* endptr = NULL;
unsigned long int b = strtol((const char*) (pTmp +2 + (base == 16)), &endptr, base);
					if ((*endptr == ';') && (b <= 255)) { // in case they do something like an out-of-range &#...; value
ch = (unsigned char)b;
a += iCounted;
break;
}
}
if (ch == 0) {
					if (!strncasecmp((const char*) &pTmp, "&lt;", 2)) ch = '<';
					else if (!strncasecmp((const char*) &pTmp, "&gt;", 2)) ch = '>';
					else if (!strncasecmp((const char*) &pTmp, "&quot;", 4)) ch = '"';
					else if (!strncasecmp((const char*) &pTmp, "&amp;", 3)) ch = '&';
}
if (ch > 0) {
a += iCounted;
} else {
ch = *p; // Not a valid escape, just record the &
}
} else {
ch = *p;
}
break;
case EASCII:
ch = *p;
break;
case EURL:
if (*p == '%' && (a +2) < iLength && isxdigit(*(p +1)) && isxdigit(*(p +2))) {
p++;
if (isdigit(*p)) {
ch = (unsigned char)((*p - '0') << 4);
} else {
ch = (unsigned char)((tolower(*p) - 'a' +10) << 4);
}
p++;
if (isdigit(*p)) {
ch |= (unsigned char)(*p - '0');
} else {
ch |= (unsigned char)(tolower(*p) - 'a' +10);
}
a += 2;
} else if (pStart[a] == '+') {
ch = ' ';
} else {
ch = *p;
}
break;
case ESQL:
if (*p != '\\' || iLength < (a +1)) {
ch = *p;
} else {
a++;
p++;
if (*p == 'n') {
ch = '\n';
} else if (*p == 'r') {
ch = '\r';
} else if (*p == '0') {
ch = '\0';
} else if (*p == 't') {
ch = '\t';
} else if (*p == 'b') {
ch = '\b';
} else {
ch = *p;
}
}
break;
case ENAMEDFMT:
if (*p != '\\' || iLength < (a +1)) {
ch = *p;
} else {
a++;
p++;
ch = *p;
}
break;
case EDEBUG:
if (*p == '\\' && (a +3) < iLength && *(p +1) == 'x' && isxdigit(*(p +2)) && isxdigit(*(p +3))) {
p += 2;
if (isdigit(*p)) {
ch = (unsigned char)((*p - '0') << 4);
} else {
ch = (unsigned char)((tolower(*p) - 'a' +10) << 4);
}
p++;
if (isdigit(*p)) {
ch |= (unsigned char)(*p - '0');
} else {
ch |= (unsigned char)(tolower(*p) - 'a' +10);
}
a += 3;
} else if (*p == '\\' && a+1 < iLength && *(p+1) == '.') {
a++;
p++;
ch = '\\';
} else {
ch = *p;
}
break;
case EMSGTAG:
if (*p != '\\' || iLength < (a +1)) {
ch = *p;
} else {
a++;
p++;
if (*p == ':') {
ch = ';';
} else if (*p == 's') {
ch = ' ';
} else if (*p == '0') {
ch = '\0';
} else if (*p == '\\') {
ch = '\\';
} else if (*p == 'r') {
ch = '\r';
} else if (*p == 'n') {
ch = '\n';
} else {
ch = *p;
}
}
break;
case EHEXCOLON: {
while (!isxdigit(*p) && a < iLength) {
a++;
p++;
}
if (a == iLength) {
continue;
}
if (isdigit(*p)) {
ch = (unsigned char)((*p - '0') << 4);
} else {
ch = (unsigned char)((tolower(*p) - 'a' +10) << 4);
}
a++;
p++;
while (!isxdigit(*p) && a < iLength) {
a++;
p++;
}
if (a == iLength) {
continue;
}
if (isdigit(*p)) {
ch |= (unsigned char)(*p - '0');
} else {
ch |= (unsigned char)(tolower(*p) - 'a' +10);
}
}
break;
}
switch (eTo) {
case EHTML:
			if (ch == '<') sRet += "&lt;";
			else if (ch == '>') sRet += "&gt;";
			else if (ch == '"') sRet += "&quot;";
			else if (ch == '&') sRet += "&amp;";
else {
sRet += ch;
}
break;
case EASCII:
sRet += ch;
break;
case EURL:
if (isalnum(ch) || ch == '_' || ch == '.' || ch == '-') {
sRet += ch;
} else if (ch == ' ') {
sRet += '+';
} else {
sRet += '%';
sRet += szHex[ch >> 4];
sRet += szHex[ch & 0xf];
}
break;
case ESQL:
if (ch == '\0') { sRet += '\\'; sRet += '0';
} else if (ch == '\n') { sRet += '\\'; sRet += 'n';
} else if (ch == '\t') { sRet += '\\'; sRet += 't';
} else if (ch == '\r') { sRet += '\\'; sRet += 'r';
} else if (ch == '\b') { sRet += '\\'; sRet += 'b';
} else if (ch == '\"') { sRet += '\\'; sRet += '\"';
} else if (ch == '\'') { sRet += '\\'; sRet += '\'';
} else if (ch == '\\') { sRet += '\\'; sRet += '\\';
} else { sRet += ch; }
break;
case ENAMEDFMT:
if (ch == '\\') { sRet += '\\'; sRet += '\\';
} else if (ch == '{') { sRet += '\\'; sRet += '{';
} else if (ch == '}') { sRet += '\\'; sRet += '}';
} else { sRet += ch; }
break;
case EDEBUG:
if (ch < 0x20 || ch == 0x7F) {
sRet += "\\x";
sRet += szHex[ch >> 4];
sRet += szHex[ch & 0xf];
} else if (ch == '\\') {
sRet += "\\.";
} else {
sRet += ch;
}
break;
case EMSGTAG:
if (ch == ';') { sRet += '\\'; sRet += ':';
} else if (ch == ' ') { sRet += '\\'; sRet += 's';
} else if (ch == '\0') { sRet += '\\'; sRet += '0';
} else if (ch == '\\') { sRet += '\\'; sRet += '\\';
} else if (ch == '\r') { sRet += '\\'; sRet += 'r';
} else if (ch == '\n') { sRet += '\\'; sRet += 'n';
} else { sRet += ch; }
break;
case EHEXCOLON: {
sRet += tolower(szHex[ch >> 4]);
sRet += tolower(szHex[ch & 0xf]);
sRet += ":";
}
break;
}
}
if (eTo == EHEXCOLON) {
sRet.TrimRight(":");
}
return sRet;
}
CString CString::Escape_n(EEscape eTo) const {
return Escape_n(EASCII, eTo);
}
CString& CString::Escape(EEscape eFrom, EEscape eTo) {
return (*this = Escape_n(eFrom, eTo));
}
CString& CString::Escape(EEscape eTo) {
return (*this = Escape_n(eTo));
}
CString CString::Replace_n(const CString& sReplace, const CString& sWith, const CString& sLeft, const CString& sRight, bool bRemoveDelims) const {
CString sRet = *this;
CString::Replace(sRet, sReplace, sWith, sLeft, sRight, bRemoveDelims);
return sRet;
}
unsigned int CString::Replace(const CString& sReplace, const CString& sWith, const CString& sLeft, const CString& sRight, bool bRemoveDelims) {
return CString::Replace(*this, sReplace, sWith, sLeft, sRight, bRemoveDelims);
}
unsigned int CString::Replace(CString& sStr, const CString& sReplace, const CString& sWith, const CString& sLeft, const CString& sRight, bool bRemoveDelims) {
unsigned int uRet = 0;
CString sCopy = sStr;
sStr.clear();
size_type uReplaceWidth = sReplace.length();
size_type uLeftWidth = sLeft.length();
size_type uRightWidth = sRight.length();
const char* p = sCopy.c_str();
bool bInside = false;
while (*p) {
if (!bInside && uLeftWidth && strncmp(p, sLeft.c_str(), uLeftWidth) == 0) {
if (!bRemoveDelims) {
sStr += sLeft;
}
p += uLeftWidth -1;
bInside = true;
} else if (bInside && uRightWidth && strncmp(p, sRight.c_str(), uRightWidth) == 0) {
if (!bRemoveDelims) {
sStr += sRight;
}
p += uRightWidth -1;
bInside = false;
} else if (!bInside && strncmp(p, sReplace.c_str(), uReplaceWidth) == 0) {
sStr += sWith;
p += uReplaceWidth -1;
uRet++;
} else {
sStr.append(p, 1);
}
p++;
}
return uRet;
}
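// Returns the (0-based) uPos'th token when the string is split on sSep; with
// bRest == true, returns the rest of the string from that token onwards.
// e.g. CString("a b c").Token(1, false, " ") == "b" and
// CString("a b c").Token(1, true, " ") == "b c".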
CString CString::Token(size_t uPos, bool bRest, const CString& sSep, bool bAllowEmpty,
const CString& sLeft, const CString& sRight, bool bTrimQuotes) const {
VCString vsTokens;
if (Split(sSep, vsTokens, bAllowEmpty, sLeft, sRight, bTrimQuotes) > uPos) {
CString sRet;
for (size_t a = uPos; a < vsTokens.size(); a++) {
if (a > uPos) {
sRet += sSep;
}
sRet += vsTokens[a];
if (!bRest) {
break;
}
}
return sRet;
}
return Token(uPos, bRest, sSep, bAllowEmpty);
}
CString CString::Token(size_t uPos, bool bRest, const CString& sSep, bool bAllowEmpty) const {
const char *sep_str = sSep.c_str();
size_t sep_len = sSep.length();
const char *str = c_str();
size_t str_len = length();
size_t start_pos = 0;
size_t end_pos;
if (!bAllowEmpty) {
while (strncmp(&str[start_pos], sep_str, sep_len) == 0) {
start_pos += sep_len;
}
}
// First, find the start of our token
while (uPos != 0 && start_pos < str_len) {
bool bFoundSep = false;
while (strncmp(&str[start_pos], sep_str, sep_len) == 0 && (!bFoundSep || !bAllowEmpty)) {
start_pos += sep_len;
bFoundSep = true;
}
if (bFoundSep) {
uPos--;
} else {
start_pos++;
}
}
// String is over?
if (start_pos >= str_len)
return "";
// If they want everything from here on, give it to them
if (bRest) {
return substr(start_pos);
}
// Now look for the end of the token they want
end_pos = start_pos;
while (end_pos < str_len) {
if (strncmp(&str[end_pos], sep_str, sep_len) == 0)
return substr(start_pos, end_pos - start_pos);
end_pos++;
}
// They want the last token in the string, not something in between
return substr(start_pos);
}
CString CString::Ellipsize(unsigned int uLen) const {
if (uLen >= size()) {
return *this;
}
string sRet;
// @todo this looks suspect
if (uLen < 4) {
for (unsigned int a = 0; a < uLen; a++) {
sRet += ".";
}
return sRet;
}
sRet = substr(0, uLen -3) + "...";
return sRet;
}
CString CString::Left(size_type uCount) const {
uCount = (uCount > length()) ? length() : uCount;
return substr(0, uCount);
}
CString CString::Right(size_type uCount) const {
uCount = (uCount > length()) ? length() : uCount;
return substr(length() - uCount, uCount);
}
CString::size_type CString::URLSplit(MCString& msRet) const {
msRet.clear();
VCString vsPairs;
Split("&", vsPairs);
for (size_t a = 0; a < vsPairs.size(); a++) {
const CString& sPair = vsPairs[a];
msRet[sPair.Token(0, false, "=").Escape(CString::EURL, CString::EASCII)] = sPair.Token(1, true, "=").Escape(CString::EURL, CString::EASCII);
}
return msRet.size();
}
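// Parses whitespace-separated "key = value" options, e.g. OptionSplit on
// "foo=bar baz=qux" yields {"foo": "bar", "baz": "qux"} (keys uppercased when
// bUpperKeys is true); double-quoted values may contain spaces.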
CString::size_type CString::OptionSplit(MCString& msRet, bool bUpperKeys) const {
CString sName;
CString sCopy(*this);
msRet.clear();
while (!sCopy.empty()) {
sName = sCopy.Token(0, false, "=", false, "\"", "\"", false).Trim_n();
sCopy = sCopy.Token(1, true, "=", false, "\"", "\"", false).TrimLeft_n();
if (sName.empty()) {
continue;
}
VCString vsNames;
sName.Split(" ", vsNames, false, "\"", "\"");
for (unsigned int a = 0; a < vsNames.size(); a++) {
CString sKeyName = vsNames[a];
if (bUpperKeys) {
sKeyName.MakeUpper();
}
if ((a +1) == vsNames.size()) {
msRet[sKeyName] = sCopy.Token(0, false, " ", false, "\"", "\"");
sCopy = sCopy.Token(1, true, " ", false, "\"", "\"", false);
} else {
msRet[sKeyName] = "";
}
}
}
return msRet.size();
}
CString::size_type CString::QuoteSplit(VCString& vsRet) const {
vsRet.clear();
return Split(" ", vsRet, false, "\"", "\"", true);
}
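// Core tokenizer: splits *this on sDelim into vsRet; sLeft/sRight delimit
// quoted regions in which delimiters are ignored (the quotes themselves are
// dropped when bTrimQuotes is set). Note that delimiters and quotes are
// matched with strncasecmp, i.e. case-insensitively.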
CString::size_type CString::Split(const CString& sDelim, VCString& vsRet, bool bAllowEmpty,
const CString& sLeft, const CString& sRight, bool bTrimQuotes, bool bTrimWhiteSpace) const {
vsRet.clear();
if (empty()) {
return 0;
}
CString sTmp;
bool bInside = false;
size_type uDelimLen = sDelim.length();
size_type uLeftLen = sLeft.length();
size_type uRightLen = sRight.length();
const char* p = c_str();
if (!bAllowEmpty) {
while (strncasecmp(p, sDelim.c_str(), uDelimLen) == 0) {
p += uDelimLen;
}
}
while (*p) {
if (uLeftLen && uRightLen && !bInside && strncasecmp(p, sLeft.c_str(), uLeftLen) == 0) {
if (!bTrimQuotes) {
sTmp += sLeft;
}
p += uLeftLen;
bInside = true;
continue;
}
if (uLeftLen && uRightLen && bInside && strncasecmp(p, sRight.c_str(), uRightLen) == 0) {
if (!bTrimQuotes) {
sTmp += sRight;
}
p += uRightLen;
bInside = false;
continue;
}
if (uDelimLen && !bInside && strncasecmp(p, sDelim.c_str(), uDelimLen) == 0) {
if (bTrimWhiteSpace) {
sTmp.Trim();
}
vsRet.push_back(sTmp);
sTmp.clear();
p += uDelimLen;
if (!bAllowEmpty) {
while (strncasecmp(p, sDelim.c_str(), uDelimLen) == 0) {
p += uDelimLen;
}
}
bInside = false;
continue;
} else {
sTmp += *p;
}
p++;
}
if (!sTmp.empty()) {
if (bTrimWhiteSpace) {
sTmp.Trim();
}
vsRet.push_back(sTmp);
}
return vsRet.size();
}
CString::size_type CString::Split(const CString& sDelim, SCString& ssRet, bool bAllowEmpty, const CString& sLeft, const CString& sRight, bool bTrimQuotes, bool bTrimWhiteSpace) const {
VCString vsTokens;
Split(sDelim, vsTokens, bAllowEmpty, sLeft, sRight, bTrimQuotes, bTrimWhiteSpace);
ssRet.clear();
for (size_t a = 0; a < vsTokens.size(); a++) {
ssRet.insert(vsTokens[a]);
}
return ssRet.size();
}
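// Expands "{key}" placeholders in sFormat with the matching entries of
// msValues; a backslash escapes the next character and unknown keys expand
// to an empty string.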
CString CString::NamedFormat(const CString& sFormat, const MCString& msValues) {
CString sRet;
CString sKey;
bool bEscape = false;
bool bParam = false;
const char* p = sFormat.c_str();
while (*p) {
if (!bParam) {
if (bEscape) {
sRet += *p;
bEscape = false;
} else if (*p == '\\') {
bEscape = true;
} else if (*p == '{') {
bParam = true;
sKey.clear();
} else {
sRet += *p;
}
} else {
if (bEscape) {
sKey += *p;
bEscape = false;
} else if (*p == '\\') {
bEscape = true;
} else if (*p == '}') {
bParam = false;
MCString::const_iterator it = msValues.find(sKey);
if (it != msValues.end()) {
sRet += (*it).second;
}
} else {
sKey += *p;
}
}
p++;
}
return sRet;
}
CString CString::RandomString(unsigned int uLength) {
const char chars[] = "abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789!?.,:;/*-+_()";
// -1 because sizeof() includes the trailing '\0' byte
const size_t len = sizeof(chars) / sizeof(chars[0]) - 1;
size_t p;
CString sRet;
for (unsigned int a = 0; a < uLength; a++) {
p = (size_t) (len * (rand() / (RAND_MAX + 1.0)));
sRet += chars[p];
}
return sRet;
}
bool CString::Base64Encode(unsigned int uWrap) {
CString sCopy(*this);
return sCopy.Base64Encode(*this, uWrap);
}
unsigned long CString::Base64Decode() {
CString sCopy(*this);
return sCopy.Base64Decode(*this);
}
CString CString::Base64Encode_n(unsigned int uWrap) const {
CString sRet;
Base64Encode(sRet, uWrap);
return sRet;
}
CString CString::Base64Decode_n() const {
CString sRet;
Base64Decode(sRet);
return sRet;
}
bool CString::Base64Encode(CString& sRet, unsigned int uWrap) const {
const char b64table[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
sRet.clear();
size_t len = size();
const unsigned char* input = (const unsigned char*) c_str();
unsigned char *output, *p;
size_t i = 0, mod = len % 3, toalloc;
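// Worst-case output size: 4 output bytes for every 3 input bytes, up to two
// '=' padding bytes, a terminating NUL and some slack; when wrapping, one
// newline is emitted after every 57 input bytes (76 output characters).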
toalloc = (len / 3) * 4 + (3 - mod) % 3 + 1 + 8;
if (uWrap) {
toalloc += len / 57;
if (len % 57) {
toalloc++;
}
}
if (toalloc < len) {
return false;
}
p = output = new unsigned char [toalloc];
while (i < len - mod) {
*p++ = b64table[input[i++] >> 2];
*p++ = b64table[((input[i - 1] << 4) | (input[i] >> 4)) & 0x3f];
*p++ = b64table[((input[i] << 2) | (input[i + 1] >> 6)) & 0x3f];
*p++ = b64table[input[i + 1] & 0x3f];
i += 2;
if (uWrap && !(i % 57)) {
*p++ = '\n';
}
}
if (!mod) {
if (uWrap && i % 57) {
*p++ = '\n';
}
} else {
*p++ = b64table[input[i++] >> 2];
*p++ = b64table[((input[i - 1] << 4) | (input[i] >> 4)) & 0x3f];
if (mod == 1) {
*p++ = '=';
} else {
*p++ = b64table[(input[i] << 2) & 0x3f];
}
*p++ = '=';
if (uWrap) {
*p++ = '\n';
}
}
*p = 0;
sRet = (char*) output;
delete[] output;
return true;
}
unsigned long CString::Base64Decode(CString& sRet) const {
CString sTmp(*this);
// remove new lines
sTmp.Replace("\r", "");
sTmp.Replace("\n", "");
const char* in = sTmp.c_str();
char c, c1, *p;
unsigned long i;
unsigned long uLen = sTmp.size();
char* out = new char[uLen + 1];
for (i = 0, p = out; i < uLen; i++) {
c = (char)base64_table[(unsigned char)in[i++]];
c1 = (char)base64_table[(unsigned char)in[i++]];
*p++ = char((c << 2) | ((c1 >> 4) & 0x3));
if (i < uLen) {
if (in[i] == '=') {
break;
}
c = (char)base64_table[(unsigned char)in[i]];
*p++ = char(((c1 << 4) & 0xf0) | ((c >> 2) & 0xf));
}
if (++i < uLen) {
if (in[i] == '=') {
break;
}
*p++ = char(((c << 6) & 0xc0) | (char)base64_table[(unsigned char)in[i]]);
}
}
*p = '\0';
unsigned long uRet = p - out;
sRet.clear();
sRet.append(out, uRet);
delete[] out;
return uRet;
}
CString CString::MD5() const {
return (const char*) CMD5(*this);
}
CString CString::SHA256() const {
unsigned char digest[SHA256_DIGEST_SIZE];
char digest_hex[SHA256_DIGEST_SIZE * 2 + 1];
const unsigned char *message = (const unsigned char *) c_str();
sha256(message, length(), digest);
snprintf(digest_hex, sizeof(digest_hex),
"%02x%02x%02x%02x%02x%02x%02x%02x"
"%02x%02x%02x%02x%02x%02x%02x%02x"
"%02x%02x%02x%02x%02x%02x%02x%02x"
"%02x%02x%02x%02x%02x%02x%02x%02x",
digest[ 0], digest[ 1], digest[ 2], digest[ 3], digest[ 4], digest[ 5], digest[ 6], digest[ 7],
digest[ 8], digest[ 9], digest[10], digest[11], digest[12], digest[13], digest[14], digest[15],
digest[16], digest[17], digest[18], digest[19], digest[20], digest[21], digest[22], digest[23],
digest[24], digest[25], digest[26], digest[27], digest[28], digest[29], digest[30], digest[31]);
return digest_hex;
}
#ifdef HAVE_LIBSSL
CString CString::Encrypt_n(const CString& sPass, const CString& sIvec) const {
CString sRet = *this; // operate on a copy of this string, like the other *_n() helpers
sRet.Encrypt(sPass, sIvec);
return sRet;
}
CString CString::Decrypt_n(const CString& sPass, const CString& sIvec) const {
CString sRet = *this;
sRet.Decrypt(sPass, sIvec);
return sRet;
}
void CString::Encrypt(const CString& sPass, const CString& sIvec) {
Crypt(sPass, true, sIvec);
}
void CString::Decrypt(const CString& sPass, const CString& sIvec) {
Crypt(sPass, false, sIvec);
}
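// In-place Blowfish-CBC en-/decryption: the string is zero-padded to a
// multiple of the 8-byte block size and at most the first 8 bytes of sIvec
// are used as the initialization vector.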
void CString::Crypt(const CString& sPass, bool bEncrypt, const CString& sIvec) {
unsigned char szIvec[8] = {0,0,0,0,0,0,0,0};
BF_KEY bKey;
if (sIvec.length() >= 8) {
memcpy(szIvec, sIvec.data(), 8);
}
BF_set_key(&bKey, (unsigned int)sPass.length(), (unsigned char*) sPass.data());
unsigned int uPad = (length() % 8);
if (uPad) {
uPad = 8 - uPad;
append(uPad, '\0');
}
size_t uLen = length();
unsigned char* szBuff = (unsigned char*) malloc(uLen);
BF_cbc_encrypt((const unsigned char*) data(), szBuff, uLen, &bKey, szIvec, ((bEncrypt) ? BF_ENCRYPT : BF_DECRYPT));
clear();
append((const char*) szBuff, uLen);
free(szBuff);
}
#endif // HAVE_LIBSSL
CString CString::ToPercent(double d) {
char szRet[32];
snprintf(szRet, 32, "%.02f%%", d);
return szRet;
}
CString CString::ToByteStr(unsigned long long d) {
const unsigned long long KiB = 1024;
const unsigned long long MiB = KiB * 1024;
const unsigned long long GiB = MiB * 1024;
const unsigned long long TiB = GiB * 1024;
if (d > TiB) {
return CString(d / TiB) + " TiB";
} else if (d > GiB) {
return CString(d / GiB) + " GiB";
} else if (d > MiB) {
return CString(d / MiB) + " MiB";
} else if (d > KiB) {
return CString(d / KiB) + " KiB";
}
return CString(d) + " B";
}
CString CString::ToTimeStr(unsigned long s) {
const unsigned long m = 60;
const unsigned long h = m * 60;
const unsigned long d = h * 24;
const unsigned long w = d * 7;
const unsigned long y = d * 365;
CString sRet;
#define TIMESPAN(time, str) \
if (s >= time) { \
sRet += CString(s / time) + str " "; \
s = s % time; \
}
TIMESPAN(y, "y");
TIMESPAN(w, "w");
TIMESPAN(d, "d");
TIMESPAN(h, "h");
TIMESPAN(m, "m");
TIMESPAN(1, "s");
if (sRet.empty())
return "0s";
return sRet.RightChomp_n();
}
bool CString::ToBool() const {
CString sTrimmed = Trim_n();
return (!sTrimmed.Trim_n("0").empty() &&
!sTrimmed.Equals("false") &&
!sTrimmed.Equals("off") &&
!sTrimmed.Equals("no") &&
!sTrimmed.Equals("n"));
}
short CString::ToShort() const { return (short int)strtol(this->c_str(), (char**) NULL, 10); }
unsigned short CString::ToUShort() const { return (unsigned short int)strtoul(this->c_str(), (char**) NULL, 10); }
unsigned int CString::ToUInt() const { return (unsigned int)strtoul(this->c_str(), (char**) NULL, 10); }
int CString::ToInt() const { return (int)strtol(this->c_str(), (char**) NULL, 10); }
long CString::ToLong() const { return strtol(this->c_str(), (char**) NULL, 10); }
unsigned long CString::ToULong() const { return strtoul(c_str(), NULL, 10); }
unsigned long long CString::ToULongLong() const { return strtoull(c_str(), NULL, 10); }
long long CString::ToLongLong() const { return strtoll(c_str(), NULL, 10); }
double CString::ToDouble() const { return strtod(c_str(), NULL); }
bool CString::Trim(const CString& s) {
bool bLeft = TrimLeft(s);
return (TrimRight(s) || bLeft);
}
bool CString::TrimLeft(const CString& s) {
size_type i = find_first_not_of(s);
if (i == 0)
return false;
if (i != npos)
this->erase(0, i);
else
this->clear();
return true;
}
bool CString::TrimRight(const CString& s) {
size_type i = find_last_not_of(s);
if (i + 1 == length())
return false;
if (i != npos)
this->erase(i + 1, npos);
else
this->clear();
return true;
}
CString CString::Trim_n(const CString& s) const {
CString sRet = *this;
sRet.Trim(s);
return sRet;
}
CString CString::TrimLeft_n(const CString& s) const {
CString sRet = *this;
sRet.TrimLeft(s);
return sRet;
}
CString CString::TrimRight_n(const CString& s) const {
CString sRet = *this;
sRet.TrimRight(s);
return sRet;
}
bool CString::TrimPrefix(const CString& sPrefix) {
if (Equals(sPrefix, false, sPrefix.length())) {
LeftChomp(sPrefix.length());
return true;
} else {
return false;
}
}
bool CString::TrimSuffix(const CString& sSuffix) {
if (Right(sSuffix.length()).Equals(sSuffix)) {
RightChomp(sSuffix.length());
return true;
} else {
return false;
}
}
size_t CString::Find(const CString& s, CaseSensitivity cs) const {
if (cs == CaseSensitive) {
return find(s);
} else {
return AsLower().find(s.AsLower());
}
}
bool CString::StartsWith(const CString& sPrefix, CaseSensitivity cs) const {
return Left(sPrefix.length()).Equals(sPrefix, cs);
}
bool CString::EndsWith(const CString& sSuffix, CaseSensitivity cs) const {
return Right(sSuffix.length()).Equals(sSuffix, cs);
}
bool CString::Contains(const CString& s, CaseSensitivity cs) const {
return Find(s, cs) != npos;
}
CString CString::TrimPrefix_n(const CString& sPrefix) const {
CString sRet = *this;
sRet.TrimPrefix(sPrefix);
return sRet;
}
CString CString::TrimSuffix_n(const CString& sSuffix) const {
CString sRet = *this;
sRet.TrimSuffix(sSuffix);
return sRet;
}
CString CString::LeftChomp_n(size_type uLen) const {
CString sRet = *this;
sRet.LeftChomp(uLen);
return sRet;
}
CString CString::RightChomp_n(size_type uLen) const {
CString sRet = *this;
sRet.RightChomp(uLen);
return sRet;
}
bool CString::LeftChomp(size_type uLen) {
if (uLen == 0 || empty()) {
return false;
}
// erase() clamps the count to the string's length, so a single call suffices
erase(0, uLen);
return true;
}
bool CString::RightChomp(size_type uLen) {
if (uLen == 0 || empty()) {
return false;
}
if (uLen >= length()) {
clear();
} else {
erase(length() - uLen);
}
return true;
}
CString CString::StripControls_n() const {
CString sRet;
const unsigned char *pStart = (const unsigned char*) data();
unsigned char ch = *pStart;
size_type iLength = length();
sRet.reserve(iLength);
bool colorCode = false;
unsigned int digits = 0;
bool comma = false;
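// Note: on the final increment the loop reads pStart[iLength]; this is the
// terminating NUL that std::string's data() guarantees since C++11.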
for (unsigned int a = 0; a < iLength; a++, ch = pStart[a]) {
// Color code. Format: \x03([0-9]{1,2}(,[0-9]{1,2})?)?
if (ch == 0x03) {
colorCode = true;
digits = 0;
comma = false;
continue;
}
if (colorCode) {
if (isdigit(ch) && digits < 2) {
digits++;
continue;
}
if (ch == ',' && !comma && digits > 0) {
comma = true;
digits = 0;
continue;
}
colorCode = false;
if (digits == 0 && comma) { // There was a ',' which wasn't followed by digits, we should print it.
sRet += ',';
}
}
// Strip C0 control codes (0x00-0x1F) and DEL (0x7F)
if (ch < 0x20 || ch == 0x7F)
continue;
sRet += ch;
}
if (colorCode && digits == 0 && comma) {
sRet += ',';
}
sRet.reserve(0);
return sRet;
}
CString& CString::StripControls() {
return (*this = StripControls_n());
}
//////////////// MCString ////////////////
const MCString MCString::EmptyMap;
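// Writes the map to sPath as one "key value" pair per line; Encode() percent-
// escapes both parts so they can never contain the separating space or a
// newline. An empty map deletes (or at least truncates) the file instead.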
MCString::status_t MCString::WriteToDisk(const CString& sPath, mode_t iMode) const {
CFile cFile(sPath);
if (this->empty()) {
if (!cFile.Exists())
return MCS_SUCCESS;
if (cFile.Delete())
return MCS_SUCCESS;
}
if (!cFile.Open(O_WRONLY|O_CREAT|O_TRUNC, iMode)) {
return MCS_EOPEN;
}
for (MCString::const_iterator it = this->begin(); it != this->end(); ++it) {
CString sKey = it->first;
CString sValue = it->second;
if (!WriteFilter(sKey, sValue)) {
return MCS_EWRITEFIL;
}
if (sKey.empty()) {
continue;
}
if (cFile.Write(Encode(sKey) + " " + Encode(sValue) + "\n") <= 0) {
return MCS_EWRITE;
}
}
cFile.Close();
return MCS_SUCCESS;
}
MCString::status_t MCString::ReadFromDisk(const CString& sPath) {
clear();
CFile cFile(sPath);
if (!cFile.Open(O_RDONLY)) {
return MCS_EOPEN;
}
CString sBuffer;
while (cFile.ReadLine(sBuffer)) {
sBuffer.Trim();
CString sKey = sBuffer.Token(0);
CString sValue = sBuffer.Token(1);
Decode(sKey);
Decode(sValue);
if (!ReadFilter(sKey, sValue))
return MCS_EREADFIL;
(*this)[sKey] = sValue;
}
cFile.Close();
return MCS_SUCCESS;
}
static const char hexdigits[] = "0123456789abcdef";
CString& MCString::Encode(CString& sValue) const {
CString sTmp;
for (CString::iterator it = sValue.begin(); it != sValue.end(); ++it) {
// isalnum() needs unsigned char as argument and this code
// assumes unsigned, too.
unsigned char c = *it;
if (isalnum(c)) {
sTmp += c;
} else {
sTmp += "%";
sTmp += hexdigits[c >> 4];
sTmp += hexdigits[c & 0xf];
sTmp += ";";
}
}
sValue = sTmp;
return sValue;
}
CString& MCString::Decode(CString& sValue) const {
const char *pTmp = sValue.c_str();
char *endptr;
CString sTmp;
while (*pTmp) {
if (*pTmp != '%') {
sTmp += *pTmp++;
} else {
char ch = (char) strtol(pTmp + 1, &endptr, 16);
if (*endptr == ';') {
sTmp += ch;
pTmp = ++endptr;
} else {
sTmp += *pTmp++;
}
}
}
sValue = sTmp;
return sValue;
}
| 22.823989 | 184 | 0.57225 | md-5 |
8b21cdf51788de20f811639ed0f7e4eeda1560b1 | 21,287 | cpp | C++ | opencl/source/helpers/task_information.cpp | 8tab/compute-runtime | 71bd96ad7184df83c7af04ffa8e0d6678ab26f99 | [ "MIT" ] | 1 | 2020-04-17T05:46:04.000Z | 2020-04-17T05:46:04.000Z | opencl/source/helpers/task_information.cpp | 8tab/compute-runtime | 71bd96ad7184df83c7af04ffa8e0d6678ab26f99 | [ "MIT" ] | null | null | null | opencl/source/helpers/task_information.cpp | 8tab/compute-runtime | 71bd96ad7184df83c7af04ffa8e0d6678ab26f99 | [ "MIT" ] | 1 | 2020-04-17T05:46:04.000Z | 2020-04-17T05:46:04.000Z |
/*
* Copyright (C) 2017-2020 Intel Corporation
*
* SPDX-License-Identifier: MIT
*
*/
#include "opencl/source/helpers/task_information.h"
#include "shared/source/command_stream/command_stream_receiver.h"
#include "shared/source/command_stream/csr_deps.h"
#include "shared/source/command_stream/linear_stream.h"
#include "shared/source/command_stream/preemption.h"
#include "shared/source/helpers/aligned_memory.h"
#include "shared/source/helpers/engine_node_helper.h"
#include "shared/source/helpers/string.h"
#include "shared/source/memory_manager/internal_allocation_storage.h"
#include "shared/source/memory_manager/surface.h"
#include "opencl/source/built_ins/builtins_dispatch_builder.h"
#include "opencl/source/cl_device/cl_device.h"
#include "opencl/source/command_queue/command_queue.h"
#include "opencl/source/command_queue/enqueue_common.h"
#include "opencl/source/device_queue/device_queue.h"
#include "opencl/source/gtpin/gtpin_notify.h"
#include "opencl/source/helpers/enqueue_properties.h"
#include "opencl/source/helpers/task_information.inl"
#include "opencl/source/mem_obj/mem_obj.h"
namespace NEO {
template void KernelOperation::ResourceCleaner::operator()<LinearStream>(LinearStream *);
template void KernelOperation::ResourceCleaner::operator()<IndirectHeap>(IndirectHeap *);
CommandMapUnmap::CommandMapUnmap(MapOperationType operationType, MemObj &memObj, MemObjSizeArray ©Size, MemObjOffsetArray ©Offset, bool readOnly,
CommandQueue &commandQueue)
: Command(commandQueue), memObj(memObj), copySize(copySize), copyOffset(copyOffset), readOnly(readOnly), operationType(operationType) {
memObj.incRefInternal();
}
CompletionStamp &CommandMapUnmap::submit(uint32_t taskLevel, bool terminated) {
if (terminated) {
memObj.decRefInternal();
return completionStamp;
}
auto &commandStreamReceiver = commandQueue.getGpgpuCommandStreamReceiver();
auto commandStreamReceiverOwnership = commandStreamReceiver.obtainUniqueOwnership();
auto &queueCommandStream = commandQueue.getCS(0);
size_t offset = queueCommandStream.getUsed();
MultiDispatchInfo multiDispatch;
Device &device = commandQueue.getDevice();
DispatchFlags dispatchFlags(
{}, //csrDependencies
nullptr, //barrierTimestampPacketNodes
{}, //pipelineSelectArgs
commandQueue.flushStamp->getStampReference(), //flushStampReference
commandQueue.getThrottle(), //throttle
PreemptionHelper::taskPreemptionMode(device, multiDispatch), //preemptionMode
GrfConfig::DefaultGrfNumber, //numGrfRequired
L3CachingSettings::l3CacheOn, //l3CacheSettings
ThreadArbitrationPolicy::NotPresent, //threadArbitrationPolicy
commandQueue.getSliceCount(), //sliceCount
true, //blocking
true, //dcFlush
false, //useSLM
true, //guardCommandBufferWithPipeControl
false, //GSBA32BitRequired
false, //requiresCoherency
commandQueue.getPriority() == QueuePriority::LOW, //lowPriority
false, //implicitFlush
commandQueue.getGpgpuCommandStreamReceiver().isNTo1SubmissionModelEnabled(), //outOfOrderExecutionAllowed
false, //epilogueRequired
false //usePerDssBackedBuffer
);
DEBUG_BREAK_IF(taskLevel >= CompletionStamp::levelNotReady);
gtpinNotifyPreFlushTask(&commandQueue);
completionStamp = commandStreamReceiver.flushTask(queueCommandStream,
offset,
commandQueue.getIndirectHeap(IndirectHeap::DYNAMIC_STATE, 0u),
commandQueue.getIndirectHeap(IndirectHeap::INDIRECT_OBJECT, 0u),
commandQueue.getIndirectHeap(IndirectHeap::SURFACE_STATE, 0u),
taskLevel,
dispatchFlags,
commandQueue.getDevice());
if (!memObj.isMemObjZeroCopy()) {
commandQueue.waitUntilComplete(completionStamp.taskCount, completionStamp.flushStamp, false);
if (operationType == MAP) {
memObj.transferDataToHostPtr(copySize, copyOffset);
} else if (!readOnly) {
DEBUG_BREAK_IF(operationType != UNMAP);
memObj.transferDataFromHostPtr(copySize, copyOffset);
}
}
memObj.decRefInternal();
return completionStamp;
}
CommandComputeKernel::CommandComputeKernel(CommandQueue &commandQueue, std::unique_ptr<KernelOperation> &kernelOperation, std::vector<Surface *> &surfaces,
bool flushDC, bool usesSLM, bool ndRangeKernel, std::unique_ptr<PrintfHandler> printfHandler,
PreemptionMode preemptionMode, Kernel *kernel, uint32_t kernelCount)
: Command(commandQueue, kernelOperation), flushDC(flushDC), slmUsed(usesSLM),
NDRangeKernel(ndRangeKernel), printfHandler(std::move(printfHandler)), kernel(kernel),
kernelCount(kernelCount), preemptionMode(preemptionMode) {
for (auto surface : surfaces) {
this->surfaces.push_back(surface);
}
UNRECOVERABLE_IF(nullptr == this->kernel);
kernel->incRefInternal();
}
CommandComputeKernel::~CommandComputeKernel() {
kernel->decRefInternal();
}
CompletionStamp &CommandComputeKernel::submit(uint32_t taskLevel, bool terminated) {
if (terminated) {
for (auto surface : surfaces) {
delete surface;
}
surfaces.clear();
return completionStamp;
}
auto &commandStreamReceiver = commandQueue.getGpgpuCommandStreamReceiver();
bool executionModelKernel = kernel->isParentKernel;
auto devQueue = commandQueue.getContext().getDefaultDeviceQueue();
auto commandStreamReceiverOwnership = commandStreamReceiver.obtainUniqueOwnership();
bool isCcsUsed = EngineHelpers::isCcs(commandQueue.getGpgpuEngine().osContext->getEngineType());
if (executionModelKernel) {
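// Busy-wait until the device queue's execution model critical section is
// free, then reset the queue and take the critical section for this dispatch.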
while (!devQueue->isEMCriticalSectionFree())
;
devQueue->resetDeviceQueue();
devQueue->acquireEMCriticalSection();
}
IndirectHeap *dsh = kernelOperation->dsh.get();
IndirectHeap *ioh = kernelOperation->ioh.get();
IndirectHeap *ssh = kernelOperation->ssh.get();
auto requiresCoherency = false;
auto anyUncacheableArgs = false;
for (auto &surface : surfaces) {
DEBUG_BREAK_IF(!surface);
surface->makeResident(commandStreamReceiver);
requiresCoherency |= surface->IsCoherent;
if (!surface->allowsL3Caching()) {
anyUncacheableArgs = true;
}
}
if (printfHandler) {
printfHandler.get()->makeResident(commandStreamReceiver);
}
makeTimestampPacketsResident(commandStreamReceiver);
if (executionModelKernel) {
uint32_t taskCount = commandStreamReceiver.peekTaskCount() + 1;
devQueue->setupExecutionModelDispatch(*ssh, *dsh, kernel, kernelCount,
commandStreamReceiver.getTagAllocation()->getGpuAddress(), taskCount, timestamp, isCcsUsed);
SchedulerKernel &scheduler = commandQueue.getContext().getSchedulerKernel();
scheduler.setArgs(devQueue->getQueueBuffer(),
devQueue->getStackBuffer(),
devQueue->getEventPoolBuffer(),
devQueue->getSlbBuffer(),
dsh->getGraphicsAllocation(),
kernel->getKernelReflectionSurface(),
devQueue->getQueueStorageBuffer(),
ssh->getGraphicsAllocation(),
devQueue->getDebugQueue());
devQueue->dispatchScheduler(
*kernelOperation->commandStream,
scheduler,
preemptionMode,
ssh,
dsh,
isCcsUsed);
scheduler.makeResident(commandStreamReceiver);
// Update SLM usage
slmUsed |= scheduler.slmTotalSize > 0;
this->kernel->getProgram()->getBlockKernelManager()->makeInternalAllocationsResident(commandStreamReceiver);
}
if (kernelOperation->blitPropertiesContainer.size() > 0) {
auto &bcsCsr = *commandQueue.getBcsCommandStreamReceiver();
CsrDependencies csrDeps;
eventsRequest.fillCsrDependencies(csrDeps, bcsCsr, CsrDependencies::DependenciesType::All);
BlitProperties::setupDependenciesForAuxTranslation(kernelOperation->blitPropertiesContainer, *timestampPacketDependencies,
*currentTimestampPacketNodes, csrDeps,
commandQueue.getGpgpuCommandStreamReceiver(), bcsCsr);
auto bcsTaskCount = bcsCsr.blitBuffer(kernelOperation->blitPropertiesContainer, false);
commandQueue.updateBcsTaskCount(bcsTaskCount);
}
DispatchFlags dispatchFlags(
{}, //csrDependencies
nullptr, //barrierTimestampPacketNodes
{false, kernel->isVmeKernel()}, //pipelineSelectArgs
commandQueue.flushStamp->getStampReference(), //flushStampReference
commandQueue.getThrottle(), //throttle
preemptionMode, //preemptionMode
kernel->getKernelInfo().patchInfo.executionEnvironment->NumGRFRequired, //numGrfRequired
L3CachingSettings::l3CacheOn, //l3CacheSettings
kernel->getThreadArbitrationPolicy(), //threadArbitrationPolicy
commandQueue.getSliceCount(), //sliceCount
true, //blocking
flushDC, //dcFlush
slmUsed, //useSLM
true, //guardCommandBufferWithPipeControl
NDRangeKernel, //GSBA32BitRequired
requiresCoherency, //requiresCoherency
commandQueue.getPriority() == QueuePriority::LOW, //lowPriority
false, //implicitFlush
commandQueue.getGpgpuCommandStreamReceiver().isNTo1SubmissionModelEnabled(), //outOfOrderExecutionAllowed
false, //epilogueRequired
kernel->requiresPerDssBackedBuffer() //usePerDssBackedBuffer
);
if (timestampPacketDependencies) {
eventsRequest.fillCsrDependencies(dispatchFlags.csrDependencies, commandStreamReceiver, CsrDependencies::DependenciesType::OutOfCsr);
dispatchFlags.barrierTimestampPacketNodes = ×tampPacketDependencies->barrierNodes;
}
dispatchFlags.pipelineSelectArgs.specialPipelineSelectMode = kernel->requiresSpecialPipelineSelectMode();
if (anyUncacheableArgs) {
dispatchFlags.l3CacheSettings = L3CachingSettings::l3CacheOff;
} else if (!kernel->areStatelessWritesUsed()) {
dispatchFlags.l3CacheSettings = L3CachingSettings::l3AndL1On;
}
if (commandQueue.dispatchHints != 0) {
dispatchFlags.engineHints = commandQueue.dispatchHints;
dispatchFlags.epilogueRequired = true;
}
DEBUG_BREAK_IF(taskLevel >= CompletionStamp::levelNotReady);
gtpinNotifyPreFlushTask(&commandQueue);
completionStamp = commandStreamReceiver.flushTask(*kernelOperation->commandStream,
0,
*dsh,
*ioh,
*ssh,
taskLevel,
dispatchFlags,
commandQueue.getDevice());
if (gtpinIsGTPinInitialized()) {
gtpinNotifyFlushTask(completionStamp.taskCount);
}
if (printfHandler) {
commandQueue.waitUntilComplete(completionStamp.taskCount, completionStamp.flushStamp, false);
printfHandler.get()->printEnqueueOutput();
}
for (auto surface : surfaces) {
delete surface;
}
surfaces.clear();
return completionStamp;
}
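// Submits the single pending blit operation on the BCS (copy) engine, chaining
// it to the event and timestamp-packet dependencies recorded for this command.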
void CommandWithoutKernel::dispatchBlitOperation() {
auto bcsCsr = commandQueue.getBcsCommandStreamReceiver();
UNRECOVERABLE_IF(bcsCsr == nullptr);
UNRECOVERABLE_IF(kernelOperation->blitPropertiesContainer.size() != 1);
auto &blitProperties = *kernelOperation->blitPropertiesContainer.begin();
eventsRequest.fillCsrDependencies(blitProperties.csrDependencies, *bcsCsr, CsrDependencies::DependenciesType::All);
blitProperties.csrDependencies.push_back(×tampPacketDependencies->cacheFlushNodes);
blitProperties.csrDependencies.push_back(×tampPacketDependencies->previousEnqueueNodes);
blitProperties.csrDependencies.push_back(×tampPacketDependencies->barrierNodes);
blitProperties.outputTimestampPacket = currentTimestampPacketNodes->peekNodes()[0];
auto bcsTaskCount = bcsCsr->blitBuffer(kernelOperation->blitPropertiesContainer, false);
commandQueue.updateBcsTaskCount(bcsTaskCount);
}
CompletionStamp &CommandWithoutKernel::submit(uint32_t taskLevel, bool terminated) {
if (terminated) {
return completionStamp;
}
auto &commandStreamReceiver = commandQueue.getGpgpuCommandStreamReceiver();
if (!kernelOperation) {
completionStamp.taskCount = commandStreamReceiver.peekTaskCount();
completionStamp.taskLevel = commandStreamReceiver.peekTaskLevel();
completionStamp.flushStamp = commandStreamReceiver.obtainCurrentFlushStamp();
return completionStamp;
}
auto lockCSR = commandStreamReceiver.obtainUniqueOwnership();
if (kernelOperation->blitEnqueue) {
if (commandStreamReceiver.isStallingPipeControlOnNextFlushRequired()) {
timestampPacketDependencies->barrierNodes.add(commandStreamReceiver.getTimestampPacketAllocator()->getTag());
}
dispatchBlitOperation();
}
DispatchFlags dispatchFlags(
{}, //csrDependencies
×tampPacketDependencies->barrierNodes, //barrierTimestampPacketNodes
{}, //pipelineSelectArgs
commandQueue.flushStamp->getStampReference(), //flushStampReference
commandQueue.getThrottle(), //throttle
commandQueue.getDevice().getPreemptionMode(), //preemptionMode
GrfConfig::DefaultGrfNumber, //numGrfRequired
L3CachingSettings::l3CacheOn, //l3CacheSettings
ThreadArbitrationPolicy::NotPresent, //threadArbitrationPolicy
commandQueue.getSliceCount(), //sliceCount
true, //blocking
false, //dcFlush
false, //useSLM
true, //guardCommandBufferWithPipeControl
false, //GSBA32BitRequired
false, //requiresCoherency
commandQueue.getPriority() == QueuePriority::LOW, //lowPriority
false, //implicitFlush
commandStreamReceiver.isNTo1SubmissionModelEnabled(), //outOfOrderExecutionAllowed
false, //epilogueRequired
false //usePerDssBackedBuffer
);
UNRECOVERABLE_IF(!commandStreamReceiver.peekTimestampPacketWriteEnabled());
eventsRequest.fillCsrDependencies(dispatchFlags.csrDependencies, commandStreamReceiver, CsrDependencies::DependenciesType::OutOfCsr);
makeTimestampPacketsResident(commandStreamReceiver);
gtpinNotifyPreFlushTask(&commandQueue);
completionStamp = commandStreamReceiver.flushTask(*kernelOperation->commandStream,
0,
commandQueue.getIndirectHeap(IndirectHeap::DYNAMIC_STATE, 0u),
commandQueue.getIndirectHeap(IndirectHeap::INDIRECT_OBJECT, 0u),
commandQueue.getIndirectHeap(IndirectHeap::SURFACE_STATE, 0u),
taskLevel,
dispatchFlags,
commandQueue.getDevice());
return completionStamp;
}
void Command::setEventsRequest(EventsRequest &eventsRequest) {
this->eventsRequest = eventsRequest;
if (eventsRequest.numEventsInWaitList > 0) {
eventsWaitlist.resize(eventsRequest.numEventsInWaitList);
auto size = eventsRequest.numEventsInWaitList * sizeof(cl_event);
memcpy_s(&eventsWaitlist[0], size, eventsRequest.eventWaitList, size);
this->eventsRequest.eventWaitList = &eventsWaitlist[0];
}
}
void Command::setTimestampPacketNode(TimestampPacketContainer ¤t, TimestampPacketDependencies &&dependencies) {
currentTimestampPacketNodes = std::make_unique<TimestampPacketContainer>();
currentTimestampPacketNodes->assignAndIncrementNodesRefCounts(current);
timestampPacketDependencies = std::make_unique<TimestampPacketDependencies>();
*timestampPacketDependencies = std::move(dependencies);
}
Command::~Command() {
auto &commandStreamReceiver = commandQueue.getGpgpuCommandStreamReceiver();
if (commandStreamReceiver.peekTimestampPacketWriteEnabled()) {
for (cl_event &eventFromWaitList : eventsWaitlist) {
auto event = castToObjectOrAbort<Event>(eventFromWaitList);
event->decRefInternal();
}
}
}
void Command::makeTimestampPacketsResident(CommandStreamReceiver &commandStreamReceiver) {
if (commandStreamReceiver.peekTimestampPacketWriteEnabled()) {
for (cl_event &eventFromWaitList : eventsWaitlist) {
auto event = castToObjectOrAbort<Event>(eventFromWaitList);
if (event->getTimestampPacketNodes()) {
event->getTimestampPacketNodes()->makeResident(commandStreamReceiver);
}
}
}
if (currentTimestampPacketNodes) {
currentTimestampPacketNodes->makeResident(commandStreamReceiver);
}
if (timestampPacketDependencies) {
timestampPacketDependencies->cacheFlushNodes.makeResident(commandStreamReceiver);
timestampPacketDependencies->previousEnqueueNodes.makeResident(commandStreamReceiver);
}
}
Command::Command(CommandQueue &commandQueue) : commandQueue(commandQueue) {}
Command::Command(CommandQueue &commandQueue, std::unique_ptr<KernelOperation> &kernelOperation)
: commandQueue(commandQueue), kernelOperation(std::move(kernelOperation)) {}
} // namespace NEO
| 51.047962 | 155 | 0.584864 | 8tab |
8b2207ddb588860a7cbda8fe9b2e844b701cb641 | 7,396 | cpp | C++ | app/bin/miner/xmr-stak/xmrstak/backend/cpu/crypto/cryptonight_common.cpp | chrisknepper/electron-gui-crypto-miner | e154b1f1ea6ce8285c7a682a8dcef90f17a5c8a2 | [ "MIT" ] | 2 | 2018-01-25T04:29:57.000Z | 2020-02-13T15:30:55.000Z | app/bin/miner/xmr-stak/xmrstak/backend/cpu/crypto/cryptonight_common.cpp | chrisknepper/electron-gui-crypto-miner | e154b1f1ea6ce8285c7a682a8dcef90f17a5c8a2 | [ "MIT" ] | 1 | 2019-05-26T17:51:57.000Z | 2019-05-26T17:51:57.000Z | app/bin/miner/xmr-stak/xmrstak/backend/cpu/crypto/cryptonight_common.cpp | chrisknepper/electron-gui-crypto-miner | e154b1f1ea6ce8285c7a682a8dcef90f17a5c8a2 | [ "MIT" ] | 5 | 2018-02-17T11:32:37.000Z | 2021-02-26T22:26:07.000Z |
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* Additional permission under GNU GPL version 3 section 7
*
* If you modify this Program, or any covered work, by linking or combining
* it with OpenSSL (or a modified version of that library), containing parts
* covered by the terms of OpenSSL License and SSLeay License, the licensors
* of this Program grant you additional permission to convey the resulting work.
*
*/
extern "C"
{
#include "c_groestl.h"
#include "c_blake256.h"
#include "c_jh.h"
#include "c_skein.h"
}
#include "cryptonight.h"
#include "cryptonight_aesni.h"
#include "xmrstak/backend/cryptonight.hpp"
#include "xmrstak/jconf.hpp"
#include <stdio.h>
#include <stdlib.h>
#ifdef __GNUC__
#include <mm_malloc.h>
#else
#include <malloc.h>
#endif // __GNUC__
#if defined(__APPLE__)
#include <mach/vm_statistics.h>
#endif
#ifdef _WIN32
#include <windows.h>
#include <ntsecapi.h>
#else
#include <sys/mman.h>
#include <errno.h>
#include <string.h>
#endif // _WIN32
void do_blake_hash(const void* input, size_t len, char* output) {
blake256_hash((uint8_t*)output, (const uint8_t*)input, len);
}
void do_groestl_hash(const void* input, size_t len, char* output) {
groestl((const uint8_t*)input, len * 8, (uint8_t*)output);
}
void do_jh_hash(const void* input, size_t len, char* output) {
jh_hash(32 * 8, (const uint8_t*)input, 8 * len, (uint8_t*)output);
}
void do_skein_hash(const void* input, size_t len, char* output) {
skein_hash(8 * 32, (const uint8_t*)input, 8 * len, (uint8_t*)output);
}
void (* const extra_hashes[4])(const void *, size_t, char *) = {do_blake_hash, do_groestl_hash, do_jh_hash, do_skein_hash};
#ifdef _WIN32
BOOL bRebootDesirable = FALSE; //If VirtualAlloc fails, suggest a reboot
BOOL AddPrivilege(TCHAR* pszPrivilege)
{
HANDLE hToken;
TOKEN_PRIVILEGES tp;
BOOL status;
if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &hToken))
return FALSE;
if (!LookupPrivilegeValue(NULL, pszPrivilege, &tp.Privileges[0].Luid))
return FALSE;
tp.PrivilegeCount = 1;
tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
status = AdjustTokenPrivileges(hToken, FALSE, &tp, 0, (PTOKEN_PRIVILEGES)NULL, 0);
if (!status || (GetLastError() != ERROR_SUCCESS))
return FALSE;
CloseHandle(hToken);
return TRUE;
}
BOOL AddLargePageRights()
{
HANDLE hToken;
PTOKEN_USER user = NULL;
if (OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &hToken) == TRUE)
{
TOKEN_ELEVATION Elevation;
DWORD cbSize = sizeof(TOKEN_ELEVATION);
BOOL bIsElevated = FALSE;
if (GetTokenInformation(hToken, TokenElevation, &Elevation, sizeof(Elevation), &cbSize))
bIsElevated = Elevation.TokenIsElevated;
DWORD size = 0;
GetTokenInformation(hToken, TokenUser, NULL, 0, &size);
if (size > 0 && bIsElevated)
{
user = (PTOKEN_USER)LocalAlloc(LPTR, size);
GetTokenInformation(hToken, TokenUser, user, size, &size);
}
CloseHandle(hToken);
}
if (!user)
return FALSE;
LSA_HANDLE handle;
LSA_OBJECT_ATTRIBUTES attributes;
ZeroMemory(&attributes, sizeof(attributes));
BOOL result = FALSE;
if (LsaOpenPolicy(NULL, &attributes, POLICY_ALL_ACCESS, &handle) == 0)
{
LSA_UNICODE_STRING lockmem;
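// LSA_UNICODE_STRING lengths are byte counts: L"SeLockMemoryPrivilege" is
// 21 UTF-16 code units = 42 bytes; MaximumLength adds the terminating NUL.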
lockmem.Buffer = L"SeLockMemoryPrivilege";
lockmem.Length = 42;
lockmem.MaximumLength = 44;
PLSA_UNICODE_STRING rights = NULL;
ULONG cnt = 0;
BOOL bHasRights = FALSE;
if (LsaEnumerateAccountRights(handle, user->User.Sid, &rights, &cnt) == 0)
{
for (size_t i = 0; i < cnt; i++)
{
if (rights[i].Length == lockmem.Length &&
memcmp(rights[i].Buffer, lockmem.Buffer, 42) == 0)
{
bHasRights = TRUE;
break;
}
}
LsaFreeMemory(rights);
}
if(!bHasRights)
result = LsaAddAccountRights(handle, user->User.Sid, &lockmem, 1) == 0;
LsaClose(handle);
}
LocalFree(user);
return result;
}
#endif
size_t cryptonight_init(size_t use_fast_mem, size_t use_mlock, alloc_msg* msg)
{
#ifdef _WIN32
if(use_fast_mem == 0)
return 1;
if(AddPrivilege(TEXT("SeLockMemoryPrivilege")) == 0)
{
if(AddLargePageRights())
{
msg->warning = "Added SeLockMemoryPrivilege to the current account. You need to reboot for it to work";
bRebootDesirable = TRUE;
}
else
msg->warning = "Obtaning SeLockMemoryPrivilege failed.";
return 0;
}
bRebootDesirable = TRUE;
return 1;
#else
return 1;
#endif // _WIN32
}
cryptonight_ctx* cryptonight_alloc_ctx(size_t use_fast_mem, size_t use_mlock, alloc_msg* msg)
{
size_t hashMemSize;
if(::jconf::inst()->IsCurrencyMonero())
{
hashMemSize = MONERO_MEMORY;
}
else
{
hashMemSize = AEON_MEMORY;
}
cryptonight_ctx* ptr = (cryptonight_ctx*)_mm_malloc(sizeof(cryptonight_ctx), 4096);
if(use_fast_mem == 0)
{
// use memory aligned to the scratchpad size (2 MiB for Monero, 1 MiB for Aeon)
ptr->long_state = (uint8_t*)_mm_malloc(hashMemSize, hashMemSize);
ptr->ctx_info[0] = 0;
ptr->ctx_info[1] = 0;
return ptr;
}
#ifdef _WIN32
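// VirtualAlloc with MEM_LARGE_PAGES needs a size that is a multiple of the
// large page size, so grow the request to cover the scratchpad with whole
// large pages.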
SIZE_T iLargePageMin = GetLargePageMinimum();
if(hashMemSize > iLargePageMin)
iLargePageMin *= 2;
ptr->long_state = (uint8_t*)VirtualAlloc(NULL, iLargePageMin,
MEM_COMMIT | MEM_RESERVE | MEM_LARGE_PAGES, PAGE_READWRITE);
if(ptr->long_state == NULL)
{
_mm_free(ptr);
if(bRebootDesirable)
msg->warning = "VirtualAlloc failed. Reboot might help.";
else
msg->warning = "VirtualAlloc failed.";
return NULL;
}
else
{
ptr->ctx_info[0] = 1;
return ptr;
}
#else
#if defined(__APPLE__)
ptr->long_state = (uint8_t*)mmap(0, hashMemSize, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON, VM_FLAGS_SUPERPAGE_SIZE_2MB, 0);
#elif defined(__FreeBSD__)
ptr->long_state = (uint8_t*)mmap(0, hashMemSize, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_ALIGNED_SUPER | MAP_PREFAULT_READ, -1, 0);
#else
ptr->long_state = (uint8_t*)mmap(0, hashMemSize, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, 0, 0);
#endif
if (ptr->long_state == MAP_FAILED)
{
_mm_free(ptr);
msg->warning = "mmap failed";
return NULL;
}
ptr->ctx_info[0] = 1;
if(madvise(ptr->long_state, hashMemSize, MADV_RANDOM|MADV_WILLNEED) != 0)
msg->warning = "madvise failed";
ptr->ctx_info[1] = 0;
if(use_mlock != 0 && mlock(ptr->long_state, hashMemSize) != 0)
msg->warning = "mlock failed";
else
ptr->ctx_info[1] = 1;
return ptr;
#endif // _WIN32
}
void cryptonight_free_ctx(cryptonight_ctx* ctx)
{
size_t hashMemSize;
if(::jconf::inst()->IsCurrencyMonero())
{
hashMemSize = MONERO_MEMORY;
}
else
{
hashMemSize = AEON_MEMORY;
}
if(ctx->ctx_info[0] != 0)
{
#ifdef _WIN32
VirtualFree(ctx->long_state, 0, MEM_RELEASE);
#else
if(ctx->ctx_info[1] != 0)
munlock(ctx->long_state, hashMemSize);
munmap(ctx->long_state, hashMemSize);
#endif // _WIN32
}
else
_mm_free(ctx->long_state);
_mm_free(ctx);
}
| 24.409241 | 123 | 0.70768 | chrisknepper |
8b25238e89a48ce19f3ed57de0a4692b7f8ce2c5 | 50,386 | cpp | C++ | CloakCompiler/MeshCompiler.cpp | Bizzarrus/CloakEngine | 0890eaada76b91be89702d2a6ec2dcf9b2901fb9 | [ "BSD-2-Clause" ] | null | null | null | CloakCompiler/MeshCompiler.cpp | Bizzarrus/CloakEngine | 0890eaada76b91be89702d2a6ec2dcf9b2901fb9 | [ "BSD-2-Clause" ] | null | null | null | CloakCompiler/MeshCompiler.cpp | Bizzarrus/CloakEngine | 0890eaada76b91be89702d2a6ec2dcf9b2901fb9 | [ "BSD-2-Clause" ] | null | null | null |
#include "stdafx.h"
#include "CloakCompiler/Mesh.h"
#include "CloakEngine/Files/ExtendedBuffers.h"
#include "Engine/TempHandler.h"
#include "Engine/BoundingVolume.h"
#include <assert.h>
#include <sstream>
//#define DEBUG_ENFORCE_STRIPS
//#define DEBUG_ENFORCE_NO_STRIPS
#define ALLOW_STRIP_LIST_MIX
#define STRIP_FUNC(name) size_t name(In size_t triID, In size_t swapEdge, In const CE::List<HalfEdge>& edges, In const CE::List<Triangle>& faces, In uint64_t floodFillVisited, In bool firstCall)
#define PRINT_STRIPS
namespace CloakCompiler {
namespace API {
namespace Mesh {
typedef std::function<void(const RespondeInfo&)> RespondeFunc;
constexpr float g_floatDelta = 0.0001f;
constexpr size_t STRIP_CUT_VALUE = 0xFFFFFFFF;
constexpr size_t STRIP_CUT_VALUE_16 = 0xFFFF;
constexpr size_t MAX_IB16_SIZE = 1 << 16;
//Minimum required number of single triangles to not use them as strip
// Since draw calls have a cost, we prefere to have a bit longer index buffers than additional
// draw calls with a few triangles. This value is just a guess, so it might change in future
constexpr size_t MIN_TRI_COUNT = 128;
enum class IndexBufferType {
IB16,
IB16ShortCut,
IB32,
};
struct FinalVertex {
CloakEngine::Global::Math::Vector Position;
CloakEngine::Global::Math::Vector Normal;
CloakEngine::Global::Math::Vector Binormal;
CloakEngine::Global::Math::Vector Tangent;
size_t MaterialID;
TexCoord TexCoord;
Bone Bones[4];
bool CLOAK_CALL CompareForIndex(In const FinalVertex& b) const
{
bool r = Position == b.Position;
r = r && Normal == b.Normal;
r = r && Binormal == b.Binormal;
r = r && Tangent == b.Tangent;
r = r && abs(TexCoord.U - b.TexCoord.U) < g_floatDelta;
r = r && abs(TexCoord.V - b.TexCoord.V) < g_floatDelta;
for (size_t a = 0; a < 4 && r; a++)
{
r = r && Bones[a].BoneID == b.Bones[a].BoneID;
r = r && abs(Bones[a].Weight - b.Bones[a].Weight) < g_floatDelta;
}
return r;
}
};
struct MaterialRange {
size_t MaterialID;
size_t ListCount;
size_t StripCount;
};
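//Size of the vertex data itself: sizeof(FinalVertex) minus the size_t MaterialID
//field, which is only needed while compiling and is not part of a vertex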
constexpr size_t VertexSize = sizeof(FinalVertex) - sizeof(size_t);
constexpr CloakEngine::Files::FileType g_TempFileType{ "MeshTemp","CEMT",1000 };
namespace Strips {
//Number of indices the triangle-list index buffer should at least include to justify the additional draw call:
constexpr size_t MIN_TRIANGLE_LIST_SIZE = 128;
//Percentage of indices (relative to the triangle-strip index buffer) the triangle-list index buffer should at least include to justify the additional draw call:
constexpr float MIN_TRIANGLE_LIST_PERCENTAGE = 0.1f;
struct HalfEdge {
size_t Vertex;
size_t OppositeEdge;
};
struct Triangle {
size_t Edge;
size_t AdjacentCount;
size_t MaterialID;
uint64_t FloodFillVisited;
};
CLOAK_FORCEINLINE constexpr size_t TriangleByEdge(In size_t edgeID)
{
return edgeID / 3;
}
CLOAK_FORCEINLINE constexpr size_t FirstEdgeOfTrinalge(In size_t edgeID)
{
return edgeID * 3;
}
CLOAK_FORCEINLINE constexpr size_t NextEdge(In size_t edgeID)
{
const size_t tID = TriangleByEdge(edgeID);
return (3 * tID) + ((edgeID + 1) % 3);
}
CLOAK_FORCEINLINE constexpr size_t PrevEdge(In size_t edgeID)
{
const size_t tID = TriangleByEdge(edgeID);
return (3 * tID) + ((edgeID + 2) % 3);
}
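//Counts the direct neighbours of a triangle that share its material and were
//not yet consumed in the current flood-fill pass; strips are grown towards
//triangles with few free neighbours so no isolated triangles are left behind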
inline size_t CLOAK_CALL AdjacentTriangleCount(In size_t triangleID, In const CE::List<HalfEdge>& edges, In const CE::List<Triangle>& faces, In uint64_t floodFillVisited)
{
if (triangleID >= edges.size() / 3) { return 0; }
size_t res = 0;
for (size_t a = 0; a < 3; a++)
{
const size_t i = (3 * triangleID) + a;
const size_t j = edges[i].OppositeEdge;
if (j < edges.size() && faces[triangleID].MaterialID == faces[TriangleByEdge(j)].MaterialID && faces[TriangleByEdge(j)].FloodFillVisited != floodFillVisited) { res++; }
}
return res;
}
inline void CLOAK_CALL UpdateVisited(In size_t triangleID, In const CE::List<HalfEdge>& edges, Inout CE::List<Triangle>* faces, In uint64_t floodFillVisited)
{
faces->at(triangleID).FloodFillVisited = floodFillVisited;
for (size_t a = 0, b = FirstEdgeOfTrinalge(triangleID); a < 3; a++, b = NextEdge(b))
{
const size_t e = edges[b].OppositeEdge;
if (e != edges.size())
{
const size_t t = TriangleByEdge(e);
faces->at(t).AdjacentCount = AdjacentTriangleCount(t, edges, *faces, floodFillVisited);
}
}
}
typedef STRIP_FUNC((*StripFunc));
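//Strip-growing heuristics: both pick the free neighbour with the lowest
//number of remaining neighbours and prefer edges that avoid a vertex swap;
//LNLN additionally breaks ties by looking one more triangle ahead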
STRIP_FUNC(LNLS)
{
size_t bstEdge = edges.size();
size_t adjNum = static_cast<size_t>(-1);
for (size_t a = FirstEdgeOfTrinalge(triID), b = 0; b < 3; b++, a = NextEdge(a))
{
const size_t opEdge = edges[a].OppositeEdge;
if (opEdge < edges.size() && faces[TriangleByEdge(opEdge)].FloodFillVisited != floodFillVisited)
{
const size_t adjTri = TriangleByEdge(opEdge);
const size_t adj = AdjacentTriangleCount(adjTri, edges, faces, floodFillVisited);
//Check if edge require swap:
const bool swap = !firstCall && a == swapEdge;
if (adj < adjNum || (adj == adjNum && swap == false))
{
adjNum = adj;
bstEdge = a;
}
}
}
return bstEdge;
}
STRIP_FUNC(LNLN)
{
size_t bstEdge = edges.size();
size_t adjNum = static_cast<size_t>(-1);
size_t adjNumSec = static_cast<size_t>(-1);
for (size_t a = FirstEdgeOfTrinalge(triID), b = 0; b < 3; b++, a = NextEdge(a))
{
const size_t opEdge = edges[a].OppositeEdge;
if (opEdge < edges.size() && faces[TriangleByEdge(opEdge)].FloodFillVisited != floodFillVisited)
{
const size_t adjTri = TriangleByEdge(opEdge);
const size_t adj = AdjacentTriangleCount(adjTri, edges, faces, floodFillVisited);
const bool swap = !firstCall && a == swapEdge;
size_t adjSec = static_cast<size_t>(-1);
if (adj <= adjNum)
{
//Look one step ahead:
for (size_t c = adjTri * 3, d = 0; d < 3; d++, c = NextEdge(c))
{
const size_t opSec = edges[c].OppositeEdge;
if (opSec < edges.size() && opSec != a && faces[TriangleByEdge(opSec)].FloodFillVisited != floodFillVisited)
{
const size_t adjTriSec = TriangleByEdge(opSec);
const size_t sadj = AdjacentTriangleCount(adjTriSec, edges, faces, floodFillVisited);
adjSec = min(adjSec, sadj);
}
}
}
if (adj < adjNum || (adj == adjNum && adjSec < adjNumSec) || (adj == adjNum && adjSec == adjNumSec && swap == false))
{
adjNum = adj;
adjNumSec = adjSec;
bstEdge = a;
}
}
}
return bstEdge;
}
constexpr StripFunc Functions[] = { LNLS, LNLN };
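//Re-encodes the triangle-list index buffer of each material range as triangle
//strips (separated by strip-cut values) using a half-edge structure. Every
//heuristic above is tried and the shortest result wins; leftover single
//triangles stay in a trailing triangle list unless there are too few of them
//to justify a second draw call, in which case they are appended as mini-strips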
inline bool CLOAK_CALL CalculateIndexBufferStrips(In const CE::List<size_t>& indexBaseBuffer, Inout CE::List<size_t>* indexBuffer, Inout CloakEngine::List<MaterialRange>* materialRanges)
{
bool res = false;
size_t firstIndexPos = 0;
#ifndef DEBUG_ENFORCE_NO_STRIPS
#ifdef DEBUG_ENFORCE_STRIPS
indexBuffer->clear();
#endif
//Create half-edge structure:
CE::List<Triangle> faces(indexBaseBuffer.size() / 3);
CE::List<HalfEdge> edges(faces.size() * 3);
//This array allows to find an edge by two entries of an index buffer:
CE::FlatMap<std::pair<size_t, size_t>, size_t> vertexToEdge;
//To test whether we included an face already in our new index buffer, we use a flood fill counter
uint64_t floodFillVisited = 1;
#define VERTEX_TO_EDGE(v0, v1) vertexToEdge[std::make_pair((v0),(v1))]
#define VERTEX_EDGE_EXIST(v0, v1) (vertexToEdge.find(std::make_pair((v0), (v1))) != vertexToEdge.end())
//Calculate edges:
for (size_t a = 0; a < indexBaseBuffer.size(); a += 3)
{
size_t vli = a + 2;
for (size_t b = 0; b < 3; b++)
{
const size_t vni = a + b;
const size_t vl = indexBaseBuffer[vli];
const size_t vn = indexBaseBuffer[vni];
edges[vni].Vertex = vl;
edges[vni].OppositeEdge = edges.size();
VERTEX_TO_EDGE(vl, vn) = vni;
vli = vni;
}
}
//Initialize face values:
for (size_t a = 0; a < faces.size(); a++)
{
faces[a].Edge = a * 3;
faces[a].FloodFillVisited = 0;
faces[a].AdjacentCount = 0;
faces[a].MaterialID = ~0;
}
//Initialize material IDs:
size_t firstEdge = 0;
for (size_t a = 0; a < materialRanges->size(); a++)
{
CLOAK_ASSUME(materialRanges->at(a).StripCount == 0);
const size_t lastEdge = min(firstEdge + materialRanges->at(a).ListCount, edges.size());
CLOAK_ASSUME(lastEdge % 3 == 0);
for (size_t c = firstEdge; c < lastEdge; c += 3)
{
const size_t t = TriangleByEdge(c);
faces[t].MaterialID = materialRanges->at(a).MaterialID;
}
firstEdge = lastEdge;
}
//Calculate opposite edges:
for (size_t a = 0; a < indexBaseBuffer.size(); a += 3)
{
size_t vli = a + 2;
for (size_t b = 0; b < 3; b++)
{
const size_t vni = a + b;
const size_t vl = indexBaseBuffer[vli];
const size_t vn = indexBaseBuffer[vni];
const size_t eM = VERTEX_TO_EDGE(vl, vn);
if (VERTEX_EDGE_EXIST(vn, vl))
{
const size_t eO = VERTEX_TO_EDGE(vn, vl);
const size_t tM = TriangleByEdge(eM);
const size_t tO = TriangleByEdge(eO);
if (faces[tM].MaterialID == faces[tO].MaterialID)
{
edges[eM].OppositeEdge = eO;
vli = vni;
continue;
}
}
edges[eM].OppositeEdge = edges.size();
vli = vni;
}
}
CE::List<size_t> newIBStrip;
CE::List<size_t> newIBList;
firstEdge = 0;
for (size_t a = 0; a < materialRanges->size(); a++)
{
CLOAK_ASSUME(materialRanges->at(a).StripCount == 0);
if (materialRanges->at(a).ListCount < 3) { continue; }
const size_t lastEdge = min(firstEdge + materialRanges->at(a).ListCount, edges.size());
CLOAK_ASSUME(lastEdge % 3 == 0);
//Calculate triangle strips:
for (size_t b = 0; b < ARRAYSIZE(Functions); b++)
{
//Main Algorithm:
do {
//Find best face (with lowest adjacent count) to start with:
size_t face = faces.size();
size_t bstAdjC = ~0;
for (size_t c = firstEdge; c < lastEdge; c += 3)
{
const size_t t = TriangleByEdge(c);
faces[t].AdjacentCount = AdjacentTriangleCount(t, edges, faces, floodFillVisited);
if (faces[t].FloodFillVisited != floodFillVisited && faces[t].AdjacentCount < bstAdjC)
{
bstAdjC = faces[t].AdjacentCount;
face = t;
}
}
if (face == faces.size()) { break; } // No more faces
UpdateVisited(face, edges, &faces, floodFillVisited);
size_t edge = Functions[b](face, edges.size(), edges, faces, floodFillVisited, true);
if (edge == edges.size())
{
//Single triangle, all adjacent triangles were already used:
edge = FirstEdgeOfTrinalge(face);
#ifdef ALLOW_STRIP_LIST_MIX
newIBList.push_back(edges[edge].Vertex);
edge = NextEdge(edge);
newIBList.push_back(edges[edge].Vertex);
edge = NextEdge(edge);
newIBList.push_back(edges[edge].Vertex);
#else
newIBStrip.push_back(edges[edge].Vertex);
edge = NextEdge(edge);
newIBStrip.push_back(edges[edge].Vertex);
edge = NextEdge(edge);
newIBStrip.push_back(edges[edge].Vertex);
newIBStrip.push_back(STRIP_CUT_VALUE);
#endif
}
else
{
const size_t startEdge = edge;
bool extendSecondDir = false;
//Insert first triangle:
newIBStrip.push_back(edges[PrevEdge(edge)].Vertex);
newIBStrip.push_back(edges[edge].Vertex);
size_t curEdge = edges[edge].OppositeEdge;
CLOAK_ASSUME(curEdge < edges.size());
newIBStrip.push_back(edges[curEdge].Vertex);
//Walk along strip:
do {
insert_ccw_triangle:
size_t swapEdge = NextEdge(curEdge);
face = TriangleByEdge(curEdge);
UpdateVisited(face, edges, &faces, floodFillVisited);
edge = Functions[b](face, swapEdge, edges, faces, floodFillVisited, false);
if (edge == edges.size())
{
//Strip ended (insert strip cut twice to enforce even number of indices)
newIBStrip.push_back(edges[PrevEdge(curEdge)].Vertex);
newIBStrip.push_back(STRIP_CUT_VALUE);
newIBStrip.push_back(STRIP_CUT_VALUE);
break;
}
else if (edge == swapEdge)
{
//Swap
newIBStrip.push_back(edges[edge].Vertex);
curEdge = edges[edge].OppositeEdge;
CLOAK_ASSUME(curEdge < edges.size());
newIBStrip.push_back(edges[curEdge].Vertex);
goto insert_ccw_triangle;
}
else
{
newIBStrip.push_back(edges[edge].Vertex);
curEdge = edges[edge].OppositeEdge;
CLOAK_ASSUME(curEdge < edges.size());
}
insert_cw_triangle:
swapEdge = PrevEdge(curEdge);
face = TriangleByEdge(curEdge);
UpdateVisited(face, edges, &faces, floodFillVisited);
edge = Functions[b](face, swapEdge, edges, faces, floodFillVisited, false);
if (edge == edges.size())
{
//Strip ended
newIBStrip.push_back(edges[PrevEdge(curEdge)].Vertex);
newIBStrip.push_back(STRIP_CUT_VALUE);
break;
}
else if (edge == swapEdge)
{
//Swap
newIBStrip.push_back(edges[curEdge].Vertex);
newIBStrip.push_back(edges[edge].Vertex);
curEdge = edges[edge].OppositeEdge;
CLOAK_ASSUME(curEdge < edges.size());
goto insert_cw_triangle;
}
else
{
curEdge = edges[edge].OppositeEdge;
CLOAK_ASSUME(curEdge < edges.size());
newIBStrip.push_back(edges[curEdge].Vertex);
}
} while (true);
//Try to extend in second direction:
if (extendSecondDir == false)
{
extendSecondDir = true;
size_t swapEdge = NextEdge(startEdge);
face = TriangleByEdge(startEdge);
edge = Functions[b](face, swapEdge, edges, faces, floodFillVisited, false);
if (edge < edges.size())
{
//Remove last strip cuts from strip:
while (newIBStrip.empty() == false && newIBStrip.back() == STRIP_CUT_VALUE) { newIBStrip.pop_back(); }
//Reverse strip:
// To reverse the strip, we need an even amount of indices in the strip. Otherwise, we would also reverse face directions
if (newIBStrip.size() % 2 != 0) { newIBStrip.push_back(newIBStrip.back()); }
for (size_t a = 0; a < newIBStrip.size() >> 1; a++) { std::swap(newIBStrip[a], newIBStrip[newIBStrip.size() - (a + 1)]); }
if (edge == swapEdge)
{
newIBStrip.pop_back();
newIBStrip.push_back(edges[edge].Vertex);
curEdge = edges[edge].OppositeEdge;
CLOAK_ASSUME(curEdge < edges.size());
newIBStrip.push_back(edges[curEdge].Vertex);
goto insert_ccw_triangle;
}
else
{
curEdge = edges[edge].OppositeEdge;
CLOAK_ASSUME(curEdge < edges.size());
goto insert_cw_triangle;
}
}
}
}
} while (true);
//If the number of single triangles in the list does not justify a second draw call, we will merge it into the strips:
if (newIBStrip.size() > 0 && (newIBList.size() < MIN_TRIANGLE_LIST_SIZE || newIBList.size() < static_cast<size_t>(newIBStrip.size() * MIN_TRIANGLE_LIST_PERCENTAGE)))
{
for (size_t a = 0; a < newIBList.size(); a += 3)
{
newIBStrip.push_back(newIBList[a + 0]);
newIBStrip.push_back(newIBList[a + 1]);
newIBStrip.push_back(newIBList[a + 2]);
newIBStrip.push_back(STRIP_CUT_VALUE);
}
newIBList.clear();
}
//Remove last strip cuts from strip:
while (newIBStrip.empty() == false && newIBStrip.back() == STRIP_CUT_VALUE) { newIBStrip.pop_back(); }
//Compare and copy new index buffer:
#ifndef DEBUG_ENFORCE_STRIPS
if (newIBStrip.size() + newIBList.size() < materialRanges->at(a).ListCount + materialRanges->at(a).StripCount)
#else
if (b == 0 || newIBStrip.size() + newIBList.size() < materialRanges->at(a).ListCount + materialRanges->at(a).StripCount)
#endif
{
res = true;
materialRanges->at(a).ListCount = newIBList.size();
materialRanges->at(a).StripCount = newIBStrip.size();
CLOAK_ASSUME(firstIndexPos + newIBList.size() + newIBStrip.size() <= indexBuffer->size());
#ifndef DEBUG_ENFORCE_STRIPS
size_t p = firstIndexPos;
for (size_t b = 0; b < newIBStrip.size(); b++, p++) { indexBuffer->at(p) = newIBStrip[b]; }
for (size_t b = 0; b < newIBList.size(); b++, p++) { indexBuffer->at(p) = newIBList[b]; }
#else
indexBuffer->resize(firstIndexPos);
for (size_t b = 0; b < newIBStrip.size(); b++) { indexBuffer->push_back(newIBStrip[b]); }
for (size_t b = 0; b < newIBList.size(); b++) { indexBuffer->push_back(newIBList[b]); }
#endif
}
newIBStrip.clear();
newIBList.clear();
floodFillVisited++;
}
firstIndexPos += materialRanges->at(a).ListCount + materialRanges->at(a).StripCount;
firstEdge = lastEdge;
}
indexBuffer->resize(firstIndexPos);
#undef VERTEX_TO_EDGE
#endif
#ifdef PRINT_STRIPS
CE::Global::Log::WriteToLog("Final index buffer (" + std::to_string(indexBuffer->size()) + " Indices):");
firstIndexPos = 0;
for (size_t a = 0; a < materialRanges->size(); a++)
{
CE::Global::Log::WriteToLog("\tMaterial " + std::to_string(a));
for (size_t b = 0, s = 0; b < materialRanges->at(a).StripCount; s++)
{
std::stringstream r;
r << "\t\tStrip " << s << ": ";
if (indexBuffer->at(firstIndexPos + b) != STRIP_CUT_VALUE)
{
r << indexBuffer->at(firstIndexPos + b++);
while (b < materialRanges->at(a).StripCount && indexBuffer->at(firstIndexPos + b) != STRIP_CUT_VALUE) { r << " | " << indexBuffer->at(firstIndexPos + b++); }
}
while (b < materialRanges->at(a).StripCount && indexBuffer->at(firstIndexPos + b) == STRIP_CUT_VALUE) { r << " | CUT"; b++; }
CE::Global::Log::WriteToLog(r.str());
}
if (materialRanges->at(a).ListCount > 0)
{
std::stringstream r;
r << "\t\tList: " << indexBuffer->at(firstIndexPos + materialRanges->at(a).StripCount);
for (size_t b = 1; b < materialRanges->at(a).ListCount; b++) { r << " | " << indexBuffer->at(firstIndexPos + materialRanges->at(a).StripCount + b); }
CE::Global::Log::WriteToLog(r.str());
}
firstIndexPos += materialRanges->at(a).ListCount + materialRanges->at(a).StripCount;
}
#endif
return res;
}
}
inline void CLOAK_CALL SendResponse(In RespondeFunc func, In RespondeCode code, In size_t Polygon, In_opt size_t Vertex = 0, In_opt std::string msg = "")
{
RespondeInfo info;
info.Code = code;
info.Polygon = Polygon;
info.Vertex = Vertex;
info.Msg = msg;
func(info);
}
inline void CLOAK_CALL SendResponse(In RespondeFunc func, In RespondeCode code, In_opt std::string msg = "")
{
SendResponse(func, code, 0, 0, msg);
}
inline void CLOAK_CALL CopyVertex(In const Vertex& a, In const CloakEngine::Global::Math::Vector& normal, In const CloakEngine::Global::Math::Vector& binormal, In const CloakEngine::Global::Math::Vector& tangent, In uint32_t material, Out FinalVertex* b)
{
Bone tb[4];
//Remove bones with same ID
for (size_t c = 0; c < 4; c++)
{
if (a.Bones[c].Weight > 0)
{
bool f = false;
for (size_t d = 0; d < c && f == false; d++)
{
if (tb[d].Weight > 0 && tb[d].BoneID == a.Bones[c].BoneID)
{
f = true;
tb[d].Weight += a.Bones[c].Weight;
}
}
if (f == false)
{
tb[c].BoneID = a.Bones[c].BoneID;
tb[c].Weight = a.Bones[c].Weight;
}
else
{
tb[c].BoneID = 0;
tb[c].Weight = 0;
}
}
else
{
tb[c].BoneID = 0;
tb[c].Weight = 0;
}
}
float wNorm = 0;
//Normalize bone weights
for (size_t c = 0; c < 4; c++)
{
if (tb[c].Weight > 0) { wNorm += tb[c].Weight; }
}
//Set final vertex
b->Position = a.Position;
b->TexCoord = a.TexCoord;
b->Normal = normal;
b->Binormal = binormal;
b->Tangent = tangent;
b->MaterialID = material;
for (size_t c = 0; c < 4; c++)
{
if (tb[c].Weight > 0)
{
b->Bones[c].BoneID = tb[c].BoneID;
b->Bones[c].Weight = tb[c].Weight / wNorm;
}
else
{
b->Bones[c].BoneID = 0;
b->Bones[c].Weight = 0;
}
}
}
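// Derives the tangent frame of one triangle from its UV deltas (texture-space
// determinant method, with a fallback for degenerate UVs). With calcNorms set,
// the face normal is computed from tangent and binormal; otherwise the given
// per-vertex normals are kept and tangent/binormal are Gram-Schmidt
// orthogonalized against each vertex normal.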
inline void CLOAK_CALL CalculatePolygonVertices(In const Vertex& a, In const Vertex& b, In const Vertex& c, In bool calcNorms, In size_t material, Out FinalVertex res[3])
{
TexCoord tex[2];
tex[0].U = b.TexCoord.U - a.TexCoord.U;
tex[0].V = b.TexCoord.V - a.TexCoord.V;
tex[1].U = c.TexCoord.U - a.TexCoord.U;
tex[1].V = c.TexCoord.V - a.TexCoord.V;
CloakEngine::Global::Math::Vector vec[2];
vec[0] = static_cast<const CloakEngine::Global::Math::Vector>(b.Position) - a.Position;
vec[1] = static_cast<const CloakEngine::Global::Math::Vector>(c.Position) - a.Position;
float det = (tex[0].U*tex[1].V) - (tex[1].U*tex[0].V);
if (fabsf(det) < 1e-4f)
{
tex[0].U = 1;
tex[0].V = 0;
tex[1].U = 1;
tex[1].V = -1;
det = -1;
}
const float den = 1.0f / det;
CloakEngine::Global::Math::Vector tangent = (den * ((tex[1].V * vec[0]) - (tex[0].V * vec[1]))).Normalize();
CloakEngine::Global::Math::Vector binormal = (den * ((tex[0].U * vec[1]) - (tex[1].U * vec[0]))).Normalize();
if (calcNorms)
{
CloakEngine::Global::Math::Vector normal = tangent.Cross(binormal).Normalize();
tangent -= binormal * (binormal.Dot(tangent));
if (normal.Cross(tangent).Dot(binormal) < 0) { tangent = -tangent; }
tangent = tangent.Normalize();
CopyVertex(a, normal, binormal, tangent, static_cast<uint32_t>(material), &res[0]);
CopyVertex(b, normal, binormal, tangent, static_cast<uint32_t>(material), &res[1]);
CopyVertex(c, normal, binormal, tangent, static_cast<uint32_t>(material), &res[2]);
}
else
{
const CloakEngine::Global::Math::Vector an = static_cast<CloakEngine::Global::Math::Vector>(a.Normal).Normalize();
const CloakEngine::Global::Math::Vector bn = static_cast<CloakEngine::Global::Math::Vector>(b.Normal).Normalize();
const CloakEngine::Global::Math::Vector cn = static_cast<CloakEngine::Global::Math::Vector>(c.Normal).Normalize();
const CloakEngine::Global::Math::Vector at = (tangent - (an.Dot(tangent)*an)).Normalize();
const CloakEngine::Global::Math::Vector bt = (tangent - (bn.Dot(tangent)*bn)).Normalize();
const CloakEngine::Global::Math::Vector ct = (tangent - (cn.Dot(tangent)*cn)).Normalize();
const CloakEngine::Global::Math::Vector ab = (binormal - ((an.Dot(binormal)*an) + (at.Dot(binormal)*at))).Normalize();
const CloakEngine::Global::Math::Vector bb = (binormal - ((bn.Dot(binormal)*bn) + (bt.Dot(binormal)*bt))).Normalize();
const CloakEngine::Global::Math::Vector cb = (binormal - ((cn.Dot(binormal)*cn) + (ct.Dot(binormal)*ct))).Normalize();
CopyVertex(a, a.Normal, ab, at, static_cast<uint32_t>(material), &res[0]);
CopyVertex(b, b.Normal, bb, bt, static_cast<uint32_t>(material), &res[1]);
CopyVertex(c, c.Normal, cb, ct, static_cast<uint32_t>(material), &res[2]);
}
}
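//Deduplicates the vertex buffer and derives an index buffer from it: an index
//reference buffer maps every vertex to the first equal one (CompareForIndex),
//indices are then rebased onto the compacted vertex list, material ranges are
//recorded, and Strips::CalculateIndexBufferStrips converts the plain triangle
//list into strips where profitable. The return value selects the index width
//(16/32 bit, optionally with the short strip-cut encoding).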
inline IndexBufferType CLOAK_CALL CalculateIndexBuffer(In const FinalVertex* vb, In size_t vbs, Out CE::List<size_t>* ib, Out CE::List<bool>* usedVB, Out CloakEngine::List<MaterialRange>* materialRanges)
{
usedVB->resize(vbs);
ib->clear();
ib->reserve(vbs);
materialRanges->clear();
//Create Index Reference Buffer:
CE::List<size_t> irb(vbs);
for (size_t a = 0; a < vbs; a++)
{
const FinalVertex& v = vb[a];
for (size_t b = 0; b < a; b++)
{
const FinalVertex& i = vb[irb[b]];
if (v.CompareForIndex(i))
{
usedVB->at(a) = false;
irb[a] = irb[b];
goto irb_found_index;
}
}
usedVB->at(a) = true;
irb[a] = a;
irb_found_index:
continue;
}
//Create rebased index buffer:
CE::List<size_t> ibb(vbs);
ibb[0] = 0;
for (size_t a = 1; a < vbs; a++)
{
if (usedVB->at(a) == true) { ibb[a] = ibb[a - 1] + 1; }
else { ibb[a] = ibb[a - 1]; }
}
for (size_t a = 0; a < vbs; a++)
{
size_t p = a;
while (p != irb[p])
{
CLOAK_ASSUME(usedVB->at(p) == false);
p = irb[p];
}
ibb[a] = ibb[p];
}
#ifdef PRINT_STRIPS
CE::Global::Log::WriteToLog("Simple Index Buffer ("+std::to_string(ibb.size())+" Indices):");
#endif
size_t ibStart = 0;
for (size_t a = 0; a < vbs; a++)
{
ib->push_back(ibb[a]);
#ifdef PRINT_STRIPS
CE::Global::Log::WriteToLog("\tIB[" + std::to_string(a) + "] = " + std::to_string(ibb[a]));
#endif
if (a > 0 && vb[a].MaterialID != vb[a - 1].MaterialID)
{
MaterialRange mr;
mr.ListCount = a - ibStart;
mr.StripCount = 0;
mr.MaterialID = vb[a - 1].MaterialID;
materialRanges->push_back(mr);
ibStart = a;
}
}
//Add last material range:
MaterialRange mr;
mr.ListCount = vbs - ibStart;
mr.StripCount = 0;
mr.MaterialID = vb[vbs - 1].MaterialID;
materialRanges->push_back(mr);
//Calculate triangle strips:
IndexBufferType res;
if (Strips::CalculateIndexBufferStrips(ibb, ib, materialRanges) == false)
{
res = ib->size() < MAX_IB16_SIZE ? IndexBufferType::IB16 : IndexBufferType::IB32;
}
else
{
res = ib->size() < MAX_IB16_SIZE ? IndexBufferType::IB16 : IndexBufferType::IB32;
if (res == IndexBufferType::IB16)
{
for (size_t a = 0; a < ib->size(); a++)
{
if (ib->at(a) == STRIP_CUT_VALUE) { res = IndexBufferType::IB16ShortCut; }
}
}
}
return res;
}
inline bool CLOAK_CALL CheckFloat(In CloakEngine::Files::IReader* r, In const float& f)
{
const float i = static_cast<float>(r->ReadDouble(32));
return fabsf(i - f) < g_floatDelta;
}
inline bool CLOAK_CALL CheckVector(In CloakEngine::Files::IReader* r, In const CloakEngine::Global::Math::Vector& v)
{
CloakEngine::Global::Math::Point p(v);
return CheckFloat(r, p.X) && CheckFloat(r, p.Y) && CheckFloat(r, p.Z);
}
inline void CLOAK_CALL WriteVector(In CloakEngine::Files::IWriter* w, In const CloakEngine::Global::Math::Vector& v)
{
CloakEngine::Global::Math::Point p(v);
w->WriteDouble(32, p.X);
w->WriteDouble(32, p.Y);
w->WriteDouble(32, p.Z);
}
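//Attempts to serve the encode request from the temp-file cache: when game id,
//bounding mode, polygon and vertex data all match the current descriptor, the
//previously encoded payload is copied straight into the output. Returns true
//if a fresh encode is still required.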
inline bool CLOAK_CALL CheckTemp(In CloakEngine::Files::IWriter* output, In const EncodeDesc& encode, In const Desc& desc, In RespondeFunc func)
{
bool suc = false;
if ((encode.flags & EncodeFlags::NO_TEMP_READ) == EncodeFlags::NONE)
{
CloakEngine::Files::IReader* read = nullptr;
CREATE_INTERFACE(CE_QUERY_ARGS(&read));
if (read != nullptr)
{
const std::u16string tmpPath = encode.tempPath;
suc = read->SetTarget(tmpPath, g_TempFileType, false, true) == g_TempFileType.Version;
if (suc) { SendResponse(func, RespondeCode::CHECK_TMP); }
if (suc) { suc = !Engine::TempHandler::CheckGameID(read, encode.targetGameID); }
if (suc) { suc = static_cast<BoundingVolume>(read->ReadBits(8)) == desc.Bounding; }
if (suc) { suc = read->ReadDynamic() == desc.Vertices.size(); }
if (suc) { suc = read->ReadDynamic() == desc.Polygons.size(); }
if (suc)
{
for (size_t a = 0; a < desc.Polygons.size() && suc; a++)
{
const Polygon& p = desc.Polygons[a];
suc = suc && ((read->ReadBits(1) == 1) == p.AutoGenerateNormal);
suc = suc && (read->ReadBits(32) == p.Material);
suc = suc && (read->ReadBits(32) == p.Point[0]);
suc = suc && (read->ReadBits(32) == p.Point[1]);
suc = suc && (read->ReadBits(32) == p.Point[2]);
}
}
if (suc)
{
for (size_t a = 0; a < desc.Vertices.size() && suc; a++)
{
const Vertex& v = desc.Vertices[a];
suc = suc && CheckVector(read, v.Position);
suc = suc && CheckVector(read, v.Normal);
suc = suc && CheckFloat(read, v.TexCoord.U);
suc = suc && CheckFloat(read, v.TexCoord.V);
for (size_t b = 0; b < 4 && suc; b++)
{
suc = suc && (read->ReadBits(32) == v.Bones[b].BoneID);
suc = suc && CheckFloat(read, v.Bones[b].Weight);
}
}
}
if (suc)
{
uint32_t bys = static_cast<uint32_t>(read->ReadBits(32));
uint8_t bis = static_cast<uint8_t>(read->ReadBits(3));
for (uint32_t a = 0; a < bys; a++) { output->WriteBits(8, read->ReadBits(8)); }
if (bis > 0) { output->WriteBits(bis, read->ReadBits(bis)); }
}
}
SAVE_RELEASE(read);
}
return !suc;
}
inline void CLOAK_CALL WriteTemp(In CloakEngine::Files::IVirtualWriteBuffer* data, In uint32_t bys, In uint8_t bis, In const EncodeDesc& encode, In const Desc& desc, In RespondeFunc response)
{
if ((encode.flags & EncodeFlags::NO_TEMP_WRITE) == EncodeFlags::NONE)
{
SendResponse(response, RespondeCode::WRITE_TMP);
CloakEngine::Files::IWriter* write = nullptr;
CREATE_INTERFACE(CE_QUERY_ARGS(&write));
write->SetTarget(encode.tempPath, g_TempFileType, CloakEngine::Files::CompressType::NONE);
Engine::TempHandler::WriteGameID(write, encode.targetGameID);
write->WriteBits(8, static_cast<uint8_t>(desc.Bounding));
write->WriteDynamic(desc.Vertices.size());
write->WriteDynamic(desc.Polygons.size());
for (size_t a = 0; a < desc.Polygons.size(); a++)
{
const Polygon& p = desc.Polygons[a];
write->WriteBits(1, p.AutoGenerateNormal ? 1 : 0);
write->WriteBits(32, p.Material);
write->WriteBits(32, p.Point[0]);
write->WriteBits(32, p.Point[1]);
write->WriteBits(32, p.Point[2]);
}
for (size_t a = 0; a < desc.Vertices.size(); a++)
{
const Vertex& v = desc.Vertices[a];
WriteVector(write, v.Position);
WriteVector(write, v.Normal);
write->WriteDouble(32, v.TexCoord.U);
write->WriteDouble(32, v.TexCoord.V);
for (size_t b = 0; b < 4; b++)
{
write->WriteBits(32, v.Bones[b].BoneID);
write->WriteDouble(32, v.Bones[b].Weight);
}
}
write->WriteBits(32, bys);
write->WriteBits(3, bis);
write->WriteBuffer(data, bys, bis);
SAVE_RELEASE(write);
}
}
inline void CLOAK_CALL WriteSingleVertex(In CloakEngine::Files::IWriter* write, In const FinalVertex& v, In bool saveBones)
{
WriteVector(write, v.Position);
WriteVector(write, v.Normal);
WriteVector(write, v.Binormal);
WriteVector(write, v.Tangent);
write->WriteDouble(32, v.TexCoord.U);
write->WriteDouble(32, v.TexCoord.V);
if (saveBones)
{
size_t bc = 0;
for (size_t a = 0; a < 4; a++)
{
if (v.Bones[a].Weight > 0)
{
bc++;
}
}
write->WriteBits(2, bc);
for (size_t a = 0; a < 4; a++)
{
if (v.Bones[a].Weight > 0)
{
write->WriteBits(32, v.Bones[a].BoneID);
write->WriteDouble(32, v.Bones[a].Weight);
}
}
}
}
#ifdef _DEBUG
inline void CLOAK_CALL __DBG_PrintVertex(In const FinalVertex& v)
{
CloakEngine::Global::Math::Point h(v.Position);
CloakDebugLog("\tPosition = [" + std::to_string(h.X) + "|" + std::to_string(h.Y) + "|" + std::to_string(h.Z) + "]");
h = static_cast<CloakEngine::Global::Math::Point>(v.Normal);
CloakDebugLog("\tNormal = [" + std::to_string(h.X) + "|" + std::to_string(h.Y) + "|" + std::to_string(h.Z) + "]");
h = static_cast<CloakEngine::Global::Math::Point>(v.Binormal);
CloakDebugLog("\tBinormal = [" + std::to_string(h.X) + "|" + std::to_string(h.Y) + "|" + std::to_string(h.Z) + "]");
h = static_cast<CloakEngine::Global::Math::Point>(v.Tangent);
CloakDebugLog("\tTangent = [" + std::to_string(h.X) + "|" + std::to_string(h.Y) + "|" + std::to_string(h.Z) + "]");
CloakDebugLog("\tTexCoord = [" + std::to_string(v.TexCoord.U) + "|" + std::to_string(v.TexCoord.V) + "]");
}
#define PrintVertex(v) __DBG_PrintVertex(v)
#else
#define PrintVertex(v)
#endif
inline void CLOAK_CALL WriteIndexVertexBuffer(In CloakEngine::Files::IWriter* write, In size_t size, In size_t boneCount, In_reads(size) const FinalVertex* vertexBuffer, In_reads(size) const CE::List<bool>& used)
{
#ifdef _DEBUG
size_t vertI = 0;
#endif
for (size_t a = 0; a < size; a++)
{
if (used[a] == true)
{
#ifdef _DEBUG
CloakDebugLog("Write vertex " + std::to_string(a) + " at index " + std::to_string(vertI));
PrintVertex(vertexBuffer[a]);
vertI++;
#endif
WriteSingleVertex(write, vertexBuffer[a], boneCount > 0);
}
}
}
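//Writes one bit per skeleton bone, set if any vertex in [begin, end) is
//influenced by that bone (weight > 0). Skipped entirely for unskinned meshes.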
inline void CLOAK_CALL WriteBoneUsage(In CloakEngine::Files::IWriter* write, In size_t begin, In size_t end, In size_t boneCount, In_reads(end) const FinalVertex* vertexBuffer)
{
if (boneCount > 0)
{
bool* usage = new bool[boneCount];
for (size_t a = 0; a < boneCount; a++) { usage[a] = false; }
for (size_t a = begin; a < end; a++)
{
const FinalVertex& v = vertexBuffer[a];
for (size_t b = 0; b < 4; b++)
{
if (v.Bones[b].Weight > 0)
{
usage[v.Bones[b].BoneID] = true;
}
}
}
for (size_t a = 0; a < boneCount; a++) { write->WriteBits(1, usage[a] ? 1 : 0); }
delete[] usage;
}
}
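//Serializes the index buffer grouped by material range: per range the strip
//and list vertex counts, material id and bone-usage mask are written, followed
//by the indices themselves. With the short strip-cut encoding, the 32-bit cut
//sentinel is remapped to its 16-bit counterpart.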
inline void CLOAK_CALL WriteIndexBuffer(In CloakEngine::Files::IWriter* write, In size_t ibsl, In bool shortStripCut, In size_t boneCount, In_reads(size) const FinalVertex* vertexBuffer, In_reads(size) const CE::List<size_t>& indexBuffer, In const CloakEngine::List<MaterialRange>& matRanges)
{
for (size_t a = 0, b = 0; a < indexBuffer.size() && b < matRanges.size(); b++)
{
const MaterialRange& mr = matRanges[b];
const size_t cS = min(mr.StripCount, indexBuffer.size() - a);
const size_t cL = min(mr.ListCount, indexBuffer.size() - (a + cS));
CloakDebugLog("Write material " + std::to_string(mr.MaterialID) + " (" + std::to_string(cS) + " triangle strip vertices, " + std::to_string(cL) + " triangle list vertices)");
write->WriteBits(32, cS);
write->WriteBits(32, cL);
write->WriteBits(32, mr.MaterialID);
WriteBoneUsage(write, a, a + cS + cL, boneCount, vertexBuffer);
for (size_t c = 0; c < cS; a++, c++)
{
CLOAK_ASSUME(a < indexBuffer.size());
CloakDebugLog("Write Index[" + std::to_string(a) + "]: " + (indexBuffer[a] == STRIP_CUT_VALUE ? "Strip Cut" : std::to_string(indexBuffer[a])));
if (shortStripCut == true && indexBuffer[a] == STRIP_CUT_VALUE) { write->WriteBits(ibsl, STRIP_CUT_VALUE_16); }
else { write->WriteBits(ibsl, indexBuffer[a]); }
}
for (size_t c = 0; c < cL; a++, c++)
{
CLOAK_ASSUME(a < indexBuffer.size());
CloakDebugLog("Write Index[" + std::to_string(a) + "]: " + (indexBuffer[a] == STRIP_CUT_VALUE ? "Strip Cut" : std::to_string(indexBuffer[a])));
write->WriteBits(ibsl, indexBuffer[a]);
}
}
}
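//Serializes the raw (non-indexed) vertex buffer: vertices are flushed as one
//block per contiguous material run, each block prefixed with its vertex count,
//material id and bone-usage mask.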
inline void CLOAK_CALL WriteRawVertexBuffer(In CloakEngine::Files::IWriter* write, In size_t size, In size_t boneCount, In_reads(size) const FinalVertex* vertexBuffer)
{
size_t s = 0;
size_t lMat = 0;
for (size_t a = 0; a < size; a++, s++)
{
if (a == 0) { lMat = vertexBuffer[a].MaterialID; }
else if (lMat != vertexBuffer[a].MaterialID)
{
CloakDebugLog("Write material " + std::to_string(lMat) + " (" + std::to_string(s) + " vertices)");
write->WriteBits(32, s);
write->WriteBits(32, lMat);
WriteBoneUsage(write, a - s, a, boneCount, vertexBuffer);
for (size_t b = a - s; b < a; b++)
{
CloakDebugLog("Write vertex " + std::to_string(b));
PrintVertex(vertexBuffer[b]);
const FinalVertex& v = vertexBuffer[b];
WriteSingleVertex(write, v, boneCount > 0);
}
lMat = vertexBuffer[a].MaterialID;
s = 0;
}
}
CloakDebugLog("Write material " + std::to_string(lMat) + " (" + std::to_string(s) + " vertices)");
write->WriteBits(32, s);
write->WriteBits(32, lMat);
WriteBoneUsage(write, size - s, size, boneCount, vertexBuffer);
for (size_t b = size - s; b < size; b++)
{
CloakDebugLog("Write vertex " + std::to_string(b));
PrintVertex(vertexBuffer[b]);
const FinalVertex& v = vertexBuffer[b];
WriteSingleVertex(write, v, boneCount > 0);
}
}
CLOAKCOMPILER_API Vector::Vector()
{
X = Y = Z = W = 0;
}
CLOAKCOMPILER_API Vector::Vector(In const CloakEngine::Global::Math::Vector& v)
{
CloakEngine::Global::Math::Point p(v);
X = p.X;
Y = p.Y;
Z = p.Z;
W = p.W;
}
CLOAKCOMPILER_API Vector& Vector::operator=(In const Vector& p)
{
X = p.X;
Y = p.Y;
Z = p.Z;
W = p.W;
return *this;
}
CLOAKCOMPILER_API Vector& Vector::operator=(In const CloakEngine::Global::Math::Vector& v)
{
CloakEngine::Global::Math::Point p(v);
X = p.X;
Y = p.Y;
Z = p.Z;
W = p.W;
return *this;
}
CLOAKCOMPILER_API Vector::operator CloakEngine::Global::Math::Vector()
{
return CloakEngine::Global::Math::Vector(X, Y, Z, W);
}
CLOAKCOMPILER_API Vector::operator const CloakEngine::Global::Math::Vector() const
{
return CloakEngine::Global::Math::Vector(X, Y, Z, W);
}
CLOAKCOMPILER_API void CLOAK_CALL EncodeToFile(In CloakEngine::Files::IWriter* output, In const EncodeDesc& encode, In const Desc& desc, In std::function<void(const RespondeInfo&)> response)
{
bool suc = true;
if (CheckTemp(output, encode, desc, response))
{
CloakEngine::Files::IVirtualWriteBuffer* wrBuf = CloakEngine::Files::CreateVirtualWriteBuffer();
CloakEngine::Files::IWriter* write = nullptr;
CREATE_INTERFACE(CE_QUERY_ARGS(&write));
write->SetTarget(wrBuf);
const size_t vbs = desc.Polygons.size() * 3;
FinalVertex* vb = NewArray(FinalVertex, vbs);
size_t matCount = 0;
const size_t polS = desc.Polygons.size();
//Check polygon-vertex availability
for (size_t a = 0; a < desc.Polygons.size() && suc == true; a++)
{
const Polygon& p = desc.Polygons[a];
for (size_t b = 0; b < 3 && suc == true; b++)
{
if (p.Point[b] >= desc.Vertices.size())
{
SendResponse(response, RespondeCode::ERROR_VERTEXREF, a, p.Point[b], "Polygon " + std::to_string(a) + " refers to vertex " + std::to_string(p.Point[b]) + " but there are only " + std::to_string(desc.Vertices.size()) + " vertices!");
suc = false;
}
}
}
if (suc == true)
{
//Write required minimum of bones in animation skeleton
SendResponse(response, RespondeCode::CALC_BONES);
size_t maxBone = 0;
for (size_t a = 0; a < desc.Vertices.size(); a++)
{
const Vertex& v = desc.Vertices[a];
for (size_t b = 0; b < 4; b++)
{
if (v.Bones[b].Weight > 0)
{
maxBone = max(maxBone, v.Bones[b].BoneID);
}
}
}
const size_t boneCount = maxBone > 0 ? maxBone + 1 : 0;
write->WriteBits(32, boneCount);
//Sort polygons by material
SendResponse(response, RespondeCode::CALC_SORT);
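//Stable LSD radix sort over the 32-bit material id: 8 passes of 4 bits each,
//using 16 counting buckets and filling the output back-to-front.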
size_t* const sortHeap = NewArray(size_t, polS * 2);
size_t* sorted[2] = { &sortHeap[0],&sortHeap[polS] };
size_t count[16];
for (size_t a = 0; a < polS; a++) { sorted[0][a] = a; }
for (uint32_t a = 0xf, b = 0; b < 8; a <<= 4, b++)
{
for (size_t c = 0; c < 16; c++) { count[c] = 0; }
for (size_t c = 0; c < polS; c++) { count[(desc.Polygons[sorted[0][c]].Material & a) >> (4 * b)]++; }
for (size_t c = 1; c < 16; c++) { count[c] += count[c - 1]; }
for (size_t c = polS; c > 0; c--)
{
const size_t p = (desc.Polygons[sorted[0][c - 1]].Material & a) >> (4 * b);
sorted[1][count[p] - 1] = sorted[0][c - 1];
count[p]--;
}
size_t* t = sorted[0];
sorted[0] = sorted[1];
sorted[1] = t;
}
//Remap material ids to zero-based array indices
SendResponse(response, RespondeCode::CALC_MATERIALS);
uint32_t lastMat = 0;
for (size_t a = 0, b = 0; a < polS; a++)
{
const Polygon& p = desc.Polygons[sorted[0][a]];
if (a == 0) { lastMat = p.Material; }
else if (lastMat != p.Material)
{
lastMat = p.Material;
b++;
matCount = max(matCount, b + 1);
}
}
matCount = max(1, matCount);
//Calculate binormals/tangents & copy vertex data
SendResponse(response, RespondeCode::CALC_BINORMALS);
for (size_t a = 0; a < polS; a++)
{
const Polygon& p = desc.Polygons[sorted[0][a]];
CalculatePolygonVertices(desc.Vertices[p.Point[0]], desc.Vertices[p.Point[1]], desc.Vertices[p.Point[2]], p.AutoGenerateNormal, p.Material, &vb[3 * a]);
}
if (suc == true)
{
if (boneCount == 0)
{
//Calculate bounding volume for full mesh
if (desc.Bounding != BoundingVolume::None) { SendResponse(response, RespondeCode::CALC_BOUNDING); }
const size_t posSize = desc.Vertices.size();
uint8_t* poslHeap = new uint8_t[(sizeof(CloakEngine::Global::Math::Vector)*posSize) + 15];
CloakEngine::Global::Math::Vector* posl = reinterpret_cast<CloakEngine::Global::Math::Vector*>((reinterpret_cast<uintptr_t>(poslHeap) + 15) & ~static_cast<uintptr_t>(0xF));
for (size_t a = 0; a < posSize; a++) { posl[a] = desc.Vertices[a].Position; }
switch (desc.Bounding)
{
case BoundingVolume::None:
{
write->WriteBits(2, 0);
break;
}
case BoundingVolume::OOBB:
{
Engine::BoundingVolume::BoundingOOBB bound = Engine::BoundingVolume::CalculateBoundingOOBB(posl, posSize);
if (bound.Enabled)
{
SendResponse(response, RespondeCode::WRITE_BOUNDING);
write->WriteBits(2, 1);
WriteVector(write, bound.Center);
for (size_t a = 0; a < 3; a++) { WriteVector(write, bound.Axis[a]); }
for (size_t a = 0; a < 3; a++) { write->WriteDouble(32, bound.HalfSize[a]); }
CloakEngine::Global::Math::Point cen(bound.Center);
CloakDebugLog("Bounding Box center: " + std::to_string(cen.X) + " | " + std::to_string(cen.Y) + " | " + std::to_string(cen.Z));
for (size_t a = 0; a < 3; a++)
{
CloakEngine::Global::Math::Point p(bound.Axis[a]);
CloakDebugLog("Bounding Box axis[" + std::to_string(a) + "]: " + std::to_string(p.X) + " | " + std::to_string(p.Y) + " | " + std::to_string(p.Z));
}
for (size_t a = 0; a < 3; a++)
{
CloakDebugLog("Bounding Box axis[" + std::to_string(a) + "] scale: " + std::to_string(bound.HalfSize[a]));
}
break;
}
else
{
SendResponse(response, RespondeCode::ERROR_BOUNDING, "Failed to calculate OOBB, switch to bounding-Volume: sphere");
}
//Fall through
}
case BoundingVolume::Sphere:
{
Engine::BoundingVolume::BoundingSphere bound = Engine::BoundingVolume::CalculateBoundingSphere(posl, posSize);
if (bound.Enabled)
{
SendResponse(response, RespondeCode::WRITE_BOUNDING);
write->WriteBits(2, 2);
WriteVector(write, bound.Center);
write->WriteDouble(32, bound.Radius);
CloakEngine::Global::Math::Point cen(bound.Center);
CloakDebugLog("Bounding Sphere center: " + std::to_string(cen.X) + " | " + std::to_string(cen.Y) + " | " + std::to_string(cen.Z));
CloakDebugLog("Bounding Sphere radius: " + std::to_string(bound.Radius));
break;
}
else
{
SendResponse(response, RespondeCode::ERROR_BOUNDING, "Failed to calculate bounding sphere!");
suc = false;
}
break;
}
default:
suc = false;
SendResponse(response, RespondeCode::ERROR_BOUNDING, "Unknown bounding volume type");
break;
}
delete[] poslHeap;
}
else
{
std::vector<std::vector<size_t>> boneToVertex(boneCount);
for (size_t a = 0; a < vbs; a++)
{
const FinalVertex& v = vb[a];
for (size_t b = 0; b < 4; b++)
{
if (v.Bones[b].Weight > 0)
{
boneToVertex[v.Bones[b].BoneID].push_back(a);
}
}
}
for (size_t a = 0; a < boneCount; a++)
{
if (boneToVertex[a].size() == 0) { continue; }
//TODO: per-Bone bounding volume
//TODO: Find solution to interpolated vertices (vertices between bones -> weight of one bone < 1)
SendResponse(response, RespondeCode::ERROR_BOUNDING, "Bounding volume calculation for animated objects is not yet implemented!");
suc = false;
}
}
}
if (suc == true)
{
bool useIndexBuffer = false;
if (desc.UseIndexBuffer)
{
//Calculate index buffer
SendResponse(response, RespondeCode::CALC_INDICES);
CE::List<bool> vbUsed(vbs);
CE::List<size_t> ib;
CloakEngine::List<MaterialRange> matRanges;
const IndexBufferType stripCut = CalculateIndexBuffer(vb, vbs, &ib, &vbUsed, &matRanges);
size_t finVbs = 0;
for (size_t a = 0; a < vbs; a++)
{
if (vbUsed[a] == true) { finVbs++; }
}
const size_t ibsL = stripCut == IndexBufferType::IB32 ? 4 : 2;
CloakDebugLog("IB Comparison: IB = " + std::to_string((finVbs*VertexSize) + (ib.size()*ibsL)) + " (" + std::to_string(finVbs) + " Vertices) Raw = " + std::to_string(vbs*VertexSize) + " (" + std::to_string(vbs) + " Vertices)");
//Test whether we use an index buffer at all:
if ((vbs - finVbs)*VertexSize > (ib.size()*ibsL))
{
useIndexBuffer = true;
//Write all vertices + sorted per material indices
SendResponse(response, RespondeCode::WRITE_VERTICES);
write->WriteBits(16, matRanges.size() - 1);
write->WriteBits(32, finVbs);
write->WriteBits(1, 1); //Use index buffer
write->WriteBits(1, ibsL == 2 ? 0 : 1); //16 or 32 bit index buffer size
if (ibsL == 2) { write->WriteBits(1, stripCut == IndexBufferType::IB16ShortCut ? 1 : 0); } //short strip cuts?
write->WriteBits(32, ib.size());
CloakDebugLog("Write Index + Vertex Buffer");
WriteIndexVertexBuffer(write, vbs, boneCount, vb, vbUsed);
WriteIndexBuffer(write, ibsL << 3, stripCut == IndexBufferType::IB16ShortCut, boneCount, vb, ib, matRanges);
}
}
if (useIndexBuffer == false)
{
//Write vertices, sorted and separated by materials
SendResponse(response, RespondeCode::WRITE_VERTICES);
write->WriteBits(16, matCount - 1);
write->WriteBits(32, vbs);
write->WriteBits(1, 0);
CloakDebugLog("Write raw vertex buffer");
WriteRawVertexBuffer(write, vbs, boneCount, vb);
}
}
DeleteArray(sortHeap);
}
DeleteArray(vb);
const uint32_t bys = static_cast<uint32_t>(write->GetPosition());
const uint8_t bis = static_cast<uint8_t>(write->GetBitPosition());
write->Save();
if (suc)
{
output->WriteBuffer(wrBuf, bys, bis);
WriteTemp(wrBuf, bys, bis, encode, desc, response);
}
SAVE_RELEASE(write);
SAVE_RELEASE(wrBuf);
}
SendResponse(response, suc ? RespondeCode::FINISH_SUCCESS : RespondeCode::FINISH_ERROR);
}
}
}
}
// ===== below2.1/busyschedule.cc (danzel-py/Kattis-Problem-Archive, Apache-2.0) =====
#include <algorithm>
#include <iostream>
#include <utility>
using namespace std;
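// Normalizes every 12-hour timestamp into a sortable key: 9-character times
// get a leading zero, the 12 o'clock hour becomes 00, and the key is the
// meridiem letter ('a' sorts before 'p') followed by HH:MM. The untouched raw
// input lines are then printed in key order.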
int main(){
int n;
while (true)
{
cin>>n;
cin.ignore(255,'\n');
if(n == 0){
break;
}
string s;
pair<string,int> arr[n];
string raw[n];
for (int i = 0; i < n; i++)
{
getline(cin,s);
raw[i] = s;
if(s.length() == 9){
s = "0"+s;
}
if(s[0] == '1' && s[1] == '2'){
s[0] = '0';
s[1] = '0';
}
string dpn = s.substr(0,5);
string blkg = s.substr(6,1);
s = blkg+dpn;
arr[i] = make_pair(s,i);
}
sort(arr,arr+n);
for (int i = 0; i < n; i++)
{
cout<<raw[arr[i].second]<<"\n";
}
cout<<'\n';
}
return 0;
}
// ===== src/lib/TileSet.cc (jube/libtmx, 0BSD) =====
/*
* Copyright (c) 2013-2014, Julien Bernard
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <tmx/TileSet.h>
namespace tmx {
const Tile *TileSet::getTile(unsigned id) const noexcept {
for (auto tile : *this) {
if (tile->getId() == id) {
return tile;
}
}
return nullptr;
}
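// Maps a tile id to its pixel rectangle inside a tileset image of the given
// size: the grid width is derived from the image size, margin and spacing, and
// the final offset adds margin, accumulated spacing and the tileset's x/y shift.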
Rect TileSet::getCoords(unsigned id, Size size) const noexcept {
unsigned width = (size.width - 2 * m_margin + m_spacing) / (m_tilewidth + m_spacing); // number of tiles
unsigned height = (size.height - 2 * m_margin + m_spacing) / (m_tileheight + m_spacing); // number of tiles
unsigned tu = id % width;
unsigned tv = id / width;
assert(tv < height);
unsigned du = m_margin + tu * m_spacing + m_x;
unsigned dv = m_margin + tv * m_spacing + m_y;
assert((tu + 1) * m_tilewidth + du <= size.width);
assert((tv + 1) * m_tileheight + dv <= size.height);
return { tu * m_tilewidth + du, tv * m_tileheight + dv, m_tilewidth, m_tileheight };
}
}
| 36.085106 | 111 | 0.685142 | jube |
// ===== include/base64_decode.inl (bcrist/bengine-util, MIT) =====
#if !defined(BE_UTIL_STRING_BASE64_DECODE_HPP_) && !defined(DOXYGEN)
#include "base64_decode.hpp"
#elif !defined(BE_UTIL_STRING_BASE64_DECODE_INL_)
#define BE_UTIL_STRING_BASE64_DECODE_INL_
namespace be::util {
namespace detail {
///////////////////////////////////////////////////////////////////////////////
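// Maps an input character to its 6-bit alphabet value; returns UC(-2) for the
// pad character P and UC(-1) for any character outside the alphabet.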
template <char S62, char S63, char P>
UC base64_index(char symbol) {
if (symbol >= 'A' && symbol <= 'Z') {
return UC(symbol - 'A');
} else if (symbol >= 'a' && symbol <= 'z') {
return UC(26 + symbol - 'a');
} else if (symbol >= '0' && symbol <= '9') {
return UC(52 + symbol - '0');
} else if (symbol == S62) {
return 62u;
} else if (symbol == S63) {
return 63u;
} else if (symbol == P) {
return UC(-2);
} else {
return UC(-1);
}
}
///////////////////////////////////////////////////////////////////////////////
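// Repacks four 6-bit symbol values into three decoded bytes.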
inline void base64_decode_3_bytes(UC a, UC b, UC c, UC d, UC* out) {
out[0] = (a << 2) | (b >> 4);
out[1] = (b << 4) | (c >> 2);
out[2] = (c << 6) | d;
}
///////////////////////////////////////////////////////////////////////////////
inline void base64_decode_2_bytes(UC a, UC b, UC c, UC* out) {
out[0] = (a << 2) | (b >> 4);
out[1] = (b << 4) | (c >> 2);
}
///////////////////////////////////////////////////////////////////////////////
inline void base64_decode_1_byte(UC a, UC b, UC* out) {
out[0] = (a << 2) | (b >> 4);
}
///////////////////////////////////////////////////////////////////////////////
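// Streaming decode: characters outside the alphabet are skipped, a pad
// character terminates the input, complete 4-symbol groups are emitted as
// 3 bytes, and a 2- or 3-symbol tail is flushed after the loop. Returns the
// number of bytes written to out.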
template <char S62, char S63, char P>
std::size_t base64_decode(SV encoded_data, UC* out) {
std::size_t remaining_bytes = encoded_data.size();
const char* ptr = &(*encoded_data.begin());
UC* begin = out;
UC indices[4];
UC n_indices = 0;
while (remaining_bytes > 0) {
UC index = base64_index<S62, S63, P>(*ptr);
indices[n_indices] = index;
++ptr;
if (index <= 63u) {
++n_indices;
if (n_indices == 4) {
base64_decode_3_bytes(indices[0], indices[1], indices[2], indices[3], out);
out += 3;
n_indices = 0;
}
} else if (index == UC(-2)) {
break; // if we find a pad character, ignore the rest of the input
}
--remaining_bytes;
}
if (n_indices == 3) {
base64_decode_2_bytes(indices[0], indices[1], indices[2], out);
out += 2;
} else if (n_indices == 2) {
base64_decode_1_byte(indices[0], indices[1], out);
++out;
}
return std::size_t(out - begin);
}
} // be::util::detail
///////////////////////////////////////////////////////////////////////////////
template <char S62, char S63, char P>
S base64_decode_string(SV encoded_data) {
S decoded;
if (encoded_data.empty()) {
return decoded;
}
decoded.resize((encoded_data.size() / 4) * 3 + 3);
std::size_t size = detail::base64_decode<S62, S63, P>(encoded_data, reinterpret_cast<UC*>(&(decoded[0])));
decoded.resize(size);
return decoded;
}
///////////////////////////////////////////////////////////////////////////////
template <char S62, char S63, char P>
Buf<UC> base64_decode_buf(SV encoded_data) {
if (encoded_data.empty()) {
return Buf<UC>();
}
Buf<UC> buf = make_buf<UC>((encoded_data.size() / 4) * 3 + 3);
std::size_t size = detail::base64_decode<S62, S63, P>(encoded_data, buf.get());
buf.release();
return Buf<UC>(buf.get(), size, be::detail::delete_array);
}
} // be::util
#endif
// ===== module-services/service-cellular/ServiceCellular.cpp (SP2FET/MuditaOS-1, BSL-1.0) =====
// Copyright (c) 2017-2021, Mudita Sp. z.o.o. All rights reserved.
// For licensing, see https://github.com/mudita/MuditaOS/LICENSE.md
#include "endpoints/developerMode/event/ATRequest.hpp"
#include "handler/RawATHandler.hpp"
#include "CellularUrcHandler.hpp"
#include "service-cellular/CellularCall.hpp"
#include "service-cellular/CellularMessage.hpp"
#include "service-cellular/CellularServiceAPI.hpp"
#include "service-cellular/ServiceCellular.hpp"
#include "service-cellular/SignalStrength.hpp"
#include "service-cellular/State.hpp"
#include "service-cellular/USSD.hpp"
#include "service-cellular/MessageConstants.hpp"
#include "service-cellular/connection-manager/ConnectionManagerCellularCommands.hpp"
#include "SimCard.hpp"
#include "NetworkSettings.hpp"
#include "service-cellular/RequestFactory.hpp"
#include "service-cellular/CellularRequestHandler.hpp"
#include "system/messages/SentinelRegistrationMessage.hpp"
#include <Audio/AudioCommon.hpp>
#include <BaseInterface.hpp>
#include <CalllogRecord.hpp>
#include <Commands.hpp>
#include <at/ATFactory.hpp>
#include <Common/Common.hpp>
#include <Common/Query.hpp>
#include <MessageType.hpp>
#include <modem/ATCommon.hpp>
#include <modem/ATParser.hpp>
#include <modem/mux/DLCChannel.h>
#include <modem/mux/CellularMux.h>
#include <NotificationsRecord.hpp>
#include <PhoneNumber.hpp>
#include <Result.hpp>
#include <Service/Message.hpp>
#include <Service/Service.hpp>
#include <Timers/TimerFactory.hpp>
#include <Tables/CalllogTable.hpp>
#include <Tables/Record.hpp>
#include <Utils.hpp>
#include <Utility.hpp>
#include <at/cmd/CLCC.hpp>
#include <at/cmd/CFUN.hpp>
#include <at/UrcClip.hpp>
#include <at/UrcCmti.hpp>
#include <at/UrcCreg.hpp>
#include <at/UrcCtze.hpp>
#include <at/UrcCusd.hpp>
#include <at/UrcQind.hpp>
#include <at/UrcCpin.hpp> // for Cpin
#include <at/response.hpp>
#include <bsp/cellular/bsp_cellular.hpp>
#include <EventStore.hpp>
#include <country.hpp>
#include <log/log.hpp>
#include <at/UrcFactory.hpp>
#include <queries/messages/sms/QuerySMSSearchByType.hpp>
#include <queries/notifications/QueryNotificationsIncrement.hpp>
#include <queries/notifications/QueryNotificationsMultipleIncrement.hpp>
#include <projdefs.h>
#include <service-antenna/AntennaMessage.hpp>
#include <service-antenna/AntennaServiceAPI.hpp>
#include <service-antenna/ServiceAntenna.hpp>
#include <service-appmgr/Constants.hpp>
#include <service-appmgr/Controller.hpp>
#include <service-db/agents/settings/SystemSettings.hpp>
#include <service-db/DBServiceAPI.hpp>
#include <service-db/DBNotificationMessage.hpp>
#include <service-db/QueryMessage.hpp>
#include <service-evtmgr/Constants.hpp>
#include <service-evtmgr/EventManagerServiceAPI.hpp>
#include <service-evtmgr/EVMessages.hpp>
#include <service-desktop/DesktopMessages.hpp>
#include <service-desktop/DeveloperModeMessage.hpp>
#include <service-time/service-time/TimeMessage.hpp>
#include <task.h>
#include <ucs2/UCS2.hpp>
#include <utf8/UTF8.hpp>
#include <queries/messages/sms/QuerySMSUpdate.hpp>
#include <queries/messages/sms/QuerySMSAdd.hpp>
#include <algorithm>
#include <bits/exception.h>
#include <cassert>
#include <iostream>
#include <map>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "checkSmsCenter.hpp"
#include <service-desktop/Constants.hpp>
#include <gsl/util>
#include <ticks.hpp>
#include "ServiceCellularPriv.hpp"
#include <service-cellular/api/request/sim.hpp>
#include <service-cellular/api/notification/notification.hpp>
#include <ctime>
#include <at/cmd/QCFGUsbnet.hpp>
const char *ServiceCellular::serviceName = cellular::service::name;
inline constexpr auto cellularStack = 8000;
using namespace cellular;
using namespace cellular::msg;
using cellular::service::State;
ServiceCellular::ServiceCellular()
: sys::Service(serviceName, "", cellularStack, sys::ServicePriority::Idle),
phoneModeObserver{std::make_unique<sys::phone_modes::Observer>()},
priv{std::make_unique<internal::ServiceCellularPriv>(this)}
{
LOG_INFO("[ServiceCellular] Initializing");
bus.channels.push_back(sys::BusChannel::ServiceCellularNotifications);
bus.channels.push_back(sys::BusChannel::ServiceDBNotifications);
bus.channels.push_back(sys::BusChannel::ServiceEvtmgrNotifications);
bus.channels.push_back(sys::BusChannel::PhoneModeChanges);
callStateTimer = sys::TimerFactory::createPeriodicTimer(
this, "call_state", std::chrono::milliseconds{1000}, [this](sys::Timer &) { CallStateTimerHandler(); });
callEndedRecentlyTimer = sys::TimerFactory::createSingleShotTimer(
this, "callEndedRecentlyTimer", std::chrono::seconds{5}, [this](sys::Timer &timer) {
priv->outSMSHandler.sendMessageIfDelayed();
});
stateTimer = sys::TimerFactory::createPeriodicTimer(
this, "state", std::chrono::milliseconds{1000}, [&](sys::Timer &) { handleStateTimer(); });
ussdTimer = sys::TimerFactory::createPeriodicTimer(
this, "ussd", std::chrono::milliseconds{1000}, [this](sys::Timer &) { handleUSSDTimer(); });
sleepTimer = sys::TimerFactory::createPeriodicTimer(
this, "sleep", constants::sleepTimerInterval, [this](sys::Timer &) { SleepTimerHandler(); });
connectionTimer =
sys::TimerFactory::createPeriodicTimer(this, "connection", std::chrono::seconds{60}, [this](sys::Timer &) {
utility::conditionally_invoke(
[this]() { return phoneModeObserver->isInMode(sys::phone_modes::PhoneMode::Offline); },
[this]() {
if (connectionManager != nullptr)
connectionManager->onTimerTick();
});
});
simTimer = sys::TimerFactory::createSingleShotTimer(
this, "simTimer", std::chrono::milliseconds{6000}, [this](sys::Timer &) { priv->simCard->handleSimTimer(); });
ongoingCall.setStartCallAction([=](const CalllogRecord &rec) {
auto call = DBServiceAPI::CalllogAdd(this, rec);
if (call.ID == DB_ID_NONE) {
LOG_ERROR("CalllogAdd failed");
}
return call;
});
ongoingCall.setEndCallAction([=](const CalllogRecord &rec) {
if (DBServiceAPI::CalllogUpdate(this, rec) && rec.type == CallType::CT_MISSED) {
DBServiceAPI::GetQuery(this,
db::Interface::Name::Notifications,
std::make_unique<db::query::notifications::Increment>(
NotificationsRecord::Key::Calls, rec.phoneNumber));
}
return true;
});
notificationCallback = [this](std::string &data) {
LOG_DEBUG("Notifications callback called with %u data bytes", static_cast<unsigned int>(data.size()));
std::string logStr = utils::removeNewLines(data);
LOG_SENSITIVE(LOGDEBUG, "Data: %s", logStr.c_str());
atURCStream.write(data);
auto vUrc = atURCStream.getURCList();
for (const auto &urc : vUrc) {
std::string message;
auto msg = identifyNotification(urc);
if (msg != std::nullopt) {
bus.sendMulticast(msg.value(), sys::BusChannel::ServiceCellularNotifications);
}
}
};
packetData = std::make_unique<packet_data::PacketData>(*this); /// call in apnListChanged handler
registerMessageHandlers();
}
ServiceCellular::~ServiceCellular()
{
LOG_INFO("[ServiceCellular] Cleaning resources");
}
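// Periodic inactivity check: measures the time since the last CMUX
// communication (tolerating tick-counter wraparound) and, once the threshold
// is exceeded with no ongoing call and the service in the Ready state, puts
// the modem into sleep mode and releases the CPU frequency sentinel.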
void ServiceCellular::SleepTimerHandler()
{
auto currentTime = cpp_freertos::Ticks::TicksToMs(cpp_freertos::Ticks::GetTicks());
auto lastCommunicationTimestamp = cmux->getLastCommunicationTimestamp();
auto timeOfInactivity = currentTime >= lastCommunicationTimestamp
? currentTime - lastCommunicationTimestamp
: std::numeric_limits<TickType_t>::max() - lastCommunicationTimestamp + currentTime;
if (!ongoingCall.isValid() && priv->state->get() == State::ST::Ready &&
timeOfInactivity >= constants::enterSleepModeTime.count()) {
cmux->enterSleepMode();
cpuSentinel->ReleaseMinimumFrequency();
}
}
void ServiceCellular::CallStateTimerHandler()
{
LOG_DEBUG("CallStateTimerHandler");
auto msg = std::make_shared<CellularListCallsMessage>();
bus.sendUnicast(std::move(msg), ServiceCellular::serviceName);
}
sys::ReturnCodes ServiceCellular::InitHandler()
{
board = cmux->getBoard();
settings = std::make_unique<settings::Settings>();
settings->init(::service::ServiceProxy(shared_from_this()));
connectionManager = std::make_unique<ConnectionManager>(
utils::getNumericValue<bool>(
settings->getValue(settings::Cellular::offlineMode, settings::SettingsScope::Global)),
        static_cast<std::chrono::minutes>(utils::getNumericValue<int>(
            settings->getValue(settings::Offline::connectionFrequency, settings::SettingsScope::Global))),
std::make_shared<ConnectionManagerCellularCommands>(*this));
priv->state->set(State::ST::WaitForStartPermission);
settings->registerValueChange(
settings::Cellular::volte_on,
[this](const std::string &value) { volteChanged(value); },
::settings::SettingsScope::Global);
settings->registerValueChange(
settings::Cellular::apn_list,
[this](const std::string &value) { apnListChanged(value); },
::settings::SettingsScope::Global);
priv->setInitialMultiPartSMSUID(static_cast<std::uint8_t>(utils::getNumericValue<int>(
settings->getValue(settings::Cellular::currentUID, settings::SettingsScope::Global))));
priv->saveNewMultiPartSMSUIDCallback = [this](std::uint8_t uid) -> void {
settings->setValue(
settings::Cellular::currentUID, std::to_string(static_cast<int>(uid)), settings::SettingsScope::Global);
};
cpuSentinel = std::make_shared<sys::CpuSentinel>(serviceName, this);
ongoingCall.setCpuSentinel(cpuSentinel);
auto sentinelRegistrationMsg = std::make_shared<sys::SentinelRegistrationMessage>(cpuSentinel);
bus.sendUnicast(sentinelRegistrationMsg, ::service::name::system_manager);
cmux->registerCellularDevice();
return sys::ReturnCodes::Success;
}
sys::ReturnCodes ServiceCellular::DeinitHandler()
{
settings->deinit();
return sys::ReturnCodes::Success;
}
void ServiceCellular::ProcessCloseReason(sys::CloseReason closeReason)
{
sendCloseReadyMessage(this);
}
sys::ReturnCodes ServiceCellular::SwitchPowerModeHandler(const sys::ServicePowerMode mode)
{
LOG_INFO("[ServiceCellular] PowerModeHandler: %s", c_str(mode));
switch (mode) {
    case sys::ServicePowerMode::Active:
        cmux->exitSleepMode();
        break;
    case sys::ServicePowerMode::SuspendToRAM:
    case sys::ServicePowerMode::SuspendToNVM:
cmux->enterSleepMode();
break;
}
return sys::ReturnCodes::Success;
}
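// Wires up the phone-mode and tethering observers, the private
// SIM/network-time/contacts/IMEI handlers, and every bus message handler the
// service responds to.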
void ServiceCellular::registerMessageHandlers()
{
phoneModeObserver->connect(this);
phoneModeObserver->subscribe(
[this](sys::phone_modes::PhoneMode mode) { connectionManager->onPhoneModeChange(mode); });
phoneModeObserver->subscribe([&](sys::phone_modes::Tethering tethering) {
if (tethering == sys::phone_modes::Tethering::On) {
priv->tetheringHandler->enable();
}
else {
priv->tetheringHandler->disable();
logTetheringCalls();
}
});
priv->connectSimCard();
priv->connectNetworkTime();
priv->connectSimContacts();
priv->connectImeiGetHandler();
connect(typeid(CellularStartOperatorsScanMessage), [&](sys::Message *request) -> sys::MessagePointer {
auto msg = static_cast<CellularStartOperatorsScanMessage *>(request);
return handleCellularStartOperatorsScan(msg);
});
connect(typeid(CellularGetActiveContextsMessage), [&](sys::Message *request) -> sys::MessagePointer {
auto msg = static_cast<CellularGetActiveContextsMessage *>(request);
return handleCellularGetActiveContextsMessage(msg);
});
connect(typeid(CellularRequestCurrentOperatorNameMessage), [&](sys::Message *request) -> sys::MessagePointer {
auto msg = static_cast<CellularRequestCurrentOperatorNameMessage *>(request);
return handleCellularRequestCurrentOperatorName(msg);
});
connect(typeid(CellularGetAPNMessage), [&](sys::Message *request) -> sys::MessagePointer {
auto msg = static_cast<CellularGetAPNMessage *>(request);
return handleCellularGetAPNMessage(msg);
});
connect(typeid(CellularSetAPNMessage), [&](sys::Message *request) -> sys::MessagePointer {
auto msg = static_cast<CellularSetAPNMessage *>(request);
return handleCellularSetAPNMessage(msg);
});
connect(typeid(CellularNewAPNMessage), [&](sys::Message *request) -> sys::MessagePointer {
auto msg = static_cast<CellularNewAPNMessage *>(request);
return handleCellularNewAPNMessage(msg);
});
connect(typeid(CellularSetDataTransferMessage), [&](sys::Message *request) -> sys::MessagePointer {
auto msg = static_cast<CellularSetDataTransferMessage *>(request);
return handleCellularSetDataTransferMessage(msg);
});
connect(typeid(CellularGetDataTransferMessage), [&](sys::Message *request) -> sys::MessagePointer {
auto msg = static_cast<CellularGetDataTransferMessage *>(request);
return handleCellularGetDataTransferMessage(msg);
});
connect(typeid(CellularActivateContextMessage), [&](sys::Message *request) -> sys::MessagePointer {
auto msg = static_cast<CellularActivateContextMessage *>(request);
return handleCellularActivateContextMessage(msg);
});
connect(typeid(CellularDeactivateContextMessage), [&](sys::Message *request) -> sys::MessagePointer {
auto msg = static_cast<CellularDeactivateContextMessage *>(request);
return handleCellularDeactivateContextMessage(msg);
});
connect(typeid(CellularChangeVoLTEDataMessage), [&](sys::Message *request) -> sys::MessagePointer {
auto msg = static_cast<CellularChangeVoLTEDataMessage *>(request);
volteOn = msg->getVoLTEon();
settings->setValue(settings::Cellular::volte_on, std::to_string(volteOn), settings::SettingsScope::Global);
NetworkSettings networkSettings(*this);
auto vstate = networkSettings.getVoLTEConfigurationState();
if ((vstate != VoLTEState::On) && volteOn) {
LOG_DEBUG("VoLTE On");
if (networkSettings.setVoLTEState(VoLTEState::On) == at::Result::Code::OK) {
priv->modemResetHandler->performSoftReset();
}
}
else if (!volteOn) {
LOG_DEBUG("VoLTE Off");
if (networkSettings.setVoLTEState(VoLTEState::Off) == at::Result::Code::OK) {
priv->modemResetHandler->performSoftReset();
}
}
return std::make_shared<CellularResponseMessage>(true);
});
connect(typeid(CellularSetFlightModeMessage), [&](sys::Message *request) -> sys::MessagePointer {
auto msg = static_cast<CellularSetFlightModeMessage *>(request);
return handleCellularSetFlightModeMessage(msg);
});
connect(typeid(CellularPowerStateChange), [&](sys::Message *request) -> sys::MessagePointer {
auto msg = static_cast<CellularPowerStateChange *>(request);
priv->nextPowerState = msg->getNewState();
handle_power_state_change();
return sys::MessageNone{};
});
connect(typeid(sdesktop::developerMode::DeveloperModeRequest), [&](sys::Message *request) -> sys::MessagePointer {
auto msg = static_cast<sdesktop::developerMode::DeveloperModeRequest *>(request);
if (typeid(*msg->event.get()) == typeid(sdesktop::developerMode::CellularHotStartEvent)) {
priv->simCard->setChannel(nullptr);
priv->networkTime->setChannel(nullptr);
priv->simContacts->setChannel(nullptr);
priv->imeiGetHandler->setChannel(nullptr);
cmux->closeChannels();
///> change state - simulate hot start
handle_power_up_request();
}
if (typeid(*msg->event.get()) == typeid(sdesktop::developerMode::CellularStateInfoRequestEvent)) {
auto event = std::make_unique<sdesktop::developerMode::CellularStateInfoRequestEvent>(priv->state->c_str());
auto message = std::make_shared<sdesktop::developerMode::DeveloperModeRequest>(std::move(event));
bus.sendUnicast(std::move(message), ::service::name::service_desktop);
}
if (typeid(*msg->event.get()) == typeid(sdesktop::developerMode::CellularSleepModeInfoRequestEvent)) {
auto event = std::make_unique<sdesktop::developerMode::CellularSleepModeInfoRequestEvent>(
cmux->isCellularInSleepMode());
auto message = std::make_shared<sdesktop::developerMode::DeveloperModeRequest>(std::move(event));
bus.sendUnicast(std::move(message), ::service::name::service_desktop);
}
if (typeid(*msg->event.get()) == typeid(sdesktop::developerMode::ATResponseEvent)) {
auto channel = cmux->get(CellularMux::Channel::Commands);
assert(channel);
auto handler = cellular::RawATHandler(*channel);
return handler.handle(msg);
}
return sys::MessageNone{};
});
connect(typeid(CellularNewIncomingSMSMessage), [&](sys::Message *request) -> sys::MessagePointer {
auto msg = static_cast<CellularNewIncomingSMSMessage *>(request);
auto ret = receiveSMS(msg->getData());
return std::make_shared<CellularResponseMessage>(ret);
});
connect(typeid(CellularAnswerIncomingCallMessage), [&](sys::Message *request) -> sys::MessagePointer {
auto msg = static_cast<CellularAnswerIncomingCallMessage *>(request);
return handleCellularAnswerIncomingCallMessage(msg);
});
connect(typeid(CellularCallRequestMessage), [&](sys::Message *request) -> sys::MessagePointer {
if (phoneModeObserver->isInMode(sys::phone_modes::PhoneMode::Offline)) {
this->bus.sendUnicast(std::make_shared<CellularCallRejectedByOfflineNotification>(),
::service::name::appmgr);
return std::make_shared<CellularResponseMessage>(true);
}
auto msg = static_cast<CellularCallRequestMessage *>(request);
return handleCellularCallRequestMessage(msg);
});
connect(typeid(CellularHangupCallMessage), [&](sys::Message *request) -> sys::MessagePointer {
auto msg = static_cast<CellularHangupCallMessage *>(request);
handleCellularHangupCallMessage(msg);
return sys::MessageNone{};
});
connect(typeid(CellularDismissCallMessage), [&](sys::Message *request) -> sys::MessagePointer {
handleCellularDismissCallMessage(request);
return sys::MessageNone{};
});
connect(typeid(db::QueryResponse), [&](sys::Message *request) -> sys::MessagePointer {
auto msg = static_cast<db::QueryResponse *>(request);
return handleDBQueryResponseMessage(msg);
});
connect(typeid(CellularListCallsMessage), [&](sys::Message *request) -> sys::MessagePointer {
auto msg = static_cast<CellularListCallsMessage *>(request);
return handleCellularListCallsMessage(msg);
});
connect(typeid(db::NotificationMessage), [&](sys::Message *request) -> sys::MessagePointer {
auto msg = static_cast<db::NotificationMessage *>(request);
return handleDBNotificationMessage(msg);
});
connect(typeid(CellularRingingMessage), [&](sys::Message *request) -> sys::MessagePointer {
auto msg = static_cast<CellularRingingMessage *>(request);
return handleCellularRingingMessage(msg);
});
connect(typeid(CellularIncominCallMessage),
[&](sys::Message *request) -> sys::MessagePointer { return handleCellularIncomingCallMessage(request); });
connect(typeid(CellularCallerIdMessage), [&](sys::Message *request) -> sys::MessagePointer {
auto msg = static_cast<CellularCallerIdMessage *>(request);
return handleCellularCallerIdMessage(msg);
});
connect(typeid(CellularGetIMSIMessage),
[&](sys::Message *request) -> sys::MessagePointer { return handleCellularGetIMSIMessage(request); });
connect(typeid(CellularGetOwnNumberMessage),
[&](sys::Message *request) -> sys::MessagePointer { return handleCellularGetOwnNumberMessage(request); });
connect(typeid(CellularGetNetworkInfoMessage),
[&](sys::Message *request) -> sys::MessagePointer { return handleCellularGetNetworkInfoMessage(request); });
connect(typeid(CellularAntennaRequestMessage),
[&](sys::Message *request) -> sys::MessagePointer { return handleCellularSelectAntennaMessage(request); });
connect(typeid(CellularSetScanModeMessage),
[&](sys::Message *request) -> sys::MessagePointer { return handleCellularSetScanModeMessage(request); });
connect(typeid(CellularGetScanModeMessage),
[&](sys::Message *request) -> sys::MessagePointer { return handleCellularGetScanModeMessage(request); });
connect(typeid(CellularGetFirmwareVersionMessage), [&](sys::Message *request) -> sys::MessagePointer {
return handleCellularGetFirmwareVersionMessage(request);
});
connect(typeid(sevm::StatusStateMessage),
[&](sys::Message *request) -> sys::MessagePointer { return handleEVMStatusMessage(request); });
connect(typeid(CellularGetCsqMessage),
[&](sys::Message *request) -> sys::MessagePointer { return handleCellularGetCsqMessage(request); });
connect(typeid(CellularGetCregMessage),
[&](sys::Message *request) -> sys::MessagePointer { return handleCellularGetCregMessage(request); });
connect(typeid(CellularGetNwinfoMessage),
[&](sys::Message *request) -> sys::MessagePointer { return handleCellularGetNwinfoMessage(request); });
connect(typeid(CellularGetAntennaMessage),
[&](sys::Message *request) -> sys::MessagePointer { return handleCellularGetAntennaMessage(request); });
connect(typeid(CellularDtmfRequestMessage),
[&](sys::Message *request) -> sys::MessagePointer { return handleCellularDtmfRequestMessage(request); });
connect(typeid(CellularUSSDMessage),
[&](sys::Message *request) -> sys::MessagePointer { return handleCellularUSSDMessage(request); });
connect(typeid(cellular::StateChange),
[&](sys::Message *request) -> sys::MessagePointer { return handleStateRequestMessage(request); });
connect(typeid(CellularCallActiveNotification),
[&](sys::Message *request) -> sys::MessagePointer { return handleCallActiveNotification(request); });
connect(typeid(CellularCallAbortedNotification),
[&](sys::Message *request) -> sys::MessagePointer { return handleCallAbortedNotification(request); });
connect(typeid(CellularPowerUpProcedureCompleteNotification), [&](sys::Message *request) -> sys::MessagePointer {
return handlePowerUpProcedureCompleteNotification(request);
});
connect(typeid(CellularPowerDownDeregisteringNotification), [&](sys::Message *request) -> sys::MessagePointer {
return handlePowerDownDeregisteringNotification(request);
});
connect(typeid(CellularPowerDownDeregisteredNotification), [&](sys::Message *request) -> sys::MessagePointer {
return handlePowerDownDeregisteredNotification(request);
});
connect(typeid(CellularNewIncomingSMSNotification),
[&](sys::Message *request) -> sys::MessagePointer { return handleNewIncomingSMSNotification(request); });
connect(typeid(CellularSmsDoneNotification),
[&](sys::Message *request) -> sys::MessagePointer { return handleSmsDoneNotification(request); });
connect(typeid(CellularSignalStrengthUpdateNotification), [&](sys::Message *request) -> sys::MessagePointer {
return handleSignalStrengthUpdateNotification(request);
});
connect(typeid(CellularNetworkStatusUpdateNotification), [&](sys::Message *request) -> sys::MessagePointer {
return handleNetworkStatusUpdateNotification(request);
});
connect(typeid(CellularUrcIncomingNotification),
[&](sys::Message *request) -> sys::MessagePointer { return handleUrcIncomingNotification(request); });
connect(typeid(CellularRingNotification),
[&](sys::Message *request) -> sys::MessagePointer { return handleCellularRingNotification(request); });
connect(typeid(CellularCallerIdNotification),
[&](sys::Message *request) -> sys::MessagePointer { return handleCellularCallerIdNotification(request); });
connect(typeid(CellularSetConnectionFrequencyMessage), [&](sys::Message *request) -> sys::MessagePointer {
return handleCellularSetConnectionFrequencyMessage(request);
});
handle_CellularGetChannelMessage();
}
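// Central dispatcher of the modem start-up/shutdown state machine: each
// requested state is routed to its handler; BaudDetect and PowerDown
// additionally service a pending power-state change.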
void ServiceCellular::change_state(cellular::StateChange *msg)
{
assert(msg);
switch (msg->request) {
case State::ST::Idle:
handle_idle();
break;
case State::ST::WaitForStartPermission:
handle_wait_for_start_permission();
break;
case State::ST::PowerUpRequest:
handle_power_up_request();
break;
case State::ST::StatusCheck:
handle_status_check();
break;
case State::ST::PowerUpInProgress:
handle_power_up_in_progress_procedure();
break;
case State::ST::PowerUpProcedure:
handle_power_up_procedure();
break;
case State::ST::BaudDetect:
if (nextPowerStateChangeAwaiting) {
handle_power_state_change();
}
else {
handle_baud_detect();
}
break;
case State::ST::AudioConfigurationProcedure:
handle_audio_conf_procedure();
break;
case State::ST::CellularPrivInit:
handle_cellular_priv_init();
break;
case State::ST::CellularConfProcedure:
handle_start_conf_procedure();
break;
case State::ST::APNConfProcedure:
handle_apn_conf_procedure();
break;
case State::ST::SanityCheck:
handle_sim_sanity_check();
break;
case State::ST::ModemOn:
handle_modem_on();
break;
case State::ST::URCReady:
handle_URCReady();
break;
case State::ST::ModemFatalFailure:
handle_fatal_failure();
break;
case State::ST::Failed:
handle_failure();
break;
case State::ST::Ready:
handle_ready();
break;
case State::ST::PowerDownStarted:
handle_power_down_started();
break;
case State::ST::PowerDownWaiting:
handle_power_down_waiting();
break;
case State::ST::PowerDown:
handle_power_down();
if (nextPowerStateChangeAwaiting) {
handle_power_state_change();
}
break;
};
}
bool ServiceCellular::handle_idle()
{
LOG_DEBUG("Idle");
return true;
}
bool ServiceCellular::handle_wait_for_start_permission()
{
auto msg = std::make_shared<CellularCheckIfStartAllowedMessage>();
bus.sendUnicast(msg, ::service::name::system_manager);
return true;
}
bool ServiceCellular::handle_power_up_request()
{
cmux->selectAntenna(bsp::cellular::antenna::lowBand);
switch (board) {
case bsp::Board::RT1051:
priv->state->set(State::ST::StatusCheck);
break;
case bsp::Board::Linux:
priv->state->set(State::ST::PowerUpProcedure);
break;
case bsp::Board::none:
return false;
break;
}
return true;
}
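// Board-specific power-up: on RT1051 the modem is switched on and the state
// machine waits for the status pin to change; on Linux a single baud probe
// distinguishes a hot start (modem already running) from a cold start that
// needs a manual PWR_KEY press on the evaluation board.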
bool ServiceCellular::handle_power_up_procedure()
{
switch (board) {
case bsp::Board::RT1051: {
LOG_DEBUG("RT1051 - cold start");
cmux->turnOnModem();
// wait for status pin change to change state
break;
}
case bsp::Board::Linux: {
// check baud once to determine if it's already turned on
auto ret = cmux->baudDetectOnce();
if (ret == CellularMux::ConfState::Success) {
// it's on aka hot start.
LOG_DEBUG("Linux - hot start");
priv->state->set(State::ST::CellularConfProcedure);
break;
}
else {
// it's off aka cold start
LOG_DEBUG("Linux - cold start");
LOG_WARN("Press PWR_KEY for 2 sec on modem eval board!");
            vTaskDelay(pdMS_TO_TICKS(2000)); // give the user 2 more seconds for input
// if it's Linux, then wait for status pin to become active, to align its starting position with RT1051
vTaskDelay(pdMS_TO_TICKS(8000));
priv->state->set(State::ST::PowerUpInProgress);
break;
}
}
case bsp::Board::none:
default:
LOG_FATAL("Board not known!");
assert(0);
break;
}
return true;
}
bool ServiceCellular::handle_power_up_in_progress_procedure(void)
{
if (priv->modemResetHandler->isResetInProgress()) {
constexpr auto msModemUartInitTime = 12000;
vTaskDelay(pdMS_TO_TICKS(msModemUartInitTime));
}
priv->state->set(State::ST::BaudDetect);
return true;
}
bool ServiceCellular::handle_baud_detect()
{
auto ret = cmux->baudDetectProcedure();
if (ret == CellularMux::ConfState::Success) {
priv->state->set(State::ST::CellularConfProcedure);
return true;
}
else {
priv->state->set(State::ST::ModemFatalFailure);
return false;
}
}
bool ServiceCellular::handle_power_down_started()
{
/// we should not send anything to the modem from now on
return true;
}
bool ServiceCellular::handle_power_down_waiting()
{
switch (board) {
case bsp::Board::RT1051:
// wait for pin status become inactive (handled elsewhere)
break;
case bsp::Board::Linux:
// if it's Linux, then wait for status pin to become inactive, to align with RT1051
vTaskDelay(pdMS_TO_TICKS(17000)); // according to docs this shouldn't be needed, but better be safe than Quectel
priv->state->set(State::ST::PowerDown);
break;
default:
LOG_ERROR("Powering 'down an unknown device not handled");
return false;
}
return true;
}
bool ServiceCellular::handle_power_down()
{
LOG_DEBUG("Powered Down");
cmux->closeChannels();
cmux.reset();
cmux = std::make_unique<CellularMux>(PortSpeed_e::PS460800, this);
if (priv->modemResetHandler->isResetInProgress()) {
priv->state->set(State::ST::Idle);
}
return true;
}
bool ServiceCellular::handle_start_conf_procedure()
{
// Start configuration procedure, if it's first run modem will be restarted
auto confRet = cmux->confProcedure();
if (confRet == CellularMux::ConfState::Success) {
priv->state->set(State::ST::AudioConfigurationProcedure);
return true;
}
priv->state->set(State::ST::Failed);
return false;
}
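/**
 * Audio path configuration. On success the UART is switched to the target
 * baud rate (AT+IPR), the CMUX multiplexer is started and the notifications
 * channel callback is installed; a ModemNeedsReset result triggers a reboot
 * instead.
 */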
bool ServiceCellular::handle_audio_conf_procedure()
{
auto audioRet = cmux->audioConfProcedure();
if (audioRet == CellularMux::ConfState::ModemNeedsReset) {
priv->modemResetHandler->performReboot();
return false;
}
if (audioRet == CellularMux::ConfState::Success) {
auto cmd = at::factory(at::AT::IPR) + std::to_string(ATPortSpeeds_text[cmux->getStartParams().PortSpeed]);
LOG_DEBUG("Setting baudrate %i baud", ATPortSpeeds_text[cmux->getStartParams().PortSpeed]);
if (!cmux->getParser()->cmd(cmd)) {
LOG_ERROR("Baudrate setup error");
priv->state->set(State::ST::Failed);
return false;
}
cmux->getCellular()->setSpeed(ATPortSpeeds_text[cmux->getStartParams().PortSpeed]);
vTaskDelay(1000);
if (cmux->startMultiplexer() == CellularMux::ConfState::Success) {
LOG_DEBUG("[ServiceCellular] Modem is fully operational");
// open channel - notifications
DLCChannel *notificationsChannel = cmux->get(CellularMux::Channel::Notifications);
if (notificationsChannel != nullptr) {
LOG_DEBUG("Setting up notifications callback");
notificationsChannel->setCallback(notificationCallback);
}
priv->state->set(State::ST::CellularPrivInit);
return true;
}
else {
priv->state->set(State::ST::Failed);
return false;
}
}
else if (audioRet == CellularMux::ConfState::Failure) {
/// restart
priv->state->set(State::ST::AudioConfigurationProcedure);
return true;
}
// Reset procedure started, do nothing here
priv->state->set(State::ST::Idle);
return true;
}
bool ServiceCellular::handle_cellular_priv_init()
{
auto channel = cmux->get(CellularMux::Channel::Commands);
priv->simCard->setChannel(channel);
priv->networkTime->setChannel(channel);
priv->simContacts->setChannel(channel);
priv->imeiGetHandler->setChannel(channel);
if (!priv->tetheringHandler->configure()) {
priv->modemResetHandler->performHardReset();
return true;
}
auto flightMode =
settings->getValue(settings::Cellular::offlineMode, settings::SettingsScope::Global) == "1" ? true : false;
connectionManager->setFlightMode(flightMode);
auto interval = 0;
if (utils::toNumeric(settings->getValue(settings::Offline::connectionFrequency, settings::SettingsScope::Global),
interval)) {
connectionManager->setInterval(std::chrono::minutes{interval});
}
if (!connectionManager->onPhoneModeChange(phoneModeObserver->getCurrentPhoneMode())) {
priv->state->set(State::ST::Failed);
LOG_ERROR("Failed to handle phone mode");
return false;
}
priv->state->set(State::ST::APNConfProcedure);
return true;
}
auto ServiceCellular::handle(db::query::SMSSearchByTypeResult *response) -> bool
{
if (response->getResults().empty()) {
priv->outSMSHandler.handleNoMoreDbRecords();
}
else {
for (auto &rec : response->getResults()) {
if (rec.type == SMSType::QUEUED) {
priv->outSMSHandler.handleIncomingDbRecord(rec, callEndedRecentlyTimer.isActive());
}
}
}
return true;
}
/**
 * NOTICE: the URC handling function identifyNotification runs on a different thread, so sending
 * any AT commands is not allowed here (nor in URC handlers and other functions called from here)
* @return
*/
std::optional<std::shared_ptr<sys::Message>> ServiceCellular::identifyNotification(const std::string &data)
{
CellularUrcHandler urcHandler(*this);
std::string str(data.begin(), data.end());
std::string logStr = utils::removeNewLines(str);
LOG_SENSITIVE(LOGDEBUG, "Notification:: %s", logStr.c_str());
auto urc = at::urc::UrcFactory::Create(str);
urc->Handle(urcHandler);
if (!urc->isHandled()) {
LOG_SENSITIVE(LOGWARN, "Unhandled notification: %s", logStr.c_str());
}
return urcHandler.getResponse();
}
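/**
 * Reads a single text message by its modem storage index (AT+QCMGR), switching
 * the TE charset to UCS2 for the read. Concatenated messages are buffered in
 * messageParts until the last part arrives. On exit the GSM charset is
 * restored and, on success, the message is deleted from modem memory.
 */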
auto ServiceCellular::receiveSMS(std::string messageNumber) -> bool
{
constexpr auto ucscSetMaxRetries = 3;
auto retVal = true;
auto channel = cmux->get(CellularMux::Channel::Commands);
if (channel == nullptr) {
retVal = false;
return retVal;
}
auto ucscSetRetries = 0;
while (ucscSetRetries < ucscSetMaxRetries) {
if (!channel->cmd(at::AT::SMS_UCSC2)) {
++ucscSetRetries;
LOG_ERROR("Could not set UCS2 charset mode for TE. Retry %d", ucscSetRetries);
}
else {
break;
}
}
auto _ = gsl::finally([&channel, &retVal, &messageNumber] {
if (!channel->cmd(at::AT::SMS_GSM)) {
LOG_ERROR("Could not set GSM (default) charset mode for TE");
}
// delete message from modem memory
if (retVal && !channel->cmd(at::factory(at::AT::CMGD) + messageNumber)) {
LOG_ERROR("Could not delete SMS from modem");
}
});
bool messageParsed = false;
std::string messageRawBody;
UTF8 receivedNumber;
const auto &cmd = at::factory(at::AT::QCMGR);
auto ret = channel->cmd(cmd + messageNumber, cmd.getTimeout());
if (!ret) {
LOG_ERROR("!!!! Could not read text message !!!!");
retVal = false;
}
else {
for (std::size_t i = 0; i < ret.response.size(); i++) {
if (ret.response[i].find("QCMGR") != std::string::npos) {
std::istringstream ss(ret.response[i]);
std::string token;
std::vector<std::string> tokens;
while (std::getline(ss, token, ',')) {
tokens.push_back(token);
}
tokens[1].erase(std::remove(tokens[1].begin(), tokens[1].end(), '\"'), tokens[1].end());
/*
* tokens:
* [0] - +QCMGR
* [1] - sender number
* [2] - none
* [3] - date YY/MM/DD
* [4] - hour HH/MM/SS/timezone
                 * concatenated messages
* [5] - unique concatenated message id
* [6] - current message number
* [7] - total messages count
*
*/
// parse sender number
receivedNumber = UCS2(tokens[1]).toUTF8();
                // trim the date field; the current system time is used as the message date
                tokens[3].erase(std::remove(tokens[3].begin(), tokens[3].end(), '\"'), tokens[3].end());
                auto messageDate = std::time(nullptr);
if (tokens.size() == 5) {
LOG_DEBUG("Single message");
messageRawBody = ret.response[i + 1];
messageParsed = true;
}
else if (tokens.size() == 8) {
LOG_DEBUG("Concatenated message");
uint32_t last = 0;
uint32_t current = 0;
try {
last = std::stoi(tokens[7]);
current = std::stoi(tokens[6]);
}
catch (const std::exception &e) {
LOG_ERROR("ServiceCellular::receiveSMS error %s", e.what());
retVal = false;
break;
}
LOG_DEBUG("part %" PRIu32 "from %" PRIu32, current, last);
if (current == last) {
messageParts.push_back(ret.response[i + 1]);
for (std::size_t j = 0; j < messageParts.size(); j++) {
messageRawBody += messageParts[j];
}
messageParts.clear();
messageParsed = true;
}
else {
messageParts.push_back(ret.response[i + 1]);
}
}
if (messageParsed) {
messageParsed = false;
const auto decodedMessage = UCS2(messageRawBody).toUTF8();
const auto record = createSMSRecord(decodedMessage, receivedNumber, messageDate);
if (!dbAddSMSRecord(record)) {
LOG_ERROR("Failed to add text message to db");
retVal = false;
break;
}
}
}
}
}
return retVal;
}
bool ServiceCellular::getOwnNumber(std::string &destination)
{
auto ret = cmux->get(CellularMux::Channel::Commands)->cmd(at::AT::CNUM);
if (ret) {
auto begin = ret.response[0].find(',');
auto end = ret.response[0].rfind(',');
if (begin != std::string::npos && end != std::string::npos) {
std::string number;
try {
number = ret.response[0].substr(begin, end - begin);
}
catch (std::exception &e) {
LOG_ERROR("ServiceCellular::getOwnNumber exception: %s", e.what());
return false;
}
number.erase(std::remove(number.begin(), number.end(), '"'), number.end());
number.erase(std::remove(number.begin(), number.end(), ','), number.end());
destination = number;
return true;
}
}
LOG_ERROR("ServiceCellular::getOwnNumber failed.");
return false;
}
bool ServiceCellular::getIMSI(std::string &destination, bool fullNumber)
{
auto ret = cmux->get(CellularMux::Channel::Commands)->cmd(at::AT::CIMI);
if (ret) {
if (fullNumber) {
destination = ret.response[0];
}
else {
try {
destination = ret.response[0].substr(0, 3);
}
catch (std::exception &e) {
LOG_ERROR("ServiceCellular::getIMSI exception: %s", e.what());
return false;
}
}
return true;
}
LOG_ERROR("ServiceCellular::getIMSI failed.");
return false;
}
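/**
 * Collects the CSQ, CREG and QNWINFO responses, always in that order; an empty
 * string is pushed for a failed query so the indices stay stable for callers.
 */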
std::vector<std::string> ServiceCellular::getNetworkInfo(void)
{
std::vector<std::string> data;
auto channel = cmux->get(CellularMux::Channel::Commands);
if (channel) {
auto resp = channel->cmd(at::AT::CSQ);
if (resp.code == at::Result::Code::OK) {
data.push_back(resp.response[0]);
}
else {
LOG_ERROR("CSQ Error");
data.push_back("");
}
resp = channel->cmd(at::AT::CREG);
if (resp.code == at::Result::Code::OK) {
data.push_back(resp.response[0]);
}
else {
LOG_ERROR("CREG Error");
data.push_back("");
}
resp = channel->cmd(at::AT::QNWINFO);
if (resp.code == at::Result::Code::OK) {
std::string ret;
if (at::response::parseQNWINFO(resp.response[0], ret)) {
data.push_back(ret);
}
else {
data.push_back("");
}
}
else {
LOG_ERROR("QNWINFO Error");
data.push_back("");
}
}
return data;
}
std::vector<std::string> get_last_AT_error(DLCChannel *channel)
{
auto ret = channel->cmd(at::AT::CEER);
return std::move(ret.response);
}
void log_last_AT_error(DLCChannel *channel)
{
std::vector<std::string> atErrors(get_last_AT_error(channel));
int i = 1;
for (auto &msg_line : atErrors) {
LOG_ERROR("%d/%d: %s", i, static_cast<int>(atErrors.size()), msg_line.c_str());
i++;
}
}
bool is_SIM_detection_enabled(DLCChannel *channel)
{
auto ret = channel->cmd(at::AT::SIM_DET);
if (ret) {
if (ret.response[0].find("+QSIMDET: 1") != std::string::npos) {
LOG_DEBUG("SIM detecition enabled!");
return true;
}
}
else {
LOG_FATAL("Cant check sim detection status!");
log_last_AT_error(channel);
}
return false;
}
bool enable_SIM_detection(DLCChannel *channel)
{
auto ret = channel->cmd(at::AT::SIM_DET_ON);
if (!ret) {
log_last_AT_error(channel);
return false;
}
return true;
}
bool is_SIM_status_enabled(DLCChannel *channel)
{
auto ret = channel->cmd(at::AT::QSIMSTAT);
if (ret) {
if (ret.response[0].find("+QSIMSTAT: 1") != std::string::npos) {
LOG_DEBUG("SIM swap enabled!");
return true;
}
}
else {
LOG_FATAL("SIM swap status failure! %s", ret.response[0].c_str());
log_last_AT_error(channel);
}
return false;
}
bool enable_SIM_status(DLCChannel *channel)
{
auto ret = channel->cmd(at::AT::SIMSTAT_ON);
if (!ret) {
log_last_AT_error(channel);
return false;
}
return true;
}
void save_SIM_detection_status(DLCChannel *channel)
{
auto ret = channel->cmd(at::AT::STORE_SETTINGS_ATW);
if (!ret) {
log_last_AT_error(channel);
}
}
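/**
 * Ensures SIM hot-swap support: both SIM presence detection (QSIMDET) and SIM
 * inserted/removed status reporting (QSIMSTAT) must be enabled. If either is
 * off, both are enabled and written to the modem's stored settings, but the
 * change apparently only takes effect after a full power cycle - hence the
 * battery-removal request and the reboot signalled by the return value.
 */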
bool sim_check_hot_swap(DLCChannel *channel)
{
assert(channel);
bool reboot_needed = false;
if (!is_SIM_detection_enabled(channel)) {
reboot_needed = true;
}
if (!is_SIM_status_enabled(channel)) {
reboot_needed = true;
}
if (reboot_needed) {
enable_SIM_detection(channel);
enable_SIM_status(channel);
save_SIM_detection_status(channel);
LOG_FATAL("Modem reboot required, Please remove battery!");
}
return !reboot_needed;
}
bool ServiceCellular::handle_sim_sanity_check()
{
auto ret = sim_check_hot_swap(cmux->get(CellularMux::Channel::Commands));
if (ret) {
priv->state->set(State::ST::ModemOn);
}
else {
LOG_ERROR("Sanity check failure - modem has to be rebooted");
priv->modemResetHandler->performHardReset();
}
return ret;
}
bool ServiceCellular::handle_modem_on()
{
auto channel = cmux->get(CellularMux::Channel::Commands);
channel->cmd("AT+CCLK?");
    // inform the modem that the host (AP) is awake and ready
cmux->informModemHostWakeup();
tetheringTurnOnURC();
priv->state->set(State::ST::URCReady);
LOG_DEBUG("AP ready");
return true;
}
bool ServiceCellular::handle_URCReady()
{
auto channel = cmux->get(CellularMux::Channel::Commands);
bool ret = true;
priv->requestNetworkTimeSettings();
ret = ret && channel->cmd(at::AT::ENABLE_NETWORK_REGISTRATION_URC);
bus.sendMulticast<cellular::msg::notification::ModemStateChanged>(cellular::api::ModemState::Ready);
LOG_DEBUG("%s", priv->state->c_str());
return ret;
}
bool ServiceCellular::handleTextMessagesInit()
{
auto channel = cmux->get(CellularMux::Channel::Commands);
if (channel == nullptr) {
LOG_ERROR("Cant configure sim! no Commands channel!");
return false;
}
auto commands = at::getCommadsSet(at::commadsSet::smsInit);
for (const auto &command : commands) {
if (!channel->cmd(command)) {
LOG_ERROR("Text messages init failed!");
return false;
}
}
if (!receiveAllMessages()) {
LOG_ERROR("Receiving all messages from modem failed");
        return true; // this is not a blocking issue
}
return true;
}
SMSRecord ServiceCellular::createSMSRecord(const UTF8 &decodedMessage,
const UTF8 &receivedNumber,
const time_t messageDate,
const SMSType &smsType) const noexcept
{
SMSRecord record{};
record.body = decodedMessage;
record.number = utils::PhoneNumber::getReceivedNumberView(receivedNumber);
record.type = SMSType::INBOX;
record.date = messageDate;
return record;
}
bool ServiceCellular::dbAddSMSRecord(const SMSRecord &record)
{
return DBServiceAPI::AddSMS(
this, record, db::QueryCallback::fromFunction([this, number = record.number](auto response) {
auto result = dynamic_cast<db::query::SMSAddResult *>(response);
if (result == nullptr || !result->result) {
return false;
}
onSMSReceived(number);
return true;
}));
}
void ServiceCellular::onSMSReceived(const utils::PhoneNumber::View &number)
{
DBServiceAPI::GetQuery(
this,
db::Interface::Name::Notifications,
std::make_unique<db::query::notifications::Increment>(NotificationsRecord::Key::Sms, number));
bus.sendMulticast(std::make_shared<CellularIncomingSMSNotificationMessage>(),
sys::BusChannel::ServiceCellularNotifications);
}
bool ServiceCellular::receiveAllMessages()
{
auto channel = cmux->get(CellularMux::Channel::Commands);
if (channel == nullptr) {
return false;
}
constexpr std::string_view cmd = "CMGL: ";
if (auto ret = channel->cmd(at::AT::LIST_MESSAGES)) {
for (std::size_t i = 0; i < ret.response.size(); i++) {
if (auto pos = ret.response[i].find(cmd); pos != std::string::npos) {
auto startPos = pos + cmd.size();
auto endPos = ret.response[i].find_first_of(',');
                if (!receiveSMS(ret.response[i].substr(startPos, endPos - startPos))) {
                    LOG_WARN("Cannot receive text message - %" PRIu32 " / %" PRIu32,
                             static_cast<uint32_t>(i),
                             static_cast<uint32_t>(ret.response.size()));
                }
}
}
return true;
}
else {
return false;
}
}
bool ServiceCellular::handle_failure()
{
priv->state->set(State::ST::Idle);
bus.sendMulticast<cellular::msg::notification::ModemStateChanged>(cellular::api::ModemState::Fail);
return true;
}
bool ServiceCellular::handle_fatal_failure()
{
LOG_FATAL("Await for death!");
bus.sendMulticast<cellular::msg::notification::ModemStateChanged>(cellular::api::ModemState::Fatal);
while (true) {
vTaskDelay(500);
}
return true;
}
bool ServiceCellular::handle_ready()
{
LOG_DEBUG("%s", priv->state->c_str());
sleepTimer.start();
return true;
}
bool ServiceCellular::SetScanMode(std::string mode)
{
auto channel = cmux->get(CellularMux::Channel::Commands);
if (channel) {
auto command = at::factory(at::AT::SET_SCANMODE);
auto resp = channel->cmd(command.getCmd() + mode + ",1", command.getTimeout(), 1);
if (resp.code == at::Result::Code::OK) {
return true;
}
}
return false;
}
std::string ServiceCellular::GetScanMode(void)
{
auto channel = cmux->get(CellularMux::Channel::Commands);
if (channel) {
auto resp = channel->cmd(at::AT::GET_SCANMODE);
if (resp.code == at::Result::Code::OK) {
auto beg = resp.response[0].find(",");
if (beg != std::string::npos) {
auto response = resp.response[0].substr(beg + 1, 1);
return response;
}
}
else {
LOG_ERROR("Unable to get network search mode configuration");
}
}
return {};
}
bool ServiceCellular::transmitDtmfTone(uint32_t digit)
{
auto channel = cmux->get(CellularMux::Channel::Commands);
at::Result resp;
if (channel) {
auto command = at::factory(at::AT::QLDTMF);
std::string dtmfString = "\"" + std::string(1, digit) + "\"";
resp = channel->cmd(command.getCmd() + dtmfString);
if (resp) {
command = at::factory(at::AT::VTS);
resp = channel->cmd(command.getCmd() + dtmfString);
}
}
return resp.code == at::Result::Code::OK;
}
void ServiceCellular::handle_CellularGetChannelMessage()
{
connect(CellularGetChannelMessage(), [&](sys::Message *req) {
auto getChannelMsg = static_cast<CellularGetChannelMessage *>(req);
LOG_DEBUG("Handle request for channel: %s", CellularMux::name(getChannelMsg->dataChannel).c_str());
std::shared_ptr<CellularGetChannelResponseMessage> channelResponsMessage =
std::make_shared<CellularGetChannelResponseMessage>(cmux->get(getChannelMsg->dataChannel));
LOG_DEBUG("channel ptr: %p", channelResponsMessage->dataChannelPtr);
bus.sendUnicast(std::move(channelResponsMessage), req->sender);
return sys::MessageNone{};
});
}
bool ServiceCellular::handle_status_check(void)
{
LOG_INFO("Checking modem status.");
auto modemActive = cmux->isModemActive();
if (modemActive) {
        // modem is already turned on, run the configuration procedure
LOG_INFO("Modem is already turned on.");
LOG_DEBUG("RT1051 - hot start");
priv->state->set(State::ST::PowerUpInProgress);
}
else {
priv->state->set(State::ST::PowerUpProcedure);
}
return true;
}
void ServiceCellular::startStateTimer(uint32_t timeout)
{
stateTimeout = timeout;
stateTimer.start();
}
void ServiceCellular::stopStateTimer()
{
stateTimeout = 0;
stateTimer.stop();
}
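// Called on each state timer tick; counts the timeout down and escalates to
// ModemFatalFailure when the current state did not finish in time.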
void ServiceCellular::handleStateTimer(void)
{
stateTimeout--;
if (stateTimeout == 0) {
stopStateTimer();
LOG_FATAL("State %s timeout occured!", priv->state->c_str());
priv->state->set(State::ST::ModemFatalFailure);
}
}
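/**
 * Reconciles the requested power state with the current state-machine state.
 * When a power-up or power-down is already in flight, the opposite request is
 * queued via nextPowerStateChangeAwaiting and replayed once the transition
 * settles (see the BaudDetect and PowerDown cases in the state handler).
 */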
void ServiceCellular::handle_power_state_change()
{
nextPowerStateChangeAwaiting = false;
auto modemActive = cmux->isModemActive();
if (priv->nextPowerState == State::PowerState::On) {
if (priv->state->get() == State::ST::PowerDownWaiting) {
LOG_DEBUG("Powerdown in progress. Powerup request queued.");
nextPowerStateChangeAwaiting = true;
}
else if (priv->state->get() == State::ST::PowerUpProcedure ||
priv->state->get() == State::ST::PowerUpInProgress) {
LOG_DEBUG("Powerup already in progress");
}
else if (priv->state->get() == State::ST::PowerDown ||
priv->state->get() == State::ST::WaitForStartPermission) {
LOG_INFO("Modem Power UP.");
priv->state->set(State::ST::PowerUpRequest);
}
else {
LOG_DEBUG("Modem already powered up.");
}
}
else {
if (priv->state->get() == State::ST::PowerUpProcedure || priv->state->get() == State::ST::PowerUpInProgress) {
LOG_DEBUG("Powerup in progress. Powerdown request queued.");
nextPowerStateChangeAwaiting = true;
}
else if (priv->state->get() == State::ST::PowerDownWaiting) {
LOG_DEBUG("Powerdown already in progress.");
}
else if (priv->state->get() == State::ST::PowerDown) {
LOG_DEBUG("Modem already powered down.");
}
else if (priv->state->get() == State::ST::WaitForStartPermission && !modemActive) {
LOG_DEBUG("Modem already powered down.");
priv->state->set(State::ST::PowerDown);
}
else {
LOG_INFO("Modem Power DOWN.");
cmux->turnOffModem();
priv->state->set(State::ST::PowerDownWaiting);
}
}
}
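/**
 * Drives a USSD session: a pull request sends the code with CUSD (GSM charset,
 * 120 s command timeout) and arms the USSD timer; an abort closes the CUSD
 * session and follows up with a push request; a push request (re)opens a
 * listening session.
 */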
bool ServiceCellular::handleUSSDRequest(CellularUSSDMessage::RequestType requestType, const std::string &request)
{
constexpr uint32_t commandTimeout = 120000;
auto channel = cmux->get(CellularMux::Channel::Commands);
if (channel != nullptr) {
if (requestType == CellularUSSDMessage::RequestType::pullSesionRequest) {
channel->cmd(at::AT::SMS_GSM);
std::string command = at::factory(at::AT::CUSD_SEND) + request + ",15";
auto result = channel->cmd(command, std::chrono::milliseconds(commandTimeout));
if (result.code == at::Result::Code::OK) {
ussdState = ussd::State::pullRequestSent;
setUSSDTimer();
}
}
else if (requestType == CellularUSSDMessage::RequestType::abortSesion) {
ussdState = ussd::State::sesionAborted;
auto result = channel->cmd(at::AT::CUSD_CLOSE_SESSION);
if (result.code == at::Result::Code::OK) {
CellularServiceAPI::USSDRequest(this, CellularUSSDMessage::RequestType::pushSesionRequest);
}
else {
CellularServiceAPI::USSDRequest(this, CellularUSSDMessage::RequestType::abortSesion);
}
}
else if (requestType == CellularUSSDMessage::RequestType::pushSesionRequest) {
ussdState = ussd::State::pushSesion;
auto result = channel->cmd(at::AT::CUSD_OPEN_SESSION);
if (result.code == at::Result::Code::OK) {}
}
return true;
}
return false;
}
void ServiceCellular::handleUSSDTimer(void)
{
if (ussdTimeout > 0) {
ussdTimeout -= 1;
}
else {
LOG_WARN("USSD timeout occured, abotrig current session");
ussdTimer.stop();
CellularServiceAPI::USSDRequest(this, CellularUSSDMessage::RequestType::abortSesion);
}
}
void ServiceCellular::setUSSDTimer(void)
{
switch (ussdState) {
case ussd::State::pullRequestSent:
ussdTimeout = ussd::pullResponseTimeout;
break;
case ussd::State::pullResponseReceived:
ussdTimeout = ussd::pullSesionTimeout;
break;
case ussd::State::pushSesion:
case ussd::State::sesionAborted:
case ussd::State::none:
ussdTimeout = ussd::noTimeout;
break;
}
if (ussdTimeout == ussd::noTimeout) {
ussdTimer.stop();
return;
}
ussdTimer.start();
}
std::shared_ptr<cellular::RawCommandRespAsync> ServiceCellular::handleCellularStartOperatorsScan(
CellularStartOperatorsScanMessage *msg)
{
LOG_INFO("CellularStartOperatorsScan handled");
auto ret = std::make_shared<cellular::RawCommandRespAsync>(CellularMessage::Type::OperatorsScanResult);
NetworkSettings networkSettings(*this);
ret->data = networkSettings.scanOperators(msg->getFullInfo());
bus.sendUnicast(ret, msg->sender);
return ret;
}
bool ServiceCellular::handle_apn_conf_procedure()
{
LOG_DEBUG("APN on modem configuration");
packetData->setupAPNSettings();
priv->state->set(State::ST::SanityCheck);
return true;
}
std::shared_ptr<CellularCurrentOperatorNameResponse> ServiceCellular::handleCellularRequestCurrentOperatorName(
CellularRequestCurrentOperatorNameMessage *msg)
{
LOG_INFO("CellularRequestCurrentOperatorName handled");
NetworkSettings networkSettings(*this);
const auto currentNetworkOperatorName = networkSettings.getCurrentOperatorName();
Store::GSM::get()->setNetworkOperatorName(currentNetworkOperatorName);
return std::make_shared<CellularCurrentOperatorNameResponse>(currentNetworkOperatorName);
}
std::shared_ptr<CellularGetAPNResponse> ServiceCellular::handleCellularGetAPNMessage(CellularGetAPNMessage *msg)
{
std::vector<std::shared_ptr<packet_data::APN::Config>> apns;
if (auto type = msg->getAPNType(); type) {
if (auto apn = packetData->getAPNFirst(*type); apn) {
apns.push_back(*apn);
}
return std::make_shared<CellularGetAPNResponse>(apns);
}
if (auto ctxid = msg->getContextId(); ctxid) {
if (auto apn = packetData->getAPN(*ctxid); apn) {
apns.push_back(*apn);
}
return std::make_shared<CellularGetAPNResponse>(apns);
}
return std::make_shared<CellularGetAPNResponse>(packetData->getAPNs());
}
std::shared_ptr<CellularSetAPNResponse> ServiceCellular::handleCellularSetAPNMessage(CellularSetAPNMessage *msg)
{
auto apn = msg->getAPNConfig();
auto ret = packetData->setAPN(apn);
settings->setValue(settings::Cellular::apn_list, packetData->saveAPNSettings(), settings::SettingsScope::Global);
return std::make_shared<CellularSetAPNResponse>(ret);
}
std::shared_ptr<CellularNewAPNResponse> ServiceCellular::handleCellularNewAPNMessage(CellularNewAPNMessage *msg)
{
auto apn = msg->getAPNConfig();
std::uint8_t newId = 0;
auto ret = packetData->newAPN(apn, newId);
settings->setValue(settings::Cellular::apn_list, packetData->saveAPNSettings(), settings::SettingsScope::Global);
return std::make_shared<CellularNewAPNResponse>(ret, newId);
}
std::shared_ptr<CellularSetDataTransferResponse> ServiceCellular::handleCellularSetDataTransferMessage(
CellularSetDataTransferMessage *msg)
{
packetData->setDataTransfer(msg->getDataTransfer());
return std::make_shared<CellularSetDataTransferResponse>(at::Result::Code::OK);
}
std::shared_ptr<CellularGetDataTransferResponse> ServiceCellular::handleCellularGetDataTransferMessage(
CellularGetDataTransferMessage *msg)
{
return std::make_shared<CellularGetDataTransferResponse>(packetData->getDataTransfer());
}
std::shared_ptr<CellularActivateContextResponse> ServiceCellular::handleCellularActivateContextMessage(
CellularActivateContextMessage *msg)
{
return std::make_shared<CellularActivateContextResponse>(packetData->activateContext(msg->getContextId()),
msg->getContextId());
}
std::shared_ptr<CellularDeactivateContextResponse> ServiceCellular::handleCellularDeactivateContextMessage(
CellularDeactivateContextMessage *msg)
{
return std::make_shared<CellularDeactivateContextResponse>(packetData->deactivateContext(msg->getContextId()),
msg->getContextId());
}
std::shared_ptr<CellularGetActiveContextsResponse> ServiceCellular::handleCellularGetActiveContextsMessage(
CellularGetActiveContextsMessage *msg)
{
return std::make_shared<CellularGetActiveContextsResponse>(packetData->getActiveContexts());
}
std::shared_ptr<CellularSetOperatorAutoSelectResponse> ServiceCellular::handleCellularSetOperatorAutoSelect(
CellularSetOperatorAutoSelectMessage *msg)
{
LOG_INFO("CellularSetOperatorAutoSelect handled");
NetworkSettings networkSettings(*this);
return std::make_shared<CellularSetOperatorAutoSelectResponse>(networkSettings.setOperatorAutoSelect());
}
std::shared_ptr<CellularSetOperatorResponse> ServiceCellular::handleCellularSetOperator(CellularSetOperatorMessage *msg)
{
LOG_INFO("CellularSetOperatorAutoSelect handled");
NetworkSettings networkSettings(*this);
return std::make_shared<CellularSetOperatorResponse>(
networkSettings.setOperator(msg->getMode(), msg->getFormat(), msg->getName()));
}
void ServiceCellular::volteChanged(const std::string &value)
{
if (!value.empty()) {
LOG_INFO("VoLTE setting state changed to '%s'.", value.c_str());
volteOn = utils::getNumericValue<bool>(value);
}
}
void ServiceCellular::apnListChanged(const std::string &value)
{
if (!value.empty()) {
LOG_INFO("apn_list setting state changed to '%s'.", value.c_str());
packetData->loadAPNSettings(value);
}
}
auto ServiceCellular::handleCellularAnswerIncomingCallMessage(CellularMessage *msg)
-> std::shared_ptr<CellularResponseMessage>
{
LOG_INFO("%s", __PRETTY_FUNCTION__);
if (ongoingCall.getType() != CallType::CT_INCOMING) {
return std::make_shared<CellularResponseMessage>(true);
}
auto channel = cmux->get(CellularMux::Channel::Commands);
auto ret = false;
if (channel) {
auto response = channel->cmd(at::AT::ATA);
if (response) {
// Propagate "CallActive" notification into system
bus.sendMulticast(std::make_shared<CellularCallActiveNotification>(),
sys::BusChannel::ServiceCellularNotifications);
ret = true;
}
}
return std::make_shared<CellularResponseMessage>(ret);
}
auto ServiceCellular::handleCellularCallRequestMessage(CellularCallRequestMessage *msg)
-> std::shared_ptr<CellularResponseMessage>
{
LOG_INFO("%s", __PRETTY_FUNCTION__);
auto channel = cmux->get(CellularMux::Channel::Commands);
if (channel == nullptr) {
return std::make_shared<CellularResponseMessage>(false);
}
cellular::RequestFactory factory(
msg->number.getEntered(), *channel, msg->callMode, Store::GSM::get()->simCardInserted());
auto request = factory.create();
CellularRequestHandler handler(*this);
auto result = channel->cmd(request->command());
request->handle(handler, result);
return std::make_shared<CellularResponseMessage>(request->isHandled());
}
void ServiceCellular::handleCellularHangupCallMessage(CellularHangupCallMessage *msg)
{
LOG_INFO("%s", __PRETTY_FUNCTION__);
auto channel = cmux->get(CellularMux::Channel::Commands);
if (channel) {
if (channel->cmd(at::AT::ATH)) {
callManager.hangUp();
callStateTimer.stop();
callEndedRecentlyTimer.start();
if (!ongoingCall.endCall(CellularCall::Forced::True)) {
LOG_ERROR("Failed to end ongoing call");
}
bus.sendMulticast(std::make_shared<CellularResponseMessage>(true, msg->type),
sys::BusChannel::ServiceCellularNotifications);
}
else {
LOG_ERROR("Call not aborted");
bus.sendMulticast(std::make_shared<CellularResponseMessage>(false, msg->type),
sys::BusChannel::ServiceCellularNotifications);
}
    }
    else {
        // no Commands channel available - report failure to listeners
        bus.sendMulticast(std::make_shared<CellularResponseMessage>(false, msg->type),
                          sys::BusChannel::ServiceCellularNotifications);
    }
}
void ServiceCellular::handleCellularDismissCallMessage(sys::Message *msg)
{
LOG_INFO("%s", __PRETTY_FUNCTION__);
auto message = static_cast<CellularDismissCallMessage *>(msg);
hangUpCall();
if (message->addNotificationRequired()) {
handleCallAbortedNotification(msg);
}
}
auto ServiceCellular::handleDBQueryResponseMessage(db::QueryResponse *msg) -> std::shared_ptr<sys::ResponseMessage>
{
bool responseHandled = false;
auto result = msg->getResult();
if (auto response = dynamic_cast<db::query::SMSSearchByTypeResult *>(result.get())) {
responseHandled = handle(response);
}
else if (result->hasListener()) {
responseHandled = result->handle();
}
if (responseHandled) {
return std::make_shared<sys::ResponseMessage>();
}
else {
return std::make_shared<sys::ResponseMessage>(sys::ReturnCodes::Unresolved);
}
}
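// Polls the current call list (AT+CLCC); when an active voice call is found,
// a CallActive notification is multicast and the call state timer is stopped.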
auto ServiceCellular::handleCellularListCallsMessage(CellularMessage *msg) -> std::shared_ptr<sys::ResponseMessage>
{
at::cmd::CLCC cmd;
auto base = cmux->get(CellularMux::Channel::Commands)->cmd(cmd);
if (auto response = cmd.parseCLCC(base); response) {
const auto &data = response.getData();
auto it = std::find_if(std::begin(data), std::end(data), [&](const auto &entry) {
return entry.stateOfCall == ModemCall::CallState::Active && entry.mode == ModemCall::CallMode::Voice;
});
if (it != std::end(data)) {
auto notification = std::make_shared<CellularCallActiveNotification>();
bus.sendMulticast(std::move(notification), sys::BusChannel::ServiceCellularNotifications);
callStateTimer.stop();
return std::make_shared<CellularResponseMessage>(true);
}
}
return std::make_shared<CellularResponseMessage>(false);
}
auto ServiceCellular::handleDBNotificationMessage(db::NotificationMessage *msg) -> std::shared_ptr<sys::ResponseMessage>
{
if (msg->interface == db::Interface::Name::SMS &&
(msg->type == db::Query::Type::Create || msg->type == db::Query::Type::Update)) {
priv->outSMSHandler.handleDBNotification();
return std::make_shared<sys::ResponseMessage>();
}
return std::make_shared<sys::ResponseMessage>(sys::ReturnCodes::Failure);
}
auto ServiceCellular::handleCellularRingingMessage(CellularRingingMessage *msg) -> std::shared_ptr<sys::ResponseMessage>
{
LOG_INFO("%s", __PRETTY_FUNCTION__);
return std::make_shared<CellularResponseMessage>(ongoingCall.startCall(msg->number, CallType::CT_OUTGOING));
}
auto ServiceCellular::handleCellularIncomingCallMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
LOG_INFO("%s", __PRETTY_FUNCTION__);
auto ret = true;
auto message = static_cast<CellularIncominCallMessage *>(msg);
if (!ongoingCall.isValid()) {
ret = ongoingCall.startCall(message->number, CallType::CT_INCOMING);
}
return std::make_shared<CellularResponseMessage>(ret);
}
auto ServiceCellular::handleCellularCallerIdMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
auto message = static_cast<CellularCallerIdMessage *>(msg);
ongoingCall.setNumber(message->number);
return sys::MessageNone{};
}
auto ServiceCellular::handleCellularGetIMSIMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
std::string temp;
if (getIMSI(temp)) {
return std::make_shared<CellularResponseMessage>(true, temp);
}
return std::make_shared<CellularResponseMessage>(false);
}
auto ServiceCellular::handleCellularGetOwnNumberMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
std::string temp;
if (getOwnNumber(temp)) {
return std::make_shared<CellularGetOwnNumberResponseMessage>(true, temp);
}
return std::make_shared<CellularGetOwnNumberResponseMessage>(false);
}
auto ServiceCellular::handleCellularGetNetworkInfoMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
auto message = std::make_shared<cellular::RawCommandRespAsync>(CellularMessage::Type::NetworkInfoResult);
message->data = getNetworkInfo();
bus.sendUnicast(message, msg->sender);
return std::make_shared<CellularResponseMessage>(true);
}
auto ServiceCellular::handleCellularSelectAntennaMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
auto message = static_cast<CellularAntennaRequestMessage *>(msg);
cmux->selectAntenna(message->antenna);
vTaskDelay(50); // sleep for 50 ms...
auto actualAntenna = cmux->getAntenna();
if (actualAntenna == bsp::cellular::antenna::lowBand) {
LOG_INFO("Low band antenna set");
}
else {
LOG_INFO("High band antenna set");
}
bool changedAntenna = (actualAntenna == message->antenna);
auto notification = std::make_shared<AntennaChangedMessage>();
bus.sendMulticast(notification, sys::BusChannel::AntennaNotifications);
return std::make_shared<CellularResponseMessage>(changedAntenna);
}
auto ServiceCellular::handleCellularSetScanModeMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
auto message = static_cast<CellularSetScanModeMessage *>(msg);
bool ret = SetScanMode(message->data);
return std::make_shared<CellularResponseMessage>(ret);
}
auto ServiceCellular::handleCellularGetScanModeMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
auto mode = GetScanMode();
if (mode != "") {
auto response = std::make_shared<cellular::RawCommandRespAsync>(CellularMessage::Type::GetScanModeResult);
response->data.push_back(mode);
bus.sendUnicast(response, msg->sender);
return std::make_shared<CellularResponseMessage>(true);
}
return std::make_shared<CellularResponseMessage>(false);
}
auto ServiceCellular::handleCellularGetFirmwareVersionMessage(sys::Message *msg)
-> std::shared_ptr<sys::ResponseMessage>
{
std::string response;
auto channel = cmux->get(CellularMux::Channel::Commands);
if (channel) {
auto resp = channel->cmd(at::AT::QGMR);
if (resp.code == at::Result::Code::OK) {
response = resp.response[0];
return std::make_shared<CellularResponseMessage>(true, response);
}
}
return std::make_shared<CellularResponseMessage>(false);
}
auto ServiceCellular::handleEVMStatusMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
using namespace bsp::cellular::status;
auto message = static_cast<sevm::StatusStateMessage *>(msg);
auto status_pin = message->state;
if (priv->modemResetHandler->handleStatusPinEvent(status_pin == value::ACTIVE)) {
return std::make_shared<CellularResponseMessage>(true);
}
if (status_pin == value::ACTIVE) {
if (priv->state->get() == State::ST::PowerUpProcedure) {
priv->state->set(State::ST::PowerUpInProgress); // and go to baud detect as usual
}
}
if (status_pin == value::INACTIVE) {
if (priv->state->get() == State::ST::PowerDownWaiting) {
priv->state->set(State::ST::PowerDown);
}
}
return std::make_shared<CellularResponseMessage>(true);
}
auto ServiceCellular::handleCellularGetCsqMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
auto channel = cmux->get(CellularMux::Channel::Commands);
if (channel) {
auto modemResponse = channel->cmd(at::AT::CSQ);
if (modemResponse.code == at::Result::Code::OK) {
return std::make_shared<CellularResponseMessage>(true, modemResponse.response[0]);
}
}
return std::make_shared<CellularResponseMessage>(false);
}
auto ServiceCellular::handleCellularGetCregMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
auto channel = cmux->get(CellularMux::Channel::Commands);
if (channel) {
auto resp = channel->cmd(at::AT::CREG);
if (resp.code == at::Result::Code::OK) {
return std::make_shared<CellularResponseMessage>(true, resp.response[0]);
}
}
return std::make_shared<CellularResponseMessage>(false);
}
auto ServiceCellular::handleCellularGetNwinfoMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
auto channel = cmux->get(CellularMux::Channel::Commands);
if (channel) {
auto resp = channel->cmd(at::AT::QNWINFO);
if (resp.code == at::Result::Code::OK) {
return std::make_shared<CellularResponseMessage>(true, resp.response[0]);
}
}
return std::make_shared<CellularResponseMessage>(false);
}
auto ServiceCellular::handleCellularGetAntennaMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
auto antenna = cmux->getAntenna();
return std::make_shared<CellularAntennaResponseMessage>(true, antenna, CellularMessage::Type::GetAntenna);
}
auto ServiceCellular::handleCellularDtmfRequestMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
auto message = static_cast<CellularDtmfRequestMessage *>(msg);
auto resp = transmitDtmfTone(message->getDigit());
return std::make_shared<CellularResponseMessage>(resp);
}
auto ServiceCellular::handleCellularUSSDMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
auto message = static_cast<CellularUSSDMessage *>(msg);
return std::make_shared<CellularResponseMessage>(handleUSSDRequest(message->type, message->data));
}
auto ServiceCellular::handleStateRequestMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
change_state(dynamic_cast<cellular::StateChange *>(msg));
return std::make_shared<CellularResponseMessage>(true);
}
auto ServiceCellular::handleCallActiveNotification(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
auto ret = std::make_shared<CellularResponseMessage>(ongoingCall.setActive());
NetworkSettings networkSettings(*this);
auto currentNAT = networkSettings.getCurrentNAT();
if (currentNAT) {
auto currentSimpleNAT = NetworkSettings::toSimpleNAT(*currentNAT);
LOG_INFO("Current NAT %s(%s)",
utils::enumToString(*currentNAT).c_str(),
utils::enumToString(currentSimpleNAT).c_str());
if (currentSimpleNAT == NetworkSettings::SimpleNAT::LTE) {
LOG_INFO("VoLTE call");
}
else {
LOG_INFO("Non VoLTE call");
}
}
else {
LOG_WARN("Cannot get current NAT");
}
return ret;
}
auto ServiceCellular::handleCallAbortedNotification(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
callStateTimer.stop();
auto ret = ongoingCall.endCall();
callManager.hangUp();
return std::make_shared<CellularResponseMessage>(ret);
}
auto ServiceCellular::handlePowerUpProcedureCompleteNotification(sys::Message *msg)
-> std::shared_ptr<sys::ResponseMessage>
{
if (board == bsp::Board::Linux) {
priv->state->set(State::ST::CellularConfProcedure);
}
return std::make_shared<CellularResponseMessage>(true);
}
auto ServiceCellular::handlePowerDownDeregisteringNotification(sys::Message *msg)
-> std::shared_ptr<sys::ResponseMessage>
{
if (priv->state->get() != State::ST::PowerDownWaiting) {
priv->state->set(State::ST::PowerDownStarted);
return std::make_shared<CellularResponseMessage>(true);
}
return std::make_shared<CellularResponseMessage>(false);
}
auto ServiceCellular::handlePowerDownDeregisteredNotification(sys::Message *msg)
-> std::shared_ptr<sys::ResponseMessage>
{
priv->state->set(State::ST::PowerDownWaiting);
return std::make_shared<CellularResponseMessage>(true);
}
auto ServiceCellular::handleNewIncomingSMSNotification(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
auto message = static_cast<CellularNewIncomingSMSNotification *>(msg);
auto notification = std::make_shared<CellularNewIncomingSMSMessage>(message->data);
bus.sendUnicast(std::move(notification), msg->sender);
return std::make_shared<CellularResponseMessage>(true);
}
auto ServiceCellular::handleSmsDoneNotification(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
auto resp = handleTextMessagesInit();
return std::make_shared<CellularResponseMessage>(resp);
}
auto ServiceCellular::handleSignalStrengthUpdateNotification(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
return std::make_shared<CellularResponseMessage>(false);
}
auto ServiceCellular::handleNetworkStatusUpdateNotification(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
return std::make_shared<CellularResponseMessage>(false);
}
auto ServiceCellular::handleUrcIncomingNotification(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
    // while handling a URC, keep the CPU frequency from dropping below Level_4
cpuSentinel->HoldMinimumFrequency(bsp::CpuFrequencyHz::Level_4);
cmux->exitSleepMode();
return std::make_shared<CellularResponseMessage>(true);
}
auto ServiceCellular::handleCellularSetFlightModeMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
auto setMsg = static_cast<CellularSetFlightModeMessage *>(msg);
settings->setValue(
settings::Cellular::offlineMode, std::to_string(setMsg->flightModeOn), settings::SettingsScope::Global);
connectionManager->setFlightMode(setMsg->flightModeOn);
connectionManager->onPhoneModeChange(phoneModeObserver->getCurrentPhoneMode());
return std::make_shared<CellularResponseMessage>(true);
}
auto ServiceCellular::handleCellularRingNotification(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
LOG_INFO("%s", __PRETTY_FUNCTION__);
if (phoneModeObserver->isTetheringOn() || connectionManager->forceDismissCalls()) {
return std::make_shared<CellularResponseMessage>(hangUpCall());
}
if (!callManager.isIncomingCallPropagated()) {
auto message = static_cast<CellularRingNotification *>(msg);
bus.sendMulticast(std::make_shared<CellularIncominCallMessage>(message->getNubmer()),
sys::BusChannel::ServiceCellularNotifications);
callManager.ring();
}
return std::make_shared<CellularResponseMessage>(true);
}
auto ServiceCellular::handleCellularCallerIdNotification(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage>
{
if (connectionManager->forceDismissCalls()) {
return std::make_shared<CellularResponseMessage>(hangUpCall());
}
auto message = static_cast<CellularCallerIdNotification *>(msg);
if (phoneModeObserver->isTetheringOn()) {
tetheringCalllog.push_back(CalllogRecord{CallType::CT_MISSED, message->getNubmer()});
return std::make_shared<CellularResponseMessage>(hangUpCallBusy());
}
if (!callManager.isCallerInfoComplete()) {
bus.sendMulticast(std::make_shared<CellularCallerIdMessage>(message->getNubmer()),
sys::BusChannel::ServiceCellularNotifications);
callManager.completeCallerInfo();
}
return std::make_shared<CellularResponseMessage>(true);
}
auto ServiceCellular::handleCellularSetConnectionFrequencyMessage(sys::Message *msg)
-> std::shared_ptr<sys::ResponseMessage>
{
auto setMsg = static_cast<CellularSetConnectionFrequencyMessage *>(msg);
settings->setValue(settings::Offline::connectionFrequency,
std::to_string(setMsg->getConnectionFrequency()),
settings::SettingsScope::Global);
connectionManager->setInterval(std::chrono::minutes{setMsg->getConnectionFrequency()});
connectionManager->onPhoneModeChange(phoneModeObserver->getCurrentPhoneMode());
return std::make_shared<CellularResponseMessage>(true);
}
auto ServiceCellular::hangUpCall() -> bool
{
auto channel = cmux->get(CellularMux::Channel::Commands);
if (channel != nullptr) {
if (channel->cmd(at::factory(at::AT::ATH))) {
callManager.hangUp();
return true;
}
}
LOG_ERROR("Failed to hang up call");
return false;
}
auto ServiceCellular::hangUpCallBusy() -> bool
{
auto channel = cmux->get(CellularMux::Channel::Commands);
if (channel != nullptr) {
if (channel->cmd(at::factory(at::AT::QHUP_BUSY))) {
return true;
}
}
LOG_ERROR("Failed to hang up call");
return false;
}
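// Disables the periodic CSQ, ACT, SMS and RING URCs while tethering is
// active; tetheringTurnOnURC() below restores them.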
auto ServiceCellular::tetheringTurnOffURC() -> bool
{
auto channel = cmux->get(CellularMux::Channel::Commands);
if (channel != nullptr) {
if (!channel->cmd(at::factory(at::AT::CSQ_URC_OFF))) {
LOG_ERROR("Failed to stop CSQ URC");
return false;
}
if (!channel->cmd(at::factory(at::AT::ACT_URC_OFF))) {
LOG_ERROR("Failed to stop ACT URC");
return false;
}
if (!channel->cmd(at::factory(at::AT::SMS_URC_OFF))) {
LOG_ERROR("Failed to stop SMS URC");
return false;
}
if (!channel->cmd(at::factory(at::AT::RING_URC_OFF))) {
LOG_ERROR("Failed to stop RING URC");
return false;
}
}
return true;
}
auto ServiceCellular::tetheringTurnOnURC() -> bool
{
auto channel = cmux->get(CellularMux::Channel::Commands);
if (channel != nullptr) {
if (!channel->cmd(at::factory(at::AT::CSQ_URC_ON))) {
LOG_ERROR("Failed to stop CSQ URC");
return false;
}
if (!channel->cmd(at::factory(at::AT::ACT_URC_ON))) {
LOG_ERROR("Failed to stop ACT URC");
return false;
}
if (!channel->cmd(at::factory(at::AT::SMS_URC_ON))) {
LOG_ERROR("Failed to stop SMS URC");
return false;
}
if (!channel->cmd(at::factory(at::AT::RING_URC_ON))) {
LOG_ERROR("Failed to stop RING URC");
return false;
}
}
return true;
}
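// Flushes calls collected while tethering was active: each record is added to
// the calllog database and the missed-call notification counters are bumped
// in a single MultipleIncrement query.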
auto ServiceCellular::logTetheringCalls() -> void
{
if (!tetheringCalllog.empty()) {
        for (const auto &callRecord : tetheringCalllog) {
auto call = DBServiceAPI::CalllogAdd(this, callRecord);
if (call.ID == DB_ID_NONE) {
LOG_ERROR("CalllogAdd failed");
}
}
std::vector<utils::PhoneNumber::View> numbers;
        for (const auto &calllogRecord : tetheringCalllog) {
numbers.push_back(calllogRecord.phoneNumber);
}
DBServiceAPI::GetQuery(
this,
db::Interface::Name::Notifications,
std::make_unique<db::query::notifications::MultipleIncrement>(NotificationsRecord::Key::Calls, numbers));
tetheringCalllog.clear();
}
}
TaskHandle_t ServiceCellular::getTaskHandle()
{
return xTaskGetCurrentTaskHandle();
}
| 36.768827 | 126 | 0.651643 | SP2FET |
8b2bbd9da35d3774d949b40fdc27ec6110b0762c | 20,461 | hpp | C++ | boost/lib/include/boost/asio/impl/compose.hpp | mamil/demo | 32240d95b80175549e6a1904699363ce672a1591 | ["MIT"] | 177 | 2021-02-19T02:01:04.000Z | 2022-03-30T07:31:21.000Z | boost/lib/include/boost/asio/impl/compose.hpp | mamil/demo | 32240d95b80175549e6a1904699363ce672a1591 | ["MIT"] | 188 | 2021-02-19T04:15:55.000Z | 2022-03-26T09:42:15.000Z | boost/lib/include/boost/asio/impl/compose.hpp | mamil/demo | 32240d95b80175549e6a1904699363ce672a1591 | ["MIT"] | 78 | 2021-03-05T03:01:13.000Z | 2022-03-29T07:10:01.000Z | //
// impl/compose.hpp
// ~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2020 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef BOOST_ASIO_IMPL_COMPOSE_HPP
#define BOOST_ASIO_IMPL_COMPOSE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include <boost/asio/detail/config.hpp>
#include <boost/asio/associated_executor.hpp>
#include <boost/asio/detail/handler_alloc_helpers.hpp>
#include <boost/asio/detail/handler_cont_helpers.hpp>
#include <boost/asio/detail/handler_invoke_helpers.hpp>
#include <boost/asio/detail/type_traits.hpp>
#include <boost/asio/detail/variadic_templates.hpp>
#include <boost/asio/execution/executor.hpp>
#include <boost/asio/execution/outstanding_work.hpp>
#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/is_executor.hpp>
#include <boost/asio/system_executor.hpp>
#include <boost/asio/detail/push_options.hpp>
namespace boost {
namespace asio {
namespace detail
{
template <typename Executor, typename = void>
class composed_work_guard
{
public:
typedef typename decay<
typename prefer_result<Executor,
execution::outstanding_work_t::tracked_t
>::type
>::type executor_type;
composed_work_guard(const Executor& ex)
: executor_(boost::asio::prefer(ex, execution::outstanding_work.tracked))
{
}
void reset()
{
}
executor_type get_executor() const BOOST_ASIO_NOEXCEPT
{
return executor_;
}
private:
executor_type executor_;
};
#if !defined(BOOST_ASIO_NO_TS_EXECUTORS)
template <typename Executor>
struct composed_work_guard<Executor,
typename enable_if<
!execution::is_executor<Executor>::value
>::type> : executor_work_guard<Executor>
{
composed_work_guard(const Executor& ex)
: executor_work_guard<Executor>(ex)
{
}
};
#endif // !defined(BOOST_ASIO_NO_TS_EXECUTORS)
template <typename>
struct composed_io_executors;
template <>
struct composed_io_executors<void()>
{
composed_io_executors() BOOST_ASIO_NOEXCEPT
: head_(system_executor())
{
}
typedef system_executor head_type;
system_executor head_;
};
inline composed_io_executors<void()> make_composed_io_executors()
{
return composed_io_executors<void()>();
}
template <typename Head>
struct composed_io_executors<void(Head)>
{
explicit composed_io_executors(const Head& ex) BOOST_ASIO_NOEXCEPT
: head_(ex)
{
}
typedef Head head_type;
Head head_;
};
template <typename Head>
inline composed_io_executors<void(Head)>
make_composed_io_executors(const Head& head)
{
return composed_io_executors<void(Head)>(head);
}
#if defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES)
template <typename Head, typename... Tail>
struct composed_io_executors<void(Head, Tail...)>
{
explicit composed_io_executors(const Head& head,
const Tail&... tail) BOOST_ASIO_NOEXCEPT
: head_(head),
tail_(tail...)
{
}
void reset()
{
head_.reset();
tail_.reset();
}
typedef Head head_type;
Head head_;
composed_io_executors<void(Tail...)> tail_;
};
template <typename Head, typename... Tail>
inline composed_io_executors<void(Head, Tail...)>
make_composed_io_executors(const Head& head, const Tail&... tail)
{
return composed_io_executors<void(Head, Tail...)>(head, tail...);
}
#else // defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES)
#define BOOST_ASIO_PRIVATE_COMPOSED_IO_EXECUTORS_DEF(n) \
template <typename Head, BOOST_ASIO_VARIADIC_TPARAMS(n)> \
struct composed_io_executors<void(Head, BOOST_ASIO_VARIADIC_TARGS(n))> \
{ \
explicit composed_io_executors(const Head& head, \
BOOST_ASIO_VARIADIC_CONSTREF_PARAMS(n)) BOOST_ASIO_NOEXCEPT \
: head_(head), \
tail_(BOOST_ASIO_VARIADIC_BYVAL_ARGS(n)) \
{ \
} \
\
void reset() \
{ \
head_.reset(); \
tail_.reset(); \
} \
\
typedef Head head_type; \
Head head_; \
composed_io_executors<void(BOOST_ASIO_VARIADIC_TARGS(n))> tail_; \
}; \
\
template <typename Head, BOOST_ASIO_VARIADIC_TPARAMS(n)> \
inline composed_io_executors<void(Head, BOOST_ASIO_VARIADIC_TARGS(n))> \
make_composed_io_executors(const Head& head, \
BOOST_ASIO_VARIADIC_CONSTREF_PARAMS(n)) \
{ \
return composed_io_executors< \
void(Head, BOOST_ASIO_VARIADIC_TARGS(n))>( \
head, BOOST_ASIO_VARIADIC_BYVAL_ARGS(n)); \
} \
/**/
BOOST_ASIO_VARIADIC_GENERATE(BOOST_ASIO_PRIVATE_COMPOSED_IO_EXECUTORS_DEF)
#undef BOOST_ASIO_PRIVATE_COMPOSED_IO_EXECUTORS_DEF
#endif // defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES)
template <typename>
struct composed_work;
template <>
struct composed_work<void()>
{
typedef composed_io_executors<void()> executors_type;
composed_work(const executors_type&) BOOST_ASIO_NOEXCEPT
: head_(system_executor())
{
}
void reset()
{
head_.reset();
}
typedef system_executor head_type;
composed_work_guard<system_executor> head_;
};
template <typename Head>
struct composed_work<void(Head)>
{
typedef composed_io_executors<void(Head)> executors_type;
explicit composed_work(const executors_type& ex) BOOST_ASIO_NOEXCEPT
: head_(ex.head_)
{
}
void reset()
{
head_.reset();
}
typedef Head head_type;
composed_work_guard<Head> head_;
};
#if defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES)
template <typename Head, typename... Tail>
struct composed_work<void(Head, Tail...)>
{
typedef composed_io_executors<void(Head, Tail...)> executors_type;
explicit composed_work(const executors_type& ex) BOOST_ASIO_NOEXCEPT
: head_(ex.head_),
tail_(ex.tail_)
{
}
void reset()
{
head_.reset();
tail_.reset();
}
typedef Head head_type;
composed_work_guard<Head> head_;
composed_work<void(Tail...)> tail_;
};
#else // defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES)
#define BOOST_ASIO_PRIVATE_COMPOSED_WORK_DEF(n) \
template <typename Head, BOOST_ASIO_VARIADIC_TPARAMS(n)> \
struct composed_work<void(Head, BOOST_ASIO_VARIADIC_TARGS(n))> \
{ \
typedef composed_io_executors<void(Head, \
BOOST_ASIO_VARIADIC_TARGS(n))> executors_type; \
\
explicit composed_work(const executors_type& ex) BOOST_ASIO_NOEXCEPT \
: head_(ex.head_), \
tail_(ex.tail_) \
{ \
} \
\
void reset() \
{ \
head_.reset(); \
tail_.reset(); \
} \
\
typedef Head head_type; \
composed_work_guard<Head> head_; \
composed_work<void(BOOST_ASIO_VARIADIC_TARGS(n))> tail_; \
}; \
/**/
BOOST_ASIO_VARIADIC_GENERATE(BOOST_ASIO_PRIVATE_COMPOSED_WORK_DEF)
#undef BOOST_ASIO_PRIVATE_COMPOSED_WORK_DEF
#endif // defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES)
#if defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES)
template <typename Impl, typename Work, typename Handler, typename Signature>
class composed_op;
template <typename Impl, typename Work, typename Handler,
typename R, typename... Args>
class composed_op<Impl, Work, Handler, R(Args...)>
#else // defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES)
template <typename Impl, typename Work, typename Handler, typename Signature>
class composed_op
#endif // defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES)
{
public:
template <typename I, typename W, typename H>
composed_op(BOOST_ASIO_MOVE_ARG(I) impl,
BOOST_ASIO_MOVE_ARG(W) work,
BOOST_ASIO_MOVE_ARG(H) handler)
: impl_(BOOST_ASIO_MOVE_CAST(I)(impl)),
work_(BOOST_ASIO_MOVE_CAST(W)(work)),
handler_(BOOST_ASIO_MOVE_CAST(H)(handler)),
invocations_(0)
{
}
#if defined(BOOST_ASIO_HAS_MOVE)
composed_op(composed_op&& other)
: impl_(BOOST_ASIO_MOVE_CAST(Impl)(other.impl_)),
work_(BOOST_ASIO_MOVE_CAST(Work)(other.work_)),
handler_(BOOST_ASIO_MOVE_CAST(Handler)(other.handler_)),
invocations_(other.invocations_)
{
}
#endif // defined(BOOST_ASIO_HAS_MOVE)
typedef typename associated_executor<Handler,
typename composed_work_guard<
typename Work::head_type
>::executor_type
>::type executor_type;
executor_type get_executor() const BOOST_ASIO_NOEXCEPT
{
return (get_associated_executor)(handler_, work_.head_.get_executor());
}
typedef typename associated_allocator<Handler,
std::allocator<void> >::type allocator_type;
allocator_type get_allocator() const BOOST_ASIO_NOEXCEPT
{
return (get_associated_allocator)(handler_, std::allocator<void>());
}
#if defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES)
template<typename... T>
void operator()(BOOST_ASIO_MOVE_ARG(T)... t)
{
if (invocations_ < ~0u)
++invocations_;
impl_(*this, BOOST_ASIO_MOVE_CAST(T)(t)...);
}
void complete(Args... args)
{
this->work_.reset();
this->handler_(BOOST_ASIO_MOVE_CAST(Args)(args)...);
}
#else // defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES)
void operator()()
{
if (invocations_ < ~0u)
++invocations_;
impl_(*this);
}
void complete()
{
this->work_.reset();
this->handler_();
}
#define BOOST_ASIO_PRIVATE_COMPOSED_OP_DEF(n) \
template<BOOST_ASIO_VARIADIC_TPARAMS(n)> \
void operator()(BOOST_ASIO_VARIADIC_MOVE_PARAMS(n)) \
{ \
if (invocations_ < ~0u) \
++invocations_; \
impl_(*this, BOOST_ASIO_VARIADIC_MOVE_ARGS(n)); \
} \
\
template<BOOST_ASIO_VARIADIC_TPARAMS(n)> \
void complete(BOOST_ASIO_VARIADIC_MOVE_PARAMS(n)) \
{ \
this->work_.reset(); \
this->handler_(BOOST_ASIO_VARIADIC_MOVE_ARGS(n)); \
} \
/**/
BOOST_ASIO_VARIADIC_GENERATE(BOOST_ASIO_PRIVATE_COMPOSED_OP_DEF)
#undef BOOST_ASIO_PRIVATE_COMPOSED_OP_DEF
#endif // defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES)
//private:
Impl impl_;
Work work_;
Handler handler_;
unsigned invocations_;
};
template <typename Impl, typename Work, typename Handler, typename Signature>
inline asio_handler_allocate_is_deprecated
asio_handler_allocate(std::size_t size,
composed_op<Impl, Work, Handler, Signature>* this_handler)
{
#if defined(BOOST_ASIO_NO_DEPRECATED)
boost_asio_handler_alloc_helpers::allocate(size, this_handler->handler_);
return asio_handler_allocate_is_no_longer_used();
#else // defined(BOOST_ASIO_NO_DEPRECATED)
return boost_asio_handler_alloc_helpers::allocate(
size, this_handler->handler_);
#endif // defined(BOOST_ASIO_NO_DEPRECATED)
}
template <typename Impl, typename Work, typename Handler, typename Signature>
inline asio_handler_deallocate_is_deprecated
asio_handler_deallocate(void* pointer, std::size_t size,
composed_op<Impl, Work, Handler, Signature>* this_handler)
{
boost_asio_handler_alloc_helpers::deallocate(
pointer, size, this_handler->handler_);
#if defined(BOOST_ASIO_NO_DEPRECATED)
return asio_handler_deallocate_is_no_longer_used();
#endif // defined(BOOST_ASIO_NO_DEPRECATED)
}
template <typename Impl, typename Work, typename Handler, typename Signature>
inline bool asio_handler_is_continuation(
composed_op<Impl, Work, Handler, Signature>* this_handler)
{
return this_handler->invocations_ > 1 ? true
: boost_asio_handler_cont_helpers::is_continuation(
this_handler->handler_);
}
template <typename Function, typename Impl,
typename Work, typename Handler, typename Signature>
inline asio_handler_invoke_is_deprecated
asio_handler_invoke(Function& function,
composed_op<Impl, Work, Handler, Signature>* this_handler)
{
boost_asio_handler_invoke_helpers::invoke(
function, this_handler->handler_);
#if defined(BOOST_ASIO_NO_DEPRECATED)
return asio_handler_invoke_is_no_longer_used();
#endif // defined(BOOST_ASIO_NO_DEPRECATED)
}
template <typename Function, typename Impl,
typename Work, typename Handler, typename Signature>
inline asio_handler_invoke_is_deprecated
asio_handler_invoke(const Function& function,
composed_op<Impl, Work, Handler, Signature>* this_handler)
{
boost_asio_handler_invoke_helpers::invoke(
function, this_handler->handler_);
#if defined(BOOST_ASIO_NO_DEPRECATED)
return asio_handler_invoke_is_no_longer_used();
#endif // defined(BOOST_ASIO_NO_DEPRECATED)
}
template <typename Signature, typename Executors>
class initiate_composed_op
{
public:
typedef typename composed_io_executors<Executors>::head_type executor_type;
template <typename T>
explicit initiate_composed_op(int, BOOST_ASIO_MOVE_ARG(T) executors)
: executors_(BOOST_ASIO_MOVE_CAST(T)(executors))
{
}
executor_type get_executor() const BOOST_ASIO_NOEXCEPT
{
return executors_.head_;
}
template <typename Handler, typename Impl>
void operator()(BOOST_ASIO_MOVE_ARG(Handler) handler,
BOOST_ASIO_MOVE_ARG(Impl) impl) const
{
composed_op<typename decay<Impl>::type, composed_work<Executors>,
typename decay<Handler>::type, Signature>(
BOOST_ASIO_MOVE_CAST(Impl)(impl),
composed_work<Executors>(executors_),
BOOST_ASIO_MOVE_CAST(Handler)(handler))();
}
private:
composed_io_executors<Executors> executors_;
};
template <typename Signature, typename Executors>
inline initiate_composed_op<Signature, Executors> make_initiate_composed_op(
BOOST_ASIO_MOVE_ARG(composed_io_executors<Executors>) executors)
{
return initiate_composed_op<Signature, Executors>(0,
BOOST_ASIO_MOVE_CAST(composed_io_executors<Executors>)(executors));
}
template <typename IoObject>
inline typename IoObject::executor_type
get_composed_io_executor(IoObject& io_object,
typename enable_if<
!is_executor<IoObject>::value
&& !execution::is_executor<IoObject>::value
>::type* = 0)
{
return io_object.get_executor();
}
template <typename Executor>
inline const Executor& get_composed_io_executor(const Executor& ex,
typename enable_if<
is_executor<Executor>::value
|| execution::is_executor<Executor>::value
>::type* = 0)
{
return ex;
}
} // namespace detail
#if !defined(GENERATING_DOCUMENTATION)
#if defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES)
template <typename CompletionToken, typename Signature,
typename Implementation, typename... IoObjectsOrExecutors>
BOOST_ASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, Signature)
async_compose(BOOST_ASIO_MOVE_ARG(Implementation) implementation,
BOOST_ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token,
BOOST_ASIO_MOVE_ARG(IoObjectsOrExecutors)... io_objects_or_executors)
{
return async_initiate<CompletionToken, Signature>(
detail::make_initiate_composed_op<Signature>(
detail::make_composed_io_executors(
detail::get_composed_io_executor(
BOOST_ASIO_MOVE_CAST(IoObjectsOrExecutors)(
io_objects_or_executors))...)),
token, BOOST_ASIO_MOVE_CAST(Implementation)(implementation));
}
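// Usage sketch (hypothetical caller, not part of this header; names are
// illustrative). The state machine passed to async_compose receives the
// composed_op defined above as "self"; forwarding self as the handler of an
// intermediate operation preserves the executor/allocator associations, and
// self.complete(...) invokes the user's completion handler:
//
//   struct wait_once
//   {
//     boost::asio::steady_timer& timer_;
//     bool waited_ = false;
//     template <typename Self>
//     void operator()(Self& self, boost::system::error_code ec = {})
//     {
//       if (!waited_)
//       {
//         waited_ = true;
//         timer_.async_wait(BOOST_ASIO_MOVE_CAST(Self)(self));
//       }
//       else
//         self.complete(ec);
//     }
//   };
//
//   // async_compose<CompletionToken, void(boost::system::error_code)>(
//   //     wait_once{timer}, token, timer);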
#else // defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES)
template <typename CompletionToken, typename Signature, typename Implementation>
BOOST_ASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, Signature)
async_compose(BOOST_ASIO_MOVE_ARG(Implementation) implementation,
BOOST_ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token)
{
return async_initiate<CompletionToken, Signature>(
detail::make_initiate_composed_op<Signature>(
detail::make_composed_io_executors()),
token, BOOST_ASIO_MOVE_CAST(Implementation)(implementation));
}
# define BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR(n) \
BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_##n
# define BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_1 \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T1)(x1))
# define BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_2 \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T1)(x1)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T2)(x2))
# define BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_3 \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T1)(x1)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T2)(x2)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T3)(x3))
# define BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_4 \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T1)(x1)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T2)(x2)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T3)(x3)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T4)(x4))
# define BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_5 \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T1)(x1)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T2)(x2)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T3)(x3)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T4)(x4)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T5)(x5))
# define BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_6 \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T1)(x1)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T2)(x2)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T3)(x3)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T4)(x4)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T5)(x5)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T6)(x6))
# define BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_7 \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T1)(x1)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T2)(x2)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T3)(x3)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T4)(x4)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T5)(x5)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T6)(x6)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T7)(x7))
# define BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_8 \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T1)(x1)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T2)(x2)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T3)(x3)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T4)(x4)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T5)(x5)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T6)(x6)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T7)(x7)), \
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T8)(x8))
#define BOOST_ASIO_PRIVATE_ASYNC_COMPOSE_DEF(n) \
template <typename CompletionToken, typename Signature, \
typename Implementation, BOOST_ASIO_VARIADIC_TPARAMS(n)> \
BOOST_ASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, Signature) \
async_compose(BOOST_ASIO_MOVE_ARG(Implementation) implementation, \
BOOST_ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token, \
BOOST_ASIO_VARIADIC_MOVE_PARAMS(n)) \
{ \
return async_initiate<CompletionToken, Signature>( \
detail::make_initiate_composed_op<Signature>( \
detail::make_composed_io_executors( \
BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR(n))), \
token, BOOST_ASIO_MOVE_CAST(Implementation)(implementation)); \
} \
/**/
BOOST_ASIO_VARIADIC_GENERATE(BOOST_ASIO_PRIVATE_ASYNC_COMPOSE_DEF)
#undef BOOST_ASIO_PRIVATE_ASYNC_COMPOSE_DEF
#undef BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR
#undef BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_1
#undef BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_2
#undef BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_3
#undef BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_4
#undef BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_5
#undef BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_6
#undef BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_7
#undef BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_8
#endif // defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES)
#endif // !defined(GENERATING_DOCUMENTATION)
} // namespace asio
} // namespace boost
#include <boost/asio/detail/pop_options.hpp>
#endif // BOOST_ASIO_IMPL_COMPOSE_HPP
| 32.070533 | 80 | 0.740335 | mamil |
8b2dc994901a53642a4b7d3101ba4968a5d21caa | 4,922 | cpp | C++ | GridGenerator/GridGenerator.cpp | Izay0i/SuperMarioBros3 | 46d254aa1ae4396175145d9743932c5c4fbf1763 | ["MIT"] | 1 | 2020-11-09T09:08:02.000Z | 2020-11-09T09:08:02.000Z | GridGenerator/GridGenerator.cpp | Izay0i/SuperMarioBros3 | 46d254aa1ae4396175145d9743932c5c4fbf1763 | ["MIT"] | null | null | null | GridGenerator/GridGenerator.cpp | Izay0i/SuperMarioBros3 | 46d254aa1ae4396175145d9743932c5c4fbf1763 | ["MIT"] | null | null | null |
#include <Windows.h>
#include <iostream>
#include <fstream>
#include <vector>
#include <string>
//const unsigned int SCREEN_WIDTH = 256;
//const unsigned int SCREEN_HEIGHT = 224;
const unsigned int SCREEN_WIDTH = 500;
const unsigned int SCREEN_HEIGHT = 500;
const unsigned int CELL_WIDTH = SCREEN_WIDTH;
const unsigned int CELL_HEIGHT = SCREEN_HEIGHT;
//const unsigned int CELL_WIDTH = static_cast<unsigned int>(SCREEN_WIDTH / 2);
//const unsigned int CELL_HEIGHT = static_cast<unsigned int>(SCREEN_HEIGHT / 2);
const unsigned int MAX_FILE_LINE = 5000;
bool wroteTagOnce = false;
unsigned int xCells = 0, yCells = 0;
enum class GridFileSection {
GRIDFILE_SECTION_UNKNOWN,
GRIDFILE_SECTION_SCENESIZE,
GRIDFILE_SECTION_ENTITYDATA
};
std::vector<std::string> SplitStr(std::string line, std::string delimeter = "\t") {
std::vector<std::string> tokens;
size_t last = 0, next = 0;
while ((next = line.find(delimeter, last)) != std::string::npos) {
tokens.push_back(line.substr(last, next - last));
last = next + delimeter.length(); //advance past the whole delimiter, not just one character
}
tokens.push_back(line.substr(last));
return tokens;
}
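//Example (hypothetical line): SplitStr("231\t34\t247\t50", "\t") returns
//{"231", "34", "247", "50"}; the final substr call keeps the tail after the
//last delimiter, so a trailing delimiter produces one extra empty token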
void ParseSceneSize(std::ofstream& outFile, std::string line) {
std::vector<std::string> tokens = SplitStr(line);
if (tokens.size() < 2) {
return;
}
unsigned int sceneWidth = std::stoul(tokens.at(0));
unsigned int sceneHeight = std::stoul(tokens.at(1));
//Do float division and then ceil the value to get an extra row and column offscreen
//Then cast it to unsigned int to truncate the decimals
xCells = static_cast<unsigned int>(ceil((static_cast<float>(sceneWidth) / CELL_WIDTH)));
yCells = static_cast<unsigned int>(ceil((static_cast<float>(sceneHeight) / CELL_HEIGHT)));
//Be sure to use std::flush or std::endl to flush the buffer
outFile << "[GRIDCELLS]\n";
outFile << xCells << '\t' << yCells << '\n';
outFile << "[/]\n\n";
}
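//Worked example of the cell math above (scene size is hypothetical):
//a 1250x730 scene with 500x500 cells gives xCells = ceil(1250/500.0) = 3
//and yCells = ceil(730/500.0) = 2, so partially covered edge cells are
//kept instead of being truncated away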
void ParseEntityData(std::ofstream& outFile, std::string line) {
std::vector<std::string> tokens = SplitStr(line);
if (tokens.size() < 5) {
return;
}
float posX = std::stof(tokens.at(3));
float posY = std::stof(tokens.at(4));
unsigned int objectID = std::stoul(tokens.at(0));
//Convert world space to cell space
//Clamp in a signed type first: testing "< 0" on an unsigned int is always
//false, and casting a negative float straight to unsigned is undefined
int cellPosX = static_cast<int>(posX / CELL_WIDTH);
int cellPosY = static_cast<int>(posY / CELL_HEIGHT);
if (cellPosX < 0) {
cellPosX = 0;
}
else if (cellPosX >= static_cast<int>(xCells)) {
cellPosX = static_cast<int>(xCells) - 1;
}
if (cellPosY < 0) {
cellPosY = 0;
}
else if (cellPosY >= static_cast<int>(yCells)) {
cellPosY = static_cast<int>(yCells) - 1;
}
if (!wroteTagOnce) {
wroteTagOnce = true;
outFile << "#objID" << '\t' << "Cell_X" << '\t' << "Cell_Y\n";
outFile << "[POSITIONS]\n";
}
outFile << objectID << '\t' << cellPosX << '\t' << cellPosY << '\n';
}
int main() {
std::string file = "stage_fortress.txt";
std::ofstream outputFile("grid_stage_fortress.txt");
outputFile.clear();
std::ifstream readFile;
readFile.open(file, std::ios::in);
if (!readFile.is_open()) {
std::cout << "[GRID GENERATOR] Failed to read file\n";
return -1;
}
GridFileSection gridFileSection = GridFileSection::GRIDFILE_SECTION_UNKNOWN;
char str[MAX_FILE_LINE];
while (readFile.getline(str, MAX_FILE_LINE)) {
std::string line(str);
if (line.empty() || line.front() == '#') {
continue;
}
if (line == "[/]") {
gridFileSection = GridFileSection::GRIDFILE_SECTION_UNKNOWN;
continue;
}
if (line == "[SCENESIZE]") {
gridFileSection = GridFileSection::GRIDFILE_SECTION_SCENESIZE;
continue;
}
if (line == "[ENTITYDATA]") {
gridFileSection = GridFileSection::GRIDFILE_SECTION_ENTITYDATA;
continue;
}
switch (gridFileSection) {
case GridFileSection::GRIDFILE_SECTION_SCENESIZE:
ParseSceneSize(outputFile, line);
break;
case GridFileSection::GRIDFILE_SECTION_ENTITYDATA:
ParseEntityData(outputFile, line);
break;
}
}
//Add closing tag
outputFile << "[/]" << std::flush;
readFile.close();
std::cout << xCells << '\t' << yCells << std::endl;
/*unsigned int width = 240;
unsigned int height = 32;
unsigned int offset = 16;
unsigned int starting_x = 1856;
unsigned int starting_y = 752;
char format[] = "231\t34\t247\t50\t%u\t%u\n";
char debug[100];
for (unsigned int i = 0; i < width; i += offset) {
for (unsigned int j = 0; j < height; j += offset) {
sprintf_s(debug, format, starting_x + i, starting_y + j);
OutputDebugStringA(debug);
}
}*/
return 0;
}
| 28.287356 | 94 | 0.609102 | Izay0i |
8b3114c6134d5018fee2d93708f7a6ce984dc7e6 | 2,625 | cpp | C++ | src/test/pq/notifications.cpp | skaae/taopq | a621cbba1f63c599819466f3da7ef7d352bdaf0d | ["BSL-1.0"] | 142 | 2018-12-10T10:12:50.000Z | 2022-03-26T16:01:06.000Z | src/test/pq/notifications.cpp | skaae/taopq | a621cbba1f63c599819466f3da7ef7d352bdaf0d | ["BSL-1.0"] | 45 | 2018-11-29T13:13:59.000Z | 2022-01-17T07:03:30.000Z | src/test/pq/notifications.cpp | skaae/taopq | a621cbba1f63c599819466f3da7ef7d352bdaf0d | ["BSL-1.0"] | 34 | 2018-11-29T14:03:59.000Z | 2022-03-15T13:08:13.000Z |
// Copyright (c) 2016-2021 Daniel Frey and Dr. Colin Hirsch
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt)
#include "../getenv.hpp"
#include "../macros.hpp"
#include <tao/pq/connection.hpp>
#if defined( _WIN32 )
#include <winsock.h>
#else
#include <unistd.h>
#endif
std::size_t counter = 0;
void handle_notification( const tao::pq::notification& n )
{
std::cout << "channel '" << n.channel() << "' received '" << n.payload() << "'\n";
++counter;
}
std::size_t foo_counter = 0;
void handle_foo_notification( const char* payload )
{
std::cout << "foo handler received '" << payload << "'\n";
++foo_counter;
}
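// Minimal usage sketch (mirrors run() below; the connection string is an
// assumption): the handler installed with set_notification_handler() sees
// every notification as a tao::pq::notification, while a handler registered
// with listen() for one channel receives only that channel's payload.
//
//   const auto conn = tao::pq::connection::create( "dbname=template1" );
//   conn->set_notification_handler( handle_notification );  // all channels
//   conn->listen( "FOO", handle_foo_notification );         // channel "FOO"
//   conn->notify( "FOO", "with payload" );                  // fires both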
void run()
{
// overwrite the default with an environment variable if needed
const auto connection_string = tao::pq::internal::getenv( "TAOPQ_TEST_DATABASE", "dbname=template1" );
const auto connection = tao::pq::connection::create( connection_string );
TEST_EXECUTE( connection->set_notification_handler( handle_notification ) );
TEST_EXECUTE( connection->listen( "FOO", handle_foo_notification ) );
TEST_ASSERT( counter == 0 );
TEST_ASSERT( foo_counter == 0 );
TEST_EXECUTE( connection->notify( "FOO" ) );
TEST_ASSERT( counter == 1 );
TEST_ASSERT( foo_counter == 1 );
TEST_ASSERT( connection->notification_handler( "FOO" ) );
TEST_ASSERT( !connection->notification_handler( "BAR" ) );
TEST_EXECUTE( connection->reset_notification_handler( "FOO" ) );
TEST_ASSERT( !connection->notification_handler( "FOO" ) );
TEST_EXECUTE( connection->notify( "FOO", "with payload" ) );
TEST_ASSERT( counter == 2 );
TEST_ASSERT( foo_counter == 1 );
TEST_EXECUTE( connection->unlisten( "FOO" ) );
TEST_EXECUTE( connection->notify( "FOO" ) );
TEST_EXECUTE( connection->get_notifications() );
TEST_ASSERT( counter == 2 );
TEST_ASSERT( connection->notification_handler() );
TEST_EXECUTE( connection->reset_notification_handler() );
TEST_ASSERT( !connection->notification_handler() );
#if defined( _WIN32 )
closesocket( PQsocket( connection->underlying_raw_ptr() ) );
#else
close( PQsocket( connection->underlying_raw_ptr() ) );
#endif
TEST_THROWS( connection->get_notifications() );
}
auto main() -> int // NOLINT(bugprone-exception-escape)
{
try {
run();
}
// LCOV_EXCL_START
catch( const std::exception& e ) {
std::cerr << "exception: " << e.what() << std::endl;
throw;
}
catch( ... ) {
std::cerr << "unknown exception" << std::endl;
throw;
}
// LCOV_EXCL_STOP
}
| 29.166667 | 105 | 0.670095 | skaae |
8b3142098669ae216ebb5d300178f723667b43e8 | 2,955 | hpp | C++ | src/cpu/x64/lrn/jit_avx512_common_lrn_bwd_nhwc.hpp | NomotoKazuhiro/oneDNN | 18795301d6776bc1431ec808cba7bdf83d191bf8 | [
"Apache-2.0"
] | 13 | 2020-05-29T07:39:23.000Z | 2021-11-22T14:01:28.000Z | src/cpu/x64/lrn/jit_avx512_common_lrn_bwd_nhwc.hpp | NomotoKazuhiro/oneDNN | 18795301d6776bc1431ec808cba7bdf83d191bf8 | [
"Apache-2.0"
] | 8 | 2020-09-04T02:05:19.000Z | 2021-12-24T02:18:37.000Z | src/cpu/x64/lrn/jit_avx512_common_lrn_bwd_nhwc.hpp | NomotoKazuhiro/oneDNN | 18795301d6776bc1431ec808cba7bdf83d191bf8 | [
"Apache-2.0"
] | 24 | 2020-08-07T04:21:48.000Z | 2021-12-09T02:03:35.000Z | /*******************************************************************************
* Copyright 2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#ifndef CPU_X64_LRN_JIT_AVX512_COMMON_LRN_BWD_NHWC_HPP
#define CPU_X64_LRN_JIT_AVX512_COMMON_LRN_BWD_NHWC_HPP
#include "cpu/x64/lrn/jit_avx512_common_lrn_bwd_base.hpp"
#include "cpu/x64/lrn/jit_avx512_common_lrn_utils.hpp"
namespace dnnl {
namespace impl {
namespace cpu {
namespace x64 {
namespace lrn {
using namespace dnnl::impl::status;
using namespace dnnl::impl::utils;
using namespace data_type;
using namespace Xbyak;
using namespace Xbyak::util;
template <data_type_t d_type>
class jit_avx512_common_lrn_kernel_bwd_nhwc_t
: public jit_avx512_common_lrn_kernel_bwd_t<d_type> {
public:
jit_avx512_common_lrn_kernel_bwd_nhwc_t(unsigned C, float alpha, float beta,
int local_size, void *code_ptr = nullptr,
size_t code_size = 1 * Xbyak::DEFAULT_MAX_CODE_SIZE);
DECLARE_CPU_JIT_AUX_FUNCTIONS(jit_avx512_common_lrn_kernel_bwd_nhwc_t)
private:
void set_up_ker_params();
void execute_compute_loop(unsigned num_full_16c_blocks, unsigned C_tail);
void compute_loop(across_version version, tail_mode tail_proc,
unsigned C_tail = 0, int loop_size_param = 1);
void compute(int loop_size_param, tail_mode tail_proc);
void increment_loop_params(std::size_t offset);
void load_compute_data(
across_version version, tail_mode tail_proc, int loop_size_param);
void store_compute_data(
int loop_size_param, tail_mode tail_m, unsigned C_tail);
void reserve_stack_space(std::size_t space);
void unreserve_stack_space(std::size_t space);
void load_data_to_stack(
unsigned C_tail, across_version version, tail_mode tail_proc);
int get_stack_offset(const Reg64 reg, tail_mode tail_proc);
const std::vector<int> tmp_mask_prev_;
const std::vector<int> tmp_mask_next_;
static constexpr int zmm_size_ = 64;
static constexpr int tmp_load_to_stack_idx_prev_ = 12;
static constexpr int tmp_load_to_stack_idx_tail_ = 13;
static constexpr int tmp_store_from_stack_idx_tail_ = 14;
const Reg64 mask_ = r11;
const Reg64 blockC_ = r12;
const int half_ls_;
};
} // namespace lrn
} // namespace x64
} // namespace cpu
} // namespace impl
} // namespace dnnl
#endif
| 35.60241 | 80 | 0.720474 | NomotoKazuhiro |
8b3287ce970f6aea0e3ce65b9b5531055ed91c25 | 875 | cpp | C++ | Problems/rotateImage.cpp | ShivamDureja/Data-Structures-And-Algorithms | 49c8fe18cbb57c54e8af900ca166604f967e7285 | ["Unlicense"] | 1 | 2021-11-24T05:25:42.000Z | 2021-11-24T05:25:42.000Z | Problems/rotateImage.cpp | ShivamDureja/Data-Structures-And-Algorithms | 49c8fe18cbb57c54e8af900ca166604f967e7285 | ["Unlicense"] | null | null | null | Problems/rotateImage.cpp | ShivamDureja/Data-Structures-And-Algorithms | 49c8fe18cbb57c54e8af900ca166604f967e7285 | ["Unlicense"] | null | null | null |
#include <iostream>
#include <algorithm>
void rotate_image(int a[][4], int n)
{
for (int i = 0; i < n; i++)
{
std::reverse(a[i], a[i] + n);
}
//take transpose
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
{
if(i<j)
std::swap(a[i][j], a[j][i]);
}
}
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
{
std::cout << a[i][j] << " ";
}
std::cout << std::endl;
}
}
int main()
{
int a[4][4] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 4; j++)
{
std::cout << a[i][j] << " ";
}
std::cout << std::endl;
}
std::cout << "Rotated matrix-" << std::endl;
rotate_image(a, 4);
return 0;
}
| 19.886364 | 74 | 0.354286 | ShivamDureja |
8b347c3494d34d4d749fdbc9eb8960a8cd5a10da | 31,004 | cpp | C++ | lib/src/AMRTools/QuadCFInterp.cpp | rmrsk/Chombo-3.3 | f2119e396460c1bb19638effd55eb71c2b35119e | ["BSD-3-Clause-LBNL"] | 10 | 2018-02-01T20:57:36.000Z | 2022-03-17T02:57:49.000Z | lib/src/AMRTools/QuadCFInterp.cpp | rmrsk/Chombo-3.3 | f2119e396460c1bb19638effd55eb71c2b35119e | ["BSD-3-Clause-LBNL"] | 19 | 2018-10-04T21:37:18.000Z | 2022-02-25T16:20:11.000Z | lib/src/AMRTools/QuadCFInterp.cpp | rmrsk/Chombo-3.3 | f2119e396460c1bb19638effd55eb71c2b35119e | ["BSD-3-Clause-LBNL"] | 11 | 2019-01-12T23:33:32.000Z | 2021-08-09T15:19:50.000Z |
#ifdef CH_LANG_CC
/*
* _______ __
* / ___/ / ___ __ _ / / ___
* / /__/ _ \/ _ \/ V \/ _ \/ _ \
* \___/_//_/\___/_/_/_/_.__/\___/
* Please refer to Copyright.txt, in Chombo's root directory.
*/
#endif
#include "QuadCFInterp.H"
#include "BoxIterator.H"
#include "QuadCFInterpF_F.H"
#include "LayoutIterator.H"
#include "DataIterator.H"
#include "CH_Timer.H"
#include "CFIVS.H"
#include "TensorCFInterp.H"
#include "NamespaceHeader.H"
using std::endl;
/***********************/
// default constructor
/***********************/
bool QuadCFInterp::newCFInterMode = true;
/**/
void
QuadCFInterp::
interpPhiOnIVS(LevelData<FArrayBox>& a_phif,
const FArrayBox& a_phistar,
const DataIndex& a_datInd,
const int a_idir,
const Side::LoHiSide a_hiorlo,
const IntVectSet& a_interpIVS,
Real a_dxLevel,
Real a_dxCrse,
int a_ncomp)
{
IVSIterator fine_ivsit(a_interpIVS);
FArrayBox& a_phi = a_phif[a_datInd];
Real x1 = a_dxLevel;
Real x2 = 0.5*(3.*a_dxLevel+a_dxCrse);
Real denom = 1.0-((x1+x2)/x1);
Real idenom = 1.0/(denom); // divide is more expensive usually
Real x = 2.*a_dxLevel;
Real xsquared = x*x;
Real m1 = 1/(x1*x1);
Real m2 = 1/(x1*(x1-x2));
Real q1 = 1/(x1-x2);
Real q2 = x1+x2;
int ihilo = sign(a_hiorlo);
IntVect ai = -2*ihilo*BASISV(a_idir);
IntVect bi = - ihilo*BASISV(a_idir);
IntVect ci = ihilo*BASISV(a_idir);
for (fine_ivsit.begin(); fine_ivsit.ok(); ++fine_ivsit)
{
IntVect ivf = fine_ivsit();
// quadratic interpolation
for (int ivar = 0; ivar < a_ncomp; ivar++)
{
Real pa = a_phi(ivf + ai, ivar);
Real pb = a_phi(ivf + bi, ivar);
Real pc = a_phistar(ivf + ci, ivar);
//phi = ax**2 + bx + c, x = 0 at pa
Real a = (pb-pa)*m1 - (pb-pc)*m2;
a *= idenom;
Real b = (pb-pc)*q1 - a*q2;
Real c = pa;
a_phi(ivf,ivar) = a*xsquared + b*x + c;
} //end loop over components
} //end loop over fine intvects
}
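/*
Sketch of the quadratic fit used above, in the code's own notation: with
x = 0 at pa, the samples impose phi(0) = pa, phi(x1) = pb, phi(x2) = pc on
phi(x) = a*x^2 + b*x + c.  Hence c = pa; subtracting the pb and pc equations
eliminates c, and solving the remaining 2x2 system gives the a and b
expressions built from m1, m2, q1, q2.  The ghost value is the fit evaluated
at x = 2*dxLevel, two fine cells from pa.
*/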
/**/
void
QuadCFInterp::
homogeneousCFInterpPhi(LevelData<FArrayBox>& a_phif,
const DataIndex& a_datInd,
int a_idir,
Side::LoHiSide a_hiorlo,
LayoutData<CFIVS> a_loCFIVS[SpaceDim],
LayoutData<CFIVS> a_hiCFIVS[SpaceDim],
Real a_dxLevel,
Real a_dxCrse,
int a_ncomp)
{
const CFIVS* cfivs_ptr = NULL;
if (a_hiorlo == Side::Lo)
cfivs_ptr = &a_loCFIVS[a_idir][a_datInd];
else
cfivs_ptr = &a_hiCFIVS[a_idir][a_datInd];
const IntVectSet& interp_ivs = cfivs_ptr->getFineIVS();
if (!interp_ivs.isEmpty())
{
int ihilo = sign(a_hiorlo);
Box phistarbox = interp_ivs.minBox();
phistarbox.shift(a_idir, ihilo);
FArrayBox phistar(phistarbox, a_ncomp);
//hence the homogeneous...
phistar.setVal(0.);
//given phistar, interpolate on fine ivs to fill ghost cells for phi
interpPhiOnIVS(a_phif, phistar, a_datInd, a_idir, a_hiorlo,
interp_ivs, a_dxLevel, a_dxCrse, a_ncomp);
}
}
/**/
void
QuadCFInterp::
homogeneousCFInterpTanGrad(LevelData<FArrayBox>& a_tanGrad,
const LevelData<FArrayBox>& a_phi,
const DataIndex& a_DatInd,
int a_idir,
Side::LoHiSide a_hiorlo,
Real a_dxLevel,
Real a_dxCrse,
int a_ncomp,
LayoutData<TensorFineStencilSet> a_loTanStencilSets[SpaceDim],
LayoutData<TensorFineStencilSet> a_hiTanStencilSets[SpaceDim])
{
const TensorFineStencilSet* cfstencil_ptr = NULL;
if (a_hiorlo == Side::Lo)
cfstencil_ptr = &a_loTanStencilSets[a_idir][a_DatInd];
else
cfstencil_ptr = &a_hiTanStencilSets[a_idir][a_DatInd];
Real x1 = a_dxLevel;
Real x2 = 0.5*(3.*a_dxLevel+a_dxCrse);
Real denom = 1.0-((x1+x2)/x1);
Real idenom = 1.0/(denom); // divide is more expensive usually
Real x = 2.*a_dxLevel;
Real xsquared = x*x;
Real m1 = 1/(x1*x1);
Real m2 = 1/(x1*(x1-x2));
Real q1 = 1/(x1-x2);
Real q2 = x1+x2;
const FArrayBox& phi = a_phi[a_DatInd];
FArrayBox& tanGrad = a_tanGrad[a_DatInd];
// loop over gradient directions
for (int gradDir = 0; gradDir<SpaceDim; gradDir++)
{
if (gradDir != a_idir)
{
// first do centered stencil
const IntVectSet& centeredIVS =
cfstencil_ptr->getCenteredStencilSet(gradDir);
int ihilo = sign(a_hiorlo);
if (!centeredIVS.isEmpty())
{
// do centered computation
IVSIterator cntrd_ivs(centeredIVS);
// want to average fine-grid gradient with coarse
// grid gradient, which is 0 (which is where the
// extra factor of one-half comes from)
Real gradMult = (0.5/a_dxLevel);
for (cntrd_ivs.begin(); cntrd_ivs.ok(); ++cntrd_ivs)
{
IntVect ivf = cntrd_ivs();
IntVect finePhiLoc = ivf - ihilo*BASISV(a_idir);
IntVect finePhiLoc2 = finePhiLoc - ihilo*BASISV(a_idir);
// loop over variables
for (int ivar = 0; ivar<a_phi.nComp(); ivar++)
{
Real fineHi = phi(finePhiLoc2+BASISV(gradDir),ivar);
Real fineLo = phi(finePhiLoc2-BASISV(gradDir),ivar);
Real fineGrada = gradMult*(fineHi-fineLo);
fineHi = phi(finePhiLoc+BASISV(gradDir),ivar);
fineLo = phi(finePhiLoc-BASISV(gradDir),ivar);
Real fineGradb = gradMult*(fineHi-fineLo);
// homogeneous interp implies that gradc is 0
Real gradc = 0;
int gradComp = TensorCFInterp::gradIndex(ivar,gradDir);
Real a = (fineGradb-fineGrada)*m1 - (fineGradb-gradc)*m2;
a *= idenom;
Real b = (fineGradb-gradc)*q1 - a*q2;
Real c = fineGrada;
tanGrad(ivf,gradComp) = a*xsquared + b*x + c;
}
} // end loop over centered difference cells
} // end if there are centered cells
// now do forward-difference cells
const IntVectSet& forwardIVS =
cfstencil_ptr->getForwardStencilSet(gradDir);
if (!forwardIVS.isEmpty())
{
// do forward-difference computations
IVSIterator fwd_ivs(forwardIVS);
// set up multipliers for gradient; since we want to average
// fine-grid gradient with coarse-grid gradient (which is 0),
// include an extra factor of one-half here.
Real mult0 = -1.5/a_dxLevel;
Real mult1 = 2.0/a_dxLevel;
Real mult2 = -0.5/a_dxLevel;
for (fwd_ivs.begin(); fwd_ivs.ok(); ++fwd_ivs)
{
IntVect ivf = fwd_ivs();
IntVect finePhiLoc = ivf - ihilo*BASISV(a_idir);
IntVect finePhiLoc2 = finePhiLoc - ihilo*BASISV(a_idir);
//now loop overvariables
for (int var= 0; var<a_phi.nComp(); var++)
{
Real fine0 = phi(finePhiLoc2,var);
Real fine1 = phi(finePhiLoc2+BASISV(gradDir),var);
Real fine2 = phi(finePhiLoc2+2*BASISV(gradDir),var);
Real fineGrada = mult0*fine0 +mult1*fine1 +mult2*fine2;
fine0 = phi(finePhiLoc,var);
fine1 = phi(finePhiLoc+BASISV(gradDir),var);
fine2 = phi(finePhiLoc+2*BASISV(gradDir),var);
Real fineGradb = mult0*fine0 +mult1*fine1 +mult2*fine2;
Real gradc = 0.0;
int gradComp = TensorCFInterp::gradIndex(var,gradDir);
// now compute gradient
Real a = (fineGradb-fineGrada)*m1 - (fineGradb-gradc)*m2;
a *= idenom;
Real b = (fineGradb-gradc)*q1 - a*q2;
Real c = fineGrada;
tanGrad(ivf,gradComp) = a*xsquared + b*x + c;
} // end loop over variables
} // end loop over forward-difference locations
} // end if there are forward-difference cells
// now do backward-difference cells
const IntVectSet& backwardIVS =
cfstencil_ptr->getBackwardStencilSet(gradDir);
if (!backwardIVS.isEmpty())
{
IVSIterator back_ivs(backwardIVS);
// set up multipliers for gradient -- since we want to average
// fine-grid gradient with coarse-grid gradient (which is 0),
// include an extra factor of one-half here.
Real mult0 = -1.5/a_dxLevel;
Real mult1 = 2.0/a_dxLevel;
Real mult2 = -0.5/a_dxLevel;
for (back_ivs.begin(); back_ivs.ok(); ++back_ivs)
{
IntVect ivf = back_ivs();
IntVect finePhiLoc = ivf - ihilo*BASISV(a_idir);
IntVect finePhiLoc2 = finePhiLoc - ihilo*BASISV(a_idir);
// now loop over variables
for (int var=0; var<a_phi.nComp(); var++)
{
Real fine0 = phi(finePhiLoc2,var);
Real fine1 = phi(finePhiLoc2-BASISV(gradDir),var);
Real fine2 = phi(finePhiLoc2-2*BASISV(gradDir),var);
Real fineGrada = mult0*fine0 +mult1*fine1 +mult2*fine2;
fine0 = phi(finePhiLoc,var);
fine1 = phi(finePhiLoc-BASISV(gradDir),var);
fine2 = phi(finePhiLoc-2*BASISV(gradDir),var);
Real fineGradb = mult0*fine0 +mult1*fine1 +mult2*fine2;
Real gradc = 0.0;
int gradComp = TensorCFInterp::gradIndex(var,gradDir);
Real a = (fineGradb-fineGrada)*m1 - (fineGradb-gradc)*m2;
a *= idenom;
Real b = (fineGradb-gradc)*q1 - a*q2;
Real c = fineGrada;
tanGrad(ivf,gradComp) = a*xsquared + b*x + c;
} // end loop over variables
} // end loop over backward-difference cells
} // end if there are backward-difference cells
} // end if gradDir is a tangential direction
} // end loop over gradient directions
}
/***********************/
// does homogeneous coarse/fine interpolation
/***********************/
void
QuadCFInterp::
homogeneousCFInterp(LevelData<FArrayBox>& a_phif,
LevelData<FArrayBox>& a_tanGrad,
LayoutData<CFIVS> a_loCFIVS[SpaceDim],
LayoutData<CFIVS> a_hiCFIVS[SpaceDim],
Real a_dxLevel,
Real a_dxCrse,
int a_ncomp,
LayoutData<TensorFineStencilSet> a_loTanStencilSets[SpaceDim],
LayoutData<TensorFineStencilSet> a_hiTanStencilSets[SpaceDim])
{
// need to do this to be sure that tangential derivatives are computed
// correctly
a_phif.exchange(a_phif.interval());
DataIterator dit = a_phif.dataIterator();
for (dit.begin(); dit.ok(); ++dit)
{
const DataIndex& datInd = dit();
// first fill in cells for phi
for (int idir = 0; idir < SpaceDim; idir++)
{
SideIterator sit;
for (sit.begin(); sit.ok(); sit.next())
{
homogeneousCFInterpPhi(a_phif,datInd,idir,sit(),
a_loCFIVS, a_hiCFIVS,
a_dxLevel, a_dxCrse, a_ncomp);
}
}
// now fill in tangential gradient cells
for (int idir = 0; idir<SpaceDim; idir++)
{
SideIterator sit;
for (sit.begin(); sit.ok(); sit.next())
{
homogeneousCFInterpTanGrad(a_tanGrad, a_phif,
datInd,idir,sit(),
a_dxLevel, a_dxCrse, a_ncomp,
a_loTanStencilSets, a_hiTanStencilSets);
}
}
}
}
void
QuadCFInterp::clear()
{
m_isDefined = false;
m_level = -1;
m_dxFine = -1;
}
QuadCFInterp::QuadCFInterp()
{
clear();
}
/***********************/
/***********************/
QuadCFInterp::QuadCFInterp(
const DisjointBoxLayout& a_fineBoxes,
const DisjointBoxLayout* a_coarBoxes,
Real a_dxFine,
int a_refRatio, int a_nComp,
const Box& a_domf)
{
ProblemDomain fineProbDomain(a_domf);
define(a_fineBoxes,a_coarBoxes, a_dxFine,a_refRatio,a_nComp, fineProbDomain);
}
/***********************/
/***********************/
QuadCFInterp::QuadCFInterp(
const DisjointBoxLayout& a_fineBoxes,
const DisjointBoxLayout* a_coarBoxes,
Real a_dxFine,
int a_refRatio, int a_nComp,
const ProblemDomain& a_domf)
{
define(a_fineBoxes,a_coarBoxes, a_dxFine,a_refRatio,a_nComp, a_domf);
}
/***********************/
/***********************/
void
QuadCFInterp::define(
const DisjointBoxLayout& a_fineBoxes,
const DisjointBoxLayout* a_coarBoxesPtr,
Real a_dxLevel,
int a_refRatio, int a_nComp,
const ProblemDomain& a_domf)
{
CH_TIME("QuadCFInterp::define");
clear();
m_isDefined = true;
CH_assert(a_nComp > 0);
CH_assert (!a_domf.isEmpty());
// consistency check
CH_assert (a_fineBoxes.checkPeriodic(a_domf));
m_domainFine = a_domf;
m_dxFine = a_dxLevel;
m_refRatio = a_refRatio;
m_nComp = a_nComp;
m_inputFineLayout = a_fineBoxes;
bool fineCoversCoarse = false;
if (a_coarBoxesPtr != NULL)
{
int factor = D_TERM6(a_refRatio, *a_refRatio, *a_refRatio,
*a_refRatio, *a_refRatio, *a_refRatio);
long long numPts = a_fineBoxes.numCells()/factor;
numPts -= a_coarBoxesPtr->numCells();
if (numPts == 0) fineCoversCoarse = true;
}
m_fineCoversCoarse = fineCoversCoarse;
if (a_coarBoxesPtr == NULL || fineCoversCoarse)
m_level = 0;
else
m_level = 1;
if (m_level > 0)
{
// (DFM) only check for valid refRatio if a coarser level exists
CH_assert(a_refRatio >= 1);
const DisjointBoxLayout& coarBoxes = *a_coarBoxesPtr;
m_inputCoarLayout = coarBoxes;
CH_assert (coarBoxes.checkPeriodic(coarsen(a_domf,a_refRatio)));
for (int idir = 0; idir < SpaceDim; idir++)
{
m_loQCFS[idir].define(a_fineBoxes);
m_hiQCFS[idir].define(a_fineBoxes);
}
//locoarboxes and hicoarboxes are now open
//and have same processor mapping as a_fineboxes
//make boxes for coarse buffers
m_coarBoxes.deepCopy(a_fineBoxes);
m_coarBoxes.coarsen(m_refRatio);
m_coarBoxes.grow(2);
m_coarBoxes.close();
m_coarBuffer.define(m_coarBoxes, m_nComp);
m_copier.define(coarBoxes, m_coarBoxes);
if (!newCFInterMode) //old n^2 algorithm (bvs)
{
//make cfstencils and boxes for coarse buffers
DataIterator dit = a_fineBoxes.dataIterator();
for (dit.begin(); dit.ok(); ++dit)
{
const Box& fineBox = a_fineBoxes[dit()];
for (int idir = 0; idir < SpaceDim; idir++)
{
//low side cfstencil
m_loQCFS[idir][dit()].define(a_domf,
fineBox,
a_fineBoxes,
coarBoxes,
a_refRatio,
idir,
Side::Lo);
//high side cfstencil
m_hiQCFS[idir][dit()].define(a_domf,
fineBox,
a_fineBoxes,
coarBoxes,
a_refRatio,
idir,
Side::Hi);
}
}
} else
{
//new "moving window" version of CF stencil building
Vector<Box> periodicFine;
CFStencil::buildPeriodicVector(periodicFine, a_domf, a_fineBoxes);
Vector<Box> coarsenedFine(periodicFine);
for (int i=0; i<coarsenedFine.size(); ++i)
{
coarsenedFine[i].coarsen(a_refRatio);
}
DataIterator dit = a_fineBoxes.dataIterator();
for (dit.begin(); dit.ok(); ++dit)
{
const Box& fineBox = a_fineBoxes[dit()];
for (int idir = 0; idir < SpaceDim; idir++)
{
//low side cfstencil
m_loQCFS[idir][dit()].define(a_domf,
fineBox,
periodicFine,
coarsenedFine,
coarBoxes,
a_refRatio,
idir,
Side::Lo);
//high side cfstencil
m_hiQCFS[idir][dit()].define(a_domf,
fineBox,
periodicFine,
coarsenedFine,
coarBoxes,
a_refRatio,
idir,
Side::Hi);
}
}
}
}
}
/***********************/
// apply coarse-fine boundary conditions -- assume that phi grids
// are grown by one
/***********************/
void
QuadCFInterp::coarseFineInterp(BaseFab<Real> & a_phif,
const BaseFab<Real> & a_phic,
const QuadCFStencil& a_qcfs,
const Side::LoHiSide a_hiorlo,
const int a_idir,
const Interval& a_variables) const
{
CH_TIME("QuadCFInterp::coarseFineInterp(BaseFab<Real> & a_phif,...)");
CH_assert(isDefined());
//nothing happens if m_level == 0
if (m_level > 0)
{
if (!a_qcfs.isEmpty())
{
BaseFab<Real> phistar;
//first find extended value phistar
//includes finding slopes of coarse solution bar
getPhiStar(phistar, a_phic, a_qcfs, a_hiorlo, a_idir, a_variables);
//given phistar, interpolate on fine ivs
interpOnIVS(a_phif, phistar, a_qcfs, a_hiorlo, a_idir, a_variables);
}
}
}
/***********************/
//get extended phi (lives next to interpivs)
/***********************/
void
QuadCFInterp::getPhiStar(BaseFab<Real> & a_phistar,
const BaseFab<Real> & a_phic,
const QuadCFStencil& a_qcfs,
const Side::LoHiSide a_hiorlo,
const int a_idir,
const Interval& a_variables) const
{
CH_TIMERS("QuadCFInterp::getPhiStar");
//CH_TIMER("QuadCFInterp::computeFirstDerivative", t1st);
//CH_TIMER("QuadCFInterp::computesecondDerivative", t2nd);
//CH_TIMER("QuadCFInterp::computemixedDerivative", tmixed);
CH_TIMER("QuadCFInterp::slopes", tslopes);
CH_TIMER("QuadCFInterp::notPacked", tnp);
CH_TIMER("QuadCFInterp::preamble", tpreamble);
CH_assert(isDefined());
CH_assert(a_qcfs.isDefined());
#if (CH_SPACEDIM > 1)
Real dxf = m_dxFine;
Real dxc = m_refRatio*dxf;
#endif
// if we think of a_idir as the "me" direction, then
// the other directions can be "you1" and "you2"
#if (CH_SPACEDIM == 3)
int you1, you2;
if (a_idir == 0)
{
you1 = 1;
you2 = 2;
}
else if (a_idir == 1)
{
you1 = 0;
you2 = 2;
}
else
{
you1 = 0;
you2 = 1;
}
#else // (CH_SPACEDIM == 2)
int you1;
if (a_idir == 0)
{
you1 = 1;
}
else
{
you1 = 0;
}
#endif
//if cfsten is empty, nothing to interpolate.
if (!a_qcfs.isEmpty())
{
CH_START(tpreamble);
CH_assert(m_level > 0);
const IntVectSet& interp_ivs = a_qcfs.getFineIVS();
const IntVectSet& coarsl_ivs = a_qcfs.getCoarIVS();
if (!coarsl_ivs.isDense())
{
MayDay::Error("What the hell?? TreeIntVectSet ???");
}
if (!interp_ivs.isDense())
{
MayDay::Error("What the hell?? TreeIntVectSet ???");
}
Box coarinterpbox = coarsl_ivs.minBox();
int ncomp = a_phic.nComp();
CH_assert(ncomp == m_nComp);
CH_assert(a_phic.box().contains((coarinterpbox)));
// allocate phistar here
int ihilo = sign(a_hiorlo);
Box phistarbox = interp_ivs.minBox();
phistarbox.shift(a_idir, ihilo);
a_phistar.define(phistarbox, ncomp);
CH_STOP(tpreamble);
for (int ivar = a_variables.begin(); ivar <= a_variables.end(); ivar++)
{
CH_START(tslopes);
//phi = phino + slope*x + half*x*x*curvature
BaseFab<Real> coarslope(coarinterpbox, SpaceDim);
BaseFab<Real> coarcurva(coarinterpbox, SpaceDim);
#if (CH_SPACEDIM == 3)
BaseFab<Real> coarmixed(coarinterpbox, 1);
#endif
// coarslope.setVal(0.);
//first find extended value phistar. get slopes of coarse solution
IVSIterator coar_ivsit(coarsl_ivs);
for (coar_ivsit.begin(); coar_ivsit.ok(); ++coar_ivsit)
{
// this isn't relevant for 1D
#if (CH_SPACEDIM > 1)
const IntVect& coariv = coar_ivsit();
// coarslope(coariv, a_idir) = 0.0;
// coarcurva(coariv, a_idir) = 0.0;
coarslope(coariv, you1) =
a_qcfs.computeFirstDerivative (a_phic, you1, ivar, coariv, dxc);
coarcurva(coariv, you1) =
a_qcfs.computeSecondDerivative(a_phic, you1, ivar, coariv, dxc);
#endif
#if (CH_SPACEDIM == 3)
coarslope(coariv, you2) =
a_qcfs.computeFirstDerivative (a_phic, you2, ivar, coariv, dxc);
coarcurva(coariv, you2) =
a_qcfs.computeSecondDerivative(a_phic, you2, ivar, coariv, dxc);
coarmixed(coariv) =
a_qcfs.computeMixedDerivative(a_phic, ivar, coariv, dxc);
#endif
} //end loop over coarse intvects
CH_STOP(tslopes);
if (a_qcfs.finePacked() && CH_SPACEDIM==3)
{
const IntVect& iv = phistarbox.smallEnd();
IntVect civ(iv);
civ.coarsen(m_refRatio);
Box region = a_qcfs.packedBox();
#if (CH_SPACEDIM == 3)
FORT_PHISTAR(CHF_FRA_SHIFT(a_phistar, iv),
CHF_BOX_SHIFT(region, iv),
CHF_CONST_FRA_SHIFT(a_phic, civ),
CHF_FRA_SHIFT(coarslope, civ),
CHF_FRA_SHIFT(coarcurva, civ),
CHF_FRA_SHIFT(coarmixed, civ),
CHF_CONST_REAL(dxf),
CHF_CONST_INT(ivar),
CHF_CONST_INT(a_idir),
CHF_CONST_INT(ihilo),
CHF_CONST_INT(m_refRatio));
#endif
}
else
{
CH_START(tnp);
IntVect ivf, ivc, ivstar;
// ifdef is here to prevent unused variable wasrnings in 1D
#if (CH_SPACEDIM > 1)
int jf, jc;
Real xf, xc, x1;
#endif
Real pc, update1=0, update2=0, update3=0;
IVSIterator fine_ivsit(interp_ivs);
for (fine_ivsit.begin(); fine_ivsit.ok(); ++fine_ivsit)
{
ivf = fine_ivsit();
ivc = coarsen(ivf, m_refRatio);
ivstar = ivf;
ivstar.shift(a_idir, ihilo);
pc = a_phic(ivc,ivar);
// for 1D, none of this is necessary -- just copy
// coarse value into phiStar
#if (CH_SPACEDIM > 1)
jf = ivf[you1];
jc = ivc[you1];
xf = (jf+0.5)*dxf;
xc = (jc+0.5)*dxc;
x1 = xf-xc;
update1= x1*coarslope(ivc, you1) + 0.5*x1*x1*coarcurva(ivc, you1);
#endif
#if (CH_SPACEDIM==3)
Real x2;
jf = ivf[you2];
jc = ivc[you2];
xf = (jf+0.5)*dxf;
xc = (jc+0.5)*dxc;
x2 = xf-xc;
update2 = x2*coarslope(ivc, you2) + 0.5*x2*x2*coarcurva(ivc, you2);
//add in mixed derivative component
update3 = x1*x2*coarmixed(ivc);
#endif
a_phistar(ivstar, ivar) = pc+update1+update2+update3;
} //end loop over fine intvects
CH_STOP(tnp);
} // end if for not packed optimization
}//end loop over variables
} //end if (level>0 && !hocfs.isempty())
} //end function getphistar
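/*
Summary of the extension above: phistar is the coarse solution expanded to
second order in the directions tangential to a_idir,
  phistar = phic + x1*slope1 + 0.5*x1^2*curva1 (+ the analogous you2 terms
  and the x1*x2 mixed term in 3D),
where each x is the tangential offset xf - xc between the fine and coarse
cell centers.
*/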
/***********************/
/***********************/
bool
QuadCFInterp::isDefined() const
{
return m_isDefined;
}
void
QuadCFInterp::interpOnIVS(BaseFab<Real> & a_phif,
const BaseFab<Real> & a_phistar,
const QuadCFStencil& a_qcfs,
const Side::LoHiSide a_hiorlo,
const int a_idir,
const Interval& a_variables) const
{
CH_TIME("QuadCFInterp::interpOnIVS");
CH_assert(isDefined());
CH_assert(a_qcfs.isDefined());
//if cfsten is empty, nothing to interpolate.
if (!a_qcfs.isEmpty())
{
//if there IS something to interpolate, the level ident
//had better be greater than zero. Otherwise a null
//was sent in as coarse grids on construction
CH_assert(m_level > 0);
const IntVectSet& interp_ivs = a_qcfs.getFineIVS();
int ihilo = sign(a_hiorlo);
int nref = m_refRatio;
if (!a_qcfs.finePacked())
{
IVSIterator fine_ivsit(interp_ivs);
CH_assert(a_phistar.nComp() == a_phif.nComp());
CH_assert(a_phistar.nComp() == m_nComp);
for (fine_ivsit.begin(); fine_ivsit.ok(); ++fine_ivsit)
{
IntVect ivf = fine_ivsit();
// quadratic interpolation
for (int ivar = a_variables.begin(); ivar <= a_variables.end(); ivar++)
{
Real pa = a_phif (ivf -2*ihilo*BASISV(a_idir), ivar);
Real pb = a_phif (ivf - ihilo*BASISV(a_idir), ivar);
Real ps = a_phistar(ivf + ihilo*BASISV(a_idir), ivar);
//phi = ax**2 + bx + c, x = 0 at pa
Real h = m_dxFine;
Real a = (2./h/h)*(2.*ps + pa*(nref+1.0) -pb*(nref+3.0))/
(nref*nref + 4*nref + 3.0);
Real b = (pb-pa)/h - a*h;
Real c = pa;
Real x = 2.*h;
a_phif (ivf,ivar) = a*x*x + b*x + c;
} //end loop over components
} //end loop over fine intvects
} else
{ // data is packed, just call Fortran
int b=a_variables.begin();
int e=a_variables.end();
FORT_QUADINTERP(CHF_FRA(a_phif),
CHF_CONST_FRA(a_phistar),
CHF_BOX(a_qcfs.packedBox()),
CHF_CONST_INT(ihilo),
CHF_CONST_REAL(m_dxFine),
CHF_CONST_INT(a_idir),
CHF_CONST_INT(b),
CHF_CONST_INT(e),
CHF_CONST_INT(nref));
}
} //end if (level>0 && !oscfs.isempty())
} //end function interponivs
/***********************/
// apply coarse-fine boundary conditions -- assume that phi grids
// are grown by one
/***********************/
void
QuadCFInterp::coarseFineInterp(LevelData<FArrayBox>& a_phif,
const LevelData<FArrayBox>& a_phic)
{
CH_TIME("QuadCFInterp::coarseFineInterp");
CH_assert(isDefined());
Interval variables = a_phic.interval();
if (m_level > 0)
{
CH_assert(a_phif.nComp() == m_nComp);
CH_assert(a_phic.nComp() == m_nComp);
CH_assert(a_phif.ghostVect() >= IntVect::Unit);
CH_assert(a_phic.boxLayout() == m_inputCoarLayout);
CH_assert(a_phif.boxLayout() == m_inputFineLayout);
a_phic.copyTo(a_phic.interval(), m_coarBuffer, m_coarBuffer.interval(),
m_copier);
for (int idir = 0; idir < SpaceDim; idir++)
{
DataIterator ditFine = a_phif.dataIterator();
for (ditFine.begin(); ditFine.ok(); ++ditFine)
{
DataIndex datIndGlo =ditFine();
BaseFab<Real> & phif = a_phif[datIndGlo];
const BaseFab<Real> & phiC = m_coarBuffer[datIndGlo];
//lo side cfinterp
//recall that buffers have fine processor mapping
{
const QuadCFStencil& loQCFS = m_loQCFS[idir][datIndGlo];
coarseFineInterp(phif, phiC ,loQCFS, Side::Lo, idir, variables);
}
//hi side cfinterp
{
const QuadCFStencil& hiQCFS = m_hiQCFS[idir][datIndGlo];
coarseFineInterp(phif, phiC, hiQCFS, Side::Hi, idir, variables);
}
}//end iteration over boxes in fine grid
} //end iteration over directions
}
}
/***********************/
/***********************/
QuadCFInterp::~QuadCFInterp()
{
clear();
}
#include "NamespaceFooter.H"
| 34.564103 | 89 | 0.505967 | rmrsk |
8b3656201ca82e44df019eb542cb94d58c49daca | 2,195 | hpp | C++ | include/memoria/context/fixedsize_stack.hpp | victor-smirnov/memoria | c36a957c63532176b042b411b1646c536e71a658 | ["BSL-1.0", "Apache-2.0", "OLDAP-2.8", "BSD-3-Clause"] | 2 | 2021-07-30T16:54:24.000Z | 2021-09-08T15:48:17.000Z | include/memoria/context/fixedsize_stack.hpp | victor-smirnov/memoria | c36a957c63532176b042b411b1646c536e71a658 | ["BSL-1.0", "Apache-2.0", "OLDAP-2.8", "BSD-3-Clause"] | null | null | null | include/memoria/context/fixedsize_stack.hpp | victor-smirnov/memoria | c36a957c63532176b042b411b1646c536e71a658 | ["BSL-1.0", "Apache-2.0", "OLDAP-2.8", "BSD-3-Clause"] | 2 | 2020-03-14T15:15:25.000Z | 2020-06-15T11:26:56.000Z |
// Copyright Oliver Kowalke 2014.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef MEMORIA_CONTEXT_FIXEDSIZE_H
#define MEMORIA_CONTEXT_FIXEDSIZE_H
#include <cstddef>
#include <cstdlib>
#include <new>
#include <boost/assert.hpp>
#include <boost/config.hpp>
#include <memoria/context/detail/config.hpp>
#include <memoria/context/stack_context.hpp>
#include <memoria/context/stack_traits.hpp>
#if defined(MEMORIA_CONTEXT_USE_MAP_STACK)
extern "C" {
#include <sys/mman.h>
}
#endif
#if defined(BOOST_USE_VALGRIND)
#include <valgrind/valgrind.h>
#endif
namespace memoria {
namespace context {
template< typename traitsT >
class basic_fixedsize_stack {
private:
std::size_t size_;
public:
typedef traitsT traits_type;
basic_fixedsize_stack( std::size_t size = traits_type::default_size() ) noexcept :
size_( size) {
}
stack_context allocate() {
#if defined(MEMORIA_CONTEXT_USE_MAP_STACK)
void * vp = ::mmap( 0, size_, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_STACK, -1, 0);
if ( vp == MAP_FAILED) {
throw std::bad_alloc();
}
#else
void * vp = std::malloc( size_);
if ( ! vp) {
throw std::bad_alloc();
}
#endif
stack_context sctx;
sctx.size = size_;
sctx.sp = static_cast< char * >( vp) + sctx.size;
#if defined(BOOST_USE_VALGRIND)
sctx.valgrind_stack_id = VALGRIND_STACK_REGISTER( sctx.sp, vp);
#endif
return sctx;
}
void deallocate( stack_context & sctx) noexcept {
BOOST_ASSERT( sctx.sp);
#if defined(BOOST_USE_VALGRIND)
VALGRIND_STACK_DEREGISTER( sctx.valgrind_stack_id);
#endif
void * vp = static_cast< char * >( sctx.sp) - sctx.size;
#if defined(MEMORIA_CONTEXT_USE_MAP_STACK)
::munmap( vp, sctx.size);
#else
std::free( vp);
#endif
}
};
typedef basic_fixedsize_stack< stack_traits > fixedsize_stack;
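// Minimal usage sketch (hypothetical caller): with MEMORIA_CONTEXT_USE_MAP_STACK
// defined the memory comes from mmap(MAP_STACK), otherwise from std::malloc;
// sp points one past the top because stacks grow downwards.
//
//   memoria::context::fixedsize_stack alloc; // traits_type::default_size()
//   memoria::context::stack_context sctx = alloc.allocate();
//   // ... run a context/fiber on [sctx.sp - sctx.size, sctx.sp) ...
//   alloc.deallocate( sctx);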
# if ! defined(MEMORIA_USE_SEGMENTED_STACKS)
typedef fixedsize_stack default_stack;
# endif
}}
#endif // MEMORIA_CONTEXT_FIXEDSIZE_H
| 23.858696 | 105 | 0.680182 | victor-smirnov |
8b3807485cad4609617e9017bb5fcb8d213dbb88 | 8,241 | cpp | C++ | CSGOSimple/helpers/utils.cpp | Tyler-Admin/CSGOSimple | 99a659a1368ed9445b9ccf8ec4514d25d9bf81d6 | ["MIT"] | 400 | 2018-10-30T14:52:13.000Z | 2022-03-29T11:46:24.000Z | CSGOSimple/helpers/utils.cpp | Tyler-Admin/CSGOSimple | 99a659a1368ed9445b9ccf8ec4514d25d9bf81d6 | ["MIT"] | 126 | 2018-12-03T15:54:57.000Z | 2022-03-23T17:11:53.000Z | CSGOSimple/helpers/utils.cpp | Tyler-Admin/CSGOSimple | 99a659a1368ed9445b9ccf8ec4514d25d9bf81d6 | ["MIT"] | 316 | 2018-11-09T22:38:38.000Z | 2022-03-25T13:35:09.000Z |
#include "Utils.hpp"
#define NOMINMAX
#include <Windows.h>
#include <stdio.h>
#include <string>
#include <vector>
#include "../valve_sdk/csgostructs.hpp"
#include "Math.hpp"
HANDLE _out = NULL, _old_out = NULL;
HANDLE _err = NULL, _old_err = NULL;
HANDLE _in = NULL, _old_in = NULL;
namespace Utils {
std::vector<char> HexToBytes(const std::string& hex) {
std::vector<char> res;
for (auto i = 0u; i < hex.length(); i += 2) {
std::string byteString = hex.substr(i, 2);
char byte = (char)strtol(byteString.c_str(), NULL, 16);
res.push_back(byte);
}
return res;
}
std::string BytesToString(unsigned char* data, int len) {
constexpr char hexmap[] = { '0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
std::string res(len * 2, ' ');
for (int i = 0; i < len; ++i) {
res[2 * i] = hexmap[(data[i] & 0xF0) >> 4];
res[2 * i + 1] = hexmap[data[i] & 0x0F];
}
return res;
}
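/*
* Round-trip sketch (hypothetical values):
*   auto bytes = HexToBytes("53e9");   // { 0x53, 0xe9 }
*   auto hex = BytesToString(reinterpret_cast<unsigned char*>(bytes.data()), 2);
*   // hex == "53e9" -- the hexmap above emits lowercase digits
*/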
std::vector<std::string> Split(const std::string& str, const char* delim) {
std::vector<std::string> res;
char* pTempStr = _strdup(str.c_str());
char* context = NULL;
char* pWord = strtok_s(pTempStr, delim, &context);
while (pWord != NULL) {
res.push_back(pWord);
pWord = strtok_s(NULL, delim, &context);
}
free(pTempStr);
return res;
}
unsigned int FindInDataMap(datamap_t *pMap, const char *name) {
while (pMap) {
for (int i = 0; i<pMap->dataNumFields; i++) {
if (pMap->dataDesc[i].fieldName == NULL)
continue;
if (strcmp(name, pMap->dataDesc[i].fieldName) == 0)
return pMap->dataDesc[i].fieldOffset[TD_OFFSET_NORMAL];
if (pMap->dataDesc[i].fieldType == FIELD_EMBEDDED) {
if (pMap->dataDesc[i].td) {
unsigned int offset;
if ((offset = FindInDataMap(pMap->dataDesc[i].td, name)) != 0)
return offset;
}
}
}
pMap = pMap->baseMap;
}
return 0;
}
/*
* @brief Create console
*
* Create and attach a console window to the current process
*/
void AttachConsole()
{
_old_out = GetStdHandle(STD_OUTPUT_HANDLE);
_old_err = GetStdHandle(STD_ERROR_HANDLE);
_old_in = GetStdHandle(STD_INPUT_HANDLE);
::AllocConsole() && ::AttachConsole(GetCurrentProcessId());
_out = GetStdHandle(STD_OUTPUT_HANDLE);
_err = GetStdHandle(STD_ERROR_HANDLE);
_in = GetStdHandle(STD_INPUT_HANDLE);
SetConsoleMode(_out,
ENABLE_PROCESSED_OUTPUT | ENABLE_WRAP_AT_EOL_OUTPUT);
SetConsoleMode(_in,
ENABLE_INSERT_MODE | ENABLE_EXTENDED_FLAGS |
ENABLE_PROCESSED_INPUT | ENABLE_QUICK_EDIT_MODE);
}
/*
* @brief Detach console
*
* Detach and destroy the attached console
*/
void DetachConsole()
{
if(_out && _err && _in) {
FreeConsole();
if(_old_out)
SetStdHandle(STD_OUTPUT_HANDLE, _old_out);
if(_old_err)
SetStdHandle(STD_ERROR_HANDLE, _old_err);
if(_old_in)
SetStdHandle(STD_INPUT_HANDLE, _old_in);
}
}
/*
* @brief Print to console
*
* Replacement to printf that works with the newly created console
*/
bool ConsolePrint(const char* fmt, ...)
{
if(!_out)
return false;
char buf[1024];
va_list va;
va_start(va, fmt);
_vsnprintf_s(buf, 1024, fmt, va);
va_end(va);
return !!WriteConsoleA(_out, buf, static_cast<DWORD>(strlen(buf)), nullptr, nullptr);
}
/*
* @brief Blocks execution until a key is pressed on the console window
*
*/
char ConsoleReadKey()
{
if(!_in)
return false;
auto key = char{ 0 };
auto keysread = DWORD{ 0 };
ReadConsoleA(_in, &key, 1, &keysread, nullptr);
return key;
}
/*
* @brief Wait for all the given modules to be loaded
*
* @param timeout How long to wait
* @param modules List of modules to wait for
*
* @returns See WaitForSingleObject return values.
*/
int WaitForModules(std::int32_t timeout, const std::initializer_list<std::wstring>& modules)
{
bool signaled[32] = { 0 };
bool success = false;
std::uint32_t totalSlept = 0;
if(timeout == 0) {
for(auto& mod : modules) {
if(GetModuleHandleW(std::data(mod)) == NULL)
return WAIT_TIMEOUT;
}
return WAIT_OBJECT_0;
}
if(timeout < 0)
timeout = INT32_MAX;
while(true) {
for(auto i = 0u; i < modules.size(); ++i) {
auto& module = *(modules.begin() + i);
if(!signaled[i] && GetModuleHandleW(std::data(module)) != NULL) {
signaled[i] = true;
//
// Checks if all modules are signaled
//
bool done = true;
for(auto j = 0u; j < modules.size(); ++j) {
if(!signaled[j]) {
done = false;
break;
}
}
if(done) {
success = true;
goto exit;
}
}
}
if(totalSlept > std::uint32_t(timeout)) {
break;
}
Sleep(10);
totalSlept += 10;
}
exit:
return success ? WAIT_OBJECT_0 : WAIT_TIMEOUT;
}
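/*
* Usage sketch (module names are illustrative): waits up to 30s for the listed
* modules; a timeout of 0 checks once, a negative timeout waits indefinitely.
*   if (WaitForModules(30000, { L"client.dll", L"engine.dll" }) == WAIT_OBJECT_0) {
*       // modules are loaded; safe to PatternScan them now
*   }
*/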
/*
* @brief Scan for a given byte pattern on a module
*
* @param module Base of the module to search
* @param signature IDA-style byte array pattern
*
* @returns Address of the first occurence
*/
std::uint8_t* PatternScan(void* module, const char* signature)
{
static auto pattern_to_byte = [](const char* pattern) {
auto bytes = std::vector<int>{};
auto start = const_cast<char*>(pattern);
auto end = const_cast<char*>(pattern) + strlen(pattern);
for(auto current = start; current < end; ++current) {
if(*current == '?') {
++current;
if(*current == '?')
++current;
bytes.push_back(-1);
} else {
bytes.push_back(strtoul(current, ¤t, 16));
}
}
return bytes;
};
auto dosHeader = (PIMAGE_DOS_HEADER)module;
auto ntHeaders = (PIMAGE_NT_HEADERS)((std::uint8_t*)module + dosHeader->e_lfanew);
auto sizeOfImage = ntHeaders->OptionalHeader.SizeOfImage;
auto patternBytes = pattern_to_byte(signature);
auto scanBytes = reinterpret_cast<std::uint8_t*>(module);
auto s = patternBytes.size();
auto d = patternBytes.data();
for(auto i = 0ul; i < sizeOfImage - s; ++i) {
bool found = true;
for(auto j = 0ul; j < s; ++j) {
if(scanBytes[i + j] != d[j] && d[j] != -1) {
found = false;
break;
}
}
if(found) {
return &scanBytes[i];
}
}
return nullptr;
}
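/*
* Usage sketch, mirroring SetClantag below ('?' and '??' are wildcard bytes
* in the IDA-style pattern; nullptr is returned when nothing matches):
*   auto addr = PatternScan(GetModuleHandleW(L"engine.dll"),
*                           "53 56 57 8B DA 8B F9 FF 15");
*/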
/*
* @brief Set player clantag
*
* @param tag New clantag
*/
void SetClantag(const char* tag)
{
static auto fnClantagChanged = (int(__fastcall*)(const char*, const char*))PatternScan(GetModuleHandleW(L"engine.dll"), "53 56 57 8B DA 8B F9 FF 15");
fnClantagChanged(tag, tag);
}
/*
* @brief Set player name
*
* @param name New name
*/
void SetName(const char* name)
{
static auto nameConvar = g_CVar->FindVar("name");
nameConvar->m_fnChangeCallbacks.m_Size = 0;
// Fix so we can change names how many times we want
// This code will only run once because of `static`
static auto do_once = (nameConvar->SetValue("\n\xAD\xAD\xAD"), true);
nameConvar->SetValue(name);
}
}
| 27.47 | 158 | 0.522752 | Tyler-Admin |
8b3caed5e086b375e3f851f8eb858ac79e2db7df | 9,916 | cpp | C++ | examples/example-ftxui-extension.cpp | perfkitpp/perfkit | 19b691c8337b47594ed6be28051b3a5ee31af389 | ["MIT"] | null | null | null | examples/example-ftxui-extension.cpp | perfkitpp/perfkit | 19b691c8337b47594ed6be28051b3a5ee31af389 | ["MIT"] | 6 | 2021-10-14T15:38:06.000Z | 2022-01-08T12:55:04.000Z | examples/example-ftxui-extension.cpp | perfkitpp/perfkit | 19b691c8337b47594ed6be28051b3a5ee31af389 | ["MIT"] | null | null | null |
// MIT License
//
// Copyright (c) 2021-2022. Seungwoo Kang
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
// project home: https://github.com/perfkitpp
#include "ftxui/component/captured_mouse.hpp" // for ftxui
#include "ftxui/component/component.hpp" // for Checkbox, Vertical
#include "ftxui/component/screen_interactive.hpp" // for ScreenInteractive
#include "perfkit/configs.h"
#include "perfkit/ftxui-extension.hpp"
#include "perfkit/traces.h"
#include "spdlog/fmt/fmt.h"
using namespace std::literals;
std::map<std::string, std::map<std::string, std::string>> ved{
{"asd", {{"asd", "weqw"}, {"vafe, ewqew", "dwrew"}}},
{"vadsfew", {{"dav ,ea w", "Ewqsad"}, {"scxz ss", "dwqewqew"}}}};
PERFKIT_CATEGORY(cfg)
{
PERFKIT_CONFIGURE(active, true).confirm();
PERFKIT_CONFIGURE(active_async, true).confirm();
PERFKIT_SUBCATEGORY(labels)
{
PERFKIT_CONFIGURE(foo, 1).confirm();
PERFKIT_CONFIGURE(bar, false).confirm();
PERFKIT_CONFIGURE(ce, "ola ollalala").confirm();
PERFKIT_CONFIGURE(ced, std::vector({1, 2, 3, 4, 5, 6})).confirm();
PERFKIT_CONFIGURE(cedr, (std::map<std::string, int>{
{"fdf", 2},
{"erwe", 4}}))
.confirm();
PERFKIT_CONFIGURE(bb, (std::map<std::string, bool>{
{"fdf", false},
{"erwe", true}}))
.confirm();
PERFKIT_CONFIGURE(cedrs, 3.141592).confirm();
PERFKIT_CONFIGURE(cedrstt, std::move(ved)).confirm();
}
PERFKIT_SUBCATEGORY(lomo)
{
PERFKIT_SUBCATEGORY(movdo)
{
PERFKIT_CONFIGURE(ce, 1).confirm();
PERFKIT_CONFIGURE(ced, 1).confirm();
PERFKIT_CONFIGURE(cedr, 1).confirm();
PERFKIT_SUBCATEGORY(cef)
{
}
PERFKIT_SUBCATEGORY(ccra)
{
PERFKIT_CONFIGURE(foo, 1).confirm();
PERFKIT_CONFIGURE(bar, 1).confirm();
PERFKIT_CONFIGURE(ce, 1).confirm();
PERFKIT_CONFIGURE(ced, 1).confirm();
PERFKIT_CONFIGURE(cedr, 1).confirm();
PERFKIT_CONFIGURE(cedrs, 1).confirm();
PERFKIT_CONFIGURE(a_foo, 1).confirm();
PERFKIT_CONFIGURE(a_bar, 1).confirm();
PERFKIT_CONFIGURE(a_ce, 1).confirm();
PERFKIT_CONFIGURE(a_ced, 1).confirm();
PERFKIT_CONFIGURE(a_cedr, 1).confirm();
PERFKIT_CONFIGURE(a_cedrs, 1).confirm();
PERFKIT_CONFIGURE(b_foo, 1).confirm();
PERFKIT_CONFIGURE(b_bar, 1).confirm();
PERFKIT_CONFIGURE(b_ce, 1).confirm();
PERFKIT_CONFIGURE(b_ced, 1).confirm();
PERFKIT_CONFIGURE(b_cedr, 1).confirm();
PERFKIT_CONFIGURE(b_cedrs, 1).confirm();
PERFKIT_CONFIGURE(c_foo, 1).confirm();
PERFKIT_CONFIGURE(c_bar, 1).confirm();
PERFKIT_CONFIGURE(c_ce, 1).confirm();
PERFKIT_CONFIGURE(c_ced, 1).confirm();
PERFKIT_CONFIGURE(c_cedr, 1).confirm();
PERFKIT_CONFIGURE(c_cedrs, 1).confirm();
PERFKIT_CONFIGURE(d_foo, 1).confirm();
PERFKIT_CONFIGURE(d_bar, 1).confirm();
PERFKIT_CONFIGURE(d_ce, 1).confirm();
PERFKIT_CONFIGURE(d_ced, 1).confirm();
PERFKIT_CONFIGURE(d_cedr, 1).confirm();
PERFKIT_CONFIGURE(d_cedrs, 1).confirm();
}
}
}
PERFKIT_CONFIGURE(foo, 1).confirm();
PERFKIT_CONFIGURE(bar, 1).confirm();
PERFKIT_CONFIGURE(ce, 1).confirm();
PERFKIT_CONFIGURE(ced, 1).confirm();
PERFKIT_CONFIGURE(cedr, 1).confirm();
PERFKIT_CONFIGURE(cedrs, 1).confirm();
PERFKIT_CONFIGURE(a_foo, 1).confirm();
PERFKIT_CONFIGURE(a_bar, 1).confirm();
PERFKIT_CONFIGURE(a_ce, 1).confirm();
PERFKIT_CONFIGURE(a_ced, 1).confirm();
PERFKIT_CONFIGURE(a_cedr, 1).confirm();
PERFKIT_CONFIGURE(a_cedrs, 1).confirm();
PERFKIT_CONFIGURE(b_foo, 1).confirm();
PERFKIT_CONFIGURE(b_bar, 1).confirm();
PERFKIT_CONFIGURE(b_ce, 1).confirm();
PERFKIT_CONFIGURE(b_ced, 1).confirm();
PERFKIT_CONFIGURE(b_cedr, 1).confirm();
PERFKIT_CONFIGURE(b_cedrs, 1).confirm();
PERFKIT_CONFIGURE(c_foo, 1).confirm();
PERFKIT_CONFIGURE(c_bar, 1).confirm();
PERFKIT_CONFIGURE(c_ce, 1).confirm();
PERFKIT_CONFIGURE(c_ced, 1).confirm();
PERFKIT_CONFIGURE(c_cedr, 1).confirm();
PERFKIT_CONFIGURE(c_cedrs, 1).confirm();
PERFKIT_CONFIGURE(d_foo, 1).confirm();
PERFKIT_CONFIGURE(d_bar, 1).confirm();
PERFKIT_CONFIGURE(d_ce, 1).confirm();
PERFKIT_CONFIGURE(d_ced, 1).confirm();
PERFKIT_CONFIGURE(d_cedr, 1).confirm();
PERFKIT_CONFIGURE(d_cedrs, 1).confirm();
PERFKIT_CONFIGURE(e_foo, 1).confirm();
PERFKIT_CONFIGURE(e_bar, 1).confirm();
PERFKIT_CONFIGURE(e_ce, 1).confirm();
PERFKIT_CONFIGURE(e_ced, 1).confirm();
PERFKIT_CONFIGURE(e_cedr, 1).confirm();
PERFKIT_CONFIGURE(e_cedrs, 1).confirm();
}
PERFKIT_CATEGORY(vlao)
{
PERFKIT_CONFIGURE(e_cedrs, 1).confirm();
PERFKIT_CONFIGURE(e_cedrsd, "").confirm();
}
PERFKIT_CATEGORY(vlao1) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); }
PERFKIT_CATEGORY(vlao2) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); }
PERFKIT_CATEGORY(vlao3) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); }
PERFKIT_CATEGORY(vlao4) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); }
PERFKIT_CATEGORY(vlao5) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); }
PERFKIT_CATEGORY(vlao6) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); }
PERFKIT_CATEGORY(vlao7) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); }
PERFKIT_CATEGORY(vlao8) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); }
PERFKIT_CATEGORY(vlao9) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); }
PERFKIT_CATEGORY(vlao22) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); }
PERFKIT_CATEGORY(vlao33) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); }
PERFKIT_CATEGORY(vlao44) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); }
PERFKIT_CATEGORY(vlao55) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); }
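// Note on the macros above (illustrative summary, grounded in how main() below
// uses them): PERFKIT_CATEGORY/PERFKIT_SUBCATEGORY declare a namespace of
// configuration entries, and PERFKIT_CONFIGURE(name, default).confirm()
// registers one entry with its default value. An entry is then read with
// cfg::labels::foo.get() (or *cfg::labels::foo), written asynchronously with
// async_modify(), and pending writes become visible after
// cfg::registry().apply_update_and_check_if_dirty() runs, once per loop.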
using namespace ftxui;
perfkit::tracer traces[] = {
{0, "root (1)"},
{1, "A (2)"},
{31, "B (4)"},
{-51, "C (0)"},
{14, "D (3)"},
};
class my_subscriber : public perfkit_ftxui::if_subscriber
{
public:
bool on_update(update_param_type const& param_type, perfkit::trace_variant_type const& value) override
{
traces[1].fork("Value update A")["NAME"] = param_type.name;
return true;
}
void on_end(update_param_type const& param_type) override
{
vlao::e_cedrs.async_modify(vlao::e_cedrs.get() + 1);
vlao::e_cedrsd.async_modify(std::string(param_type.name));
vlao::registry().apply_update_and_check_if_dirty();
}
};
int main(int argc, char const* argv[])
{
auto screen = ScreenInteractive::Fullscreen();
std::shared_ptr<perfkit_ftxui::string_queue> commands;
auto preset = perfkit_ftxui::PRESET(&commands, {}, std::make_shared<my_subscriber>());
auto kill_switch = perfkit_ftxui::launch_async_loop(&screen, preset);
for (int ic = 0; perfkit_ftxui::is_alive(kill_switch.get()); ++ic) {
std::this_thread::sleep_for(10ms);
cfg::registry().apply_update_and_check_if_dirty();
auto trc_root = traces[0].fork("Root Trace");
auto timer = trc_root.timer("Some Timer");
trc_root["Value 0"] = 3;
trc_root["Value 1"] = *cfg::labels::foo;
trc_root["Value 2"] = fmt::format("Hell, world! {}", *cfg::labels::foo);
trc_root["Value 3"] = false;
trc_root["Value 3"]["Subvalue 0"] = ic;
trc_root["Value 3"]["Subvalue GR"] = std::vector<int>{3, 4, 5};
trc_root["Value 3"]["Subvalue 1"] = double(ic);
trc_root["Value 3"]["Subvalue 2"] = !!(ic & 1);
trc_root["Value 4"]["Subvalue 3"] = fmt::format("Hell, world! {}", ic);
auto r = trc_root["Value 5"];
trc_root["Value 5"]["Subvalue 0"] = ic;
if (r) { trc_root["Value 5"]["Subvalue 1 Cond"] = double(ic); }
trc_root["Value 5"]["Subvalue 2"] = !!(ic & 1);
std::string to_get;
if (commands->try_getline(to_get)) {
trc_root["TEXT"] = to_get;
}
cfg::labels::foo.async_modify(cfg::labels::foo.get() + 1);
if (cfg::active_async.get() == false) {
kill_switch.reset();
break;
}
}
return 0;
}
// Copyright 2020 Arthur Sonzogni. All rights reserved.
// Use of this source code is governed by the MIT license that can be found in
// the LICENSE file.
| 40.145749 | 130 | 0.619907 | perfkitpp |
8b3d56d6f717976bb780f72b942c32c9839a2262 | 945 | cc | C++ | node/binding/webgl/WebGLTexture.cc | jwxbond/GCanvas | f4bbb207d3b47abab3103da43371f47a86b0a3bd | [
"Apache-2.0"
] | 4 | 2017-08-04T04:31:33.000Z | 2019-11-01T06:32:23.000Z | node/binding/webgl/WebGLTexture.cc | jwxbond/GCanvas | f4bbb207d3b47abab3103da43371f47a86b0a3bd | [
"Apache-2.0"
] | 2 | 2017-10-31T11:55:16.000Z | 2018-02-01T10:33:39.000Z | node/binding/webgl/WebGLTexture.cc | jwxbond/GCanvas | f4bbb207d3b47abab3103da43371f47a86b0a3bd | [
"Apache-2.0"
] | null | null | null |
/**
* Created by G-Canvas Open Source Team.
* Copyright (c) 2017, Alibaba, Inc. All rights reserved.
*
* This source code is licensed under the Apache Licence 2.0.
* For the full copyright and license information, please view
* the LICENSE file in the root directory of this source tree.
*/
#include "WebGLTexture.h"
namespace NodeBinding
{
Napi::FunctionReference WebGLTexture::constructor;
WebGLTexture::WebGLTexture(const Napi::CallbackInfo &info) : Napi::ObjectWrap<WebGLTexture>(info)
{
mId = info[0].As<Napi::Number>().Uint32Value();
}
void WebGLTexture::Init(Napi::Env env)
{
Napi::HandleScope scope(env);
Napi::Function func = DefineClass(env, "WebGLTexture", {});
constructor = Napi::Persistent(func);
constructor.SuppressDestruct();
}
Napi::Object WebGLTexture::NewInstance(Napi::Env env, const Napi::Value arg)
{
Napi::Object obj = constructor.New({arg});
return obj;
}
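// Usage sketch (assumed from the Init/NewInstance pattern above; textureId is
// a hypothetical GLuint, e.g. one produced by glGenTextures):
//   WebGLTexture::Init(env);  // once, at addon initialization
//   Napi::Object tex =
//       WebGLTexture::NewInstance(env, Napi::Number::New(env, textureId));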
} // namespace NodeBinding
| 29.53125 | 97 | 0.719577 | jwxbond |
8b3e21556e4ec325a557384cae395eda73b602c2 | 3,819 | cpp | C++ | test/unit/module/math/cosd.cpp | the-moisrex/eve | 80b52663eefee11460abb0aedf4158a5067cf7dc | [
"MIT"
] | 340 | 2020-09-16T21:12:48.000Z | 2022-03-28T15:40:33.000Z | test/unit/module/math/cosd.cpp | the-moisrex/eve | 80b52663eefee11460abb0aedf4158a5067cf7dc | [
"MIT"
] | 383 | 2020-09-17T06:56:35.000Z | 2022-03-13T15:58:53.000Z | test/unit/module/math/cosd.cpp | the-moisrex/eve | 80b52663eefee11460abb0aedf4158a5067cf7dc | [
"MIT"
] | 28 | 2021-02-27T23:11:23.000Z | 2022-03-25T12:31:29.000Z |
//==================================================================================================
/**
EVE - Expressive Vector Engine
Copyright : EVE Contributors & Maintainers
SPDX-License-Identifier: MIT
**/
//==================================================================================================
#include "test.hpp"
#include <eve/concept/value.hpp>
#include <eve/constant/valmin.hpp>
#include <eve/constant/valmax.hpp>
#include <eve/constant/invpi.hpp>
#include <eve/function/cosd.hpp>
#include <eve/function/diff/cosd.hpp>
#include <eve/function/deginrad.hpp>
#include <cmath>
#include <eve/module/math/detail/constant/rempio2_limits.hpp>
#include <eve/detail/function/tmp/boost_math_cospi.hpp>
#include <eve/detail/function/tmp/boost_math_sinpi.hpp>
//==================================================================================================
// Types tests
//==================================================================================================
EVE_TEST_TYPES( "Check return types of cosd"
, eve::test::simd::ieee_reals
)
<typename T>(eve::as<T>)
{
using v_t = eve::element_type_t<T>;
TTS_EXPR_IS( eve::cosd(T()) , T);
TTS_EXPR_IS( eve::cosd(v_t()), v_t);
};
//==================================================================================================
// cosd tests
//==================================================================================================
auto mquarter_c = []<typename T>(eve::as<T> const & ){ return T(-45); };
auto quarter_c = []<typename T>(eve::as<T> const & ){ return T( 45); };
auto mhalf_c = []<typename T>(eve::as<T> const & ){ return T(-90 ); };
auto half_c = []<typename T>(eve::as<T> const & ){ return T( 90 ); };
auto mmed = []<typename T>(eve::as<T> const & ){ return -5000; };
auto med = []<typename T>(eve::as<T> const & ){ return 5000; };
EVE_TEST( "Check behavior of cosd on wide"
, eve::test::simd::ieee_reals
, eve::test::generate( eve::test::randoms(mquarter_c, quarter_c)
, eve::test::randoms(mhalf_c, half_c)
, eve::test::randoms(mmed, med))
)
<typename T>(T const& a0, T const& a1, T const& a2)
{
using eve::detail::map;
using eve::cosd;
using eve::diff;
using eve::deginrad;
using v_t = eve::element_type_t<T>;
auto ref = [](auto e) -> v_t { return boost::math::cos_pi(e/180.0l); };
TTS_ULP_EQUAL(eve::quarter_circle(cosd)(a0) , map(ref, a0), 2);
TTS_ULP_EQUAL(eve::half_circle(cosd)(a0) , map(ref, a0), 2);
TTS_ULP_EQUAL(eve::half_circle(cosd)(a1) , map(ref, a1), 30);
TTS_ULP_EQUAL(cosd(a0) , map(ref, a0), 2);
TTS_ULP_EQUAL(cosd(a1) , map(ref, a1), 30);
TTS_ULP_EQUAL(cosd(a2) , map(ref, a2), 420);
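  // dinr is pi/180 in long double: cosd(x) = cos(pi*x/180), so the derivative
  // is d/dx cosd(x) = -(pi/180) * sind(x), matching the reference map below.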
auto dinr = 1.7453292519943295769236907684886127134428718885417e-2l;
TTS_ULP_EQUAL(diff(cosd)(a0), map([dinr](auto e) -> v_t { return -dinr*boost::math::sin_pi(e/180.0l); }, a0), 2);
};
EVE_TEST_TYPES( "Check return types of cosd"
, eve::test::simd::ieee_reals
)
<typename T>(eve::as<T>)
{
TTS_ULP_EQUAL(eve::cosd(T(1)) , T(0.9998476951563912391570115588139148516927403105832) , 0.5 );
TTS_ULP_EQUAL(eve::cosd(T(-1)) , T(0.9998476951563912391570115588139148516927403105832) , 0.5 );
TTS_ULP_EQUAL(eve::cosd(T(45.0)) , T(0.70710678118654752440084436210484903928483593768847) , 0.5 );
TTS_ULP_EQUAL(eve::cosd(-T(45.0)) , T(0.70710678118654752440084436210484903928483593768847) , 0.5 );
TTS_ULP_EQUAL(eve::cosd(T(-500.0)) , T(-0.7660444431189780352023926505554166739358324570804) , 3.5 );
TTS_ULP_EQUAL(eve::cosd(T(500.0)) , T(-0.7660444431189780352023926505554166739358324570804) , 3.5 );
};
| 46.012048 | 116 | 0.532338 | the-moisrex |
8b3e4f0a9886e7b1a98fc35c7f3e579114ba0685 | 12,285 | hpp | C++ | include/Zenject/SubContainerCreatorByNewGameObjectMethod_1.hpp | RedBrumbler/BeatSaber-Quest-Codegen | 73dda50b5a3e51f10d86b766dcaa24b0c6226e25 | [
"Unlicense"
] | null | null | null | include/Zenject/SubContainerCreatorByNewGameObjectMethod_1.hpp | RedBrumbler/BeatSaber-Quest-Codegen | 73dda50b5a3e51f10d86b766dcaa24b0c6226e25 | [
"Unlicense"
] | null | null | null | include/Zenject/SubContainerCreatorByNewGameObjectMethod_1.hpp | RedBrumbler/BeatSaber-Quest-Codegen | 73dda50b5a3e51f10d86b766dcaa24b0c6226e25 | [
"Unlicense"
] | null | null | null |
// Autogenerated from CppHeaderCreator
// Created by Sc2ad
// =========================================================================
#pragma once
// Begin includes
#include "beatsaber-hook/shared/utils/typedefs.h"
#include "beatsaber-hook/shared/utils/byref.hpp"
// Including type: Zenject.SubContainerCreatorByNewGameObjectDynamicContext
#include "Zenject/SubContainerCreatorByNewGameObjectDynamicContext.hpp"
#include "beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp"
#include "beatsaber-hook/shared/utils/il2cpp-utils-properties.hpp"
#include "beatsaber-hook/shared/utils/il2cpp-utils-fields.hpp"
#include "beatsaber-hook/shared/utils/utils.h"
// Completed includes
// Begin forward declares
// Forward declaring namespace: Zenject
namespace Zenject {
// Skipping declaration: <>c__DisplayClass2_0 because it is already included!
// Forward declaring type: DiContainer
class DiContainer;
// Forward declaring type: GameObjectCreationParameters
class GameObjectCreationParameters;
// Forward declaring type: GameObjectContext
class GameObjectContext;
// Forward declaring type: InjectTypeInfo
class InjectTypeInfo;
}
// Forward declaring namespace: System
namespace System {
// Forward declaring type: Action`2<T1, T2>
template<typename T1, typename T2>
class Action_2;
}
// Forward declaring namespace: System::Collections::Generic
namespace System::Collections::Generic {
// Forward declaring type: List`1<T>
template<typename T>
class List_1;
}
// Completed forward declares
// Type namespace: Zenject
namespace Zenject {
// Forward declaring type: SubContainerCreatorByNewGameObjectMethod`1<TParam1>
template<typename TParam1>
class SubContainerCreatorByNewGameObjectMethod_1;
}
#include "beatsaber-hook/shared/utils/il2cpp-type-check.hpp"
DEFINE_IL2CPP_ARG_TYPE_GENERIC_CLASS(::Zenject::SubContainerCreatorByNewGameObjectMethod_1, "Zenject", "SubContainerCreatorByNewGameObjectMethod`1");
// Type namespace: Zenject
namespace Zenject {
// WARNING Size may be invalid!
// Autogenerated type: Zenject.SubContainerCreatorByNewGameObjectMethod`1
// [TokenAttribute] Offset: FFFFFFFF
// [NoReflectionBakingAttribute] Offset: FFFFFFFF
template<typename TParam1>
class SubContainerCreatorByNewGameObjectMethod_1 : public ::Zenject::SubContainerCreatorByNewGameObjectDynamicContext {
public:
// Nested type: ::Zenject::SubContainerCreatorByNewGameObjectMethod_1::$$c__DisplayClass2_0<TParam1>
class $$c__DisplayClass2_0;
// WARNING Size may be invalid!
// Autogenerated type: Zenject.SubContainerCreatorByNewGameObjectMethod`1/Zenject.<>c__DisplayClass2_0
// [TokenAttribute] Offset: FFFFFFFF
// [CompilerGeneratedAttribute] Offset: FFFFFFFF
class $$c__DisplayClass2_0 : public ::il2cpp_utils::il2cpp_type_check::NestedType, public ::Il2CppObject {
public:
using declaring_type = SubContainerCreatorByNewGameObjectMethod_1<TParam1>*;
static constexpr std::string_view NESTED_NAME = "<>c__DisplayClass2_0";
static constexpr bool IS_VALUE_TYPE = false;
#ifdef USE_CODEGEN_FIELDS
public:
#else
#ifdef CODEGEN_FIELD_ACCESSIBILITY
CODEGEN_FIELD_ACCESSIBILITY:
#else
protected:
#endif
#endif
// public Zenject.SubContainerCreatorByNewGameObjectMethod`1<TParam1> <>4__this
// Size: 0x8
// Offset: 0x0
::Zenject::SubContainerCreatorByNewGameObjectMethod_1<TParam1>* $$4__this;
// Field size check
static_assert(sizeof(::Zenject::SubContainerCreatorByNewGameObjectMethod_1<TParam1>*) == 0x8);
// public System.Collections.Generic.List`1<Zenject.TypeValuePair> args
// Size: 0x8
// Offset: 0x0
::System::Collections::Generic::List_1<::Zenject::TypeValuePair>* args;
// Field size check
static_assert(sizeof(::System::Collections::Generic::List_1<::Zenject::TypeValuePair>*) == 0x8);
public:
// Autogenerated instance field getter
// Get instance field: public Zenject.SubContainerCreatorByNewGameObjectMethod`1<TParam1> <>4__this
::Zenject::SubContainerCreatorByNewGameObjectMethod_1<TParam1>*& dyn_$$4__this() {
static auto ___internal__logger = ::Logger::get().WithContext("::Zenject::SubContainerCreatorByNewGameObjectMethod_1::$$c__DisplayClass2_0::dyn_$$4__this");
auto ___internal__instance = this;
static auto ___internal__field__offset = THROW_UNLESS(il2cpp_utils::FindField(___internal__instance, "<>4__this"))->offset;
return *reinterpret_cast<::Zenject::SubContainerCreatorByNewGameObjectMethod_1<TParam1>**>(reinterpret_cast<char*>(this) + ___internal__field__offset);
}
// Autogenerated instance field getter
// Get instance field: public System.Collections.Generic.List`1<Zenject.TypeValuePair> args
::System::Collections::Generic::List_1<::Zenject::TypeValuePair>*& dyn_args() {
static auto ___internal__logger = ::Logger::get().WithContext("::Zenject::SubContainerCreatorByNewGameObjectMethod_1::$$c__DisplayClass2_0::dyn_args");
auto ___internal__instance = this;
static auto ___internal__field__offset = THROW_UNLESS(il2cpp_utils::FindField(___internal__instance, "args"))->offset;
return *reinterpret_cast<::System::Collections::Generic::List_1<::Zenject::TypeValuePair>**>(reinterpret_cast<char*>(this) + ___internal__field__offset);
}
// System.Void <AddInstallers>b__0(Zenject.DiContainer subContainer)
// Offset: 0xFFFFFFFFFFFFFFFF
void $AddInstallers$b__0(::Zenject::DiContainer* subContainer) {
static auto ___internal__logger = ::Logger::get().WithContext("::Zenject::SubContainerCreatorByNewGameObjectMethod_1::$$c__DisplayClass2_0::<AddInstallers>b__0");
static auto* ___internal__method = THROW_UNLESS((::il2cpp_utils::FindMethod(this, "<AddInstallers>b__0", std::vector<Il2CppClass*>{}, ::std::vector<const Il2CppType*>{::il2cpp_utils::ExtractType(subContainer)})));
::il2cpp_utils::RunMethodRethrow<void, false>(this, ___internal__method, subContainer);
}
// static private System.Object __zenCreate(System.Object[] P_0)
// Offset: 0xFFFFFFFFFFFFFFFF
static ::Il2CppObject* __zenCreate(::ArrayW<::Il2CppObject*> P_0) {
static auto ___internal__logger = ::Logger::get().WithContext("::Zenject::SubContainerCreatorByNewGameObjectMethod_1::$$c__DisplayClass2_0::__zenCreate");
static auto* ___internal__method = THROW_UNLESS((::il2cpp_utils::FindMethod(::il2cpp_utils::il2cpp_type_check::il2cpp_no_arg_class<typename SubContainerCreatorByNewGameObjectMethod_1<TParam1>::$$c__DisplayClass2_0*>::get(), "__zenCreate", std::vector<Il2CppClass*>{}, ::std::vector<const Il2CppType*>{::il2cpp_utils::ExtractType(P_0)})));
return ::il2cpp_utils::RunMethodRethrow<::Il2CppObject*, false>(static_cast<Il2CppObject*>(nullptr), ___internal__method, P_0);
}
// static private Zenject.InjectTypeInfo __zenCreateInjectTypeInfo()
// Offset: 0xFFFFFFFFFFFFFFFF
static ::Zenject::InjectTypeInfo* __zenCreateInjectTypeInfo() {
static auto ___internal__logger = ::Logger::get().WithContext("::Zenject::SubContainerCreatorByNewGameObjectMethod_1::$$c__DisplayClass2_0::__zenCreateInjectTypeInfo");
static auto* ___internal__method = THROW_UNLESS((::il2cpp_utils::FindMethod(::il2cpp_utils::il2cpp_type_check::il2cpp_no_arg_class<typename SubContainerCreatorByNewGameObjectMethod_1<TParam1>::$$c__DisplayClass2_0*>::get(), "__zenCreateInjectTypeInfo", std::vector<Il2CppClass*>{}, ::std::vector<const Il2CppType*>{})));
return ::il2cpp_utils::RunMethodRethrow<::Zenject::InjectTypeInfo*, false>(static_cast<Il2CppObject*>(nullptr), ___internal__method);
}
// public System.Void .ctor()
// Offset: 0xFFFFFFFFFFFFFFFF
// Implemented from: System.Object
// Base method: System.Void Object::.ctor()
template<::il2cpp_utils::CreationType creationType = ::il2cpp_utils::CreationType::Temporary>
static typename SubContainerCreatorByNewGameObjectMethod_1<TParam1>::$$c__DisplayClass2_0* New_ctor() {
static auto ___internal__logger = ::Logger::get().WithContext("::Zenject::SubContainerCreatorByNewGameObjectMethod_1::$$c__DisplayClass2_0::.ctor");
return THROW_UNLESS((::il2cpp_utils::New<typename SubContainerCreatorByNewGameObjectMethod_1<TParam1>::$$c__DisplayClass2_0*, creationType>()));
}
}; // Zenject.SubContainerCreatorByNewGameObjectMethod`1/Zenject.<>c__DisplayClass2_0
// Could not write size check! Type: Zenject.SubContainerCreatorByNewGameObjectMethod`1/Zenject.<>c__DisplayClass2_0 is generic, or has no fields that are valid for size checks!
#ifdef USE_CODEGEN_FIELDS
public:
#else
#ifdef CODEGEN_FIELD_ACCESSIBILITY
CODEGEN_FIELD_ACCESSIBILITY:
#else
protected:
#endif
#endif
// private readonly System.Action`2<Zenject.DiContainer,TParam1> _installerMethod
// Size: 0x8
// Offset: 0x0
::System::Action_2<::Zenject::DiContainer*, TParam1>* installerMethod;
// Field size check
static_assert(sizeof(::System::Action_2<::Zenject::DiContainer*, TParam1>*) == 0x8);
public:
// Autogenerated instance field getter
// Get instance field: private readonly System.Action`2<Zenject.DiContainer,TParam1> _installerMethod
::System::Action_2<::Zenject::DiContainer*, TParam1>*& dyn__installerMethod() {
static auto ___internal__logger = ::Logger::get().WithContext("::Zenject::SubContainerCreatorByNewGameObjectMethod_1::dyn__installerMethod");
auto ___internal__instance = this;
static auto ___internal__field__offset = THROW_UNLESS(il2cpp_utils::FindField(___internal__instance, "_installerMethod"))->offset;
return *reinterpret_cast<::System::Action_2<::Zenject::DiContainer*, TParam1>**>(reinterpret_cast<char*>(this) + ___internal__field__offset);
}
// public System.Void .ctor(Zenject.DiContainer container, Zenject.GameObjectCreationParameters gameObjectBindInfo, System.Action`2<Zenject.DiContainer,TParam1> installerMethod)
// Offset: 0xFFFFFFFFFFFFFFFF
template<::il2cpp_utils::CreationType creationType = ::il2cpp_utils::CreationType::Temporary>
static SubContainerCreatorByNewGameObjectMethod_1<TParam1>* New_ctor(::Zenject::DiContainer* container, ::Zenject::GameObjectCreationParameters* gameObjectBindInfo, ::System::Action_2<::Zenject::DiContainer*, TParam1>* installerMethod) {
static auto ___internal__logger = ::Logger::get().WithContext("::Zenject::SubContainerCreatorByNewGameObjectMethod_1::.ctor");
return THROW_UNLESS((::il2cpp_utils::New<SubContainerCreatorByNewGameObjectMethod_1<TParam1>*, creationType>(container, gameObjectBindInfo, installerMethod)));
}
// protected override System.Void AddInstallers(System.Collections.Generic.List`1<Zenject.TypeValuePair> args, Zenject.GameObjectContext context)
// Offset: 0xFFFFFFFFFFFFFFFF
// Implemented from: Zenject.SubContainerCreatorDynamicContext
// Base method: System.Void SubContainerCreatorDynamicContext::AddInstallers(System.Collections.Generic.List`1<Zenject.TypeValuePair> args, Zenject.GameObjectContext context)
void AddInstallers(::System::Collections::Generic::List_1<::Zenject::TypeValuePair>* args, ::Zenject::GameObjectContext* context) {
static auto ___internal__logger = ::Logger::get().WithContext("::Zenject::SubContainerCreatorByNewGameObjectMethod_1::AddInstallers");
auto* ___internal__method = THROW_UNLESS((::il2cpp_utils::FindMethod(this, "AddInstallers", std::vector<Il2CppClass*>{}, ::std::vector<const Il2CppType*>{::il2cpp_utils::ExtractType(args), ::il2cpp_utils::ExtractType(context)})));
::il2cpp_utils::RunMethodRethrow<void, false>(this, ___internal__method, args, context);
}
}; // Zenject.SubContainerCreatorByNewGameObjectMethod`1
// Could not write size check! Type: Zenject.SubContainerCreatorByNewGameObjectMethod`1 is generic, or has no fields that are valid for size checks!
}
#include "beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp"
| 67.131148 | 347 | 0.745136 | RedBrumbler |
8b4f1b0fee4fbc0a88e890a4008cce88d17c4842 | 1,519 | cpp | C++ | event_loop_base_kqueue.cpp | looyao/mevent | fa268e93b33264c71d086ba9387b5ab2fabd0257 | [
"MIT"
] | 42 | 2017-03-07T02:45:22.000Z | 2019-02-26T15:26:25.000Z | event_loop_base_kqueue.cpp | looyao/mevent | fa268e93b33264c71d086ba9387b5ab2fabd0257 | [
"MIT"
] | null | null | null | event_loop_base_kqueue.cpp | looyao/mevent | fa268e93b33264c71d086ba9387b5ab2fabd0257 | [
"MIT"
] | 11 | 2017-03-07T06:42:30.000Z | 2019-03-06T03:15:46.000Z |
#include "event_loop_base.h"
#include <sys/event.h>
#include <stdlib.h>
namespace mevent {
int EventLoopBase::Create() {
return kqueue();
}
int EventLoopBase::Add(int evfd, int fd, int mask, void *data) {
struct kevent ev;
if (mask & MEVENT_IN) {
EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, data);
kevent(evfd, &ev, 1, NULL, 0, NULL);
}
if (mask & MEVENT_OUT) {
EV_SET(&ev, fd, EVFILT_WRITE, EV_ADD, 0, 0, data);
kevent(evfd, &ev, 1, NULL, 0, NULL);
}
return 0;
}
int EventLoopBase::Modify(int evfd, int fd, int mask, void *data) {
return Add(evfd, fd, mask, data);
}
int EventLoopBase::Poll(int evfd, EventLoopBase::Event *events, int size, struct timeval *tv) {
struct kevent evs[size];
int nfds;
if (tv) {
struct timespec timeout;
timeout.tv_sec = tv->tv_sec;
timeout.tv_nsec = tv->tv_usec * 1000;
nfds = kevent(evfd, NULL, 0, evs, size, &timeout);
} else {
nfds = kevent(evfd, NULL, 0, evs, size, NULL);
}
if (nfds > 0) {
for (int i = 0; i < nfds; i++) {
events[i].data.ptr = evs[i].udata;
events[i].mask = 0;
if (evs[i].filter == EVFILT_READ) {
events[i].mask |= MEVENT_IN;
}
if (evs[i].filter == EVFILT_WRITE) {
events[i].mask |= MEVENT_OUT;
}
}
}
return nfds;
}
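// Usage sketch (illustrative; written as if from inside an EventLoopBase-
// derived loop, with client_fd/conn_ptr hypothetical):
//   int evfd = Create();                        // one kqueue per loop
//   Add(evfd, client_fd, MEVENT_IN, conn_ptr);  // watch a socket for reads
//   Event events[64];
//   int n = Poll(evfd, events, 64, NULL);       // NULL timeval blocks until ready
//   for (int i = 0; i < n; i++) { /* dispatch on events[i].mask / data.ptr */ }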
}//namespace mevent
| 23.734375 | 95 | 0.520737 | looyao |
332d77eb71a4311cc316f682b76f34991f99d338 | 2,324 | cpp | C++ | automated-tests/src/dali/dali-test-suite-utils/test-render-surface.cpp | nui-dali/dali-core | bc9255ec35bec7223cd6a18fb2b3a6fcc273936b | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | automated-tests/src/dali/dali-test-suite-utils/test-render-surface.cpp | nui-dali/dali-core | bc9255ec35bec7223cd6a18fb2b3a6fcc273936b | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2020-03-22T10:19:17.000Z | 2020-03-22T10:19:17.000Z | automated-tests/src/dali/dali-test-suite-utils/test-render-surface.cpp | fayhot/dali-core | a69ea317f30961164520664a645ac36c387055ef | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
/*
* Copyright (c) 2019 Samsung Electronics Co., Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "test-render-surface.h"
namespace Dali
{
TestRenderSurface::TestRenderSurface( Dali::PositionSize positionSize )
: mPositionSize( positionSize ),
mBackgroundColor()
{
}
TestRenderSurface::~TestRenderSurface()
{
}
Dali::PositionSize TestRenderSurface::GetPositionSize() const
{
return mPositionSize;
};
void TestRenderSurface::GetDpi( unsigned int& dpiHorizontal, unsigned int& dpiVertical )
{
dpiHorizontal = dpiVertical = 96;
};
void TestRenderSurface::InitializeGraphics()
{
}
void TestRenderSurface::CreateSurface()
{
}
void TestRenderSurface::DestroySurface()
{
}
bool TestRenderSurface::ReplaceGraphicsSurface()
{
return false;
}
void TestRenderSurface::MoveResize( Dali::PositionSize positionSize )
{
mPositionSize = positionSize;
}
void TestRenderSurface::StartRender()
{
}
bool TestRenderSurface::PreRender( bool resizingSurface )
{
return true;
}
void TestRenderSurface::PostRender( bool renderToFbo, bool replacingSurface, bool resizingSurface )
{
}
void TestRenderSurface::StopRender()
{
}
void TestRenderSurface::ReleaseLock()
{
}
Dali::Integration::RenderSurface::Type TestRenderSurface::GetSurfaceType()
{
return WINDOW_RENDER_SURFACE;
}
void TestRenderSurface::MakeContextCurrent()
{
}
Integration::DepthBufferAvailable TestRenderSurface::GetDepthBufferRequired()
{
return Integration::DepthBufferAvailable::TRUE;
}
Integration::StencilBufferAvailable TestRenderSurface::GetStencilBufferRequired()
{
return Integration::StencilBufferAvailable::TRUE;
}
void TestRenderSurface::SetBackgroundColor( Vector4 color )
{
mBackgroundColor = color;
}
Vector4 TestRenderSurface::GetBackgroundColor()
{
return mBackgroundColor;
}
} // Namespace dali
| 20.034483 | 99 | 0.770224 | nui-dali |
332f0f13acb05d384ddc9992f6123529f29d7dcc | 1,791 | hpp | C++ | plugins/account_plugin/include/eosio/account_plugin/account_manager.hpp | jayden211/eos3 | 974e98739f3ce5cbcf8c8bc4a241a9e29c7e1d22 | [
"MIT"
] | null | null | null | plugins/account_plugin/include/eosio/account_plugin/account_manager.hpp | jayden211/eos3 | 974e98739f3ce5cbcf8c8bc4a241a9e29c7e1d22 | [
"MIT"
] | null | null | null | plugins/account_plugin/include/eosio/account_plugin/account_manager.hpp | jayden211/eos3 | 974e98739f3ce5cbcf8c8bc4a241a9e29c7e1d22 | [
"MIT"
] | null | null | null |
/**
* @file
* @copyright defined in eos/LICENSE.txt
*/
#pragma once
#include <eosio/chain/transaction.hpp>
#include "model.hpp"
namespace fc { class variant; }
namespace eosio {
namespace account {
/// Provides an association of wallet names to wallets and manages the interaction with each wallet.
///
/// The name of the wallet is also used as part of the file name by wallet_api. See account_manager::create.
/// No const methods because a timeout may cause lock_all() to be called.
class account_manager {
public:
account_manager() = default;
account_manager(const account_manager&) = delete;
account_manager(account_manager&&) = delete;
account_manager& operator=(const account_manager&) = delete;
account_manager& operator=(account_manager&&) = delete;
~account_manager() = default;
/// Create a new wallet.
/// A new wallet is created in file dir/{name}.wallet; see set_dir.
/// The new wallet is unlocked after creation.
/// @param args carries the wallet name, used both as the wallet's identifier and as the file name (without the .wallet extension).
/// @return Plaintext password that is needed to unlock the wallet. The caller is responsible for saving the password,
///         otherwise they will not be able to unlock their wallet. Note: user-supplied passwords are not supported.
/// @throws fc::exception if wallet with name already exists (or filename already exists)
fc::variant create(const eosio::account::account_create& args);
fc::mutable_variant_object createkey(const int& num);
// get account balance
fc::variant get_account_balance(const currency_balance& args);
// transfer
fc::variant transfer(const transfer_info& args);
private:
std::string eosio_key = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3";
};
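// Usage sketch (illustrative; how account_create is populated is an
// assumption, it would normally arrive from the plugin's API layer):
//   account_manager mgr;
//   eosio::account::account_create args; // filled from the incoming request
//   fc::variant result = mgr.create(args);
//   fc::mutable_variant_object keys = mgr.createkey(2); // two generated key pairs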
} // namespace account
} // namespace eosio
| 30.355932 | 118 | 0.726968 | jayden211 |
| 30.355932 | 118 | 0.726968 | jayden211 |
332fabc654220811b7f2a3fa3e8e4b966530a521 | 2,602 | cpp | C++ | test/protobuf-pbop-plugin-unittest/TestBufferedConnection.cpp | themucha/protobuf-pbop-plugin | 824b9db7d64d699f9c00fc79ea50b460345994d8 | [
"MIT"
] | 4 | 2020-09-10T08:38:40.000Z | 2022-02-28T23:39:32.000Z | test/protobuf-pbop-plugin-unittest/TestBufferedConnection.cpp | themucha/protobuf-pbop-plugin | 824b9db7d64d699f9c00fc79ea50b460345994d8 | [
"MIT"
] | 15 | 2020-06-24T20:31:56.000Z | 2020-07-25T16:30:56.000Z | test/protobuf-pbop-plugin-unittest/TestBufferedConnection.cpp | end2endzone/protobuf-pipe-plugin | 40dd2ba245fe0713f6b1f68622bc765711d3c7b8 | [
"MIT-0",
"MIT"
] | 3 | 2021-07-16T21:22:38.000Z | 2022-02-28T23:39:34.000Z |
/**********************************************************************************
* MIT License
*
* Copyright (c) 2018 Antoine Beauchamp
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*********************************************************************************/
#include "TestBufferedConnection.h"
#include "pbop/BufferedConnection.h"
using namespace pbop;
void TestBufferedConnection::SetUp()
{
}
void TestBufferedConnection::TearDown()
{
}
TEST_F(TestBufferedConnection, testReadWrite)
{
std::string bufferA;
std::string bufferB;
BufferedConnection conn1(&bufferA, &bufferB);
BufferedConnection conn2(&bufferB, &bufferA);
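  //note: the two buffers are crossed on purpose: conn1 writes into the buffer
  //that conn2 reads from and vice versa, emulating a full-duplex link.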
Status s;
//write data to connection 1
const std::string write_data = "hello!";
s = conn1.Write(write_data);
ASSERT_TRUE( s.Success() ) << s.GetDescription();
//read data from connection 2
std::string read_data;
s = conn2.Read(read_data);
ASSERT_TRUE( s.Success() ) << s.GetDescription();
//expect readed and written data to be identical.
ASSERT_EQ(write_data, read_data);
}
TEST_F(TestBufferedConnection, testInvalidWrite)
{
std::string buffer;
BufferedConnection conn(&buffer, NULL);
//write data to connection
const std::string data = "hello!";
Status s = conn.Write(data);
ASSERT_FALSE( s.Success() ) << s.GetDescription();
}
TEST_F(TestBufferedConnection, testInvalidRead)
{
std::string buffer;
BufferedConnection conn(NULL, &buffer);
//read data to connection
std::string data;
Status s = conn.Read(data);
ASSERT_FALSE( s.Success() ) << s.GetDescription();
}
| 30.611765 | 83 | 0.690623 | themucha |
332fd4ce8d394e1084a66c35b4bbbbef0b6c7d66 | 12,318 | cpp | C++ | amt/Selection.cpp | dehilsterlexis/eclide-1 | 0c1685cc7165191b5033d450c59aec479f01010a | [
"Apache-2.0"
] | 8 | 2016-08-29T13:34:18.000Z | 2020-12-04T15:20:36.000Z | amt/Selection.cpp | dehilsterlexis/eclide-1 | 0c1685cc7165191b5033d450c59aec479f01010a | [
"Apache-2.0"
] | 221 | 2016-06-20T19:51:48.000Z | 2022-03-29T20:46:46.000Z | amt/Selection.cpp | dehilsterlexis/eclide-1 | 0c1685cc7165191b5033d450c59aec479f01010a | [
"Apache-2.0"
] | 13 | 2016-06-24T15:59:31.000Z | 2022-01-01T11:48:20.000Z |
#include "StdAfx.h"
#include "Selection.h"
// ===========================================================================
class NoCaseCompare
{
public:
bool operator ()(const std::_tstring & l, const std::_tstring & r) const
{
return _tcsicmp(l.c_str(), r.c_str()) < 0;
}
};
// ===========================================================================
CSelection::CSelection(WTL::CTreeViewCtrlEx * tree)
{
m_tree = tree;
}
CSelection::~CSelection(void)
{
}
void CSelection::Clear()
{
CWaitCursor wait;
m_attrs.clear();
WTL::CTreeItem item = m_tree->GetRootItem();
while (item)
{
InitState((CTreeNode*)item.GetData(), THREESTATE_UNCHECKED);
item = item.GetNextVisible();
}
}
THREESTATE CSelection::CalcWorkspaceState(IWorkspace * workspace) const
{
bool unchecked = false;
bool checked = false;
IWorkspaceVector workspaces;
workspace->GetChildWorkspaces(workspaces);
for(IWorkspaceVector::const_iterator itr = workspaces.begin(); itr != workspaces.end(); ++itr)
{
switch(CalcWorkspaceState(itr->get()))
{
case THREESTATE_CHECKED:
checked = true;
break;
case THREESTATE_UNCHECKED:
unchecked = true;
break;
case THREESTATE_PARTIALCHECKED:
return THREESTATE_PARTIALCHECKED;
}
}
IWorkspaceItemVector workspaceItems;
workspace->GetWindows(workspaceItems);
for(IWorkspaceItemVector::const_iterator itr = workspaceItems.begin();itr != workspaceItems.end(); ++itr)
{
if (itr->get()->GetType() == WORKSPACE_ITEM_ATTRIBUTE)
{
AttributeStateMap::const_iterator found = m_attrs.find(itr->get()->GetAttribute());
if (found != m_attrs.end() && found->second.m_checked == true)
checked = true;
else
unchecked = true;
}
if (checked && unchecked)
return THREESTATE_PARTIALCHECKED;
}
if (checked)
return THREESTATE_CHECKED;
return THREESTATE_UNCHECKED;
}
THREESTATE CSelection::CalcModuleState(IModule * module) const
{
bool unchecked = false;
bool checked = false;
IModuleVector modules;
module->GetModules(modules);
for(IModuleVector::iterator itr = modules.begin(); itr != modules.end(); ++itr)
{
switch (CalcModuleState(itr->get()))
{
case THREESTATE_CHECKED:
checked = true;
break;
case THREESTATE_UNCHECKED:
unchecked = true;
break;
default:
return THREESTATE_PARTIALCHECKED;
}
if (checked && unchecked)
return THREESTATE_PARTIALCHECKED;
}
unsigned int attrCount = 0;
for(AttributeStateMap::const_iterator itr = m_attrs.begin(); itr != m_attrs.end(); ++itr)
{
if (boost::algorithm::iequals(module->GetQualifiedLabel(), itr->first->GetModule()->GetQualifiedLabel()))
{
attrCount++;
if (itr->second.m_checked == true)
checked = true;
else
unchecked = true;
}
if (checked && unchecked)
return THREESTATE_PARTIALCHECKED;
}
if (checked)
{
IAttributeVector attributes;
return module->GetAttributes(attributes, true) - attrCount == 0 ? THREESTATE_CHECKED : THREESTATE_PARTIALCHECKED; //(Module could contain more attrs than the ones we know about in m_attrs)
}
return THREESTATE_UNCHECKED;
}
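// Roll-up rule shared by the Calc*State helpers: a node is CHECKED when every
// known child is checked, UNCHECKED when none are, and PARTIALCHECKED as soon
// as both a checked and an unchecked descendant have been seen (the loops
// short-circuit on that condition).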
THREESTATE CSelection::CalcAttributeState(IAttribute * attribute) const
{
if (!attribute->Exists())
return THREESTATE_BLANK;
AttributeStateMap::const_iterator itr = m_attrs.find(attribute);
if (itr != m_attrs.end())
{
return itr->second.m_checked ? THREESTATE_CHECKED : THREESTATE_UNCHECKED;
}
return THREESTATE_UNCHECKED;
}
THREESTATE CSelection::CalcAttributeHistoryState(IAttribute * attribute, int version) const
{
AttributeStateMap::const_iterator itr = m_attrs.find(attribute);
if (itr != m_attrs.end() && itr->second.m_version == version && itr->second.m_checked)
{
return THREESTATE_RADIO_CHECKED;
}
return THREESTATE_RADIO_UNCHECKED;
}
void CSelection::InitState(CTreeNode * item, THREESTATE knownState)
{
if (CComQIPtr<CWorkspacePairNode> node = item)
{
SetCheckThreeState(*item, knownState != THREESTATE_UNKNOWN ? knownState : CalcWorkspaceState(node->m_lhs));
}
else if (CComQIPtr<CModulePairNode> node = item)
{
SetCheckThreeState(*item, knownState != THREESTATE_UNKNOWN ? knownState : CalcModuleState(node->m_lhs));
}
else if (CComQIPtr<CAttributePairNode> node = item)
{
THREESTATE state = knownState != THREESTATE_UNKNOWN ? knownState : CalcAttributeState(node->m_lhs);
SetState(node->m_lhs, state == THREESTATE_CHECKED);
SetCheckThreeState(*item, state);
}
else if (CComQIPtr<CAttributeHistoryPairNode> node = item)
{
CComQIPtr<CAttributePairNode> parent = item->GetParentNode();
IAttribute * attr = parent->m_lhs;
if (m_attrs[attr].m_version == 0 && *parent->GetChildNode() == *item && !attr->IsSandboxed() && m_attrs[attr].m_checked)
SetCheckThreeState(*item, THREESTATE_RADIO_CHECKED);
else
SetCheckThreeState(*item, CalcAttributeHistoryState(parent->m_lhs, node->m_lhs->GetVersion()));
}
}
void CSelection::SetState(IAttribute * attribute, bool checked)
{
ATLASSERT(attribute);
if (!attribute->Exists())
return;
AttributeStateMap::const_iterator itr = m_attrs.find(attribute);
if (itr == m_attrs.end()) //Does not exist...
{
m_attrs[attribute].m_version = 0;
m_attrs[attribute].m_history = NULL;
m_attrs[attribute].m_checked = false;
}
if (m_attrs[attribute].m_checked != checked)
{
m_attrs[attribute].m_version = 0;
m_attrs[attribute].m_history = NULL;
m_attrs[attribute].m_checked = checked;
}
}
void CSelection::ItemClicked(CTreeNode * item, IAttributeVector * attrs, IAttributeVector * dependants)
{
THREESTATE curState = GetCheckThreeState(*item);
THREESTATE newState = curState == THREESTATE_BUSY_CHECKED ? THREESTATE_UNCHECKED : THREESTATE_CHECKED;
if (CComQIPtr<CWorkspacePairNode> node = item)
{
IWorkspace * ws = node->m_lhs;
SetSelection(item, *attrs, newState == THREESTATE_CHECKED);
}
else if (CComQIPtr<CModulePairNode> node = item)
{
IModule * mod = node->m_lhs;
SetSelection(item, *attrs, newState == THREESTATE_CHECKED);
}
else if (CComQIPtr<CAttributePairNode> node = item)
{
attrs->push_back(node->m_lhs.p);
SetSelection(item, *attrs, newState == THREESTATE_CHECKED);
}
else if (CComQIPtr<CAttributeHistoryPairNode> node = item)
{
CComQIPtr<CAttributePairNode> parent = node->GetParentNode();
CComQIPtr<CModulePairNode> gparent = parent->GetParentNode();
IAttribute * attr = parent->m_lhs;
m_attrs[attr].m_version = node->m_lhs->GetVersion();
m_attrs[attr].m_history = node->m_lhs;
m_attrs[attr].m_checked = true;
// TODO: GJS - This doesn't refresh to the top of nested modules...
Refresh(parent);
}
// Dependents could be anywhere...
if (!dependants->empty())
SetSelection(*dependants, newState == THREESTATE_CHECKED);
}
void CSelection::Refresh(CAttributePairNode * node)
{
SetCheckThreeState(*node, CalcAttributeState(node->m_lhs));
// Recalc Parents.
if (CComQIPtr<CWorkspacePairNode> parent = node->GetParentNode())
SetCheckThreeState(*parent, CalcWorkspaceState(parent->m_lhs));
else if (CComQIPtr<CModulePairNode> parent = node->GetParentNode())
SetCheckThreeState(*parent, CalcModuleState(parent->m_lhs));
// Recalc Children.
for(CComQIPtr<CAttributeHistoryPairNode> child = node->GetChildNode(); child; child = child->GetNextSiblingItem())
{
InitState(child);
}
}
bool CSelection::HasSelection() const
{
for(AttributeStateMap::const_iterator itr = m_attrs.begin(); itr != m_attrs.end(); ++itr)
{
if (itr->second.m_checked)
return true;
}
return false;
}
//class IAttributeSelectionCompare
//{
//public:
// bool operator ()(IAttributeSelection & l, IAttributeSelection & r)
// {
// CString lhsModule = l.m_moduleLabel.c_str();
// int compare = lhsModule.CompareNoCase(r.m_moduleLabel.c_str());
// if (compare == 0)
// {
// CString lhs = l.m_attrLabel.c_str();
// return lhs.CompareNoCase(r.m_attrLabel.c_str()) > 0 ? false : true;
// }
// else
// return compare > 0 ? false : true;
// }
//};
int CSelection::GetSelection(IRepository * rep, IWorkspaceVector & workspaces, IAttributeHistoryVector & attrs) const
{
WTL::CTreeItem item = m_tree->GetRootItem();
while(item)
{
CComPtr<CTreeNode> node = (CTreeNode *)item.GetData();
if (CComQIPtr<CWorkspacePairNode> ws_node = node)
{
switch (GetCheckThreeState(item))
{
case THREESTATE_CHECKED:
case THREESTATE_PARTIALCHECKED:
workspaces.push_back(ws_node->m_lhs.p);
break;
}
}
item = item.GetNextVisible();
}
for(AttributeStateMap::const_iterator itr = m_attrs.begin(); itr != m_attrs.end(); ++itr)
{
if (itr->second.m_checked)
{
if (itr->second.m_version == 0)
attrs.push_back(itr->first->GetAsHistory());
else
attrs.push_back(itr->second.m_history.p);
}
}
IAttributeHistoryCompare compare;
std::sort(attrs.begin(), attrs.end(), compare);
return attrs.size();
}
void CSelection::SetSelection(IAttributeVector & attrs, bool checked)
{
if (attrs.empty())
return;
CWaitCursor wait;
for(IAttributeVector::const_iterator itr = attrs.begin(); itr != attrs.end(); ++itr)
{
SetState(*itr, checked);
}
WTL::CTreeItem item = m_tree->GetRootItem();
while(item)
{
InitState((CTreeNode *)item.GetData());
item = item.GetNextVisible();
}
}
void RecursiveRefreshChildren(CSelection * self, WTL::CTreeItem * _item, THREESTATE knownState)
{
WTL::CTreeItem childItem = _item->GetChild();
while(childItem)
{
self->InitState((CTreeNode *)childItem.GetData(), knownState);
RecursiveRefreshChildren(self, &childItem, knownState);
childItem = childItem.GetNextSibling();
}
}
void CSelection::SetSelection(CTreeNode * _item, IAttributeVector & attrs, bool checked)
{
for(IAttributeVector::const_iterator itr = attrs.begin(); itr != attrs.end(); ++itr)
{
SetState(*itr, checked); //GJS this does not work for dependent attrs!!!!
}
SetCheckThreeState(*_item, checked ? THREESTATE_CHECKED : THREESTATE_UNCHECKED);
// Refresh Ancestors
WTL::CTreeItem item = _item->GetParent();
while(item)
{
InitState((CTreeNode *)item.GetData());
item = item.GetParent();
}
// Refresh Children
if (_item->IsExpanded())
RecursiveRefreshChildren(this, _item, checked ? THREESTATE_CHECKED : THREESTATE_UNCHECKED);
SetSelection(attrs, checked);
}
int CSelection::GetSelection(std::_tstring & attrs) const
{
for(AttributeStateMap::const_iterator itr = m_attrs.begin(); itr != m_attrs.end(); ++itr)
{
if (itr->second.m_checked)
{
if (attrs.length())
attrs += _T("\r\n");
attrs += itr->first->GetQualifiedLabel();
}
}
return attrs.size();
}
THREESTATE CSelection::GetCheckThreeState(HTREEITEM hItem) const
{
ATLASSERT(m_tree && m_tree->IsWindow());
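    // The tree control keeps the state-image index in bits 12-15 of the item
    // state (TVIS_STATEIMAGEMASK). Index 0 means "no state image", so the
    // THREESTATE values are stored shifted up by one; SetCheckThreeState below
    // applies the matching INDEXTOSTATEIMAGEMASK(nCheck + 1) encoding.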
UINT uRet = m_tree->GetItemState(hItem, TVIS_STATEIMAGEMASK);
return (THREESTATE)((uRet >> 12) - 1);
}
BOOL CSelection::SetCheckThreeState(HTREEITEM hItem, THREESTATE state)
{
ATLASSERT(m_tree && m_tree->IsWindow());
int nCheck = (int)state;
ATLASSERT(nCheck > THREESTATE_UNKNOWN && nCheck < THREESTATE_LAST);
return m_tree->SetItemState(hItem, INDEXTOSTATEIMAGEMASK(nCheck+1), TVIS_STATEIMAGEMASK);
}
| 31.911917 | 196 | 0.632408 | dehilsterlexis |
332feca63b89116619245c718a218cf790008234 | 3,347 | hpp | C++ | includes/utils/opengl/Shader.hpp | tnicolas42/bomberman | 493d7243fabb1e5b6d5adfdcb5eb5973869b83a2 | [
"MIT"
] | 6 | 2020-03-13T16:45:13.000Z | 2022-03-30T18:20:48.000Z | includes/utils/opengl/Shader.hpp | tnicolas42/bomberman | 493d7243fabb1e5b6d5adfdcb5eb5973869b83a2 | [
"MIT"
] | 191 | 2020-03-02T14:47:19.000Z | 2020-06-03T08:13:00.000Z | includes/utils/opengl/Shader.hpp | tnicolas42/bomberman | 493d7243fabb1e5b6d5adfdcb5eb5973869b83a2 | [
"MIT"
] | null | null | null |
#ifndef SHADER_HPP_
#define SHADER_HPP_
#include <string>
#include <fstream>
#include <sstream>
#include "includesOpengl.hpp"
/**
* @brief Shader class used to manage shader compilation
*
 * It also adds some tools to set uniforms and activate the shader more easily.
 * Warning! Before instantiating a Shader object you need to create the OpenGL context with glfwCreateWindow.
*/
class Shader {
public:
Shader(std::string const vsPath, std::string const fsPath, std::string const gsPath = "");
Shader(Shader const &src);
virtual ~Shader();
Shader &operator=(Shader const &rhs);
void use();
void unuse();
void setBool(const std::string &name, bool value) const;
void setInt(const std::string &name, int value) const;
void setFloat(const std::string &name, float value) const;
void setDouble(const std::string &name, double value) const;
void setVec2(const std::string &name, float x, float y) const;
void setVec2(const std::string &name, const glm::vec2 &vec) const;
void setVec2Double(const std::string &name, double x, double y) const;
void setVec2Double(const std::string &name, const glm::tvec2<double> &vec) const;
void setVec3(const std::string &name, float x, float y, float z) const;
void setVec3(const std::string &name, const glm::vec3 &vec) const;
void setVec3Double(const std::string &name, double x, double y, double z) const;
void setVec3Double(const std::string &name, const glm::tvec3<double> &vec) const;
void setVec4(const std::string &name, float x, float y, float z, float w) const;
void setVec4(const std::string &name, const glm::vec4 &vec) const;
void setVec4Double(const std::string &name, double x, double y, double z, double w) const;
void setVec4Double(const std::string &name, const glm::tvec4<double> &vec) const;
void setMat2(const std::string &name, const glm::mat2 &mat) const;
void setMat2Double(const std::string &name, const glm::dmat2 &mat) const;
void setMat3(const std::string &name, const glm::mat3 &mat) const;
void setMat3Double(const std::string &name, const glm::dmat3 &mat) const;
void setMat4(const std::string &name, const glm::mat4 &mat) const;
void setMat4Double(const std::string &name, const glm::dmat4 &mat) const;
/**
* @brief Shader exception
*/
class ShaderError : public std::exception {
public:
/**
	 * @brief Function automatically called on errors
*
* @return const char* Error message
*/
virtual const char* what() const throw() = 0;
};
/**
* @brief Shader compilation exception
*/
class ShaderCompileException : public ShaderError {
public:
/**
	 * @brief Function automatically called on errors
*
* @return const char* Error message
*/
virtual const char* what() const throw() {
return ("Shader failed to compile!");
}
};
/**
* @brief Shader linking exception
*/
class ShaderLinkingException : public ShaderError {
public:
/**
	 * @brief Function automatically called on errors
*
* @return const char* Error message
*/
virtual const char* what() const throw() {
return ("Shader program failed to link!");
}
};
uint32_t id; /**< shader ID */
private:
void checkCompileErrors(uint32_t shader, std::string type);
std::string _vsPath;
std::string _gsPath;
std::string _fsPath;
};
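/**
 * Usage sketch (illustrative; the shader file paths are assumptions):
 * @code
 *   Shader shader("shaders/basic_vs.glsl", "shaders/basic_fs.glsl");
 *   shader.use();
 *   shader.setMat4("model", modelMatrix);        // modelMatrix: a glm::mat4
 *   shader.setVec3("lightColor", 1.0f, 1.0f, 1.0f);
 *   // ... issue draw calls ...
 *   shader.unuse();
 * @endcode
 */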
#endif // SHADER_HPP_
| 31.87619 | 107 | 0.683896 | tnicolas42 |
3331757475cabab8b155d46b723b7294567871bc | 1,570 | hpp | C++ | Source/AliveLibAE/FootSwitch.hpp | Leonard2/alive_reversing | c6d85f435e275db1d41e2ec8b4e52454aa932e05 | [
"MIT"
] | null | null | null | Source/AliveLibAE/FootSwitch.hpp | Leonard2/alive_reversing | c6d85f435e275db1d41e2ec8b4e52454aa932e05 | [
"MIT"
] | null | null | null | Source/AliveLibAE/FootSwitch.hpp | Leonard2/alive_reversing | c6d85f435e275db1d41e2ec8b4e52454aa932e05 | [
"MIT"
] | null | null | null |
#pragma once
#include "BaseAnimatedWithPhysicsGameObject.hpp"
#include "Path.hpp"
#include "FunctionFwd.hpp"
enum class SwitchOp : s16;
enum class FootSwitchTriggerBy : s16
{
eOnlyAbe_0 = 0,
eAnyone_1 = 1,
};
struct Path_FootSwitch final : public Path_TLV
{
s16 field_10_id;
Scale_short field_12_scale;
SwitchOp field_14_action;
FootSwitchTriggerBy field_16_trigger_by;
};
ALIVE_ASSERT_SIZEOF_ALWAYS(Path_FootSwitch, 0x18);
struct FootSwitch_Data final
{
s32 field_0_frameTableOffset;
s32 field_4_frameTableOffset;
s16 field_8_maxH;
s16 field_A_frameTableOffset;
};
ALIVE_ASSERT_SIZEOF_ALWAYS(FootSwitch_Data, 0xC);
class FootSwitch final : public ::BaseAnimatedWithPhysicsGameObject
{
public:
EXPORT FootSwitch* ctor_4DE090(Path_FootSwitch* pTlv, s32 tlvInfo);
virtual BaseGameObject* VDestructor(s32 flags) override;
virtual void VUpdate() override;
virtual void VScreenChanged() override;
private:
EXPORT FootSwitch* vdtor_4DE240(s32 flags);
EXPORT void dtor_4DE670();
EXPORT void vScreenChanged_4DE650();
EXPORT void vUpdate_4DE270();
EXPORT BaseAliveGameObject* WhoIsStoodOnMe_4DE700();
private:
s32 field_F4_tlvInfo;
enum class States : s16
{
eWaitForStepOnMe_0 = 0,
eWaitForGetOffMe_1 = 1,
};
States field_F8_state;
s16 field_FA_id;
SwitchOp field_FC_action;
FootSwitchTriggerBy field_FE_trigger_by;
s32 field_100_obj_id;
s16 field_104_bUnknown;
s16 field_106_bFindStander;
};
ALIVE_ASSERT_SIZEOF(FootSwitch, 0x108);
| 23.787879 | 71 | 0.751592 | Leonard2 |
3332cd4f747431afa12ee581f73f6c5d4515a574 | 18,366 | cc | C++ | src/tint/reader/spirv/parser_type.cc | encounter/dawn-cmake | 64a23ce0ede5f232cc209b69d64164ede6810b65 | [
"Apache-2.0"
] | null | null | null | src/tint/reader/spirv/parser_type.cc | encounter/dawn-cmake | 64a23ce0ede5f232cc209b69d64164ede6810b65 | [
"Apache-2.0"
] | null | null | null | src/tint/reader/spirv/parser_type.cc | encounter/dawn-cmake | 64a23ce0ede5f232cc209b69d64164ede6810b65 | [
"Apache-2.0"
] | null | null | null |
// Copyright 2021 The Tint Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/tint/reader/spirv/parser_type.h"
#include <string>
#include <unordered_map>
#include <utility>
#include "src/tint/program_builder.h"
#include "src/tint/utils/hash.h"
#include "src/tint/utils/map.h"
#include "src/tint/utils/unique_allocator.h"
TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Type);
TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Void);
TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Bool);
TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::U32);
TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::F32);
TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::I32);
TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Pointer);
TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Reference);
TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Vector);
TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Matrix);
TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Array);
TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Sampler);
TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Texture);
TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::DepthTexture);
TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::DepthMultisampledTexture);
TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::MultisampledTexture);
TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::SampledTexture);
TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::StorageTexture);
TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Named);
TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Alias);
TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Struct);
namespace tint::reader::spirv {
namespace {
struct PointerHasher {
size_t operator()(const Pointer& t) const { return utils::Hash(t.type, t.storage_class); }
};
struct ReferenceHasher {
size_t operator()(const Reference& t) const { return utils::Hash(t.type, t.storage_class); }
};
struct VectorHasher {
size_t operator()(const Vector& t) const { return utils::Hash(t.type, t.size); }
};
struct MatrixHasher {
size_t operator()(const Matrix& t) const { return utils::Hash(t.type, t.columns, t.rows); }
};
struct ArrayHasher {
size_t operator()(const Array& t) const { return utils::Hash(t.type, t.size, t.stride); }
};
struct AliasHasher {
size_t operator()(const Alias& t) const { return utils::Hash(t.name); }
};
struct StructHasher {
size_t operator()(const Struct& t) const { return utils::Hash(t.name); }
};
struct SamplerHasher {
size_t operator()(const Sampler& s) const { return utils::Hash(s.kind); }
};
struct DepthTextureHasher {
size_t operator()(const DepthTexture& t) const { return utils::Hash(t.dims); }
};
struct DepthMultisampledTextureHasher {
size_t operator()(const DepthMultisampledTexture& t) const { return utils::Hash(t.dims); }
};
struct MultisampledTextureHasher {
size_t operator()(const MultisampledTexture& t) const { return utils::Hash(t.dims, t.type); }
};
struct SampledTextureHasher {
size_t operator()(const SampledTexture& t) const { return utils::Hash(t.dims, t.type); }
};
struct StorageTextureHasher {
size_t operator()(const StorageTexture& t) const {
return utils::Hash(t.dims, t.format, t.access);
}
};
} // namespace
// Equality operators
//! @cond Doxygen_Suppress
static bool operator==(const Pointer& a, const Pointer& b) {
return a.type == b.type && a.storage_class == b.storage_class;
}
static bool operator==(const Reference& a, const Reference& b) {
return a.type == b.type && a.storage_class == b.storage_class;
}
static bool operator==(const Vector& a, const Vector& b) {
return a.type == b.type && a.size == b.size;
}
static bool operator==(const Matrix& a, const Matrix& b) {
return a.type == b.type && a.columns == b.columns && a.rows == b.rows;
}
static bool operator==(const Array& a, const Array& b) {
return a.type == b.type && a.size == b.size && a.stride == b.stride;
}
static bool operator==(const Named& a, const Named& b) {
return a.name == b.name;
}
static bool operator==(const Sampler& a, const Sampler& b) {
return a.kind == b.kind;
}
static bool operator==(const DepthTexture& a, const DepthTexture& b) {
return a.dims == b.dims;
}
static bool operator==(const DepthMultisampledTexture& a, const DepthMultisampledTexture& b) {
return a.dims == b.dims;
}
static bool operator==(const MultisampledTexture& a, const MultisampledTexture& b) {
return a.dims == b.dims && a.type == b.type;
}
static bool operator==(const SampledTexture& a, const SampledTexture& b) {
return a.dims == b.dims && a.type == b.type;
}
static bool operator==(const StorageTexture& a, const StorageTexture& b) {
return a.dims == b.dims && a.format == b.format;
}
//! @endcond
const ast::Type* Void::Build(ProgramBuilder& b) const {
return b.ty.void_();
}
const ast::Type* Bool::Build(ProgramBuilder& b) const {
return b.ty.bool_();
}
const ast::Type* U32::Build(ProgramBuilder& b) const {
return b.ty.u32();
}
const ast::Type* F32::Build(ProgramBuilder& b) const {
return b.ty.f32();
}
const ast::Type* I32::Build(ProgramBuilder& b) const {
return b.ty.i32();
}
Pointer::Pointer(const Type* t, ast::StorageClass s) : type(t), storage_class(s) {}
Pointer::Pointer(const Pointer&) = default;
const ast::Type* Pointer::Build(ProgramBuilder& b) const {
return b.ty.pointer(type->Build(b), storage_class);
}
Reference::Reference(const Type* t, ast::StorageClass s) : type(t), storage_class(s) {}
Reference::Reference(const Reference&) = default;
const ast::Type* Reference::Build(ProgramBuilder& b) const {
return type->Build(b);
}
Vector::Vector(const Type* t, uint32_t s) : type(t), size(s) {}
Vector::Vector(const Vector&) = default;
const ast::Type* Vector::Build(ProgramBuilder& b) const {
return b.ty.vec(type->Build(b), size);
}
Matrix::Matrix(const Type* t, uint32_t c, uint32_t r) : type(t), columns(c), rows(r) {}
Matrix::Matrix(const Matrix&) = default;
const ast::Type* Matrix::Build(ProgramBuilder& b) const {
return b.ty.mat(type->Build(b), columns, rows);
}
Array::Array(const Type* t, uint32_t sz, uint32_t st) : type(t), size(sz), stride(st) {}
Array::Array(const Array&) = default;
const ast::Type* Array::Build(ProgramBuilder& b) const {
if (size > 0) {
return b.ty.array(type->Build(b), u32(size), stride);
} else {
return b.ty.array(type->Build(b), nullptr, stride);
}
}
Sampler::Sampler(ast::SamplerKind k) : kind(k) {}
Sampler::Sampler(const Sampler&) = default;
const ast::Type* Sampler::Build(ProgramBuilder& b) const {
return b.ty.sampler(kind);
}
Texture::Texture(ast::TextureDimension d) : dims(d) {}
Texture::Texture(const Texture&) = default;
DepthTexture::DepthTexture(ast::TextureDimension d) : Base(d) {}
DepthTexture::DepthTexture(const DepthTexture&) = default;
const ast::Type* DepthTexture::Build(ProgramBuilder& b) const {
return b.ty.depth_texture(dims);
}
DepthMultisampledTexture::DepthMultisampledTexture(ast::TextureDimension d) : Base(d) {}
DepthMultisampledTexture::DepthMultisampledTexture(const DepthMultisampledTexture&) = default;
const ast::Type* DepthMultisampledTexture::Build(ProgramBuilder& b) const {
return b.ty.depth_multisampled_texture(dims);
}
MultisampledTexture::MultisampledTexture(ast::TextureDimension d, const Type* t)
: Base(d), type(t) {}
MultisampledTexture::MultisampledTexture(const MultisampledTexture&) = default;
const ast::Type* MultisampledTexture::Build(ProgramBuilder& b) const {
return b.ty.multisampled_texture(dims, type->Build(b));
}
SampledTexture::SampledTexture(ast::TextureDimension d, const Type* t) : Base(d), type(t) {}
SampledTexture::SampledTexture(const SampledTexture&) = default;
const ast::Type* SampledTexture::Build(ProgramBuilder& b) const {
return b.ty.sampled_texture(dims, type->Build(b));
}
StorageTexture::StorageTexture(ast::TextureDimension d, ast::TexelFormat f, ast::Access a)
: Base(d), format(f), access(a) {}
StorageTexture::StorageTexture(const StorageTexture&) = default;
const ast::Type* StorageTexture::Build(ProgramBuilder& b) const {
return b.ty.storage_texture(dims, format, access);
}
Named::Named(Symbol n) : name(n) {}
Named::Named(const Named&) = default;
Named::~Named() = default;
Alias::Alias(Symbol n, const Type* ty) : Base(n), type(ty) {}
Alias::Alias(const Alias&) = default;
const ast::Type* Alias::Build(ProgramBuilder& b) const {
return b.ty.type_name(name);
}
Struct::Struct(Symbol n, TypeList m) : Base(n), members(std::move(m)) {}
Struct::Struct(const Struct&) = default;
Struct::~Struct() = default;
const ast::Type* Struct::Build(ProgramBuilder& b) const {
return b.ty.type_name(name);
}
/// The PIMPL state of the Types object.
struct TypeManager::State {
/// The allocator of primitive types
utils::BlockAllocator<Type> allocator_;
/// The lazily-created Void type
spirv::Void const* void_ = nullptr;
/// The lazily-created Bool type
spirv::Bool const* bool_ = nullptr;
/// The lazily-created U32 type
spirv::U32 const* u32_ = nullptr;
/// The lazily-created F32 type
spirv::F32 const* f32_ = nullptr;
/// The lazily-created I32 type
spirv::I32 const* i32_ = nullptr;
/// Unique Pointer instances
utils::UniqueAllocator<spirv::Pointer, PointerHasher> pointers_;
/// Unique Reference instances
utils::UniqueAllocator<spirv::Reference, ReferenceHasher> references_;
/// Unique Vector instances
utils::UniqueAllocator<spirv::Vector, VectorHasher> vectors_;
/// Unique Matrix instances
utils::UniqueAllocator<spirv::Matrix, MatrixHasher> matrices_;
/// Unique Array instances
utils::UniqueAllocator<spirv::Array, ArrayHasher> arrays_;
/// Unique Alias instances
utils::UniqueAllocator<spirv::Alias, AliasHasher> aliases_;
/// Unique Struct instances
utils::UniqueAllocator<spirv::Struct, StructHasher> structs_;
/// Unique Sampler instances
utils::UniqueAllocator<spirv::Sampler, SamplerHasher> samplers_;
/// Unique DepthTexture instances
utils::UniqueAllocator<spirv::DepthTexture, DepthTextureHasher> depth_textures_;
/// Unique DepthMultisampledTexture instances
utils::UniqueAllocator<spirv::DepthMultisampledTexture, DepthMultisampledTextureHasher>
depth_multisampled_textures_;
/// Unique MultisampledTexture instances
utils::UniqueAllocator<spirv::MultisampledTexture, MultisampledTextureHasher>
multisampled_textures_;
/// Unique SampledTexture instances
utils::UniqueAllocator<spirv::SampledTexture, SampledTextureHasher> sampled_textures_;
/// Unique StorageTexture instances
utils::UniqueAllocator<spirv::StorageTexture, StorageTextureHasher> storage_textures_;
};
const Type* Type::UnwrapPtr() const {
const Type* type = this;
while (auto* ptr = type->As<Pointer>()) {
type = ptr->type;
}
return type;
}
const Type* Type::UnwrapRef() const {
const Type* type = this;
while (auto* ptr = type->As<Reference>()) {
type = ptr->type;
}
return type;
}
const Type* Type::UnwrapAlias() const {
const Type* type = this;
while (auto* alias = type->As<Alias>()) {
type = alias->type;
}
return type;
}
const Type* Type::UnwrapAll() const {
auto* type = this;
while (true) {
if (auto* alias = type->As<Alias>()) {
type = alias->type;
} else if (auto* ptr = type->As<Pointer>()) {
type = ptr->type;
} else {
break;
}
}
return type;
}
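// Illustrative note (added): the 'Unwrap*' helpers above each peel one kind
// of wrapper, while 'UnwrapAll' interleaves them.  For a type 'a' built as
// Alias -> Pointer -> I32:
//   a->UnwrapAlias() yields the Pointer,
//   a->UnwrapPtr()   yields 'a' itself (the outermost layer is an Alias,
//                    not a Pointer), and
//   a->UnwrapAll()   yields the underlying I32.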
bool Type::IsFloatScalar() const {
return Is<F32>();
}
bool Type::IsFloatScalarOrVector() const {
return IsFloatScalar() || IsFloatVector();
}
bool Type::IsFloatVector() const {
return Is([](const Vector* v) { return v->type->IsFloatScalar(); });
}
bool Type::IsIntegerScalar() const {
return IsAnyOf<U32, I32>();
}
bool Type::IsIntegerScalarOrVector() const {
return IsUnsignedScalarOrVector() || IsSignedScalarOrVector();
}
bool Type::IsScalar() const {
return IsAnyOf<F32, U32, I32, Bool>();
}
bool Type::IsSignedIntegerVector() const {
return Is([](const Vector* v) { return v->type->Is<I32>(); });
}
bool Type::IsSignedScalarOrVector() const {
return Is<I32>() || IsSignedIntegerVector();
}
bool Type::IsUnsignedIntegerVector() const {
return Is([](const Vector* v) { return v->type->Is<U32>(); });
}
bool Type::IsUnsignedScalarOrVector() const {
return Is<U32>() || IsUnsignedIntegerVector();
}
TypeManager::TypeManager() {
state = std::make_unique<State>();
}
TypeManager::~TypeManager() = default;
const spirv::Void* TypeManager::Void() {
if (!state->void_) {
state->void_ = state->allocator_.Create<spirv::Void>();
}
return state->void_;
}
const spirv::Bool* TypeManager::Bool() {
if (!state->bool_) {
state->bool_ = state->allocator_.Create<spirv::Bool>();
}
return state->bool_;
}
const spirv::U32* TypeManager::U32() {
if (!state->u32_) {
state->u32_ = state->allocator_.Create<spirv::U32>();
}
return state->u32_;
}
const spirv::F32* TypeManager::F32() {
if (!state->f32_) {
state->f32_ = state->allocator_.Create<spirv::F32>();
}
return state->f32_;
}
const spirv::I32* TypeManager::I32() {
if (!state->i32_) {
state->i32_ = state->allocator_.Create<spirv::I32>();
}
return state->i32_;
}
const spirv::Pointer* TypeManager::Pointer(const Type* el, ast::StorageClass sc) {
return state->pointers_.Get(el, sc);
}
const spirv::Reference* TypeManager::Reference(const Type* el, ast::StorageClass sc) {
return state->references_.Get(el, sc);
}
const spirv::Vector* TypeManager::Vector(const Type* el, uint32_t size) {
return state->vectors_.Get(el, size);
}
const spirv::Matrix* TypeManager::Matrix(const Type* el, uint32_t columns, uint32_t rows) {
return state->matrices_.Get(el, columns, rows);
}
const spirv::Array* TypeManager::Array(const Type* el, uint32_t size, uint32_t stride) {
return state->arrays_.Get(el, size, stride);
}
const spirv::Alias* TypeManager::Alias(Symbol name, const Type* ty) {
return state->aliases_.Get(name, ty);
}
const spirv::Struct* TypeManager::Struct(Symbol name, TypeList members) {
return state->structs_.Get(name, std::move(members));
}
const spirv::Sampler* TypeManager::Sampler(ast::SamplerKind kind) {
return state->samplers_.Get(kind);
}
const spirv::DepthTexture* TypeManager::DepthTexture(ast::TextureDimension dims) {
return state->depth_textures_.Get(dims);
}
const spirv::DepthMultisampledTexture* TypeManager::DepthMultisampledTexture(
ast::TextureDimension dims) {
return state->depth_multisampled_textures_.Get(dims);
}
const spirv::MultisampledTexture* TypeManager::MultisampledTexture(ast::TextureDimension dims,
const Type* ty) {
return state->multisampled_textures_.Get(dims, ty);
}
const spirv::SampledTexture* TypeManager::SampledTexture(ast::TextureDimension dims,
const Type* ty) {
return state->sampled_textures_.Get(dims, ty);
}
const spirv::StorageTexture* TypeManager::StorageTexture(ast::TextureDimension dims,
ast::TexelFormat fmt,
ast::Access access) {
return state->storage_textures_.Get(dims, fmt, access);
}
// Debug String() methods for Type classes. Only enabled in debug builds.
#ifndef NDEBUG
std::string Void::String() const {
return "void";
}
std::string Bool::String() const {
return "bool";
}
std::string U32::String() const {
return "u32";
}
std::string F32::String() const {
return "f32";
}
std::string I32::String() const {
return "i32";
}
std::string Pointer::String() const {
    std::stringstream ss;
    ss << "ptr<" << std::string(ast::ToString(storage_class)) << ", "
       << type->String() << ">";
    return ss.str();
}
std::string Reference::String() const {
    std::stringstream ss;
    ss << "ref<" << std::string(ast::ToString(storage_class)) << ", "
       << type->String() << ">";
    return ss.str();
}
std::string Vector::String() const {
std::stringstream ss;
ss << "vec" << size << "<" << type->String() << ">";
return ss.str();
}
std::string Matrix::String() const {
std::stringstream ss;
ss << "mat" << columns << "x" << rows << "<" << type->String() << ">";
return ss.str();
}
std::string Array::String() const {
std::stringstream ss;
ss << "array<" << type->String() << ", " << size << ", " << stride << ">";
return ss.str();
}
std::string Sampler::String() const {
switch (kind) {
case ast::SamplerKind::kSampler:
return "sampler";
case ast::SamplerKind::kComparisonSampler:
return "sampler_comparison";
}
return "<unknown sampler>";
}
std::string DepthTexture::String() const {
std::stringstream ss;
ss << "depth_" << dims;
return ss.str();
}
std::string DepthMultisampledTexture::String() const {
std::stringstream ss;
ss << "depth_multisampled_" << dims;
return ss.str();
}
std::string MultisampledTexture::String() const {
std::stringstream ss;
ss << "texture_multisampled_" << dims << "<" << type << ">";
return ss.str();
}
std::string SampledTexture::String() const {
std::stringstream ss;
ss << "texture_" << dims << "<" << type << ">";
return ss.str();
}
std::string StorageTexture::String() const {
std::stringstream ss;
ss << "texture_storage_" << dims << "<" << format << ", " << access << ">";
return ss.str();
}
std::string Named::String() const {
return name.to_str();
}
#endif // NDEBUG
} // namespace tint::reader::spirv
// ----------------------------------------------------------------------------
// File: sensing/preprocessor/pointcloud/pointcloud_preprocessor/src/
//       outlier_filter/voxel_grid_outlier_filter_nodelet.cpp
// Repo: kmiya/AutowareArchitectureProposal.iv  (license: Apache-2.0)
// ----------------------------------------------------------------------------
// Copyright 2020 Tier IV, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include "pointcloud_preprocessor/outlier_filter/voxel_grid_outlier_filter_nodelet.hpp"
#include "pcl/kdtree/kdtree_flann.h"
#include "pcl/search/kdtree.h"
#include "pcl/segmentation/segment_differences.h"
namespace pointcloud_preprocessor
{
VoxelGridOutlierFilterComponent::VoxelGridOutlierFilterComponent(
const rclcpp::NodeOptions & options)
: Filter("VoxelGridOutlierFilter", options)
{
// set initial parameters
{
voxel_size_x_ = static_cast<double>(declare_parameter("voxel_size_x", 0.3));
voxel_size_y_ = static_cast<double>(declare_parameter("voxel_size_y", 0.3));
voxel_size_z_ = static_cast<double>(declare_parameter("voxel_size_z", 0.1));
voxel_points_threshold_ = static_cast<int>(declare_parameter("voxel_points_threshold", 2));
}
using std::placeholders::_1;
set_param_res_ = this->add_on_set_parameters_callback(
std::bind(&VoxelGridOutlierFilterComponent::paramCallback, this, _1));
}
void VoxelGridOutlierFilterComponent::filter(
const PointCloud2ConstPtr & input, const IndicesPtr & indices, PointCloud2 & output)
{
boost::mutex::scoped_lock lock(mutex_);
pcl::PointCloud<pcl::PointXYZ>::Ptr pcl_input(new pcl::PointCloud<pcl::PointXYZ>);
pcl::PointCloud<pcl::PointXYZ>::Ptr pcl_voxelized_input(new pcl::PointCloud<pcl::PointXYZ>);
pcl::PointCloud<pcl::PointXYZ>::Ptr pcl_output(new pcl::PointCloud<pcl::PointXYZ>);
pcl::fromROSMsg(*input, *pcl_input);
pcl_voxelized_input->points.reserve(pcl_input->points.size());
voxel_filter.setInputCloud(pcl_input);
voxel_filter.setSaveLeafLayout(true);
voxel_filter.setLeafSize(voxel_size_x_, voxel_size_y_, voxel_size_z_);
voxel_filter.setMinimumPointsNumberPerVoxel(voxel_points_threshold_);
voxel_filter.filter(*pcl_voxelized_input);
pcl_output->points.reserve(pcl_input->points.size());
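  // Pass-through selection (explanatory note): 'getCentroidIndexAt' returns
  // -1 for grid coordinates whose voxel was discarded by the occupancy
  // threshold, so the loop below keeps exactly those input points whose
  // voxel contains at least 'voxel_points_threshold_' points.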
for (size_t i = 0; i < pcl_input->points.size(); ++i) {
const int index = voxel_filter.getCentroidIndexAt(
voxel_filter.getGridCoordinates(
pcl_input->points.at(i).x, pcl_input->points.at(i).y, pcl_input->points.at(i).z));
if (index != -1) { // not empty voxel
pcl_output->points.push_back(pcl_input->points.at(i));
}
}
pcl::toROSMsg(*pcl_output, output);
output.header = input->header;
}
rcl_interfaces::msg::SetParametersResult VoxelGridOutlierFilterComponent::paramCallback(
const std::vector<rclcpp::Parameter> & p)
{
boost::mutex::scoped_lock lock(mutex_);
  if (get_param(p, "voxel_size_x", voxel_size_x_)) {
    RCLCPP_DEBUG(get_logger(), "Setting new voxel size x to: %f.", voxel_size_x_);
  }
  if (get_param(p, "voxel_size_y", voxel_size_y_)) {
    RCLCPP_DEBUG(get_logger(), "Setting new voxel size y to: %f.", voxel_size_y_);
  }
  if (get_param(p, "voxel_size_z", voxel_size_z_)) {
    RCLCPP_DEBUG(get_logger(), "Setting new voxel size z to: %f.", voxel_size_z_);
  }
  if (get_param(p, "voxel_points_threshold", voxel_points_threshold_)) {
    RCLCPP_DEBUG(
      get_logger(), "Setting new voxel points threshold to: %d.", voxel_points_threshold_);
  }
rcl_interfaces::msg::SetParametersResult result;
result.successful = true;
result.reason = "success";
return result;
}
} // namespace pointcloud_preprocessor
#include "rclcpp_components/register_node_macro.hpp"
RCLCPP_COMPONENTS_REGISTER_NODE(pointcloud_preprocessor::VoxelGridOutlierFilterComponent)
// ----------------------------------------------------------------------------
// File: lib/ai/strategy.cpp
// Repo: TaimoorRana/Risk  (license: MIT)
// ----------------------------------------------------------------------------
#include "strategy.h"
#include "librisk.h"
Strategy::Strategy() {
this->driver = GameDriver::getInstance();
}
/**
* @brief A signal sent to the Strategy class from the game driver to indicate
* that a computer-controlled player should made their move.
*
* The AI strategy implementations override each of the fooPhase() methods
* which return the name(s) of the country or countries to act upon.
*
* Empty string indicates the AI wishes to make no move, or there are none
* possible.
*/
void Strategy::takeAction(Mode mode) {
RiskMap* map = this->driver->getRiskMap();
if (mode == REINFORCEMENT) {
std::string countryName = this->reinforcePhase();
if (countryName == "") {
return;
}
Country* country = map->getCountry(countryName);
Player* player = map->getPlayer(country->getPlayer());
this->driver->reinforceCountry(player, country, player->getReinforcements());
}
else if (mode == ATTACK) {
std::pair<std::string, std::string> countryNames = this->attackPhase();
if (countryNames.first == "" || countryNames.second == "") {
return;
}
driver->attackCountry(map->getCountry(countryNames.first), map->getCountry(countryNames.second));
}
else if (mode == FORTIFICATION) {
std::pair<std::string, std::string> countryNames = this->fortifyPhase();
if (countryNames.first == "" || countryNames.second == "") {
return;
}
// Given the two countries, fortify so that the armies are as equal as possible.
Country* origin = map->getCountry(countryNames.first);
Country* destination = map->getCountry(countryNames.second);
int splitDifference = std::abs(origin->getArmies() - destination->getArmies()) / 2;
this->driver->fortifyCountry(origin, destination, splitDifference);
}
}
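/**
 * @brief Illustrative sketch only (added for exposition, not used by the
 * engine): the driver is expected to signal 'takeAction' once per phase of
 * an AI player's turn, in this order. Assumes the 'Mode' enumerators are
 * visible at namespace scope, as they are inside 'takeAction' above.
 */
static void runAiTurn(Strategy* ai) {
	ai->takeAction(REINFORCEMENT);   // place earned armies first
	ai->takeAction(ATTACK);          // then pick an attack, if any
	ai->takeAction(FORTIFICATION);   // finally rebalance armies
}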
/**
* @brief Reinforcement phase decision making. Places all reinforcements on the
* country with the fewest armies.
*/
std::string Strategy::reinforcePhase() {
RiskMap* map = this->driver->getRiskMap();
std::string playerName = this->driver->getCurrentPlayerName();
int minArmies = 10000;
Country* minArmiesCountry = nullptr;
// add the reinforcements to the player
int numCardsSelected = map->getPlayer(playerName)->getCards();
int armiesEarned = convertCardsToReinforcements(numCardsSelected);
if (armiesEarned > 0) {
this->driver->addCardsTradeReinforcements(armiesEarned);
this->driver->updatePlayerCards(-numCardsSelected);
}
// Reinforce the weakest country
for (const std::string countryName : map->getCountriesOwnedByPlayer(playerName)) {
Country* country = map->getCountry(countryName);
int armies = country->getArmies();
if (armies < minArmies) {
minArmies = armies;
minArmiesCountry = country;
}
}
if (minArmiesCountry == nullptr) {
return "";
}
return minArmiesCountry->getName();
}
/**
* @brief Fortification phase decision making
*/
std::pair<std::string, std::string> Strategy::fortifyPhase() {
// Not implemented in the current AI.
return std::pair<std::string, std::string>("", "");
}
// ----------------------------------------------------------------------------
// File: TwitchXX/PeriodType.cpp
// Repo: burannah/TwitchXX  (license: BSD-3-Clause)
// ----------------------------------------------------------------------------
//
// Created by buran on 14/03/18.
//
#include <PeriodType.h>
#include <TwitchException.h>
std::string TwitchXX::PeriodType::toString(TwitchXX::PeriodType::Value v)
{
static const std::map<Value, std::string> strs{{Value::ALL, "all"}, {Value::DAY, "day"}, {Value::WEEK, "week"}, {Value::MONTH, "month"}};
try
{
return strs.at(v);
}
catch(const std::out_of_range& e)
{
throw TwitchException("Value type is not supported");
}
}
TwitchXX::PeriodType::Value TwitchXX::PeriodType::fromString(const std::string &s)
{
static const std::map<std::string,Value> strs{{"all", Value::ALL}, {"day", Value::DAY}, {"week", Value::WEEK}, {"month", Value::MONTH}};
try
{
return strs.at(s);
}
catch(const std::out_of_range& e)
{
throw TwitchException("Can not convert string to period type");
}
}
TwitchXX::PeriodType::Value TwitchXX::PeriodType::fromInt(int i)
{
if(static_cast<int>(Value::ALL) > i || static_cast<int>(Value::MONTH) < i)
{
throw TwitchException("Value is not within PeriodType range");
}
return static_cast<Value>(i);
}
| 27.071429 | 141 | 0.626209 | burannah |
333c389d97af347535bf2571053a19641ff8cb80 | 1,307 | cpp | C++ | libs/network/example/http/hello_world_client.cpp | antoinelefloch/cpp-netlib | 5eb9b5550a10d06f064ee9883c7d942d3426f31b | [
"BSL-1.0"
] | 1 | 2018-01-28T14:30:42.000Z | 2018-01-28T14:30:42.000Z | libs/network/example/http/hello_world_client.cpp | antoinelefloch/cpp-netlib | 5eb9b5550a10d06f064ee9883c7d942d3426f31b | [
"BSL-1.0"
] | 1 | 2018-08-10T04:47:12.000Z | 2018-08-10T13:54:57.000Z | libs/network/example/http/hello_world_client.cpp | antoinelefloch/cpp-netlib | 5eb9b5550a10d06f064ee9883c7d942d3426f31b | [
"BSL-1.0"
] | 5 | 2017-12-28T12:42:25.000Z | 2021-07-01T07:41:53.000Z | // Copyright (c) Glyn Matthews 2010.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//[ hello_world_client_main
/*`
This is a part of the 'Hello World' example. We create a client
object and make a single HTTP request. If we use make this request
to the `hello_world_server`, then the output is simply "Hello,
World!".
*/
#include <boost/network/protocol/http/client.hpp>
#include <iostream>
namespace http = boost::network::http;
int
main(int argc, char *argv[]) {
if (argc != 2) {
std::cerr << "Usage: " << argv[0] << " url" << std::endl;
return 1;
}
try {
/*<< Creates the client. >>*/
http::client client;
/*<< Creates a request using a URI supplied on the command
line. >>*/
http::client::request request(argv[1]);
/*<< Gets a response from the HTTP server. >>*/
http::client::response response = client.get(request);
/*<< Prints the response body to the console. >>*/
std::cout << body(response) << std::endl;
}
catch (std::exception &e) {
std::cerr << e.what() << std::endl;
return 1;
}
return 0;
}
//]
// ----------------------------------------------------------------------------
// File: assets/code_box/bk_2748_slow.cpp
// Repo: happyOBO/happyOBO.github.io  (license: MIT)
// ----------------------------------------------------------------------------
#include <iostream>
#include <vector>
#include <utility> // pair
using namespace std;
// Dynamic programming practice problem (naive recursion -- this is the
// intentionally slow version).  Note: Fibonacci numbers up to fib(89) do not
// fit in a 32-bit int, so 'long long' is used for the result.
long long pibonacci(long long n)
{
    if(n <= 1)
    {
        return n;
    }
    else
    {
        return pibonacci(n-1) + pibonacci(n-2);
    }
}
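// For contrast (added sketch, not part of the original "slow" solution):
// memoizing the same recursion drops the cost from exponential to linear.
// The table size assumes n <= 90, matching the loop in main() below.
long long pibonacci_memo(int n)
{
    static vector<long long> memo(91, -1);
    if(n <= 1)
    {
        return n;
    }
    if(memo[n] != -1)
    {
        return memo[n];
    }
    return memo[n] = pibonacci_memo(n-1) + pibonacci_memo(n-2);
}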
int main(void)
{
// int n;
// cin>>n;
for(int i = 0; i< 90;i++)
{
cout<<pibonacci(i)<<endl;
}
return 0;
}
// ----------------------------------------------------------------------------
// File: graph_tree/HLD_lazy.hpp
// Repo: hotman78/cpplib  (license: CC0-1.0)
// ----------------------------------------------------------------------------
#pragma once
#include"./graph_template.hpp"
#include"../segment_tree/lazy_segment_tree.hpp"
template<typename T,typename E,typename F,typename G,typename H>
class HLD_lazy{
int child_size(const graph& v,int n,int p){
int cnt=0;
for(auto t:v[n]){
if(t!=p)cnt+=child_size(v,t,n);
}
return sz[n]=cnt+1;
}
void make(const graph& v,int root){
sz=new int[v.size()];
vertex=new int[v.size()];
par=new int[v.size()];
head=new int[v.size()];
child_size(v,root,-1);
stack<tuple<int,int>>stk;
stk.emplace(root,-1);
int idx=0;
par[root]=root;
head[root]=root;
while(!stk.empty()){
int n,p;
tie(n,p)=stk.top();
stk.pop();
vertex[n]=idx++;
int mx=0,heavy=-1;
for(auto t:v[n])if(t!=p&&mx<sz[t]){
mx=sz[t];
heavy=t;
}
for(auto t:v[n]){
if(t!=heavy&&t!=p){
par[t]=n;
head[t]=t;
stk.emplace(t,n);
}
}
if(heavy!=-1){
par[heavy]=par[n];
head[heavy]=head[n];
stk.emplace(heavy,n);
}
}
}
int* sz;
int* vertex;
int* par;
int* head;
F _f;G _g;H _h;
lazy_segment_tree<T,E,F,G,H>* seg;
public:
HLD_lazy(const graph& v,int root=0,F f=F(),G g=G(),H h=H()):_f(f),_g(g),_h(h){
make(v,root);
seg=new lazy_segment_tree<T,E,F,G,H>(v.size(),f,g,h);
}
// HLD_lazy(const graph& v,const vector<T>& a,int root=0,F f=F(),G g=G(),H h=H()):_f(f),_g(g),_h(h){
// vector<T>tmp(v.size());
// make(v,root);
// for(int i=0;i<(int)v.size();i++){
// tmp[vertex[i]]=a[i];
// }
// seg=new lazy_segment_tree(tmp,f,g,h);
// }
int lca(int l,int r){
while(1){
if(head[l]==head[r])return sz[l]>sz[r]?l:r;
else if(sz[head[l]]>sz[head[r]])r=par[r];
else l=par[l];
}
}
inline void update_vertex(int u,E x){
seg->update(vertex[u],vertex[u],x);
}
inline maybe<T> get_vertex(int u){
return seg->get(vertex[u],vertex[u]);
}
inline void update_subtree(int u,E x){
		seg->update(vertex[u],vertex[u]+sz[u]-1,x);
}
inline maybe<T> get_subtree(int u){
return seg->get(vertex[u],vertex[u]+sz[u]-1);
}
void update_path(int u,int v,E x){
while(1){
if(head[u]==head[v]){
seg->update(vertex[u],vertex[v],x);
break;
}
else if(sz[head[u]]>sz[head[v]]){
seg->update(vertex[v],vertex[head[v]],x);
v=par[v];
}
else{
seg->update(vertex[u],vertex[head[u]],x);
u=par[u];
}
}
}
T get_path(int u,int v){
auto f=expand<T,F>(_f);
maybe<T> res;
while(1){
if(head[u]==head[v]){
return f(res,seg->get(vertex[u],vertex[v]));
}
else if(sz[head[u]]>sz[head[v]]){
res=f(res,seg->get(vertex[v],vertex[head[v]]));
v=par[v];
}
else{
res=f(res,seg->get(vertex[u],vertex[head[u]]));
u=par[u];
}
}
}
};
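// Illustrative usage sketch (added for exposition; 'F', 'G', and 'H' stand
// for the merge/apply/compose functors that 'lazy_segment_tree' expects, and
// the concrete types below are assumptions, not part of this header):
//
//     graph g = ...;                                   // tree, rooted at 0
//     HLD_lazy<long long, long long, F, G, H> hld(g, 0);
//     hld.update_path(u, v, 5);   // apply 5 to every vertex on the u-v path
//     auto s = hld.get_path(u, v);          // fold F over the u-v path
//     hld.update_subtree(u, 3);             // apply 3 to the subtree of u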
// ----------------------------------------------------------------------------
// File: SDK/ARKSurvivalEvolved_Task_StunForestKaiju_parameters.hpp
// Repo: 2bite/ARK-SDK  (license: MIT)
// ----------------------------------------------------------------------------
#pragma once
// ARKSurvivalEvolved (329.9) SDK
#ifdef _MSC_VER
#pragma pack(push, 0x8)
#endif
#include "ARKSurvivalEvolved_Task_StunForestKaiju_classes.hpp"
namespace sdk
{
//---------------------------------------------------------------------------
//Parameters
//---------------------------------------------------------------------------
// Function Task_StunForestKaiju.Task_StunForestKaiju_C.ReceiveExecute
struct UTask_StunForestKaiju_C_ReceiveExecute_Params
{
class AActor** OwnerActor; // (Parm, ZeroConstructor, IsPlainOldData)
};
// Function Task_StunForestKaiju.Task_StunForestKaiju_C.ExecuteUbergraph_Task_StunForestKaiju
struct UTask_StunForestKaiju_C_ExecuteUbergraph_Task_StunForestKaiju_Params
{
int EntryPoint; // (Parm, ZeroConstructor, IsPlainOldData)
};
}
#ifdef _MSC_VER
#pragma pack(pop)
#endif
// ----------------------------------------------------------------------------
// File: HelicopterGame/Engine.cpp
// Repo: MattGardiner97/HelicopterGame  (license: MIT)
// ----------------------------------------------------------------------------
#include <stdio.h>
#include <string>
#include "Engine.h"
using namespace std;
bool Engine::Init(string WindowTitle) {
TTF_Init();
_graphics = new Graphics;
if (_graphics->Init(WindowTitle) == false)
return false;
_textureManager = new TextureManager(_graphics->GetRenderer());
_time = new Time;
if (_time->Init() == false)
return false;
_input = new Input;
if (_input->Init() == false)
return false;
_textTextureGenerator = new TextTextureGenerator(_graphics->GetRenderer());
_mainMenu = new MainMenu(this);
if (_mainMenu->Init() == false)
return false;
_scoreGUI = new ScoreGUI(this);
if (_scoreGUI->Init() == false)
return false;
_scoreGUI->UpdateScore(0);
_easyDifficulty = new EasyDifficulty;
_mediumDifficulty = new MediumDifficulty;
_hardDifficulty = new HardDifficulty;
_currentDifficulty = _easyDifficulty;
//Load helicopter texture
SDL_Texture* helicopterTexture = _textureManager->LoadFromFile(Constants::HELICOPTER_IMAGE_NAME);
if (helicopterTexture == NULL)
return false;
//Load background texture
SDL_Texture* backgroundTexture = _textureManager->LoadFromFile(Constants::BACKGROUND_IMAGE_NAME);
if (backgroundTexture == NULL)
return false;
//Load obstacle texture
SDL_Texture* obstacleTexture = _textureManager->LoadFromFile(Constants::OBSTACLE_IMAGE_NAME);
if (obstacleTexture == NULL)
return false;
_helicopter = new Helicopter(helicopterTexture, this);
_background = new Background(backgroundTexture, this);
_background2 = new Background(backgroundTexture, this);
_background2->SetX(Constants::WINDOW_WIDTH - 1);
_obstacleManager = new ObstacleManager(obstacleTexture, this);
_obstacleManager->Init();
return true;
}
void Engine::Cleanup() {
if (_graphics != NULL)
{
_graphics->Cleanup();
delete _graphics;
_graphics = NULL;
}
if (_time != NULL) {
_time->Cleanup();
delete _time;
_time = NULL;
}
if (_input != NULL)
{
_input->Cleanup();
delete _input;
_input = NULL;
}
if (_helicopter != NULL)
{
_helicopter->Cleanup();
delete _helicopter;
_helicopter = NULL;
}
if (_background != NULL)
{
_background->Cleanup();
delete _background;
_background = NULL;
}
if (_background2 != NULL)
{
_background2->Cleanup();
delete _background2;
_background2 = NULL;
}
if (_obstacleManager != NULL)
{
_obstacleManager->Cleanup();
delete _obstacleManager;
_obstacleManager = NULL;
}
_currentDifficulty = NULL;
if (_easyDifficulty != NULL)
{
delete _easyDifficulty;
_easyDifficulty = NULL;
}
if (_mediumDifficulty != NULL)
{
delete _mediumDifficulty;
_mediumDifficulty = NULL;
}
if (_hardDifficulty != NULL)
{
delete _hardDifficulty;
_hardDifficulty = NULL;
}
if (_mainMenu != NULL)
{
_mainMenu->Cleanup();
delete _mainMenu;
_mainMenu = NULL;
}
if (_scoreGUI != NULL)
{
_scoreGUI->Cleanup();
delete _scoreGUI;
_scoreGUI = NULL;
}
if (_textTextureGenerator != NULL)
{
delete _textTextureGenerator;
_textTextureGenerator = NULL;
}
SDL_Quit();
IMG_Quit();
}
void Engine::Run() {
while (_input->IsQuitRequested() == false) {
//Update Timer
_time->Update();
//Update Input
_input->Poll();
//Update
_background->Update();
_background2->Update();
_helicopter->Update();
_obstacleManager->Update();
_mainMenu->Update();
if (GameActive == false && _mainMenu->NewGameRequested == true)
NewGame();
//Check collisions
if (_obstacleManager->CheckPlayerCollision(_helicopter->GetBoundingRectangle()))
GameOver();
//Check helicopter offscreen
if (_helicopter->IsOffScreen())
GameOver();
//Clear render target
_graphics->Clear();
//Draw helicopter
_background->Draw();
_background2->Draw();
_obstacleManager->Draw();
_helicopter->Draw();
_mainMenu->Draw();
_scoreGUI->Draw();
//Present render target
_graphics->Present();
SDL_Delay(8);
}
}
Input* Engine::GetInput() {
return _input;
}
Graphics* Engine::GetGraphics() {
return _graphics;
}
Time* Engine::GetTime() {
return _time;
}
Difficulty* Engine::GetCurrentDifficulty() {
return _currentDifficulty;
}
void Engine::GameOver() {
GameActive = false;
_mainMenu->Active = true;
_obstacleManager->Active = false;
_helicopter->Active = false;
_mainMenu->NewGameRequested = false;
}
void Engine::NewGame() {
_mainMenu->NewGameRequested = false;
switch (_mainMenu->SelectedDifficulty)
{
case 0:
_currentDifficulty = _easyDifficulty;
break;
case 1:
_currentDifficulty = _mediumDifficulty;
break;
case 2:
_currentDifficulty = _hardDifficulty;
break;
}
GameActive = true;
_mainMenu->Active = false;
_obstacleManager->Reset();
_helicopter->Reset();
_scoreGUI->UpdateScore(0);
}
TextTextureGenerator* Engine::GetTextTextureGenerator() {
return _textTextureGenerator;
}
void Engine::IncrementScore() {
_scoreGUI->UpdateScore(_scoreGUI->Score + 1);
}
// ----------------------------------------------------------------------------
// File: groups/bsl/bslstl/bslstl_stack.t.cpp
// Repo: eddiepierce/bde  (license: Apache-2.0)
// ----------------------------------------------------------------------------
// bslstl_stack.t.cpp                                                 -*-C++-*-
#include <bslstl_stack.h>
#include <bslstl_vector.h>
#include <bsltf_stdstatefulallocator.h>
#include <bsltf_stdtestallocator.h>
#include <bsltf_templatetestfacility.h>
#include <bsltf_testvaluesarray.h>
#include <bslma_allocator.h>
#include <bslma_default.h>
#include <bslma_defaultallocatorguard.h>
#include <bslma_mallocfreeallocator.h>
#include <bslma_testallocator.h>
#include <bslma_testallocatormonitor.h>
#include <bslalg_rangecompare.h>
#include <bslmf_issame.h>
#include <bslmf_haspointersemantics.h>
#include <bslmf_movableref.h>
#include <bsls_alignmentutil.h>
#include <bsls_asserttest.h>
#include <bsls_bsltestutil.h>
#include <bsls_compilerfeatures.h>
#include <bsls_nameof.h>
#include <bsls_platform.h>
#include <algorithm>
#include <functional>
#include <typeinfo>
#include <cstdio>
#include <cstdlib>
#include <stdlib.h> // atoi
#include <string.h>
// ============================================================================
// ADL SWAP TEST HELPER
// ----------------------------------------------------------------------------
template <class TYPE>
void invokeAdlSwap(TYPE& a, TYPE& b)
// Exchange the values of the specified 'a' and 'b' objects using the
// 'swap' method found by ADL (Argument Dependent Lookup). The behavior
// is undefined unless 'a' and 'b' were created with the same allocator.
{
using namespace bsl;
swap(a, b);
}
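// A minimal sketch of the intent (hypothetical values; not part of the test
// apparatus): because the 'using' directive above is local to the function,
// the unqualified 'swap(a, b)' call is resolved by ADL against the namespace
// of 'TYPE', e.g.:
//..
//  bsl::stack<int> a, b;
//  a.push(1);
//  invokeAdlSwap(a, b);     // finds 'bsl::swap'; 'b' now holds the element
//..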
// The following 'using' directives must come *after* the definition of
// 'invokeAdlSwap' (above).
using namespace BloombergLP;
using namespace bsl;
// ============================================================================
// TEST PLAN
// ----------------------------------------------------------------------------
// Overview
// --------
// The object under test is a container whose interface and contract are
// dictated by the C++ standard.  The general concerns are compliance,
// exception safety, and proper dispatching (for member function templates
// such as 'emplace').  This container is implemented in the form of a class
// template, and thus its proper instantiation for several types is a concern.
// Regarding the allocator template argument, we use mostly a 'bsl::allocator'
// together with a 'bslma::TestAllocator' mechanism, but we also verify the C++
// standard.
//
// The Primary Manipulators and Basic Accessors are decided to be:
//
// Primary Manipulators:
//: o 'push'
//: o 'pop'
//
// Basic Accessors:
//: o 'empty'
//: o 'size'
//: o 'top'
//
// This test plan follows the standard approach for components implementing
// value-semantic containers. We have chosen as *primary* *manipulators* the
// 'push' and 'pop' methods to be used by the generator functions 'g' and
// 'gg'. Note that some manipulators must support aliasing, and those that
// perform memory allocation must be tested for exception neutrality via the
// 'bslma::TestAllocator' component. After the mandatory sequence of cases
// (1--10) for value-semantic types (cases 5 and 10 are not implemented, as
// there is not output or streaming below bslstl), we test each individual
// constructor, manipulator, and accessor in subsequent cases.
//
// Certain standard value-semantic-type test cases are omitted:
//: o BSLX streaming is not (yet) implemented for this class.
//
// Global Concerns:
//: o The test driver is robust w.r.t. reuse in other, similar components.
//: o ACCESSOR methods are declared 'const'.
//: o CREATOR & MANIPULATOR pointer/reference parameters are declared 'const'.
//: o No memory is ever allocated from the global allocator.
//: o Any allocated memory is always from the object allocator.
//: o An object's value is independent of the allocator used to supply memory.
//: o Injected exceptions are safely propagated during memory allocation.
//: o Precondition violations are detected in appropriate build modes.
//
// Global Assumptions:
//: o All explicit memory allocations are presumed to use the global, default,
//: or object allocator.
//: o ACCESSOR methods are 'const' thread-safe.
//: o Individual attribute types are presumed to be *alias-safe*; hence, only
//: certain methods require the testing of this property:
//: o copy-assignment
//: o swap
// ----------------------------------------------------------------------------
// CREATORS
// [ 7] copy c'tor
// [ 2] stack, stack(bslma::Allocator *bA)
// [17] stack(MovableRef container)
// [17] stack(MovableRef container, bslma::Allocator *bA)
// [17] stack(MovableRef stack)
// [17] stack(MovableRef stack, bslma::Allocator *bA)
//
// MANIPULATORS
// [ 9] operator=
// [18] operator=(MovableRef stack)
// [ 8] member swap
// [ 2] Primary Manipulators -- push and pop
// [18] push(MovableRef value)
// [18] emplace(Args&&.. args)
//
// ACCESSORS
// [15] testing empty, size
// [ 4] Primary Accessors
//
// FREE FUNCTIONS
// [12] inequality comparisons: '<', '>', '<=', '>='
// [ 6] equality comparisons: '==', '!='
// [ 5] operator<< (N/A)
// ----------------------------------------------------------------------------
// [16] Usage Example
// [14] testing simple container that does not support allocators
// [13] testing container override of specified 'VALUE'
// [11] type traits
// [10] allocator
// [ 3] Primary generator functions 'gg' and 'ggg'
// [ 1] Breathing Test
// [19] CONCERN: Methods qualified 'noexcept' in standard are so implemented.
//
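//
// A minimal sketch of the primary-manipulator protocol that the generator
// functions and test cases below rely on (hypothetical values):
//..
//  bsl::stack<int> mX;    const bsl::stack<int>& X = mX;
//  mX.push(1);  mX.push(2);            // build a value bottom-to-top
//  ASSERT(2 == X.size());  ASSERT(2 == X.top());
//  mX.pop();                           // unwind, exposing the prior top
//  ASSERT(1 == X.top());
//..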
// ============================================================================
// STANDARD BDE ASSERT TEST MACROS
// ----------------------------------------------------------------------------
// NOTE: THIS IS A LOW-LEVEL COMPONENT AND MAY NOT USE ANY C++ LIBRARY
// FUNCTIONS, INCLUDING IOSTREAMS.
namespace {
int testStatus = 0;
void aSsErT(bool b, const char *s, int i)
{
if (b) {
printf("Error " __FILE__ "(%d): %s (failed)\n", i, s);
if (testStatus >= 0 && testStatus <= 100) ++testStatus;
}
}
} // close unnamed namespace
//=============================================================================
// STANDARD BDE TEST DRIVER MACROS
//-----------------------------------------------------------------------------
#define ASSERT BSLS_BSLTESTUTIL_ASSERT
#define LOOP_ASSERT BSLS_BSLTESTUTIL_LOOP_ASSERT
#define LOOP0_ASSERT BSLS_BSLTESTUTIL_LOOP0_ASSERT
#define LOOP1_ASSERT BSLS_BSLTESTUTIL_LOOP1_ASSERT
#define LOOP2_ASSERT BSLS_BSLTESTUTIL_LOOP2_ASSERT
#define LOOP3_ASSERT BSLS_BSLTESTUTIL_LOOP3_ASSERT
#define LOOP4_ASSERT BSLS_BSLTESTUTIL_LOOP4_ASSERT
#define LOOP5_ASSERT BSLS_BSLTESTUTIL_LOOP5_ASSERT
#define LOOP6_ASSERT BSLS_BSLTESTUTIL_LOOP6_ASSERT
#define ASSERTV BSLS_BSLTESTUTIL_ASSERTV
#define Q BSLS_BSLTESTUTIL_Q // Quote identifier literally.
#define P BSLS_BSLTESTUTIL_P // Print identifier and value.
#define P_ BSLS_BSLTESTUTIL_P_ // P(X) without '\n'.
#define T_ BSLS_BSLTESTUTIL_T_ // Print a tab (w/o newline).
#define L_ BSLS_BSLTESTUTIL_L_ // current Line number
#define RUN_EACH_TYPE BSLTF_TEMPLATETESTFACILITY_RUN_EACH_TYPE
// ============================================================================
// NEGATIVE-TEST MACRO ABBREVIATIONS
// ----------------------------------------------------------------------------
#define ASSERT_SAFE_PASS(EXPR) BSLS_ASSERTTEST_ASSERT_SAFE_PASS(EXPR)
#define ASSERT_SAFE_FAIL(EXPR) BSLS_ASSERTTEST_ASSERT_SAFE_FAIL(EXPR)
#define ASSERT_PASS(EXPR) BSLS_ASSERTTEST_ASSERT_PASS(EXPR)
#define ASSERT_FAIL(EXPR) BSLS_ASSERTTEST_ASSERT_FAIL(EXPR)
#define ASSERT_OPT_PASS(EXPR) BSLS_ASSERTTEST_ASSERT_OPT_PASS(EXPR)
#define ASSERT_OPT_FAIL(EXPR) BSLS_ASSERTTEST_ASSERT_OPT_FAIL(EXPR)
// ============================================================================
// PRINTF FORMAT MACRO ABBREVIATIONS
// ----------------------------------------------------------------------------
#define ZU BSLS_BSLTESTUTIL_FORMAT_ZU
// ============================================================================
// GLOBAL TEST VALUES
// ----------------------------------------------------------------------------
static bool verbose;
static bool veryVerbose;
static bool veryVeryVerbose;
static bool veryVeryVeryVerbose;
//=============================================================================
// GLOBAL TYPEDEFS/CONSTANTS FOR TESTING
//-----------------------------------------------------------------------------
// Define DEFAULT DATA used in multiple test cases.
static const size_t DEFAULT_MAX_LENGTH = 32;
struct DefaultDataRow {
int d_line; // source line number
const char *d_spec; // specification string, for input to 'gg' function
const char *d_results; // expected element values
};
static
const DefaultDataRow DEFAULT_DATA[] = {
//line spec results
//---- -------- -------
{ L_, "", "" },
{ L_, "A", "A" },
{ L_, "AA", "A" },
{ L_, "B", "B" },
{ L_, "AB", "AB" },
{ L_, "BA", "AB" },
{ L_, "AC", "AC" },
{ L_, "CD", "CD" },
{ L_, "ABC", "ABC" },
{ L_, "ACB", "ABC" },
{ L_, "BAC", "ABC" },
{ L_, "BCA", "ABC" },
{ L_, "CAB", "ABC" },
{ L_, "CBA", "ABC" },
{ L_, "BAD", "ABD" },
{ L_, "ABCA", "ABC" },
{ L_, "ABCB", "ABC" },
{ L_, "ABCC", "ABC" },
{ L_, "ABCABC", "ABC" },
{ L_, "AABBCC", "ABC" },
{ L_, "ABCD", "ABCD" },
{ L_, "ACBD", "ABCD" },
{ L_, "BDCA", "ABCD" },
{ L_, "DCBA", "ABCD" },
{ L_, "BEAD", "ABDE" },
{ L_, "BCDE", "BCDE" },
{ L_, "ABCDE", "ABCDE" },
{ L_, "ACBDE", "ABCDE" },
{ L_, "CEBDA", "ABCDE" },
{ L_, "EDCBA", "ABCDE" },
{ L_, "FEDCB", "BCDEF" },
{ L_, "FEDCBA", "ABCDEF" },
{ L_, "ABCDEFG", "ABCDEFG" },
{ L_, "ABCDEFGH", "ABCDEFGH" },
{ L_, "ABCDEFGHI", "ABCDEFGHI" },
{ L_, "ABCDEFGHIJKLMNOP", "ABCDEFGHIJKLMNOP" },
{ L_, "PONMLKJIGHFEDCBA", "ABCDEFGHIJKLMNOP" },
{ L_, "ABCDEFGHIJKLMNOPQ", "ABCDEFGHIJKLMNOPQ" },
{ L_, "DHBIMACOPELGFKNJQ", "ABCDEFGHIJKLMNOPQ" }
};
static const int DEFAULT_NUM_DATA = sizeof DEFAULT_DATA / sizeof *DEFAULT_DATA;
typedef bslmf::MovableRefUtil MoveUtil;
//=============================================================================
// GLOBAL HELPER FUNCTIONS FOR TESTING
//-----------------------------------------------------------------------------
#ifndef BSLS_PLATFORM_OS_WINDOWS
# define TEST_TYPES_REGULAR(containerArg) \
containerArg<signed char>, \
containerArg<size_t>, \
containerArg<bsltf::TemplateTestFacility::ObjectPtr>, \
containerArg<bsltf::TemplateTestFacility::FunctionPtr>, \
containerArg<bsltf::TemplateTestFacility::MethodPtr>, \
containerArg<bsltf::EnumeratedTestType::Enum>, \
containerArg<bsltf::UnionTestType>, \
containerArg<bsltf::SimpleTestType>, \
containerArg<bsltf::AllocTestType>, \
containerArg<bsltf::BitwiseMoveableTestType>, \
containerArg<bsltf::AllocBitwiseMoveableTestType>, \
containerArg<bsltf::NonTypicalOverloadsTestType>
#else
# define TEST_TYPES_REGULAR(containerArg) \
containerArg<signed char>, \
containerArg<size_t>, \
containerArg<bsltf::TemplateTestFacility::ObjectPtr>, \
containerArg<bsltf::TemplateTestFacility::MethodPtr>, \
containerArg<bsltf::EnumeratedTestType::Enum>, \
containerArg<bsltf::UnionTestType>, \
containerArg<bsltf::SimpleTestType>, \
containerArg<bsltf::AllocTestType>, \
containerArg<bsltf::BitwiseMoveableTestType>, \
containerArg<bsltf::AllocBitwiseMoveableTestType>, \
containerArg<bsltf::NonTypicalOverloadsTestType>
#endif
#define TEST_TYPES_INEQUAL_COMPARABLE(containerArg) \
containerArg<signed char>, \
containerArg<size_t>, \
containerArg<bsltf::TemplateTestFacility::ObjectPtr>, \
containerArg<bsltf::EnumeratedTestType::Enum>
#define TEST_TYPES_MOVABLE(containerArg) \
containerArg<bsltf::MovableTestType>, \
containerArg<bsltf::MovableAllocTestType>
namespace bsl {
// stack-specific print function.
template <class VALUE, class CONTAINER>
void debugprint(const bsl::stack<VALUE, CONTAINER>& s)
{
if (s.empty()) {
printf("<empty>");
}
else {
printf("size: %d, top: ", (int)s.size());
bsls::BslTestUtil::callDebugprint(static_cast<char>(
bsltf::TemplateTestFacility::getIdentifier(s.top())));
}
fflush(stdout);
}
} // close namespace bsl
template <class VALUE>
struct NonAllocCont {
// PUBLIC TYPES
typedef VALUE value_type;
typedef VALUE& reference;
typedef const VALUE& const_reference;
typedef std::size_t size_type;
private:
// DATA
bsl::vector<VALUE> d_vector;
public:
// CREATORS
NonAllocCont() : d_vector(&bslma::MallocFreeAllocator::singleton()) {}
~NonAllocCont() {}
// MANIPULATORS
NonAllocCont& operator=(const NonAllocCont& rhs)
{
d_vector = rhs.d_vector;
return *this;
}
reference back() { return d_vector.back(); }
void pop_back() { d_vector.pop_back(); }
void push_back(const value_type& value) { d_vector.push_back(value); }
bsl::vector<value_type>& contents() { return d_vector; }
// ACCESSORS
bool operator==(const NonAllocCont& rhs) const
{
return d_vector == rhs.d_vector;
}
bool operator!=(const NonAllocCont& rhs) const
{
return !operator==(rhs);
}
bool operator<(const NonAllocCont& rhs) const
{
return d_vector < rhs.d_vector;
}
bool operator>=(const NonAllocCont& rhs) const
{
return !operator<(rhs);
}
bool operator>(const NonAllocCont& rhs) const
{
return d_vector > rhs.d_vector;
}
bool operator<=(const NonAllocCont& rhs) const
{
return !operator>(rhs);
}
const_reference back() const { return d_vector.back(); }
size_type size() const { return d_vector.size(); }
};
namespace std {
template <class VALUE>
void swap(NonAllocCont<VALUE>& lhs, NonAllocCont<VALUE>& rhs)
{
lhs.contents().swap(rhs.contents());
}
} // close namespace std
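// Note (added for exposition): 'NonAllocCont' deliberately exposes no
// allocator-aware constructors; it backs test case 14, which verifies that
// 'bsl::stack' can adapt a container that does not support 'bslma'-style
// allocators, e.g. (hypothetical):
//..
//  bsl::stack<int, NonAllocCont<int> > mX;
//  mX.push(3);
//..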
template <class VALUE>
struct ValueName {
private:
// NOT IMPLEMENTED
static const char *name();
// Not implemented, so that an attempt to show the name of an
// unrecognized type will result in failure to link.
};
template <>
struct ValueName<signed char> {
static const char *name() { return "signed char"; }
};
template <>
struct ValueName<size_t> {
static const char *name() { return "size_t"; }
};
template <>
struct ValueName<bsltf::TemplateTestFacility::ObjectPtr> {
static const char *name() { return "TemplateTestFacility::ObjectPtr"; }
};
template <>
struct ValueName<bsltf::TemplateTestFacility::FunctionPtr> {
static const char *name() { return "TemplateTestFacility::FunctionPtr"; }
};
template <>
struct ValueName<bsltf::TemplateTestFacility::MethodPtr> {
static const char *name() { return "TemplateTestFacility::MethodPtr"; }
};
template <>
struct ValueName<bsltf::EnumeratedTestType::Enum> {
static const char *name() { return "EnumeratedTestType::Enum"; }
};
template <>
struct ValueName<bsltf::UnionTestType> {
static const char *name() { return "UnionTestType"; }
};
template <>
struct ValueName<bsltf::SimpleTestType> {
static const char *name() { return "SimpleTestType"; }
};
template <>
struct ValueName<bsltf::AllocTestType> {
static const char *name() { return "AllocTestType"; }
};
template <>
struct ValueName<bsltf::BitwiseMoveableTestType> {
static const char *name() { return "BitwiseMoveableTestType"; }
};
template <>
struct ValueName<bsltf::AllocBitwiseMoveableTestType> {
static const char *name() { return "AllocBitwiseMoveableTestType"; }
};
template <>
struct ValueName<bsltf::NonTypicalOverloadsTestType> {
static const char *name() { return "NonTypicalOverloadsTestType"; }
};
template <class CONTAINER>
struct ContainerName {
static const char *name();
};
template <class VALUE>
struct ContainerName<deque<VALUE> > {
static const char *name()
{
static char buf[1000];
strcpy(buf, "deque<");
strcat(buf, ValueName<VALUE>::name());
strcat(buf, ">");
return buf;
}
};
template <class VALUE>
struct ContainerName<vector<VALUE> > {
static const char *name()
{
static char buf[1000];
strcpy(buf, "vector<");
strcat(buf, ValueName<VALUE>::name());
strcat(buf, ">");
return buf;
}
};
bool expectToAllocate(int n)
// Return 'true' if the container is expected to allocate memory on the
// specified 'n'th element, and 'false' otherwise.
{
if (n > 32) {
return (0 == n % 32); // RETURN
}
return (((n - 1) & n) == 0); // Allocate when 'n' is a power of 2
}
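// For reference (note added for exposition): with the growth policy encoded
// above, the allocating element counts are 1, 2, 4, 8, 16, 32, and then
// every multiple of 32 (64, 96, ...), i.e., capacity is assumed to double up
// to 32 elements and to grow linearly afterwards.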
template<class CONTAINER, class VALUES>
void emptyNVerifyStack(stack<typename CONTAINER::value_type,
CONTAINER> *pmX,
const VALUES& expectedValues,
size_t expectedSize,
const int LINE)
    // Verify that the stack addressed by the specified 'pmX' has the
    // specified 'expectedSize' and contains the same values as the array in
    // the specified 'expectedValues', reporting any mismatch as a test
    // failure at the specified 'LINE'.  Note that, as a side effect, this
    // function empties the stack.
{
const char *cont = ContainerName<CONTAINER>::name();
const char *val = ValueName<typename CONTAINER::value_type>::name();
ASSERTV(cont, val, LINE, expectedSize, pmX->size(),
expectedSize == pmX->size());
if (expectedSize != pmX->size()) {
return; // RETURN
}
for (int i = static_cast<int>(expectedSize) - 1; i >= 0; --i) {
if (expectedValues[i] != pmX->top()) P_(cont);
ASSERTV(val, i, LINE, expectedValues[i], pmX->top(),
expectedValues[i] == pmX->top());
pmX->pop();
}
}
template<class CONTAINER, class VALUES>
void verifyStack(const stack<typename CONTAINER::value_type,
CONTAINER>& X,
const VALUES& expectedValues,
size_t expectedSize,
const int LINE,
bslma::Allocator *allocator = 0)
{
stack<typename CONTAINER::value_type, CONTAINER>
copyX(X, bslma::Default::allocator(allocator));
emptyNVerifyStack(©X, expectedValues, expectedSize, LINE);
}
// ----------------------------------------------------------------------------
// HELPERS: "Called Method" Classes: 'NonMovableVector' and 'MovableVector'
// ----------------------------------------------------------------------------
enum CalledMethod
// Enumerations used to indicate if appropriate special container's method
// has been invoked.
{
e_NONE = 0
, e_CTOR_DFT_SANS_ALLOC = 1 << 0
, e_CTOR_DFT_AVEC_ALLOC = 1 << 1
, e_CTOR_CPY_SANS_ALLOC = 1 << 3
, e_CTOR_CPY_AVEC_ALLOC = 1 << 4
, e_CTOR_MOV_SANS_ALLOC = 1 << 5
, e_CTOR_MOV_AVEC_ALLOC = 1 << 6
, e_ASSIGN_CREF = 1 << 7
, e_ASSIGN_MOVE = 1 << 8
, e_PUSH_BACK_CREF = 1 << 9
, e_PUSH_BACK_MOVE = 1 << 10
, e_EMPLACE_0 = 1 << 11
, e_EMPLACE_1 = 1 << 12
, e_EMPLACE_2 = 1 << 13
, e_EMPLACE_3 = 1 << 14
, e_EMPLACE_4 = 1 << 15
, e_EMPLACE_5 = 1 << 16
, e_EMPLACE_6 = 1 << 17
, e_EMPLACE_7 = 1 << 18
, e_EMPLACE_8 = 1 << 19
, e_EMPLACE_9 = 1 << 20
, e_EMPLACE_A = 1 << 21
};
void debugprint(enum CalledMethod calledMethod)
{
const char *ascii;
#define CASE(X) case(e_ ## X): ascii = #X; break;
switch (calledMethod) {
CASE(NONE)
CASE(CTOR_DFT_SANS_ALLOC)
CASE(CTOR_DFT_AVEC_ALLOC)
CASE(CTOR_CPY_SANS_ALLOC)
CASE(CTOR_CPY_AVEC_ALLOC)
CASE(CTOR_MOV_SANS_ALLOC)
CASE(CTOR_MOV_AVEC_ALLOC)
CASE(ASSIGN_CREF)
CASE(ASSIGN_MOVE)
CASE(PUSH_BACK_CREF)
CASE(PUSH_BACK_MOVE)
CASE(EMPLACE_0)
CASE(EMPLACE_1)
CASE(EMPLACE_2)
CASE(EMPLACE_3)
CASE(EMPLACE_4)
CASE(EMPLACE_5)
CASE(EMPLACE_6)
CASE(EMPLACE_7)
CASE(EMPLACE_8)
CASE(EMPLACE_9)
CASE(EMPLACE_A)
default: ascii = "(* UNKNOWN *)";
}
#undef CASE
printf("%s", ascii);
}
inline CalledMethod operator|=(CalledMethod& lhs, CalledMethod rhs)
// Bitwise OR the values of the specified 'lhs' and 'rhs' flags, and return
// the resulting value.
{
lhs = static_cast<CalledMethod>(
static_cast<int>(lhs) | static_cast<int>(rhs));
return lhs;
}
CalledMethod g_calledMethodFlag; // global variable, that stores information
// about called methods for special
// containers 'NonMovableVector' and
// 'MovableVector'.
void setupCalledMethodCheck()
// Reset 'g_calledMethodFlag' global variable's value.
{
g_calledMethodFlag = e_NONE;
}
enum CalledMethod getCalledMethod()
{
return g_calledMethodFlag;
}
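// A minimal sketch of the recording protocol used with the instrumented
// containers defined below (names as defined above; hypothetical usage):
//..
//  setupCalledMethodCheck();
//  NonMovableVector<int> mV;           // records 'e_CTOR_DFT_SANS_ALLOC'
//  ASSERT(e_CTOR_DFT_SANS_ALLOC ==
//         (getCalledMethod() & e_CTOR_DFT_SANS_ALLOC));
//..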
// ======================
// class NonMovableVector
// ======================
template <class VALUE, class ALLOCATOR>
class NonMovableVector;
template<class VALUE, class ALLOCATOR>
bool operator==(const NonMovableVector<VALUE, ALLOCATOR>& lhs,
const NonMovableVector<VALUE, ALLOCATOR>& rhs);
template <class VALUE, class ALLOCATOR = bsl::allocator<VALUE> >
class NonMovableVector {
// This class is a value-semantic class template, acting as a transparent
// proxy for the underlying 'bsl::vector' container, that holds elements of
// the (template parameter) 'VALUE', and recording in the global variable
// 'g_calledMethodFlag' methods being invoked. The information recorded is
// used to verify that 'stack' invokes expected container methods.
// DATA
    bsl::vector<VALUE> d_vector;  // container for its behavior simulation
// FRIENDS
friend bool operator==<VALUE, ALLOCATOR>(const NonMovableVector& lhs,
const NonMovableVector& rhs);
public:
// CLASS METHODS
static int GGG(NonMovableVector *object,
const char *spec,
int verbose = 1);
static NonMovableVector GG(NonMovableVector *object,
const char *spec);
// PUBLIC TYPES
typedef ALLOCATOR allocator_type;
typedef VALUE value_type;
typedef VALUE& reference;
typedef const VALUE& const_reference;
typedef std::size_t size_type;
typedef VALUE *iterator;
typedef const VALUE *const_iterator;
// CREATORS
NonMovableVector()
: d_vector()
// Create an empty vector. Method invocation is recorded.
{
g_calledMethodFlag |= e_CTOR_DFT_SANS_ALLOC;
}
NonMovableVector(const ALLOCATOR& basicAllocator)
: d_vector(basicAllocator)
// Create an empty vector, using the specified 'basicAllocator' to
// supply memory. Method invocation is recorded.
{
g_calledMethodFlag |= e_CTOR_DFT_AVEC_ALLOC;
}
NonMovableVector(const NonMovableVector& original)
// Create a vector that has the same value as the specified 'original'
// vector. Method invocation is recorded.
: d_vector(original.d_vector)
{
g_calledMethodFlag |= e_CTOR_CPY_SANS_ALLOC;
}
NonMovableVector(const NonMovableVector& original,
const ALLOCATOR& basicAllocator)
// Create a vector that has the same value as the specified 'original'
// vector, using the specified 'basicAllocator' to supply memory.
// Method invocation is recorded.
: d_vector(original.d_vector, basicAllocator)
{
g_calledMethodFlag |= e_CTOR_CPY_AVEC_ALLOC;
}
// MANIPULATORS
NonMovableVector& operator=(const NonMovableVector& rhs)
        // Assign to this vector the value of the specified 'rhs' vector and
// return a reference to this modifiable vector. Method invocation is
// recorded.
{
d_vector = rhs.d_vector;
g_calledMethodFlag |= e_ASSIGN_CREF;
return *this;
}
void pop_back()
// Erase the last element from this vector.
{
d_vector.pop_back();
}
void push_back(const value_type& value)
// Append a copy of the specified 'value' at the end of this vector.
// Method invocation is recorded.
{
g_calledMethodFlag |= e_PUSH_BACK_CREF;
d_vector.push_back(value);
}
template <class INPUT_ITER>
iterator insert(const_iterator position,
INPUT_ITER first,
INPUT_ITER last)
// Insert at the specified 'position' in this vector the values in
// the range starting at the specified 'first' and ending
// immediately before the specified 'last' iterators of the
// (template parameter) type 'INPUT_ITER', and return an iterator
// to the first newly inserted element.
{
return d_vector.insert(position, first, last);
}
iterator begin()
        // Return an iterator pointing to the first element in this
        // modifiable vector (or the past-the-end iterator if this vector is
        // empty).
{
return d_vector.begin();
}
iterator end()
// Return the past-the-end iterator for this modifiable vector.
{
return d_vector.end();
}
reference front()
// Return a reference to the modifiable element at the first position
// in this vector. The behavior is undefined if this vector is empty.
{
return d_vector.front();
}
reference back()
// Return a reference to the modifiable element at the last position in
// this vector. The behavior is undefined if this vector is empty.
{
return d_vector.back();
}
#if !BSLS_COMPILERFEATURES_SIMULATE_CPP11_FEATURES
template <class... Args> void emplace_back(Args&&... arguments)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the specified 'arguments'.  Note that this method
        // is written only for testing purposes; it DOESN'T simulate
        // standard vector behavior and requires that the (template
        // parameter) type 'VALUE_TYPE' has a constructor accepting an
        // integer value as a parameter.  Method
// invocation is recorded.
{
int argumentsNumber = sizeof...(arguments);
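        // E.g., two arguments record 'e_EMPLACE_2' ('e_EMPLACE_0 << 2'),
        // assuming the 'e_EMPLACE_*' enumerators are successive bit flags.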
g_calledMethodFlag |= static_cast<CalledMethod>(
static_cast<int>(e_EMPLACE_0) << argumentsNumber);
d_vector.push_back(value_type(1));
}
#elif BSLS_COMPILERFEATURES_SIMULATE_VARIADIC_TEMPLATES
inline
void emplace_back()
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter.  Note
        // that this method is written only for testing purposes; it DOESN'T
        // simulate standard vector behavior and requires that the (template
        // parameter) type 'VALUE_TYPE' has a constructor accepting an
        // integer value as a parameter.  Method invocation is recorded.
{
g_calledMethodFlag |= e_EMPLACE_0;
d_vector.push_back(value_type(1));
}
template <class Args_01>
inline
void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the passed argument.  Note that this method is
        // written only for testing purposes; it DOESN'T simulate standard
        // vector behavior and requires that the (template parameter) type
        // 'VALUE_TYPE' has a constructor accepting an integer value as a
        // parameter.  Method
// invocation is recorded.
{
// Compiler warnings suppression.
(void)args_01;
g_calledMethodFlag |= e_EMPLACE_1;
d_vector.push_back(value_type(1));
}
template <class Args_01,
class Args_02>
inline
void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the passed arguments.  Note that this method is
        // written only for testing purposes; it DOESN'T simulate standard
        // vector behavior and requires that the (template parameter) type
        // 'VALUE_TYPE' has a constructor accepting an integer value as a
        // parameter.  Method
// invocation is recorded.
{
// Compiler warnings suppression.
(void)args_01;
(void)args_02;
g_calledMethodFlag |= e_EMPLACE_2;
d_vector.push_back(value_type(1));
}
template <class Args_01,
class Args_02,
class Args_03>
inline
void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the passed arguments.  Note that this method is
        // written only for testing purposes; it DOESN'T simulate standard
        // vector behavior and requires that the (template parameter) type
        // 'VALUE_TYPE' has a constructor accepting an integer value as a
        // parameter.  Method
// invocation is recorded.
{
// Compiler warnings suppression.
(void)args_01;
(void)args_02;
(void)args_03;
g_calledMethodFlag |= e_EMPLACE_3;
d_vector.push_back(value_type(1));
}
template <class Args_01,
class Args_02,
class Args_03,
class Args_04>
inline
void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the passed arguments.  Note that this method is
        // written only for testing purposes; it DOESN'T simulate standard
        // vector behavior and requires that the (template parameter) type
        // 'VALUE_TYPE' has a constructor accepting an integer value as a
        // parameter.  Method
// invocation is recorded.
{
// Compiler warnings suppression.
(void)args_01;
(void)args_02;
(void)args_03;
(void)args_04;
g_calledMethodFlag |= e_EMPLACE_4;
d_vector.push_back(value_type(1));
}
template <class Args_01,
class Args_02,
class Args_03,
class Args_04,
class Args_05>
inline
void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the passed arguments.  Note that this method is
        // written only for testing purposes; it DOESN'T simulate standard
        // vector behavior and requires that the (template parameter) type
        // 'VALUE_TYPE' has a constructor accepting an integer value as a
        // parameter.  Method
// invocation is recorded.
{
// Compiler warnings suppression.
(void)args_01;
(void)args_02;
(void)args_03;
(void)args_04;
(void)args_05;
g_calledMethodFlag |= e_EMPLACE_5;
d_vector.push_back(value_type(1));
}
template <class Args_01,
class Args_02,
class Args_03,
class Args_04,
class Args_05,
class Args_06>
inline
void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_06) args_06)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the passed arguments.  Note that this method is
        // written only for testing purposes; it DOESN'T simulate standard
        // vector behavior and requires that the (template parameter) type
        // 'VALUE_TYPE' has a constructor accepting an integer value as a
        // parameter.  Method
// invocation is recorded.
{
// Compiler warnings suppression.
(void)args_01;
(void)args_02;
(void)args_03;
(void)args_04;
(void)args_05;
(void)args_06;
g_calledMethodFlag |= e_EMPLACE_6;
d_vector.push_back(value_type(1));
}
template <class Args_01,
class Args_02,
class Args_03,
class Args_04,
class Args_05,
class Args_06,
class Args_07>
inline
void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_06) args_06,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_07) args_07)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the passed arguments.  Note that this method is
        // written only for testing purposes; it DOESN'T simulate standard
        // vector behavior and requires that the (template parameter) type
        // 'VALUE_TYPE' has a constructor accepting an integer value as a
        // parameter.  Method
// invocation is recorded.
{
// Compiler warnings suppression.
(void)args_01;
(void)args_02;
(void)args_03;
(void)args_04;
(void)args_05;
(void)args_06;
(void)args_07;
g_calledMethodFlag |= e_EMPLACE_7;
d_vector.push_back(value_type(1));
}
template <class Args_01,
class Args_02,
class Args_03,
class Args_04,
class Args_05,
class Args_06,
class Args_07,
class Args_08>
inline
void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_06) args_06,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_07) args_07,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_08) args_08)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the passed arguments.  Note that this method is
        // written only for testing purposes; it DOESN'T simulate standard
        // vector behavior and requires that the (template parameter) type
        // 'VALUE_TYPE' has a constructor accepting an integer value as a
        // parameter.  Method
// invocation is recorded.
{
// Compiler warnings suppression.
(void)args_01;
(void)args_02;
(void)args_03;
(void)args_04;
(void)args_05;
(void)args_06;
(void)args_07;
(void)args_08;
g_calledMethodFlag |= e_EMPLACE_8;
d_vector.push_back(value_type(1));
}
template <class Args_01,
class Args_02,
class Args_03,
class Args_04,
class Args_05,
class Args_06,
class Args_07,
class Args_08,
class Args_09>
inline
void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_06) args_06,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_07) args_07,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_08) args_08,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_09) args_09)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the passed arguments.  Note that this method is
        // written only for testing purposes; it DOESN'T simulate standard
        // vector behavior and requires that the (template parameter) type
        // 'VALUE_TYPE' has a constructor accepting an integer value as a
        // parameter.  Method
// invocation is recorded.
{
// Compiler warnings suppression.
(void)args_01;
(void)args_02;
(void)args_03;
(void)args_04;
(void)args_05;
(void)args_06;
(void)args_07;
(void)args_08;
(void)args_09;
g_calledMethodFlag |= e_EMPLACE_9;
d_vector.push_back(value_type(1));
}
template <class Args_01,
class Args_02,
class Args_03,
class Args_04,
class Args_05,
class Args_06,
class Args_07,
class Args_08,
class Args_09,
class Args_10>
inline
void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_06) args_06,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_07) args_07,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_08) args_08,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_09) args_09,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_10) args_10)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the passed arguments.  Note that this method is
        // written only for testing purposes; it DOESN'T simulate standard
        // vector behavior and requires that the (template parameter) type
        // 'VALUE_TYPE' has a constructor accepting an integer value as a
        // parameter.  Method
// invocation is recorded.
{
// Compiler warnings suppression.
(void)args_01;
(void)args_02;
(void)args_03;
(void)args_04;
(void)args_05;
(void)args_06;
(void)args_07;
(void)args_08;
(void)args_09;
(void)args_10;
g_calledMethodFlag |= e_EMPLACE_A;
d_vector.push_back(value_type(1));
}
#else
template <class... Args> void emplace_back(
BSLS_COMPILERFEATURES_FORWARD_REF(Args)... arguments)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the specified 'arguments'.  Note that this method
        // is written only for testing purposes; it DOESN'T simulate
        // standard vector behavior and requires that the (template
        // parameter) type 'VALUE_TYPE' has a constructor accepting an
        // integer value as a parameter.  Method
// invocation is recorded.
{
int argumentsNumber = sizeof...(arguments);
g_calledMethodFlag |= static_cast<CalledMethod>(
static_cast<int>(e_EMPLACE_0) << argumentsNumber);
d_vector.push_back(value_type(1));
}
#endif
// ACCESSORS
const_iterator begin() const
        // Return an iterator pointing to the first element in this
        // non-modifiable vector (or the past-the-end iterator if this
        // vector is empty).
{
return d_vector.begin();
}
const_iterator end() const
// Return the past-the-end iterator for this non-modifiable vector.
{
return d_vector.end();
}
const_reference front() const
// Return a reference to the non-modifiable element at the first
// position in this vector. The behavior is undefined if this vector
// is empty.
{
return d_vector.front();
}
const_reference back() const
// Return a reference to the non-modifiable element at the last
// position in this vector. The behavior is undefined if this vector
// is empty.
{
return d_vector.back();
}
size_type size() const
// Return the number of elements in this vector.
{
return d_vector.size();
}
bool empty() const
// Return 'true' if this vector has size 0, and 'false' otherwise.
{
return d_vector.empty();
}
};
// ----------------------
// class NonMovableVector
// ----------------------
// CLASS METHODS
template <class CONTAINER>
class TestDriver;
template <class VALUE, class ALLOCATOR>
int NonMovableVector<VALUE, ALLOCATOR>::
GGG(NonMovableVector *object,
const char *spec,
int verbose)
{
bslma::DefaultAllocatorGuard guard(
&bslma::NewDeleteAllocator::singleton());
typename TestDriver<NonMovableVector>::TestValues VALUES;
enum { SUCCESS = -1 };
for (int i = 0; spec[i]; ++i) {
if ('A' <= spec[i] && spec[i] <= 'Z') {
object->push_back(VALUES[spec[i] - 'A']);
}
else {
if (verbose) {
printf("Error, bad character ('%c') "
"in spec \"%s\" at position %d.\n", spec[i], spec, i);
}
// Discontinue processing this spec.
return i; // RETURN
}
}
return SUCCESS;
}
template <class VALUE, class ALLOCATOR>
NonMovableVector<VALUE, ALLOCATOR>
NonMovableVector<VALUE, ALLOCATOR>::
GG(NonMovableVector *object, const char *spec)
{
ASSERTV(GGG(object, spec) < 0);
return *object;
}
// FREE OPERATORS
template<class VALUE, class ALLOCATOR>
bool operator==(const NonMovableVector<VALUE, ALLOCATOR>& lhs,
const NonMovableVector<VALUE, ALLOCATOR>& rhs)
{
return lhs.d_vector == rhs.d_vector;
}
// ===================
// class MovableVector
// ===================
template <class VALUE, class ALLOCATOR>
class MovableVector;
template<class VALUE, class ALLOCATOR>
bool operator==(const MovableVector<VALUE, ALLOCATOR>& lhs,
const MovableVector<VALUE, ALLOCATOR>& rhs);
template <class VALUE, class ALLOCATOR = bsl::allocator<VALUE> >
class MovableVector
{
// TBD
//
    // This class is a value-semantic class template, acting as a
    // transparent proxy for an underlying 'bsl::vector' container that
    // holds elements of the (template parameter) type 'VALUE', and
    // recording, in the global variable 'g_calledMethodFlag', the methods
    // being invoked.  The recorded information is used to verify that
    // 'stack' invokes the expected container methods.
private:
// DATA
bsl::vector<VALUE> d_vector; // provides required behavior
// FRIENDS
friend bool operator==<VALUE, ALLOCATOR>(
const MovableVector<VALUE, ALLOCATOR>& lhs,
const MovableVector<VALUE, ALLOCATOR>& rhs);
public:
// CLASS METHODS
static int GGG(MovableVector *object,
const char *spec,
int verbose = 1);
static MovableVector GG(MovableVector *object,
const char *spec);
// PUBLIC TYPES
typedef ALLOCATOR allocator_type;
typedef VALUE value_type;
typedef VALUE& reference;
typedef const VALUE& const_reference;
typedef std::size_t size_type;
typedef VALUE* iterator;
typedef const VALUE* const_iterator;
// CREATORS
MovableVector()
: d_vector()
// Create an empty vector. Method invocation is recorded.
{
g_calledMethodFlag |= e_CTOR_DFT_SANS_ALLOC;
}
MovableVector(const ALLOCATOR& basicAllocator)
    : d_vector(basicAllocator)
// Create an empty vector, using the specified 'basicAllocator' to
// supply memory. Method invocation is recorded.
{
g_calledMethodFlag |= e_CTOR_DFT_AVEC_ALLOC;
}
MovableVector(const MovableVector& original)
// Create a vector that has the same value as the specified 'original'
// vector. Method invocation is recorded.
: d_vector(original.d_vector)
{
g_calledMethodFlag |= e_CTOR_CPY_SANS_ALLOC;
}
MovableVector(bslmf::MovableRef<MovableVector> original)
        // Create a vector having the same value as the specified 'original'
        // vector by moving the contents of 'original' into the new vector.
        // Method invocation is recorded.
: d_vector(MoveUtil::move(MoveUtil::access(original).d_vector))
{
g_calledMethodFlag |= e_CTOR_MOV_SANS_ALLOC;
}
MovableVector(const MovableVector& original,
const ALLOCATOR& basicAllocator)
// Create a vector that has the same value as the specified 'original'
// vector, using the specified 'basicAllocator' to supply memory.
// Method invocation is recorded.
: d_vector(original.d_vector, basicAllocator)
{
g_calledMethodFlag |= e_CTOR_CPY_AVEC_ALLOC;
}
MovableVector(bslmf::MovableRef<MovableVector> original,
const ALLOCATOR& basicAllocator)
        // Create a vector having the same value as the specified 'original'
        // vector by moving the contents of 'original' into the new vector,
        // using the specified 'basicAllocator' to supply memory.  Method
        // invocation is recorded.
: d_vector(MoveUtil::move(MoveUtil::access(original).d_vector),
basicAllocator)
{
g_calledMethodFlag |= e_CTOR_MOV_AVEC_ALLOC;
}
// MANIPULATORS
MovableVector& operator=(const MovableVector& rhs)
        // Assign to this vector the value of the specified 'rhs' vector and
// return a reference to this modifiable vector. Method invocation is
// recorded.
{
g_calledMethodFlag |= e_ASSIGN_CREF;
d_vector = rhs.d_vector;
return *this;
}
MovableVector& operator=(bslmf::MovableRef<MovableVector> rhs)
        // Assign to this vector the value of the specified 'rhs' vector and
// return a reference to this modifiable vector. Method invocation is
// recorded.
{
g_calledMethodFlag |= e_ASSIGN_MOVE;
d_vector = MoveUtil::move(MoveUtil::access(rhs).d_vector);
return *this;
}
void pop_back()
// Erase the last element from this vector.
{
d_vector.pop_back();
}
void push_back(const value_type& value)
// Append a copy of the specified 'value' at the end of this vector.
// Method invocation is recorded.
{
g_calledMethodFlag |= e_PUSH_BACK_CREF;
d_vector.push_back(value);
}
void push_back(bslmf::MovableRef<value_type> value)
        // Append the specified 'value' at the end of this vector, moving
        // (rather than copying) its contents.  Method invocation is
        // recorded.
{
g_calledMethodFlag |= e_PUSH_BACK_MOVE;
d_vector.push_back(MoveUtil::move(value));
}
template <class INPUT_ITER>
iterator insert(const_iterator position,
INPUT_ITER first,
INPUT_ITER last)
// Insert at the specified 'position' in this vector the values in
// the range starting at the specified 'first' and ending
// immediately before the specified 'last' iterators of the
// (template parameter) type 'INPUT_ITER', and return an iterator
// to the first newly inserted element.
{
return d_vector.insert(position, first, last);
}
iterator begin()
        // Return an iterator pointing to the first element in this
        // modifiable vector (or the past-the-end iterator if this vector is
        // empty).
{
        return d_vector.begin();
    }
iterator end()
// Return the past-the-end iterator for this modifiable vector.
{
return d_vector.end();
}
reference front()
// Return a reference to the modifiable element at the first position
// in this vector. The behavior is undefined if this vector is empty.
{
return d_vector.front();
}
reference back()
// Return a reference to the modifiable element at the last position in
// this vector. The behavior is undefined if this vector is empty.
{
return d_vector.back();
}
#if !BSLS_COMPILERFEATURES_SIMULATE_CPP11_FEATURES
template <class... Args> void emplace_back(Args&&... arguments)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the specified 'arguments'.  Note that this method
        // is written only for testing purposes; it DOESN'T simulate
        // standard vector behavior and requires that the (template
        // parameter) type 'VALUE_TYPE' has a constructor accepting an
        // integer value as a parameter.  Method
// invocation is recorded.
{
int argumentsNumber = sizeof...(arguments);
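        // E.g., two arguments record 'e_EMPLACE_2' ('e_EMPLACE_0 << 2'),
        // assuming the 'e_EMPLACE_*' enumerators are successive bit flags.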
g_calledMethodFlag |= static_cast<CalledMethod>(
static_cast<int>(e_EMPLACE_0) << argumentsNumber);
d_vector.push_back(value_type(1));
}
#elif BSLS_COMPILERFEATURES_SIMULATE_VARIADIC_TEMPLATES
inline
void emplace_back()
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter.  Note
        // that this method is written only for testing purposes; it DOESN'T
        // simulate standard vector behavior and requires that the (template
        // parameter) type 'VALUE_TYPE' has a constructor accepting an
        // integer value as a parameter.  Method invocation is recorded.
{
g_calledMethodFlag |= e_EMPLACE_0;
d_vector.push_back(value_type(1));
}
template <class Args_01>
inline
void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the passed argument.  Note that this method is
        // written only for testing purposes; it DOESN'T simulate standard
        // vector behavior and requires that the (template parameter) type
        // 'VALUE_TYPE' has a constructor accepting an integer value as a
        // parameter.  Method
// invocation is recorded.
{
// Compiler warnings suppression.
(void)args_01;
g_calledMethodFlag |= e_EMPLACE_1;
d_vector.push_back(value_type(1));
}
template <class Args_01,
class Args_02>
inline
void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the passed arguments.  Note that this method is
        // written only for testing purposes; it DOESN'T simulate standard
        // vector behavior and requires that the (template parameter) type
        // 'VALUE_TYPE' has a constructor accepting an integer value as a
        // parameter.  Method
// invocation is recorded.
{
// Compiler warnings suppression.
(void)args_01;
(void)args_02;
g_calledMethodFlag |= e_EMPLACE_2;
d_vector.push_back(value_type(1));
}
template <class Args_01,
class Args_02,
class Args_03>
inline
void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the passed arguments.  Note that this method is
        // written only for testing purposes; it DOESN'T simulate standard
        // vector behavior and requires that the (template parameter) type
        // 'VALUE_TYPE' has a constructor accepting an integer value as a
        // parameter.  Method
// invocation is recorded.
{
// Compiler warnings suppression.
(void)args_01;
(void)args_02;
(void)args_03;
g_calledMethodFlag |= e_EMPLACE_3;
d_vector.push_back(value_type(1));
}
template <class Args_01,
class Args_02,
class Args_03,
class Args_04>
inline
void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the passed arguments.  Note that this method is
        // written only for testing purposes; it DOESN'T simulate standard
        // vector behavior and requires that the (template parameter) type
        // 'VALUE_TYPE' has a constructor accepting an integer value as a
        // parameter.  Method
// invocation is recorded.
{
// Compiler warnings suppression.
(void)args_01;
(void)args_02;
(void)args_03;
(void)args_04;
g_calledMethodFlag |= e_EMPLACE_4;
d_vector.push_back(value_type(1));
}
template <class Args_01,
class Args_02,
class Args_03,
class Args_04,
class Args_05>
inline
void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the passed arguments.  Note that this method is
        // written only for testing purposes; it DOESN'T simulate standard
        // vector behavior and requires that the (template parameter) type
        // 'VALUE_TYPE' has a constructor accepting an integer value as a
        // parameter.  Method
// invocation is recorded.
{
// Compiler warnings suppression.
(void)args_01;
(void)args_02;
(void)args_03;
(void)args_04;
(void)args_05;
g_calledMethodFlag |= e_EMPLACE_5;
d_vector.push_back(value_type(1));
}
template <class Args_01,
class Args_02,
class Args_03,
class Args_04,
class Args_05,
class Args_06>
inline
void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_06) args_06)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the passed arguments.  Note that this method is
        // written only for testing purposes; it DOESN'T simulate standard
        // vector behavior and requires that the (template parameter) type
        // 'VALUE_TYPE' has a constructor accepting an integer value as a
        // parameter.  Method
// invocation is recorded.
{
// Compiler warnings suppression.
(void)args_01;
(void)args_02;
(void)args_03;
(void)args_04;
(void)args_05;
(void)args_06;
g_calledMethodFlag |= e_EMPLACE_6;
d_vector.push_back(value_type(1));
}
template <class Args_01,
class Args_02,
class Args_03,
class Args_04,
class Args_05,
class Args_06,
class Args_07>
inline
void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_06) args_06,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_07) args_07)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the passed arguments.  Note that this method is
        // written only for testing purposes; it DOESN'T simulate standard
        // vector behavior and requires that the (template parameter) type
        // 'VALUE_TYPE' has a constructor accepting an integer value as a
        // parameter.  Method
// invocation is recorded.
{
// Compiler warnings suppression.
(void)args_01;
(void)args_02;
(void)args_03;
(void)args_04;
(void)args_05;
(void)args_06;
(void)args_07;
g_calledMethodFlag |= e_EMPLACE_7;
d_vector.push_back(value_type(1));
}
template <class Args_01,
class Args_02,
class Args_03,
class Args_04,
class Args_05,
class Args_06,
class Args_07,
class Args_08>
inline
void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_06) args_06,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_07) args_07,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_08) args_08)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the passed arguments.  Note that this method is
        // written only for testing purposes; it DOESN'T simulate standard
        // vector behavior and requires that the (template parameter) type
        // 'VALUE_TYPE' has a constructor accepting an integer value as a
        // parameter.  Method
// invocation is recorded.
{
// Compiler warnings suppression.
(void)args_01;
(void)args_02;
(void)args_03;
(void)args_04;
(void)args_05;
(void)args_06;
(void)args_07;
(void)args_08;
g_calledMethodFlag |= e_EMPLACE_8;
d_vector.push_back(value_type(1));
}
template <class Args_01,
class Args_02,
class Args_03,
class Args_04,
class Args_05,
class Args_06,
class Args_07,
class Args_08,
class Args_09>
inline
void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_06) args_06,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_07) args_07,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_08) args_08,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_09) args_09)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the passed arguments.  Note that this method is
        // written only for testing purposes; it DOESN'T simulate standard
        // vector behavior and requires that the (template parameter) type
        // 'VALUE_TYPE' has a constructor accepting an integer value as a
        // parameter.  Method
// invocation is recorded.
{
// Compiler warnings suppression.
(void)args_01;
(void)args_02;
(void)args_03;
(void)args_04;
(void)args_05;
(void)args_06;
(void)args_07;
(void)args_08;
(void)args_09;
g_calledMethodFlag |= e_EMPLACE_9;
d_vector.push_back(value_type(1));
}
template <class Args_01,
class Args_02,
class Args_03,
class Args_04,
class Args_05,
class Args_06,
class Args_07,
class Args_08,
class Args_09,
class Args_10>
inline
void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_06) args_06,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_07) args_07,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_08) args_08,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_09) args_09,
BSLS_COMPILERFEATURES_FORWARD_REF(Args_10) args_10)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the passed arguments.  Note that this method is
        // written only for testing purposes; it DOESN'T simulate standard
        // vector behavior and requires that the (template parameter) type
        // 'VALUE_TYPE' has a constructor accepting an integer value as a
        // parameter.  Method
// invocation is recorded.
{
// Compiler warnings suppression.
(void)args_01;
(void)args_02;
(void)args_03;
(void)args_04;
(void)args_05;
(void)args_06;
(void)args_07;
(void)args_08;
(void)args_09;
(void)args_10;
g_calledMethodFlag |= e_EMPLACE_A;
d_vector.push_back(value_type(1));
}
#else
template <class... Args> void emplace_back(
BSLS_COMPILERFEATURES_FORWARD_REF(Args)... arguments)
// Append to the end of this vector a newly created 'value_type'
        // object, constructed with an integer literal as a parameter,
        // regardless of the specified 'arguments'.  Note that this method
        // is written only for testing purposes; it DOESN'T simulate
        // standard vector behavior and requires that the (template
        // parameter) type 'VALUE_TYPE' has a constructor accepting an
        // integer value as a parameter.  Method
// invocation is recorded.
{
int argumentsNumber = sizeof...(arguments);
g_calledMethodFlag |= static_cast<CalledMethod>(
static_cast<int>(e_EMPLACE_0) << argumentsNumber);
d_vector.push_back(value_type(1));
}
#endif
// ACCESSORS
const_iterator begin() const
        // Return an iterator pointing to the first element in this
        // non-modifiable vector (or the past-the-end iterator if this
        // vector is empty).
{
return d_vector.begin();
}
const_iterator end() const
// Return the past-the-end iterator for this non-modifiable vector.
{
return d_vector.end();
}
const_reference front() const
// Return a reference to the non-modifiable element at the first
// position in this vector. The behavior is undefined if this vector
// is empty.
{
return d_vector.front();
}
const_reference back() const
// Return a reference to the non-modifiable element at the last
// position in this vector. The behavior is undefined if this vector
// is empty.
{
return d_vector.back();
}
size_type size() const
// Return the number of elements in this vector.
{
return d_vector.size();
}
bool empty() const
// Return 'true' if this vector has size 0, and 'false' otherwise.
{
return d_vector.empty();
}
};
// -------------------
// class MovableVector
// -------------------
// CLASS METHODS
template <class CONTAINER>
class TestDriver;
template <class VALUE, class ALLOCATOR>
int MovableVector<VALUE, ALLOCATOR>::
GGG(MovableVector *object,
const char *spec,
int verbose)
{
bslma::DefaultAllocatorGuard guard(
&bslma::NewDeleteAllocator::singleton());
typename TestDriver<MovableVector>::TestValues VALUES;
enum { SUCCESS = -1 };
for (int i = 0; spec[i]; ++i) {
if ('A' <= spec[i] && spec[i] <= 'Z') {
object->push_back(VALUES[spec[i] - 'A']);
}
else {
if (verbose) {
printf("Error, bad character ('%c') "
"in spec \"%s\" at position %d.\n", spec[i], spec, i);
}
// Discontinue processing this spec.
return i; // RETURN
}
}
return SUCCESS;
}
template <class VALUE, class ALLOCATOR>
MovableVector<VALUE, ALLOCATOR>
MovableVector<VALUE, ALLOCATOR>::
GG(MovableVector *object, const char *spec)
{
ASSERTV(GGG(object, spec) < 0);
return *object;
}
// FREE OPERATORS
template<class VALUE, class ALLOCATOR>
bool operator==(const MovableVector<VALUE, ALLOCATOR>& lhs,
const MovableVector<VALUE, ALLOCATOR>& rhs)
{
return lhs.d_vector == rhs.d_vector;
}
// ==========================
// class StatefulStlAllocator
// ==========================
template <class VALUE>
class StatefulStlAllocator : public bsltf::StdTestAllocator<VALUE>
    // This class implements a standard-compliant allocator that has an
    // 'id' attribute.
{
// DATA
int d_id; // identifier
private:
// TYPES
typedef bsltf::StdTestAllocator<VALUE> StlAlloc;
// Alias for the base class.
public:
template <class OTHER_TYPE>
struct rebind {
// This nested 'struct' template, parameterized by some 'OTHER_TYPE',
// provides a namespace for an 'other' type alias, which is an
// allocator type following the same template as this one but that
// allocates elements of 'OTHER_TYPE'. Note that this allocator type
// is convertible to and from 'other' for any 'OTHER_TYPE' including
// 'void'.
typedef StatefulStlAllocator<OTHER_TYPE> other;
};
// CREATORS
StatefulStlAllocator()
// Create a 'StatefulStlAllocator' object.
: StlAlloc()
{
}
//! StatefulStlAllocator(const StatefulStlAllocator& original) = default;
// Create a 'StatefulStlAllocator' object having the same id as the
// specified 'original'.
template <class OTHER_TYPE>
StatefulStlAllocator(const StatefulStlAllocator<OTHER_TYPE>& original)
// Create a 'StatefulStlAllocator' object having the same id as the
// specified 'original' with a different template type.
: StlAlloc(original)
, d_id(original.id())
{
}
// MANIPULATORS
void setId(int value)
// Set the 'id' attribute of this object to the specified 'value'.
{
d_id = value;
}
// ACCESSORS
int id() const
// Return the value of the 'id' attribute of this object.
{
return d_id;
}
};
template <class T>
struct SpecialContainerTrait
    // A class should declare this trait if it records its method
    // invocations in the 'g_calledMethodFlag' global variable.
{
static const bool is_special_container = false;
};
template <class T>
struct SpecialContainerTrait<NonMovableVector<T> >
{
static const bool is_special_container = true;
};
template <class T>
struct SpecialContainerTrait<MovableVector<T> >
{
static const bool is_special_container = true;
};
template <class CONTAINER>
bool isCalledMethodCheckPassed(CalledMethod flag)
    // Return 'true' if the global variable 'g_calledMethodFlag' has the
    // same value as the specified 'flag', and 'false' otherwise.  Note that
    // this check is performed only for the special containers defined
    // above; the function always returns 'true' for all other classes.
{
if (SpecialContainerTrait<CONTAINER>::is_special_container) {
return flag == g_calledMethodFlag;
}
return true;
}
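// The instrumented containers above, together with 'setupCalledMethodCheck',
// 'getCalledMethod', and 'isCalledMethodCheckPassed', form a small protocol:
// reset the flag, perform exactly one operation on an instrumented
// container, then verify the recorded flag.  A minimal illustrative sketch
// (not invoked by the test cases below):
//..
//  setupCalledMethodCheck();
//  MovableVector<int> mV;              // records 'e_CTOR_DFT_SANS_ALLOC'
//  ASSERT(isCalledMethodCheckPassed<MovableVector<int> >(
//                                                  e_CTOR_DFT_SANS_ALLOC));
//..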
//=============================================================================
// USAGE EXAMPLE
//=============================================================================
// Suppose a husband wants to keep track of chores his wife has asked him to
// do. Over the years of being married, he has noticed that his wife generally
// wants the most recently requested task done first. If she has a new task in
// mind that is low-priority, she will avoid asking for it until higher
// priority tasks are finished. When he has finished all tasks, he is to
// report to his wife that he is ready for more.
// First, we define the class implementing the 'to-do' list.
class ToDoList {
// DATA
bsl::stack<const char *> d_stack;
public:
// MANIPULATORS
void enqueueTask(const char *task);
// Add the specified 'task', a string describing a task, to the
// list. Note the lifetime of the string referred to by 'task' must
// exceed the lifetime of the task in this list.
bool finishTask();
// Remove the current task from the list. Return 'true' if a task was
// removed and it was the last task on the list, and return 'false'
// otherwise.
// ACCESSORS
const char *currentTask() const;
// Return the string representing the current task. If there
// is no current task, return the string "<EMPTY>", which is
// not a valid task.
};
// MANIPULATORS
void ToDoList::enqueueTask(const char *task)
{
d_stack.push(task);
}
bool ToDoList::finishTask()
{
if (!d_stack.empty()) {
d_stack.pop();
return d_stack.empty(); // RETURN
}
return false;
}
// ACCESSORS
const char *ToDoList::currentTask() const
{
if (d_stack.empty()) {
return "<EMPTY>"; // RETURN
}
return d_stack.top();
}
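// A minimal illustrative sketch (hypothetical; not part of the component's
// documented usage sequence) exercising 'ToDoList'; it relies only on this
// test driver's 'ASSERT' macro.
void illustrateToDoListUsage()
    // Exercise 'ToDoList' as described above.
{
    ToDoList list;
    ASSERT(false == list.finishTask());  // nothing to do yet

    list.enqueueTask("Watch the kids.");
    list.enqueueTask("Take out the trash.");

    // The most recently requested task is done first.
    ASSERT(false == list.finishTask());  // "Take out the trash." done
    ASSERT(true  == list.finishTask());  // "Watch the kids." done; now empty
}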
//=============================================================================
// ====================
// class ExceptionGuard
// ====================
template <class OBJECT>
struct ExceptionGuard {
    // This class provides a mechanism to verify the strong exception
    // guarantee in exception-throwing code.  On construction, this class
    // stores a copy of an object of the parameterized type 'OBJECT' and the
    // address of that object.  On destruction, if 'release' was not
    // invoked, it will verify that the value of the object is the same as
    // the value of the copy created on construction.  This class requires
    // the copy constructor and 'operator==' to be tested before use.
// DATA
int d_line; // the line number at construction
OBJECT d_copy; // copy of the object being tested
const OBJECT *d_object_p; // address of the original object
public:
// CREATORS
ExceptionGuard(const OBJECT *object,
int line,
bslma::Allocator *basicAllocator = 0)
: d_line(line)
, d_copy(*object, basicAllocator)
, d_object_p(object)
// Create the exception guard for the specified 'object' at the
// specified 'line' number. Optionally, specify 'basicAllocator' used
// to supply memory.
{}
~ExceptionGuard()
        // Destroy the exception guard.  If the guard was not released,
        // verify that the state of the object supplied at construction has
        // not changed.
{
if (d_object_p) {
const int LINE = d_line;
ASSERTV(LINE, d_copy == *d_object_p);
}
}
// MANIPULATORS
void release()
// Release the guard from verifying the state of the object.
{
d_object_p = 0;
}
};
// ============================================================================
// GLOBAL TYPEDEFS FOR TESTING
// ----------------------------------------------------------------------------
//
// ================
// class TestDriver
// ================
template <class CONTAINER>
class TestDriver {
// TBD
//
    // This templatized struct provides a namespace for testing the 'stack'
    // container adapter.  The (template parameter) 'CONTAINER' specifies
    // the type of the underlying container adapted by the 'stack'.  Each
    // 'testCase*' method tests a specific aspect of
    // 'stack<VALUE, CONTAINER>'.  Every test case should be invoked with
    // various parameterized types to fully test the container.
public:
// PUBLIC TYPES
typedef bsl::stack<typename CONTAINER::value_type, CONTAINER> Obj;
// Type under test.
private:
// TYPES
typedef typename Obj::value_type value_type;
typedef typename Obj::reference reference;
typedef typename Obj::const_reference const_reference;
typedef typename Obj::size_type size_type;
typedef CONTAINER container_type;
// Shorthands
public:
typedef bsltf::TestValuesArray<value_type> TestValues;
private:
// TEST APPARATUS
//-------------------------------------------------------------------------
// The generating functions interpret the given 'spec' in order from left
// to right to configure the object according to a custom language.
// Uppercase letters [A..Z] correspond to arbitrary (but unique) char
    // values to be pushed onto the 'stack<VALUE, CONTAINER>' object.
//..
// LANGUAGE SPECIFICATION:
// -----------------------
//
// <SPEC> ::= <EMPTY> | <LIST>
//
// <EMPTY> ::=
//
// <LIST> ::= <ITEM> | <ITEM><LIST>
//
    // <ITEM> ::= <ELEMENT>
//
// <ELEMENT> ::= 'A' | 'B' | 'C' | 'D' | 'E' | ... | 'Z'
// // unique but otherwise arbitrary
// Spec String Description
// ----------- -----------------------------------------------------------
// "" Has no effect; leaves the object empty.
// "A" Insert the value corresponding to A.
// "AA" Insert two values both corresponding to A.
// "ABC" Insert three values corresponding to A, B and C.
//..
//-------------------------------------------------------------------------
static int ggg(Obj *object, const char *spec, int verbose = 1);
// Configure the specified 'object' according to the specified 'spec',
        // using only the primary manipulator function 'push'.  Optionally
        // specify a zero 'verbose' to suppress 'spec' syntax error messages.
        // Return the index of the first invalid character, and a negative
        // value otherwise.  Note that
// this function is used to implement 'gg' as well as allow for
// verification of syntax error detection.
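        // For example, 'ggg(&mX, "AB1")' (a hypothetical invocation) pushes
        // the values corresponding to 'A' and 'B' onto 'mX' and returns
        // '2', the index of the invalid character '1'.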
static Obj& gg(Obj *object, const char *spec);
// Return, by reference, the specified object with its value adjusted
// according to the specified 'spec'.
static Obj g(const char *spec);
// Return, by value, a new object corresponding to the specified
// 'spec'.
static void emptyAndVerify(Obj *obj,
const TestValues& testValues,
size_t numTestValues,
const int LINE);
// Pop the elements out of 'obj', verifying that they exactly match
// the first 'numTestValues' elements in 'testValues'.
static bool typeAlloc()
{
return bslma::UsesBslmaAllocator<value_type>::value;
}
static bool emptyWillAlloc()
{
// Creating an empty 'deque' allocates memory, creating an empty
// 'vector' does not.
return bsl::is_same<CONTAINER, deque<value_type> >::value;
}
static bool use_same_allocator(Obj& object,
int TYPE_ALLOC,
bslma::TestAllocator *ta);
// Return 'true' if the specified 'object' uses the specified 'ta'
        // allocator for supplying memory.  The specified 'TYPE_ALLOC'
        // indicates whether 'object' uses an allocator at all.  Return
        // 'false' if 'object' doesn't use 'ta'.
public:
// TEST CASES
static void testCase19();
// Test 'noexcept' specifications
#if !BSLS_COMPILERFEATURES_SIMULATE_CPP11_FEATURES
static void testCase18MoveOnlyType();
// Test move manipulators on move-only types
static void testCase17MoveOnlyType();
        // Test move constructors on move-only types
#endif // !BSLS_COMPILERFEATURES_SIMULATE_CPP11_FEATURES
template <bool PROPAGATE_ON_CONTAINER_MOVE_ASSIGNMENT_FLAG,
bool OTHER_FLAGS>
static void testCase18_propagate_on_container_move_assignment_dispatch();
static void testCase18_propagate_on_container_move_assignment();
// Test 'propagate_on_container_move_assignment'.
static void testCase18(bool isMovableContainer);
// Test move manipulators
static void testCase17(bool isMovableContainer);
// Test move constructors
static void testCase12();
// Test inequality operators
static void testCase11();
// Test type traits.
static void testCase10();
// Test bslma::Allocator.
template <bool PROPAGATE_ON_CONTAINER_COPY_ASSIGNMENT_FLAG,
bool OTHER_FLAGS>
static void testCase9_propagate_on_container_copy_assignment_dispatch();
static void testCase9_propagate_on_container_copy_assignment();
// Test 'propagate_on_container_copy_assignment'.
static void testCase9();
// Test assignment operator ('operator=').
template <bool PROPAGATE_ON_CONTAINER_SWAP_FLAG,
bool OTHER_FLAGS>
static void testCase8_propagate_on_container_swap_dispatch();
static void testCase8_propagate_on_container_swap();
// Test 'propagate_on_container_swap'.
static void testCase8();
// Test 'swap' member.
template <bool SELECT_ON_CONTAINER_COPY_CONSTRUCTION_FLAG,
bool OTHER_FLAGS>
static void testCase7_select_on_container_copy_construction_dispatch();
static void testCase7_select_on_container_copy_construction();
// Test 'select_on_container_copy_construction'.
static void testCase7();
// Test copy constructor.
static void testCase6();
// Test equality operator ('operator==').
static void testCase5();
// Reserved for (<<) operator.
static void testCase4();
// Test basic accessors ('size' and 'top').
static void testCase3();
// Test generator functions 'ggg', and 'gg'.
static void testCase2();
// Test primary manipulators ('push' and 'pop').
static void testCase1(int *testKeys,
size_t numValues);
        // Breathing test.  This test *exercises* basic functionality but
        // *tests* nothing.
static void testCase1_NoAlloc(int *testValues,
size_t numValues);
        // Breathing test, except on a non-allocator container.  This test
        // *exercises* basic functionality but *tests* nothing.
};
// ----------------
// class TestDriver
// ----------------
template <class CONTAINER>
bool TestDriver<CONTAINER>::use_same_allocator(Obj& object,
int TYPE_ALLOC,
bslma::TestAllocator *ta)
{
bslma::DefaultAllocatorGuard guard(
&bslma::NewDeleteAllocator::singleton());
const TestValues VALUES;
if (0 == TYPE_ALLOC) { // If 'VALUE' does not use allocator, return true.
return true; // RETURN
}
const bsls::Types::Int64 BB = ta->numBlocksTotal();
const bsls::Types::Int64 B = ta->numBlocksInUse();
object.push(VALUES[0]);
const bsls::Types::Int64 AA = ta->numBlocksTotal();
const bsls::Types::Int64 A = ta->numBlocksInUse();
if (BB + TYPE_ALLOC <= AA && B + TYPE_ALLOC <= A) {
return true; // RETURN
}
if (veryVerbose) {
        Q(Did not find expected allocator)
P(ta->name())
}
return false;
}
template <class CONTAINER>
int TestDriver<CONTAINER>::ggg(Obj *object,
const char *spec,
int verbose)
{
bslma::DefaultAllocatorGuard guard(
&bslma::NewDeleteAllocator::singleton());
const TestValues VALUES;
enum { SUCCESS = -1 };
for (int i = 0; spec[i]; ++i) {
if ('A' <= spec[i] && spec[i] <= 'Z') {
object->push(VALUES[spec[i] - 'A']);
}
else {
if (verbose) {
printf("Error, bad character ('%c') "
"in spec \"%s\" at position %d.\n", spec[i], spec, i);
}
// Discontinue processing this spec.
return i; // RETURN
}
}
return SUCCESS;
}
template <class CONTAINER>
bsl::stack<typename CONTAINER::value_type, CONTAINER>&
TestDriver<CONTAINER>::gg(Obj *object,
const char *spec)
{
ASSERTV(ggg(object, spec) < 0);
return *object;
}
template <class CONTAINER>
bsl::stack<typename CONTAINER::value_type, CONTAINER>
TestDriver<CONTAINER>::g(const char *spec)
{
Obj object((bslma::Allocator *)0);
return gg(&object, spec);
}
template <class CONTAINER>
void TestDriver<CONTAINER>::emptyAndVerify(Obj *obj,
const TestValues& testValues,
size_t numTestValues,
const int LINE)
{
ASSERTV(LINE, numTestValues, obj->size(), numTestValues == obj->size());
for (int ti = static_cast<int>(numTestValues) - 1; ti >= 0; --ti) {
ASSERTV(LINE, testValues[ti], obj->top(),
testValues[ti] == obj->top());
obj->pop();
}
ASSERTV(LINE, obj->size(), obj->empty());
ASSERTV(LINE, obj->size(), 0 == obj->size());
}
template <class CONTAINER>
void TestDriver<CONTAINER>::testCase19()
{
// ------------------------------------------------------------------------
// 'noexcept' SPECIFICATION
//
// Concerns:
//: 1 The 'noexcept' specification has been applied to all class interfaces
//: required by the standard.
//
// Plan:
    //: 1 Apply the unary 'noexcept' operator to expressions that mimic
    //:   those appearing in the standard and confirm that the calculated
    //:   boolean value matches the expected value.
//:
    //: 2 Since the 'noexcept' specification does not vary with the 'TYPE'
    //:   of the container, we need to test for just one general type and
    //:   any 'TYPE' specializations.
//
// Testing:
    //   CONCERN: Methods qualified 'noexcept' in standard are so implemented.
// ------------------------------------------------------------------------
if (verbose) {
P(bsls::NameOf<CONTAINER>())
}
// N4594: 23.6.6.1 'stack' definition
// page 905:
//..
// void swap(stack& s) noexcept(is_nothrow_swappable_v<Container>)
// { using std::swap; swap(c, s.c); }
//..
{
Obj c;
Obj s;
ASSERT(false == BSLS_KEYWORD_NOEXCEPT_OPERATOR(c.swap(s)));
}
// page 905
//..
// template <class T, class Container>
// void swap(stack<T, Container>& x, stack<T, Container>& y)
// noexcept(noexcept(x.swap(y)));
//..
{
Obj x;
Obj y;
ASSERT(false == BSLS_KEYWORD_NOEXCEPT_OPERATOR(swap(x, y)));
}
}
#if !BSLS_COMPILERFEATURES_SIMULATE_CPP11_FEATURES
template <class CONTAINER>
void TestDriver<CONTAINER>::testCase18MoveOnlyType()
{
// ------------------------------------------------------------------------
// MOVE MANIPULATORS FOR MOVE ONLY TYPES
//
// Concerns:
//: 1 The implementation of the move manipulator methods do not rely on
//: the (non-existent) copy construction or copy assignment methods of
//: the contained type.
//
// Plan:
//: 1 Instantiate this test method for the instrumented helper container
//: class, 'MovableVector', using 'bsltf::MoveOnlyAllocTestType' for the
//: contained value type.
//:
//: 2 Recast the tests of 'testCase18' so there is no reliance on copy
//: construction or copy assignment.
//
// Testing:
    //   operator=(MovableRef stack)
    //   emplace(Args&&... args)
// push(MovableRef value)
// ------------------------------------------------------------------------
enum { k_MAX_NUM_PARAMS = 10 };
typedef typename CONTAINER::value_type VALUE;
const int TYPE_ALLOC = bslma::UsesBslmaAllocator<VALUE>::value;
const bool is_special_container =
SpecialContainerTrait<CONTAINER>::is_special_container;
const bool is_copy_constructible = bsl::is_copy_constructible<VALUE>::value;
if (verbose) {
P_(bsls::NameOf<CONTAINER>())
P_(bsls::NameOf<VALUE>())
P_(is_special_container)
P_(is_copy_constructible)
P (TYPE_ALLOC)
}
ASSERT( is_special_container);
ASSERT(!is_copy_constructible);
if (verbose) { printf("Movable 'push'"); }
{
const CalledMethod expectedPushMethod = e_PUSH_BACK_MOVE;
const int count = 3;
Obj mX; const Obj& X = mX; // test object for 'push'
for (int i = 0; i < count; ++i) {
if (veryVerbose) { P(i) }
            setupCalledMethodCheck();
            mX.push(MoveUtil::move(VALUE(i)));
            ASSERT(isCalledMethodCheckPassed<CONTAINER>(expectedPushMethod));
            ASSERT(VALUE(i) == X.top());  // most recently pushed is on top
}
}
if (verbose) { printf("Movable 'operator='"); }
{
const CalledMethod expectedAssignMethod = e_ASSIGN_MOVE;
const int count = 3;
for (int i = 0; i < count; ++i) {
if (veryVerbose) { P(i) }
Obj mX; const Obj& X = mX;
Obj mY; const Obj& Y = mY;
for (int j = 0; j < i; ++j) {
mX.push(VALUE(j));
mY.push(VALUE(j));
}
Obj mZ; const Obj& Z = mZ;
setupCalledMethodCheck();
mZ = MoveUtil::move(mX);
ASSERTV(
i,
bsls::NameOf<CONTAINER>(),
expectedAssignMethod,
getCalledMethod(),
isCalledMethodCheckPassed<CONTAINER>(expectedAssignMethod));
ASSERT(Y == Z);
}
}
if (verbose) { printf("'emplace'"); }
{
Obj mA; const Obj& A = mA; // test object for 'emplace'
Obj mB; const Obj& B = mB; // control object for 'emplace'
(void) A; // Compiler warnings suppression.
(void) B; // Compiler warnings suppression.
for (int numArgs = 0; numArgs < k_MAX_NUM_PARAMS; ++numArgs) {
if (veryVerbose) { P(numArgs) }
VALUE *addressOfResult = 0;
CalledMethod expectedEmplacePush =
static_cast<CalledMethod>(static_cast<int>(e_EMPLACE_0)
<< numArgs);
setupCalledMethodCheck();
switch (numArgs) {
case 0: {
VALUE& result = mA.emplace();
addressOfResult = bsls::Util::addressOf(result);
} break;
case 1: {
VALUE& result = mA.emplace(0);
addressOfResult = bsls::Util::addressOf(result);
} break;
case 2: {
VALUE& result = mA.emplace(0, 0);
addressOfResult = bsls::Util::addressOf(result);
} break;
case 3: {
VALUE& result = mA.emplace(0, 0, 0);
addressOfResult = bsls::Util::addressOf(result);
} break;
case 4: {
VALUE& result = mA.emplace(0, 0, 0, 0);
addressOfResult = bsls::Util::addressOf(result);
} break;
case 5: {
VALUE& result = mA.emplace(0, 0, 0, 0, 0);
addressOfResult = bsls::Util::addressOf(result);
} break;
case 6: {
VALUE& result = mA.emplace(0, 0, 0, 0, 0, 0);
addressOfResult = bsls::Util::addressOf(result);
} break;
case 7: {
VALUE& result = mA.emplace(0, 0, 0, 0, 0, 0, 0);
addressOfResult = bsls::Util::addressOf(result);
} break;
case 8: {
VALUE& result = mA.emplace(0, 0, 0, 0, 0, 0, 0, 0);
addressOfResult = bsls::Util::addressOf(result);
} break;
case 9: {
VALUE& result = mA.emplace(0, 0, 0, 0, 0, 0, 0, 0, 0);
addressOfResult = bsls::Util::addressOf(result);
} break;
case 10: {
VALUE& result = mA.emplace(0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
addressOfResult = bsls::Util::addressOf(result);
} break;
              default:
                ASSERT(!"'numArgs' not in range '[0, k_MAX_NUM_PARAMS]'");
}
ASSERTV(
numArgs,
bsls::NameOf<CONTAINER>(),
expectedEmplacePush,
getCalledMethod(),
isCalledMethodCheckPassed<CONTAINER>(expectedEmplacePush));
const VALUE *ADDRESS_OF_TOP_VALUE = bsls::Util::addressOf(A.top());
ASSERTV(numArgs, bsls::NameOf<CONTAINER>(),
ADDRESS_OF_TOP_VALUE == addressOfResult);
            // Track expected value of 'A'.  Note that the 'emplace' methods
            // of '(Non)?MovableVector' append 'VALUE(1)' regardless of the
            // number and value of their arguments.
mB.push(VALUE(1));
ASSERTV(A.size(), B.size(), B == A);
}
}
}
template <class CONTAINER>
void TestDriver<CONTAINER>::testCase17MoveOnlyType()
{
// ------------------------------------------------------------------------
// MOVE CONSTRUCTORS FOR MOVE ONLY TYPES
//
// Concerns:
//: 1 The implementation of the move constructors do not rely on the
//: (non-existent) copy construction and copy assignment methods of the
//: contained type.
//
// Plan:
//: 1 Instantiate this test method for the instrumented helper container
//: class, 'MovableVector', using 'bsltf::MoveOnlyAllocTestType' for the
//: contained value type.
//:
    //: 2 Recast the tests of 'testCase17' so there is no reliance on copy
    //:   construction or copy assignment.
//
// Testing:
    //   stack(MovableRef container);
    //   stack(MovableRef original);
    //   stack(MovableRef container, const ALLOCATOR& allocator);
    //   stack(MovableRef original, const ALLOCATOR& allocator);
// ------------------------------------------------------------------------
typedef typename CONTAINER::value_type VALUE;
const int TYPE_ALLOC = bslma::UsesBslmaAllocator<VALUE>::value;
const bool is_special_container =
SpecialContainerTrait<CONTAINER>::is_special_container;
const bool is_copy_constructible = bsl::is_copy_constructible<VALUE>
::value;
if (verbose) {
P_(bsls::NameOf<CONTAINER>())
P_(bsls::NameOf<VALUE>())
P_(is_special_container)
P_(is_copy_constructible)
P (TYPE_ALLOC)
}
ASSERT( is_special_container);
ASSERT(!is_copy_constructible);
{
const int NUM_DATA = DEFAULT_NUM_DATA;
const DefaultDataRow (&DATA)[NUM_DATA] = DEFAULT_DATA;
const TestValues VALUES;
for (int ti = 0; ti < NUM_DATA; ++ti) {
const int LINE = DATA[ti].d_line; // source line number
const char *const SPEC = DATA[ti].d_spec;
if (veryVerbose) {
T_ P_(LINE) P(SPEC);
}
bslma::TestAllocator da("default", veryVeryVeryVerbose);
bslma::TestAllocator sa("source" , veryVeryVeryVerbose);
        for (char cfg = 'a'; cfg <= 'f'; ++cfg) {
const char CONFIG = cfg; // how we call the constructor
if (veryVerbose) {
T_ T_ P(CONFIG);
}
// Create source object
Obj *pX = new Obj(&sa);
Obj& mX = *pX; const Obj& X = mX;
// Create control object
Obj mZ; const Obj& Z = mZ;
// Create value ('CONTAINER') object
CONTAINER mC(&sa); const CONTAINER& C = mC;
// Install default allocator.
bslma::DefaultAllocatorGuard dag(&da);
bslma::TestAllocator ta("target", veryVeryVeryVerbose);
bslma::TestAllocator fa("footprint", veryVeryVeryVerbose);
Obj *objPtr;
bslma::TestAllocator *objAllocatorPtr; (void)objAllocatorPtr;
setupCalledMethodCheck();
CalledMethod expectedCtor;
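                // Configs 'a'-'c' move-construct from the whole stack 'mX';
                // configs 'd'-'f' move-construct from the bare container
                // 'mC'.  A null allocator pointer selects the currently
                // installed default allocator.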
switch (CONFIG) {
case 'a': {
objPtr = new (fa) Obj(MoveUtil::move(mX));
objAllocatorPtr = &sa;
expectedCtor = e_CTOR_MOV_SANS_ALLOC;
} break;
case 'b': {
objPtr = new (fa) Obj(MoveUtil::move(mX),
(bslma::Allocator *)0);
objAllocatorPtr = &da;
expectedCtor = e_CTOR_MOV_AVEC_ALLOC;
} break;
case 'c': {
objPtr = new (fa) Obj(MoveUtil::move(mX), &ta);
objAllocatorPtr = &ta;
expectedCtor = e_CTOR_MOV_AVEC_ALLOC;
} break;
case 'd': {
objPtr = new (fa) Obj(MoveUtil::move(mC));
objAllocatorPtr = &sa;
expectedCtor = e_CTOR_MOV_SANS_ALLOC;
} break;
case 'e': {
objPtr = new (fa) Obj(MoveUtil::move(mC),
(bslma::Allocator *)0);
objAllocatorPtr = &da;
expectedCtor = e_CTOR_MOV_AVEC_ALLOC;
} break;
case 'f': {
objPtr = new (fa) Obj(MoveUtil::move(mC), &ta);
objAllocatorPtr = &ta;
expectedCtor = e_CTOR_MOV_AVEC_ALLOC;
} break;
default: {
ASSERTV(LINE, SPEC, CONFIG, !"Bad constructor config.");
return; // RETURN
} break;
}
Obj& mY = *objPtr; const Obj& Y = mY; // test object
ASSERTV(
bsls::NameOf<CONTAINER>(),
LINE,
SPEC,
expectedCtor,
getCalledMethod(),
true == isCalledMethodCheckPassed<CONTAINER>(expectedCtor));
ASSERTV(LINE, SPEC, CONFIG, sizeof(Obj) == fa.numBytesInUse());
// Reclaim dynamically allocated source object.
delete pX;
// Reclaim dynamically allocated object under test.
fa.deleteObject(objPtr);
// Verify all memory is released on object destruction.
ASSERTV(LINE, SPEC, CONFIG, fa.numBlocksInUse(),
0 == fa.numBlocksInUse());
ASSERTV(LINE, SPEC, CONFIG, ta.numBlocksInUse(),
0 == ta.numBlocksInUse());
}
ASSERTV(LINE, SPEC, da.numBlocksInUse(), 0 == da.numBlocksInUse());
ASSERTV(LINE, SPEC, sa.numBlocksInUse(), 0 == sa.numBlocksInUse());
}
}
}
#endif // !BSLS_COMPILERFEATURES_SIMULATE_CPP11_FEATURES
template <class CONTAINER>
template <bool PROPAGATE_ON_CONTAINER_MOVE_ASSIGNMENT_FLAG,
bool OTHER_FLAGS>
void TestDriver<CONTAINER>::
testCase18_propagate_on_container_move_assignment_dispatch()
{
typedef typename CONTAINER::value_type VALUE;
// Set the three properties of 'bsltf::StdStatefulAllocator' that are not
// under test in this test case to 'false'.
typedef bsltf::StdStatefulAllocator<
VALUE,
OTHER_FLAGS,
OTHER_FLAGS,
OTHER_FLAGS,
PROPAGATE_ON_CONTAINER_MOVE_ASSIGNMENT_FLAG>
StdAlloc;
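    // The last flag position of 'StdAlloc' corresponds to the
    // 'propagate_on_container_move_assignment' trait under test.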
typedef bsl::deque<VALUE, StdAlloc> CObj;
typedef bsl::stack<VALUE, CObj> Obj;
const bool PROPAGATE = PROPAGATE_ON_CONTAINER_MOVE_ASSIGNMENT_FLAG;
static const char *SPECS[] = {
"",
"A",
"BC",
"CDE",
};
const int NUM_SPECS = static_cast<const int>(sizeof SPECS / sizeof *SPECS);
bslma::TestAllocator da("default", veryVeryVeryVerbose);
bslma::DefaultAllocatorGuard dag(&da);
// Create control and source objects.
for (int ti = 0; ti < NUM_SPECS; ++ti) {
const char *const ISPEC = SPECS[ti];
const size_t ILENGTH = strlen(ISPEC);
TestValues IVALUES(ISPEC);
bslma::TestAllocator oas("source", veryVeryVeryVerbose);
bslma::TestAllocator oat("target", veryVeryVeryVerbose);
StdAlloc mas(&oas);
StdAlloc mat(&oat);
StdAlloc scratch(&da);
const CObj CI(IVALUES.begin(), IVALUES.end(), scratch);
const Obj W(CI, scratch); // control
// Create target object.
for (int tj = 0; tj < NUM_SPECS; ++tj) {
const char *const JSPEC = SPECS[tj];
const size_t JLENGTH = strlen(JSPEC);
TestValues JVALUES(JSPEC);
{
Obj mY(CI, mas); const Obj& Y = mY;
if (veryVerbose) { T_ P_(ISPEC) P_(Y) P(W) }
const CObj CJ(JVALUES.begin(), JVALUES.end(), scratch);
Obj mX(CJ, mat); const Obj& X = mX;
bslma::TestAllocatorMonitor oasm(&oas);
bslma::TestAllocatorMonitor oatm(&oat);
Obj *mR = &(mX = MoveUtil::move(mY));
ASSERTV(ISPEC, JSPEC, W, X, W == X);
ASSERTV(ISPEC, JSPEC, mR, &mX, mR == &mX);
// TBD no 'get_allocator' in 'stack'
#if 0
ASSERTV(ISPEC, JSPEC, PROPAGATE,
!PROPAGATE == (mat == X.get_allocator()));
ASSERTV(ISPEC, JSPEC, PROPAGATE,
PROPAGATE == (mas == X.get_allocator()));
ASSERTV(ISPEC, JSPEC, mas == Y.get_allocator());
#endif
if (PROPAGATE) {
ASSERTV(ISPEC, JSPEC, 0 == oat.numBlocksInUse());
}
else {
ASSERTV(ISPEC, JSPEC, oasm.isInUseSame());
}
}
ASSERTV(ISPEC, 0 == oas.numBlocksInUse());
ASSERTV(ISPEC, 0 == oat.numBlocksInUse());
}
}
ASSERTV(0 == da.numBlocksInUse());
}
template <class CONTAINER>
void TestDriver<CONTAINER>::testCase18_propagate_on_container_move_assignment()
{
// ------------------------------------------------------------------------
// MOVE-ASSIGNMENT OPERATOR: ALLOCATOR PROPAGATION
//
// Concerns:
//: 1 If the 'propagate_on_container_move_assignment' trait is 'false', the
//: allocator used by the target object remains unchanged (i.e., the
//: source object's allocator is *not* propagated).
//:
//: 2 If the 'propagate_on_container_move_assignment' trait is 'true', the
//: allocator used by the target object is updated to be a copy of that
//: used by the source object (i.e., the source object's allocator *is*
//: propagated).
//:
//: 3 The allocator used by the source object remains unchanged whether or
    //:   not it is propagated to the target object.
//:
//: 4 If the allocator is propagated from the source object to the target
//: object, all memory allocated from the target object's original
//: allocator is released.
//:
//: 5 The effect of the 'propagate_on_container_move_assignment' trait is
//: independent of the other three allocator propagation traits.
//
// Plan:
//: 1 Specify a set S of object values with varied differences, ordered by
//: increasing length, to be used in the following tests.
//:
//: 2 Create two 'bsltf::StdStatefulAllocator' objects with their
//: 'propagate_on_container_move_assignment' property configured to
//: 'false'. In two successive iterations of P-3, first configure the
//: three properties not under test to be 'false', then configure them
//: all to be 'true'.
//:
//: 3 For each value '(x, y)' in the cross product S x S: (C-1)
//:
//: 1 Initialize an object 'X' from 'x' using one of the allocators from
//: P-2.
//:
//: 2 Initialize two objects from 'y', a control object 'W' using a
//: scratch allocator and an object 'Y' using the other allocator from
//: P-2.
//:
//: 3 Move-assign 'Y' to 'X' and use 'operator==' to verify that 'X'
//: subsequently has the same value as 'W'.
//:
//: 4 Use the 'get_allocator' method to verify that the allocator of 'Y'
//: is *not* propagated to 'X' and that the allocator used by 'Y'
//: remains unchanged. (C-1)
//:
//: 4 Repeat P-2..3 except that this time configure the allocator property
//: under test to 'true' and verify that the allocator of 'Y' *is*
//: propagated to 'X'. Also verify that all memory is released to the
//: allocator that was in use by 'X' prior to the assignment. (C-2..5)
//
// Testing:
// propagate_on_container_move_assignment
// ------------------------------------------------------------------------
if (verbose) printf("\nMOVE-ASSIGNMENT OPERATOR: ALLOCATOR PROPAGATION"
"\n===============================================\n");
if (verbose)
printf("\n'propagate_on_container_move_assignment::value == false'\n");
testCase18_propagate_on_container_move_assignment_dispatch<false, false>();
testCase18_propagate_on_container_move_assignment_dispatch<false, true>();
if (verbose)
printf("\n'propagate_on_container_move_assignment::value == true'\n");
testCase18_propagate_on_container_move_assignment_dispatch<true, false>();
testCase18_propagate_on_container_move_assignment_dispatch<true, true>();
}
template <class CONTAINER>
void TestDriver<CONTAINER>::testCase18(bool isMovableContainer)
{
// ------------------------------------------------------------------------
// MOVE MANIPULATORS:
//
// Concerns:
//: 1 Each of the methods under test correctly forwards its arguments
//: to the corresponding method of the underlying 'CONTAINER' when
//: that container provides those "move" methods, and to the expected
//: alternate methods otherwise.
//:
//: 2 The reference returned from the assignment operator is to the target
//: object (i.e., '*this').
//:
    //: 3 'emplace' returns a reference to the newly inserted element.
//
// Plan:
//: 1 Instantiate this test method for the two instrumented helper
//: container classes: 'NonMovableVector' and 'MovableVector'.
//:
    //: 2 Use loop-based tests that iterate for a small number of values.
    //:   Use 3 different values for the 'push' and assignment tests.  The
    //:   'emplace' test passes a different number of arguments on each
    //:   iteration; iterating over argument counts from 0 to 10 addresses
    //:   each of the overloads used when C++11 support is not available.
//:
//: 3 For each test create a "control" object that has the expected
//: value of the object under test. Create the control object using
//: the previously tested (non-moveable) 'push' method.
//:
//: 4 Invoke the method under test on the object under test. Confirm
//: that the expected enumerated value was set in the global variable.
//: Confirm that the test object has the expected value. Confirm that
//: the expected value is returned (if any).
//
// Testing:
// operator=(MovableRef stack)
    //   emplace(Args&&... args)
// push(MovableRef value)
// ------------------------------------------------------------------------
typedef typename CONTAINER::value_type VALUE;
enum { k_MAX_NUM_PARAMS = 10 };
const int TYPE_ALLOC = bslma::UsesBslmaAllocator<VALUE>::value;
const bool is_special_container =
SpecialContainerTrait<CONTAINER>::is_special_container;
const TestValues VALUES;
if (verbose) {
P_(bsls::NameOf<CONTAINER>())
P_(bsls::NameOf<VALUE>())
P_(is_special_container)
P (TYPE_ALLOC)
}
ASSERT(is_special_container);
if (verbose) { printf("Movable 'push'"); }
{
Obj mX; const Obj& X = mX; // test object for 'push'
Obj mY; const Obj& Y = mY; // control object for 'push'
CalledMethod expectedPushMethod = isMovableContainer
? e_PUSH_BACK_MOVE
: e_PUSH_BACK_CREF;
for (int i = 0; i < 3; ++i) {
if (veryVerbose) { P(i) }
VALUE value = VALUES[i];
VALUE valueToBeMoved = value;
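            // 'valueToBeMoved' is an expendable copy that the move 'push'
            // may consume; the original 'value' is preserved for the
            // control 'push' below.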
setupCalledMethodCheck();
mX.push(MoveUtil::move(valueToBeMoved));
ASSERT(isCalledMethodCheckPassed<CONTAINER>(expectedPushMethod));
setupCalledMethodCheck();
mY.push( value);
ASSERT(isCalledMethodCheckPassed<CONTAINER>(e_PUSH_BACK_CREF));
ASSERT(Y == X);
}
}
if (verbose) { printf("Movable 'operator='"); }
{
CalledMethod expectedAssignMethod = isMovableContainer
? e_ASSIGN_MOVE
: e_ASSIGN_CREF;
Obj mX; const Obj& X = mX; // test object for 'push'
for (int i = 0; i < 3; ++i) {
if (veryVerbose) { P(i) }
VALUE value = VALUES[i];
Obj mU; const Obj& U = mU; // test object
Obj mV; const Obj& V = mV; // control object
mX.push(value);
Obj mT(X); // sacrifice object
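            // Move-assigning from 'mT' may leave it in a valid but
            // unspecified state; 'X' retains the expected value for
            // comparison.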
Obj *mR = 0;
setupCalledMethodCheck();
mR = &(mU = MoveUtil::move(mT));
ASSERTV(bsls::Util::addressOf(U) == mR);
ASSERTV(
i,
bsls::NameOf<CONTAINER>(),
expectedAssignMethod,
getCalledMethod(),
isCalledMethodCheckPassed<CONTAINER>(expectedAssignMethod));
ASSERT(U == X);
setupCalledMethodCheck();
mV = X;
ASSERTV(
i,
bsls::NameOf<CONTAINER>(),
expectedAssignMethod,
getCalledMethod(),
isCalledMethodCheckPassed<CONTAINER>(e_ASSIGN_CREF));
ASSERT(V == X);
ASSERT(U == V);
}
}
if (verbose) { printf("'emplace'"); }
{
Obj mA; const Obj& A = mA; // test object for 'emplace'
Obj mB; const Obj& B = mB; // control object for 'emplace'
        for (int value = 0; value <= k_MAX_NUM_PARAMS; ++value) {
if (veryVerbose) { P(value) }
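            // Map the argument count to the expected 'CalledMethod' flag:
            // 'e_EMPLACE_0 << value' designates the 'value'-argument
            // 'emplace' overload.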
CalledMethod expectedEmplacePush =
static_cast<CalledMethod>(static_cast<int>(e_EMPLACE_0)
<< value);
setupCalledMethodCheck();
VALUE *addressOfResult = 0;
switch (value) {
case 0: {
VALUE& result = mA.emplace();
addressOfResult = bsls::Util::addressOf(result);
} break;
case 1: {
VALUE& result = mA.emplace(0);
addressOfResult = bsls::Util::addressOf(result);
} break;
case 2: {
VALUE& result = mA.emplace(0, 0);
addressOfResult = bsls::Util::addressOf(result);
} break;
case 3: {
VALUE& result = mA.emplace(0, 0, 0);
addressOfResult = bsls::Util::addressOf(result);
} break;
case 4: {
VALUE& result = mA.emplace(0, 0, 0, 0);
addressOfResult = bsls::Util::addressOf(result);
} break;
case 5: {
VALUE& result = mA.emplace(0, 0, 0, 0, 0);
addressOfResult = bsls::Util::addressOf(result);
} break;
case 6: {
VALUE& result = mA.emplace(0, 0, 0, 0, 0, 0);
addressOfResult = bsls::Util::addressOf(result);
} break;
case 7: {
VALUE& result = mA.emplace(0, 0, 0, 0, 0, 0, 0);
addressOfResult = bsls::Util::addressOf(result);
} break;
case 8: {
VALUE& result = mA.emplace(0, 0, 0, 0, 0, 0, 0, 0);
addressOfResult = bsls::Util::addressOf(result);
} break;
case 9: {
VALUE& result = mA.emplace(0, 0, 0, 0, 0, 0, 0, 0, 0);
addressOfResult = bsls::Util::addressOf(result);
} break;
case 10: {
VALUE& result = mA.emplace(0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
addressOfResult = bsls::Util::addressOf(result);
} break;
default:
ASSERT(!"'value' not in range '[0, k_MAX_NUM_PARAMS]'");
}
const VALUE *ADDRESS_OF_TOP = bsls::Util::addressOf(A.top());
ASSERTV(ADDRESS_OF_TOP == addressOfResult);
ASSERTV(
value,
bsls::NameOf<CONTAINER>(),
expectedEmplacePush,
getCalledMethod(),
isCalledMethodCheckPassed<CONTAINER>(expectedEmplacePush));
            // Track expected value of 'A'.  Note that the 'emplace' methods
            // of '(Non)?MovableVector' append 'VALUE(1)' regardless of the
            // number and value of their arguments.
mB.push(VALUE(1));
ASSERTV(A.size(), B.size(), B == A);
}
}
}
template <class CONTAINER>
void TestDriver<CONTAINER>::testCase17(bool isMovableContainer)
{
// ------------------------------------------------------------------------
// MOVE CONSTRUCTORS:
    // Ensure that we can construct any object of the class from another
    // object of the class used as the source.  For backward compatibility,
    // the copy constructor must be used in the absence of a move
    // constructor.  We are going to use the two special containers
    // 'NonMovableVector' and 'MovableVector', which record the called
    // method, to verify it.
//
// Concerns:
//: 1 Appropriate constructor of underlying container (move or copy) is
//: called.
//:
//: 2 The new object has the same value as the source object.
//:
//: 3 All internal representations of a given value can be used to create a
//: new object of equivalent value.
//:
//: 4 The source object is left in a valid but unspecified state.
//:
    //: 5 No additional memory is allocated by the target object.
    //:
    //: 6 If an allocator is NOT supplied to the constructor, the
    //:   allocator of the source object in effect at the time of
    //:   construction becomes the object allocator for the resulting
    //:   object.
    //:
    //: 7 If an allocator IS supplied to the constructor, that
    //:   allocator becomes the object allocator for the resulting object.
    //:
    //: 8 If a null allocator address IS supplied to the constructor, the
    //:   default allocator in effect at the time of construction becomes
    //:   the object allocator for the resulting object.
    //:
    //: 9 Supplying an allocator to the constructor has no effect on
    //:   subsequent object values.
    //:
    //:10 Subsequent changes to or destruction of the source object have no
    //:   effect on the move-constructed object and vice-versa.
    //:
    //:11 Every object releases any allocated memory at destruction.
//
// Plan:
//: 1 Using the table-driven technique:
//:
//: 1 Specify a set of (unique) valid source object values.
//:
//: 2 Specify a set of (unique) valid value ('CONTAINER') objects.
//:
//: 2 For each row (representing a distinct object value, 'V') in the table
//: described in P-1:
//:
//: 1 Execute an inner loop creating three distinct objects, in turn,
//: each object having the same value, 'V', but configured differently
//: identified by 'CONFIG':
//:
//: 'a': passing a source object without passing an allocator;
//:
//: 'b': passing a source object and an explicit null allocator;
//:
//: 'c': passing a source object and the address of a test allocator
//: distinct from the default and source object's allocators.
//:
//: 'd': passing a value object without passing an allocator;
//:
//: 'e': passing a value object and an explicit null allocator;
//:
//: 'f': passing a value object and the address of a test allocator
//: distinct from the default and source object's allocators.
//:
//: 2 For each of the four iterations in P-2.1:
//:
//: 1 Use the value constructor with 'sa' allocator to create dynamic
//: source object 'mX' and control object 'mZ', each having the value
//: 'V'.
//:
    //:    2 Create a 'bslma::TestAllocator' object, and install it as the
//: default allocator (note that a ubiquitous test allocator is
//: already installed as the global allocator).
//:
//: 3 Choose the move constructor depending on 'CONFIG' to dynamically
//: create an object, 'mY', using movable reference of 'mX'.
//:
//: 4 Verify that the appropriate constructor of underlying container
//: has been called. Note that this check is skipped for all classes
//: except special containers 'NonMovableVector' and 'MovableVector'.
//: (C-1)
//:
//: 5 Use the appropriate test allocator to verify that no additional
//: memory is allocated by the target object. (C-5)
//:
//: 6 Use the helper function 'use_same_allocator' to verify each
//: underlying attribute capable of allocating memory to ensure
//: that its object allocator is properly installed. (C-6..9)
//:
    //:    7 Use the helper function 'use_same_comparator' to verify that
    //:      the target object, 'mY', has the same comparator as that of
    //:      'mZ', to ensure that the new object's comparator is properly
    //:      installed.  (C-2..3)
//:
//: 8 Add some values to the source and target object separately.
//: Verify that they change independently. Destroy source object.
//: Verify that target object is unaffected. (C-4, 10)
//:
//: 9 Delete the target object and let the control object go out of
//: scope to verify, that all memory has been released. (C-11)
//
// Testing:
// stack(MovableRef container)
// stack(MovableRef container, bslma::Allocator *bA)
// stack(MovableRef stack)
// stack(MovableRef stack, bslma::Allocator *bA)
// ------------------------------------------------------------------------
typedef typename CONTAINER::value_type VALUE;
const int TYPE_ALLOC = bslma::UsesBslmaAllocator<VALUE>::value;
const bool is_special_container =
SpecialContainerTrait<CONTAINER>::is_special_container;
if (verbose) {
P_(bsls::NameOf<CONTAINER>())
P_(bsls::NameOf<VALUE>())
P_(is_special_container)
P (TYPE_ALLOC)
}
{ ASSERT(is_special_container);
const int NUM_DATA = DEFAULT_NUM_DATA;
const DefaultDataRow (&DATA)[NUM_DATA] = DEFAULT_DATA;
const TestValues VALUES;
for (int ti = 0; ti < NUM_DATA; ++ti) {
const int LINE = DATA[ti].d_line; // source line number
const char *const SPEC = DATA[ti].d_spec;
if (veryVerbose) {
T_ P_(LINE) P(SPEC);
}
bslma::TestAllocator da("default", veryVeryVeryVerbose);
bslma::TestAllocator sa("source" , veryVeryVeryVerbose);
        for (char cfg = 'a'; cfg <= 'f'; ++cfg) {
const char CONFIG = cfg; // how we call the constructor
if (veryVerbose) {
T_ T_ P(CONFIG);
}
// Create source object
Obj *pX = new Obj(&sa);
Obj& mX = gg(pX, SPEC);
const Obj& X = mX;
// Create control object
Obj mZ; const Obj& Z = gg(&mZ, SPEC);
// Create value ('CONTAINER') object
CONTAINER mC(&sa);
const CONTAINER& C = CONTAINER::GG(&mC, SPEC);
// Install default allocator.
bslma::DefaultAllocatorGuard dag(&da);
bslma::TestAllocator ta("target", veryVeryVeryVerbose);
bslma::TestAllocator fa("footprint", veryVeryVeryVerbose);
Obj *objPtr;
bslma::TestAllocator *objAllocatorPtr;
setupCalledMethodCheck();
CalledMethod expectedCtor;
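                // For a movable container the move constructor of the
                // underlying container must be selected; for a non-movable
                // container the constructor must fall back to the copy
                // constructor.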
switch (CONFIG) {
case 'a': {
objPtr = new (fa) Obj(MoveUtil::move(mX));
objAllocatorPtr = isMovableContainer
? &sa
: &da;
expectedCtor = isMovableContainer
? e_CTOR_MOV_SANS_ALLOC
: e_CTOR_CPY_SANS_ALLOC;
} break;
case 'b': {
objPtr = new (fa) Obj(MoveUtil::move(mX),
(bslma::Allocator *)0);
objAllocatorPtr = &da;
expectedCtor = isMovableContainer
? e_CTOR_MOV_AVEC_ALLOC
: e_CTOR_CPY_AVEC_ALLOC;
} break;
case 'c': {
objPtr = new (fa) Obj(MoveUtil::move(mX), &ta);
objAllocatorPtr = &ta;
expectedCtor = isMovableContainer
? e_CTOR_MOV_AVEC_ALLOC
: e_CTOR_CPY_AVEC_ALLOC;
} break;
case 'd': {
objPtr = new (fa) Obj(MoveUtil::move(mC));
objAllocatorPtr = isMovableContainer
? &sa
: &da;
expectedCtor = isMovableContainer
? e_CTOR_MOV_SANS_ALLOC
: e_CTOR_CPY_SANS_ALLOC;
} break;
case 'e': {
objPtr = new (fa) Obj(MoveUtil::move(mC),
(bslma::Allocator *)0);
objAllocatorPtr = &da;
expectedCtor = isMovableContainer
? e_CTOR_MOV_AVEC_ALLOC
: e_CTOR_CPY_AVEC_ALLOC;
} break;
case 'f': {
objPtr = new (fa) Obj(MoveUtil::move(mC), &ta);
objAllocatorPtr = &ta;
expectedCtor = isMovableContainer
? e_CTOR_MOV_AVEC_ALLOC
: e_CTOR_CPY_AVEC_ALLOC;
} break;
default: {
ASSERTV(LINE, SPEC, CONFIG, !"Bad constructor config.");
return; // RETURN
} break;
}
Obj& mY = *objPtr; const Obj& Y = mY;
ASSERTV(
bsls::NameOf<CONTAINER>(),
LINE,
SPEC,
expectedCtor,
getCalledMethod(),
true == isCalledMethodCheckPassed<CONTAINER>(expectedCtor));
ASSERTV(LINE, SPEC, CONFIG, sizeof(Obj) == fa.numBytesInUse());
// Verify correctness of the contents moving.
ASSERTV(LINE, SPEC, CONFIG, Z == Y);
// Verify any attribute allocators are installed properly.
ASSERTV(LINE, SPEC, CONFIG, use_same_allocator(
mY,
TYPE_ALLOC,
objAllocatorPtr));
// Verify independence of the target object from the source
// one.
size_t sourceSize = X.size();
size_t targetSize = Y.size();
mX.push(VALUES[0]);
ASSERTV(LINE, SPEC, CONFIG, sourceSize != X.size());
ASSERTV(LINE, SPEC, CONFIG, targetSize == Y.size());
sourceSize = X.size();
mY.push(VALUES[0]);
ASSERTV(LINE, SPEC, CONFIG, sourceSize == X.size());
ASSERTV(LINE, SPEC, CONFIG, targetSize != Y.size());
targetSize = Y.size();
const VALUE top = Y.top();
// Reclaim dynamically allocated source object.
delete pX;
ASSERTV(LINE, SPEC, CONFIG, top == Y.top());
ASSERTV(LINE, SPEC, CONFIG, targetSize == Y.size());
// Reclaim dynamically allocated object under test.
fa.deleteObject(objPtr);
// Verify all memory is released on object destruction.
ASSERTV(LINE, SPEC, CONFIG, fa.numBlocksInUse(),
0 == fa.numBlocksInUse());
ASSERTV(LINE, SPEC, CONFIG, ta.numBlocksInUse(),
0 == ta.numBlocksInUse());
}
ASSERTV(LINE, SPEC, da.numBlocksInUse(), 0 == da.numBlocksInUse());
ASSERTV(LINE, SPEC, sa.numBlocksInUse(), 0 == sa.numBlocksInUse());
}
}
}
template <class CONTAINER>
void TestDriver<CONTAINER>::testCase12()
{
// ------------------------------------------------------------------------
// TESTING INEQUALITY OPERATORS
//
// Concern:
// That the inequality operators function correctly.
//
// Plan:
    //   Load 2 stack objects according to two 'SPEC's via the 'gg'
    //   function, and compare them.  Conveniently, the result of 'strcmp'
    //   on the two 'SPEC's corresponds directly to the expected result of
    //   the relational operators.
//
// Repeat the test a second time, with the second stack object created
// with a different allocator than the first, to verify that creation
// via different allocators has no impact on value.
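    //
    // For illustration (assuming the 'gg' generator pushes one element per
    // spec character, in order), two stacks compare as their specs compare
    // lexicographically:
    //..
    //  Obj mX; const Obj& X = gg(&mX, "AB");   // specs chosen arbitrarily
    //  Obj mY; const Obj& Y = gg(&mY, "CD");
    //  ASSERT(X < Y);                          // strcmp("AB", "CD") < 0
    //  ASSERT(Y > X);
    //..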
// ------------------------------------------------------------------------
const char *cont = ContainerName<container_type>::name();
const int NUM_DATA = DEFAULT_NUM_DATA;
const DefaultDataRow (&DATA)[NUM_DATA] = DEFAULT_DATA;
bslma::TestAllocator ta("testA", veryVeryVeryVerbose);
bslma::TestAllocator tb("testB", veryVeryVeryVerbose);
bslma::TestAllocator da("default", veryVeryVeryVerbose);
bslma::DefaultAllocatorGuard dag(&da);
if (veryVerbose) printf(" %s ---------------------------", cont);
{
// Create first object
for (int ti = 0; ti < NUM_DATA; ++ti) {
const char *const SPECX = DATA[ti].d_spec;
Obj mX(&ta); const Obj& X = gg(&mX, SPECX);
for (int tj = 0; tj < NUM_DATA; ++tj) {
const char *const SPECY = DATA[tj].d_spec;
Obj mY(&ta); const Obj& Y = gg(&mY, SPECY);
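                // 'CMP' is a three-way oracle: negative, zero, or positive
                // according to the expected ordering of 'X' relative to 'Y'.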
const int CMP = ti == tj
? 0
: strcmp(SPECX, SPECY) > 0 ? 1 : -1;
const bool EQ = X == Y;
const bool NE = X != Y;
const bool LT = X < Y;
const bool LE = X <= Y;
const bool GT = X > Y;
const bool GE = X >= Y;
ASSERTV(cont, SPECX, SPECY, EQ == (Y == X));
ASSERTV(cont, SPECX, SPECY, NE == (Y != X));
ASSERTV(cont, SPECX, SPECY, LT == (Y > X));
ASSERTV(cont, SPECX, SPECY, LE == (Y >= X));
ASSERTV(cont, SPECX, SPECY, GT == (Y < X));
ASSERTV(cont, SPECX, SPECY, GE == (Y <= X));
ASSERTV(cont, SPECX, SPECY, LT == !GE);
ASSERTV(cont, SPECX, SPECY, GT == !LE);
ASSERTV(cont, SPECX, SPECY, !(LT && GT));
ASSERTV(cont, SPECX, SPECY, LE || GE);
if (0 == CMP) {
ASSERTV(cont, SPECX, SPECY, !LT && !GT);
ASSERTV(cont, SPECX, SPECY, LE && GE);
}
else {
ASSERTV(cont, SPECX, SPECY, LT || GT);
}
ASSERTV(cont, SPECX, SPECY, CMP, (CMP < 0) == LT);
ASSERTV(cont, SPECX, SPECY, CMP, (CMP < 0) == !GE);
ASSERTV(cont, SPECX, SPECY, CMP, !((CMP == 0) && LT));
ASSERTV(cont, SPECX, SPECY, CMP, !((CMP == 0) && GT));
ASSERTV(cont, SPECX, SPECY, CMP, (CMP > 0) == GT);
ASSERTV(cont, SPECX, SPECY, CMP, (CMP > 0) == !LE);
ASSERTV(cont, SPECX, SPECY, CMP, (CMP == 0) == EQ);
ASSERTV(cont, SPECX, SPECY, CMP, (CMP != 0) == NE);
}
// Do it all over again, this time using a different allocator
// for 'mY' to verify changing the allocator has no impact on
// comparisons. Note we are re-testing the equality comparators
// so this memory allocation aspect is tested for them too.
for (int tj = 0; tj < NUM_DATA; ++tj) {
const char *const SPECY = DATA[tj].d_spec;
Obj mY(g(SPECY), &tb); const Obj& Y = mY;
const int CMP = ti == tj
? 0
: strcmp(SPECX, SPECY) > 0 ? 1 : -1;
const bool EQ = X == Y;
const bool NE = X != Y;
const bool LT = X < Y;
const bool LE = X <= Y;
const bool GT = X > Y;
const bool GE = X >= Y;
ASSERTV(cont, SPECX, SPECY, EQ == (Y == X));
ASSERTV(cont, SPECX, SPECY, NE == (Y != X));
ASSERTV(cont, SPECX, SPECY, LT == (Y > X));
ASSERTV(cont, SPECX, SPECY, LE == (Y >= X));
ASSERTV(cont, SPECX, SPECY, GT == (Y < X));
ASSERTV(cont, SPECX, SPECY, GE == (Y <= X));
ASSERTV(cont, SPECX, SPECY, LT == !GE);
ASSERTV(cont, SPECX, SPECY, GT == !LE);
ASSERTV(cont, SPECX, SPECY, !(LT && GT));
ASSERTV(cont, SPECX, SPECY, LE || GE);
if (EQ) {
ASSERTV(cont, SPECX, SPECY, !LT && !GT);
ASSERTV(cont, SPECX, SPECY, LE && GE);
}
else {
ASSERTV(cont, SPECX, SPECY, LT || GT);
}
ASSERTV(cont, SPECX, SPECY, CMP, (CMP < 0) == LT);
ASSERTV(cont, SPECX, SPECY, CMP, (CMP < 0) == !GE);
ASSERTV(cont, SPECX, SPECY, CMP, !((CMP == 0) && LT));
ASSERTV(cont, SPECX, SPECY, CMP, !((CMP == 0) && GT));
ASSERTV(cont, SPECX, SPECY, CMP, (CMP > 0) == GT);
ASSERTV(cont, SPECX, SPECY, CMP, (CMP > 0) == !LE);
ASSERTV(cont, SPECX, SPECY, CMP, (CMP == 0) == EQ);
ASSERTV(cont, SPECX, SPECY, CMP, (CMP != 0) == NE);
}
}
}
}
template <class CONTAINER>
void TestDriver<CONTAINER>::testCase11()
{
// ------------------------------------------------------------------------
// TESTING TYPE TRAITS
//
// Concern:
//: 1 The object has the necessary type traits.
//
// Plan:
//: 1 Use 'BSLMF_ASSERT' to verify all the type traits exists. (C-1)
//
// Testing:
// CONCERN: The object has the necessary type traits
// ------------------------------------------------------------------------
    // Verify 'stack' defines the expected traits.
enum { CONTAINER_USES_ALLOC =
bslma::UsesBslmaAllocator<CONTAINER>::value };
BSLMF_ASSERT(
((int) CONTAINER_USES_ALLOC == bslma::UsesBslmaAllocator<Obj>::value));
// Verify stack does not define other common traits.
BSLMF_ASSERT((0 == bslalg::HasStlIterators<Obj>::value));
BSLMF_ASSERT((0 == bsl::is_trivially_copyable<Obj>::value));
BSLMF_ASSERT((0 == bslmf::IsBitwiseEqualityComparable<Obj>::value));
BSLMF_ASSERT((0 == bslmf::IsBitwiseMoveable<Obj>::value));
BSLMF_ASSERT((0 == bslmf::HasPointerSemantics<Obj>::value));
BSLMF_ASSERT((0 == bsl::is_trivially_default_constructible<Obj>::value));
}
template <class CONTAINER>
void TestDriver<CONTAINER>::testCase10()
{
// ------------------------------------------------------------------------
// TESTING BSLMA ALLOCATOR
//
// Concern:
//: 1 A standard compliant allocator can be used instead of
//: 'bsl::allocator'.
//:
    //: 2 Methods that use the allocator (e.g., variations of constructor,
    //:   'push' and 'swap') can successfully populate the object.
    //:
    //: 3 'VALUE' types that allocate memory use the default allocator
    //:   instead of the object allocator.
//:
//: 4 Every object releases any allocated memory at destruction.
//
// Plan:
    //: 1 Using a loop-based approach, create a list of specs and their
    //:   expected value.  For each spec:
    //:
    //:   1 Create an object using a standard allocator in multiple ways,
    //:     including: container-based constructor, copy constructor,
    //:     multiple 'push' calls, and 'swap'.
//:
//: 2 Verify the value of each objects is as expected.
//:
//: 3 For types that allocate memory, verify memory for the elements
//: comes from the default allocator.
//
// Testing:
    //   CONCERN: 'stack' is compatible with a standard allocator.
// ------------------------------------------------------------------------
const char *cont = ContainerName<container_type>::name();
const size_t NUM_DATA = DEFAULT_NUM_DATA;
const DefaultDataRow (&DATA)[NUM_DATA] = DEFAULT_DATA;
bslma::TestAllocator scratch("scratch", veryVeryVeryVerbose);
for (size_t ti = 0; ti < NUM_DATA; ++ti) {
const int LINE = DATA[ti].d_line;
const char *const SPEC = DATA[ti].d_spec;
const size_t LENGTH = strlen(DATA[ti].d_spec);
const TestValues EXP(DATA[ti].d_spec, &scratch);
TestValues values(SPEC, &scratch);
bslma::TestAllocator ta("test", veryVeryVeryVerbose);
bslma::TestAllocatorMonitor tam(&ta);
bslma::TestAllocator da("default", veryVeryVeryVerbose);
bslma::DefaultAllocatorGuard dag(&da);
bslma::TestAllocatorMonitor dam(&da);
{
container_type tmpCont(&ta);
for (size_t tk = 0; tk < LENGTH; ++tk) {
tmpCont.push_back(values[tk]);
}
Obj mX(tmpCont, &ta); const Obj& X = mX;
verifyStack(X, EXP, LENGTH, L_, &ta);
Obj mY(X, &ta); const Obj& Y = mY;
verifyStack(Y, EXP, LENGTH, L_, &ta);
Obj mZ(&ta); const Obj& Z = mZ;
mZ.swap(mX);
verifyStack(Z, EXP, LENGTH, L_, &ta);
ASSERTV(LINE, X.empty());
ASSERTV(LINE, 0 == X.size());
}
ASSERT(tam.isTotalUp() || 0 == LENGTH);
ASSERT(tam.isInUseSame());
tam.reset();
{
Obj mX(&ta); const Obj& X = mX;
for (size_t tj = 0; tj < LENGTH; ++tj) {
mX.push(values[tj]);
ASSERTV(LINE, tj, LENGTH, values[tj] == X.top());
}
verifyStack(X, EXP, LENGTH, L_, &ta);
}
ASSERT(tam.isTotalUp() || 0 == LENGTH);
ASSERT(tam.isInUseSame());
ASSERT(dam.isTotalSame());
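        // Repeat the same construction patterns without supplying an
        // allocator; all memory must now come from the currently installed
        // default allocator.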
{
container_type tmpCont;
for (size_t tk = 0; tk < LENGTH; ++tk) {
tmpCont.push_back(values[tk]);
}
Obj mX(tmpCont); const Obj& X = mX;
verifyStack(X, EXP, LENGTH, L_);
Obj mY(X); const Obj& Y = mY;
verifyStack(Y, EXP, LENGTH, L_);
Obj mZ; const Obj& Z = mZ;
mZ.swap(mX);
verifyStack(Z, EXP, LENGTH, L_);
ASSERTV(LINE, X.empty());
ASSERTV(LINE, 0 == X.size());
}
ASSERTV(cont, dam.isTotalUp() == (emptyWillAlloc() || LENGTH > 0));
dam.reset();
{
Obj mX; const Obj& X = mX;
for (size_t tj = 0; tj < LENGTH; ++tj) {
mX.push(values[tj]);
ASSERTV(LINE, tj, LENGTH, values[tj] == X.top());
}
verifyStack(X, EXP, LENGTH, L_);
}
ASSERTV(cont, dam.isTotalUp() == (emptyWillAlloc() || LENGTH > 0));
ASSERTV(LINE, da.numBlocksInUse(), 0 == da.numBlocksInUse());
}
}
template <class CONTAINER>
template <bool PROPAGATE_ON_CONTAINER_COPY_ASSIGNMENT_FLAG,
bool OTHER_FLAGS>
void TestDriver<CONTAINER>::
testCase9_propagate_on_container_copy_assignment_dispatch()
{
typedef typename CONTAINER::value_type VALUE;
// Set the three properties of 'bsltf::StdStatefulAllocator' that are not
// under test in this test case to 'false'.
typedef bsltf::StdStatefulAllocator<
VALUE,
OTHER_FLAGS,
PROPAGATE_ON_CONTAINER_COPY_ASSIGNMENT_FLAG,
OTHER_FLAGS,
OTHER_FLAGS> StdAlloc;
typedef bsl::deque<VALUE, StdAlloc> CObj;
typedef bsl::stack<VALUE, CObj> Obj;
const bool PROPAGATE = PROPAGATE_ON_CONTAINER_COPY_ASSIGNMENT_FLAG;
static const char *SPECS[] = {
"",
"A",
"BC",
"CDE",
};
const int NUM_SPECS = static_cast<const int>(sizeof SPECS / sizeof *SPECS);
bslma::TestAllocator da("default", veryVeryVeryVerbose);
bslma::DefaultAllocatorGuard dag(&da);
// Create control and source objects.
for (int ti = 0; ti < NUM_SPECS; ++ti) {
const char *const ISPEC = SPECS[ti];
const size_t ILENGTH = strlen(ISPEC);
TestValues IVALUES(ISPEC);
bslma::TestAllocator oas("source", veryVeryVeryVerbose);
bslma::TestAllocator oat("target", veryVeryVeryVerbose);
StdAlloc mas(&oas);
StdAlloc mat(&oat);
StdAlloc scratch(&da);
const CObj CI(IVALUES.begin(), IVALUES.end(), scratch);
const Obj W(CI, scratch); // control
// Create target object.
for (int tj = 0; tj < NUM_SPECS; ++tj) {
const char *const JSPEC = SPECS[tj];
const size_t JLENGTH = strlen(JSPEC);
TestValues JVALUES(JSPEC);
{
Obj mY(CI, mas); const Obj& Y = mY;
if (veryVerbose) { T_ P_(ISPEC) P_(Y) P(W) }
const CObj CJ(JVALUES.begin(), JVALUES.end(), scratch);
Obj mX(CJ, mat); const Obj& X = mX;
bslma::TestAllocatorMonitor oasm(&oas);
bslma::TestAllocatorMonitor oatm(&oat);
Obj *mR = &(mX = Y);
ASSERTV(ISPEC, JSPEC, W, X, W == X);
ASSERTV(ISPEC, JSPEC, W, Y, W == Y);
ASSERTV(ISPEC, JSPEC, mR, &mX, mR == &mX);
// TBD no 'get_allocator' in 'stack'
#if 0
ASSERTV(ISPEC, JSPEC, PROPAGATE,
!PROPAGATE == (mat == X.get_allocator()));
ASSERTV(ISPEC, JSPEC, PROPAGATE,
PROPAGATE == (mas == X.get_allocator()));
ASSERTV(ISPEC, JSPEC, mas == Y.get_allocator());
#endif
if (PROPAGATE) {
ASSERTV(ISPEC, JSPEC, 0 == oat.numBlocksInUse());
}
else {
ASSERTV(ISPEC, JSPEC, oasm.isInUseSame());
}
}
ASSERTV(ISPEC, 0 == oas.numBlocksInUse());
ASSERTV(ISPEC, 0 == oat.numBlocksInUse());
}
}
ASSERTV(0 == da.numBlocksInUse());
}
template <class CONTAINER>
void TestDriver<CONTAINER>::testCase9_propagate_on_container_copy_assignment()
{
// ------------------------------------------------------------------------
// COPY-ASSIGNMENT OPERATOR: ALLOCATOR PROPAGATION
//
// Concerns:
//: 1 If the 'propagate_on_container_copy_assignment' trait is 'false', the
//: allocator used by the target object remains unchanged (i.e., the
//: source object's allocator is *not* propagated).
//:
//: 2 If the 'propagate_on_container_copy_assignment' trait is 'true', the
//: allocator used by the target object is updated to be a copy of that
//: used by the source object (i.e., the source object's allocator *is*
//: propagated).
//:
//: 3 The allocator used by the source object remains unchanged whether or
    //:   not it is propagated to the target object.
//:
//: 4 If the allocator is propagated from the source object to the target
//: object, all memory allocated from the target object's original
//: allocator is released.
//:
//: 5 The effect of the 'propagate_on_container_copy_assignment' trait is
//: independent of the other three allocator propagation traits.
//
// Plan:
//: 1 Specify a set S of object values with varied differences, ordered by
//: increasing length, to be used in the following tests.
//:
//: 2 Create two 'bsltf::StdStatefulAllocator' objects with their
//: 'propagate_on_container_copy_assignment' property configured to
//: 'false'. In two successive iterations of P-3, first configure the
//: three properties not under test to be 'false', then configure them
//: all to be 'true'.
//:
//: 3 For each value '(x, y)' in the cross product S x S: (C-1)
//:
//: 1 Initialize an object 'X' from 'x' using one of the allocators from
//: P-2.
//:
//: 2 Initialize two objects from 'y', a control object 'W' using a
//: scratch allocator and an object 'Y' using the other allocator from
//: P-2.
//:
//: 3 Copy-assign 'Y' to 'X' and use 'operator==' to verify that both
//: 'X' and 'Y' subsequently have the same value as 'W'.
//:
//: 4 Use the 'get_allocator' method to verify that the allocator of 'Y'
//: is *not* propagated to 'X' and that the allocator used by 'Y'
//: remains unchanged. (C-1)
//:
//: 4 Repeat P-2..3 except that this time configure the allocator property
//: under test to 'true' and verify that the allocator of 'Y' *is*
//: propagated to 'X'. Also verify that all memory is released to the
//: allocator that was in use by 'X' prior to the assignment. (C-2..5)
//
// Testing:
// propagate_on_container_copy_assignment
// ------------------------------------------------------------------------
if (verbose) printf("\nCOPY-ASSIGNMENT OPERATOR: ALLOCATOR PROPAGATION"
"\n===============================================\n");
if (verbose)
printf("\n'propagate_on_container_copy_assignment::value == false'\n");
testCase9_propagate_on_container_copy_assignment_dispatch<false, false>();
testCase9_propagate_on_container_copy_assignment_dispatch<false, true>();
if (verbose)
printf("\n'propagate_on_container_copy_assignment::value == true'\n");
testCase9_propagate_on_container_copy_assignment_dispatch<true, false>();
testCase9_propagate_on_container_copy_assignment_dispatch<true, true>();
}
template <class CONTAINER>
void TestDriver<CONTAINER>::testCase9()
{
// ------------------------------------------------------------------------
// COPY-ASSIGNMENT OPERATOR:
// Ensure that we can assign the value of any object of the class to any
// object of the class, such that the two objects subsequently have the
// same value.
//
// Concerns:
//: 1 The assignment operator can change the value of any modifiable target
//: object to that of any source object.
//:
//: 2 The allocator address held by the target object is unchanged.
//:
//: 3 Any memory allocation is from the target object's allocator.
//:
//: 4 The signature and return type are standard.
//:
//: 5 The reference returned is to the target object (i.e., '*this').
//:
//: 6 The value of the source object is not modified.
//:
//: 7 The allocator address held by the source object is unchanged.
//:
//: 8 QoI: Assigning a source object having the default-constructed value
//: allocates no memory.
//:
//: 9 Any memory allocation is exception neutral.
//:
//:10 Assigning an object to itself behaves as expected (alias-safety).
//:
//:11 Every object releases any allocated memory at destruction.
//
// Plan:
//: 1 Use the address of 'operator=' to initialize a member-function
//: pointer having the appropriate signature and return type for the
//: copy-assignment operator defined in this component. (C-4)
//:
//: 2 Create a 'bslma::TestAllocator' object, and install it as the default
//: allocator (note that a ubiquitous test allocator is already installed
//: as the global allocator).
//:
//: 3 Using the table-driven technique:
//:
//: 1 Specify a set of (unique) valid object values.
//:
//: 4 For each row 'R1' (representing a distinct object value, 'V') in the
//: table described in P-3: (C-1..2, 5..8, 11)
//:
//: 1 Use the value constructor and a "scratch" allocator to create two
//: 'const' 'Obj', 'Z' and 'ZZ', each having the value 'V'.
//:
//: 2 Execute an inner loop that iterates over each row 'R2'
//: (representing a distinct object value, 'W') in the table described
//: in P-3:
//:
//: 3 For each of the iterations (P-4.2): (C-1..2, 5..8, 11)
//:
//: 1 Create a 'bslma::TestAllocator' object, 'oa'.
//:
//: 2 Use the value constructor and 'oa' to create a modifiable 'Obj',
//: 'mX', having the value 'W'.
//:
//: 3 Assign 'mX' from 'Z' in the presence of injected exceptions
    //:      (using the 'BSLMA_TESTALLOCATOR_EXCEPTION_TEST_*' macros).
//:
//: 4 Verify that the address of the return value is the same as that
//: of 'mX'. (C-5)
//:
//: 5 Use the equality-comparison operator to verify that: (C-1, 6)
//:
//: 1 The target object, 'mX', now has the same value as that of 'Z'.
//: (C-1)
//:
//: 2 'Z' still has the same value as that of 'ZZ'. (C-6)
//:
//: 6 Use the 'allocator' accessor of both 'mX' and 'Z' to verify that
//: the respective allocator addresses held by the target and source
//: objects are unchanged. (C-2, 7)
//:
//: 7 Use the appropriate test allocators to verify that: (C-8, 11)
//:
//: 1 For an object that (a) is initialized with a value that did NOT
//: require memory allocation, and (b) is then assigned a value
//: that DID require memory allocation, the target object DOES
//: allocate memory from its object allocator only (irrespective of
//: the specific number of allocations or the total amount of
//: memory allocated); also cross check with what is expected for
//: 'mX' and 'Z'.
//:
//: 2 An object that is assigned a value that did NOT require memory
//: allocation, does NOT allocate memory from its object allocator;
//: also cross check with what is expected for 'Z'.
//:
//: 3 No additional memory is allocated by the source object. (C-8)
//:
//: 4 All object memory is released when the object is destroyed.
//: (C-11)
//:
//: 5 Repeat steps similar to those described in P-4 except that, this
//: time, there is no inner loop (as in P-4.2); instead, the source
//: object, 'Z', is a reference to the target object, 'mX', and both 'mX'
//: and 'ZZ' are initialized to have the value 'V'. For each row
//: (representing a distinct object value, 'V') in the table described in
//: P-3: (C-9)
//:
//: 1 Create a 'bslma::TestAllocator' object, 'oa'.
//:
//: 2 Use the value constructor and 'oa' to create a modifiable 'Obj'
//: 'mX'; also use the value constructor and a distinct "scratch"
//: allocator to create a 'const' 'Obj' 'ZZ'.
//:
//: 3 Let 'Z' be a reference providing only 'const' access to 'mX'.
//:
//: 4 Assign 'mX' from 'Z' in the presence of injected exceptions (using
    //:    the 'BSLMA_TESTALLOCATOR_EXCEPTION_TEST_*' macros).  (C-9)
//:
//: 5 Verify that the address of the return value is the same as that of
//: 'mX'.
//:
//: 6 Use the equality-comparison operator to verify that the target
//: object, 'mX', still has the same value as that of 'ZZ'.
//:
//: 7 Use the 'allocator' accessor of 'mX' to verify that it is still the
//: object allocator.
//:
//: 8 Use the appropriate test allocators to verify that:
//:
//: 1 Any memory that is allocated is from the object allocator.
//:
//: 2 No additional (e.g., temporary) object memory is allocated when
//: assigning an object value that did NOT initially require
//: allocated memory.
//:
//: 3 All object memory is released when the object is destroyed.
//:
//: 6 Use the test allocator from P-2 to verify that no memory is ever
//: allocated from the default allocator. (C-3)
//
// Testing:
    //   stack& operator=(const stack& rhs);
// ------------------------------------------------------------------------
const int NUM_DATA = DEFAULT_NUM_DATA;
const DefaultDataRow (&DATA)[NUM_DATA] = DEFAULT_DATA;
bslma::TestAllocator da("default", veryVeryVeryVerbose);
bslma::DefaultAllocatorGuard dag(&da);
if (verbose) printf("\nCompare each pair of similar and different"
" values (u, ua, v, va) in S X A X S X A"
" without perturbation.\n");
{
// Create first object
for (int ti = 0; ti < NUM_DATA; ++ti) {
const int LINE1 = DATA[ti].d_line;
const char *const SPEC1 = DATA[ti].d_spec;
bslma::TestAllocator scratch("scratch", veryVeryVeryVerbose);
Obj mZ(&scratch); const Obj& Z = gg(&mZ, SPEC1);
Obj mZZ(&scratch); const Obj& ZZ = gg(&mZZ, SPEC1);
// Ensure the first row of the table contains the
// default-constructed value.
static bool firstFlag = true;
if (firstFlag) {
ASSERTV(LINE1, Obj(&scratch) == Z);
firstFlag = false;
}
// Create second object
for (int tj = 0; tj < NUM_DATA; ++tj) {
const int LINE2 = DATA[tj].d_line;
const char *const SPEC2 = DATA[tj].d_spec;
bslma::TestAllocator oa("object", veryVeryVeryVerbose);
{
Obj mX(&oa); const Obj& X = gg(&mX, SPEC2);
ASSERTV(LINE1, LINE2, (Z == X) == (LINE1 == LINE2));
bslma::TestAllocatorMonitor oam(&oa), sam(&scratch);
BSLMA_TESTALLOCATOR_EXCEPTION_TEST_BEGIN(oa) {
if (veryVeryVerbose) { T_ T_ Q(ExceptionTestBody) }
Obj *mR = &(mX = Z);
ASSERTV(LINE1, LINE2, Z == X);
ASSERTV(LINE1, LINE2, mR == &mX);
} BSLMA_TESTALLOCATOR_EXCEPTION_TEST_END
ASSERTV(LINE1, LINE2, ZZ == Z);
// ASSERTV(LINE1, LINE2, &oa == X.get_allocator());
// ASSERTV(LINE1, LINE2, &scratch == Z.get_allocator());
ASSERTV(LINE1, LINE2, sam.isInUseSame());
ASSERTV(LINE1, LINE2, 0 == da.numBlocksTotal());
}
// Verify all memory is released on object destruction.
ASSERTV(LINE1, LINE2, oa.numBlocksInUse(),
0 == oa.numBlocksInUse());
}
// self-assignment
bslma::TestAllocator oa("object", veryVeryVeryVerbose);
{
bslma::TestAllocator scratch("scratch", veryVeryVeryVerbose);
Obj mX(&oa); const Obj& X = gg(&mX, SPEC1);
Obj mZZ(&scratch); const Obj& ZZ = gg(&mZZ, SPEC1);
const Obj& Z = mX;
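                // 'Z' is an alias for 'mX', so the following assignment is
                // a self-assignment and must be value- and
                // allocation-neutral.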
ASSERTV(LINE1, ZZ == Z);
bslma::TestAllocatorMonitor oam(&oa), sam(&scratch);
BSLMA_TESTALLOCATOR_EXCEPTION_TEST_BEGIN(oa) {
if (veryVeryVerbose) { T_ T_ Q(ExceptionTestBody) }
Obj *mR = &(mX = Z);
ASSERTV(LINE1, ZZ == Z);
ASSERTV(LINE1, mR == &X);
} BSLMA_TESTALLOCATOR_EXCEPTION_TEST_END
// ASSERTV(LINE1, &oa == Z.get_allocator());
ASSERTV(LINE1, sam.isTotalSame());
ASSERTV(LINE1, oam.isTotalSame());
ASSERTV(LINE1, 0 == da.numBlocksTotal());
}
// Verify all object memory is released on destruction.
ASSERTV(LINE1, oa.numBlocksInUse(), 0 == oa.numBlocksInUse());
}
}
}
template <class CONTAINER>
template <bool PROPAGATE_ON_CONTAINER_SWAP_FLAG,
bool OTHER_FLAGS>
void TestDriver<CONTAINER>::testCase8_propagate_on_container_swap_dispatch()
{
typedef typename CONTAINER::value_type VALUE;
// Set the three properties of 'bsltf::StdStatefulAllocator' that are not
// under test in this test case to 'false'.
typedef bsltf::StdStatefulAllocator<VALUE,
OTHER_FLAGS,
OTHER_FLAGS,
PROPAGATE_ON_CONTAINER_SWAP_FLAG,
OTHER_FLAGS> StdAlloc;
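    // The third flag position of 'StdAlloc' corresponds to the
    // 'propagate_on_container_swap' trait under test.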
typedef bsl::deque<VALUE, StdAlloc> CObj;
typedef bsl::stack<VALUE, CObj> Obj;
const bool PROPAGATE = PROPAGATE_ON_CONTAINER_SWAP_FLAG;
static const char *SPECS[] = {
"",
"A",
"BC",
"CDE",
};
const int NUM_SPECS = static_cast<const int>(sizeof SPECS / sizeof *SPECS);
bslma::TestAllocator da("default", veryVeryVeryVerbose);
bslma::DefaultAllocatorGuard dag(&da);
for (int ti = 0; ti < NUM_SPECS; ++ti) {
const char *const ISPEC = SPECS[ti];
const size_t ILENGTH = strlen(ISPEC);
TestValues IVALUES(ISPEC);
bslma::TestAllocator xoa("x-original", veryVeryVeryVerbose);
bslma::TestAllocator yoa("y-original", veryVeryVeryVerbose);
StdAlloc xma(&xoa);
StdAlloc yma(&yoa);
StdAlloc scratch(&da);
const CObj CI(IVALUES.begin(), IVALUES.end(), scratch);
const Obj ZZ(CI, scratch); // control
for (int tj = 0; tj < NUM_SPECS; ++tj) {
const char *const JSPEC = SPECS[tj];
const size_t JLENGTH = strlen(JSPEC);
TestValues JVALUES(JSPEC);
const CObj CJ(JVALUES.begin(), JVALUES.end(), scratch);
const Obj WW(CJ, scratch); // control
{
Obj mX(CI, xma); const Obj& X = mX;
if (veryVerbose) { T_ P_(ISPEC) P_(X) P(ZZ) }
Obj mY(CJ, yma); const Obj& Y = mY;
ASSERTV(ISPEC, JSPEC, ZZ, X, ZZ == X);
ASSERTV(ISPEC, JSPEC, WW, Y, WW == Y);
// member 'swap'
{
bslma::TestAllocatorMonitor dam(&da);
bslma::TestAllocatorMonitor xoam(&xoa);
bslma::TestAllocatorMonitor yoam(&yoa);
mX.swap(mY);
ASSERTV(ISPEC, JSPEC, WW, X, WW == X);
ASSERTV(ISPEC, JSPEC, ZZ, Y, ZZ == Y);
if (PROPAGATE) {
// TBD no 'get_allocator' in 'stack'
#if 0
ASSERTV(ISPEC, JSPEC, yma == X.get_allocator());
ASSERTV(ISPEC, JSPEC, xma == Y.get_allocator());
#endif
ASSERTV(ISPEC, JSPEC, dam.isTotalSame());
ASSERTV(ISPEC, JSPEC, xoam.isTotalSame());
ASSERTV(ISPEC, JSPEC, yoam.isTotalSame());
}
// TBD no 'get_allocator' in 'stack'
#if 0
else {
ASSERTV(ISPEC, JSPEC, xma == X.get_allocator());
ASSERTV(ISPEC, JSPEC, yma == Y.get_allocator());
}
#endif
}
// free function 'swap'
{
bslma::TestAllocatorMonitor dam(&da);
bslma::TestAllocatorMonitor xoam(&xoa);
bslma::TestAllocatorMonitor yoam(&yoa);
swap(mX, mY);
ASSERTV(ISPEC, JSPEC, ZZ, X, ZZ == X);
ASSERTV(ISPEC, JSPEC, WW, Y, WW == Y);
// TBD no 'get_allocator' in 'stack'
#if 0
ASSERTV(ISPEC, JSPEC, xma == X.get_allocator());
ASSERTV(ISPEC, JSPEC, yma == Y.get_allocator());
#endif
if (PROPAGATE) {
ASSERTV(ISPEC, JSPEC, dam.isTotalSame());
ASSERTV(ISPEC, JSPEC, xoam.isTotalSame());
ASSERTV(ISPEC, JSPEC, yoam.isTotalSame());
}
}
}
ASSERTV(ISPEC, 0 == xoa.numBlocksInUse());
ASSERTV(ISPEC, 0 == yoa.numBlocksInUse());
}
}
ASSERTV(0 == da.numBlocksInUse());
}
template <class CONTAINER>
void TestDriver<CONTAINER>::testCase8_propagate_on_container_swap()
{
// ------------------------------------------------------------------------
// SWAP MEMBER AND FREE FUNCTIONS: ALLOCATOR PROPAGATION
//
// Concerns:
//: 1 If the 'propagate_on_container_swap' trait is 'false', the
//: allocators used by the source and target objects remain unchanged
//: (i.e., the allocators are *not* exchanged).
//:
//: 2 If the 'propagate_on_container_swap' trait is 'true', the
//: allocator used by the target (source) object is updated to be a copy
//: of that used by the source (target) object (i.e., the allocators
//: *are* exchanged).
//:
//: 3 If the allocators are propagated (i.e., exchanged), there is no
//: additional allocation from any allocator.
//:
//: 4 The effect of the 'propagate_on_container_swap' trait is independent
//: of the other three allocator propagation traits.
//:
//: 5 Following the swap operation, neither object holds on to memory
//: allocated from the other object's allocator.
//
// Plan:
//: 1 Specify a set S of object values with varied differences, ordered by
//: increasing length, to be used in the following tests.
//:
//: 2 Create two 'bsltf::StdStatefulAllocator' objects with their
//: 'propagate_on_container_swap' property configured to 'false'. In two
//: successive iterations of P-3, first configure the three properties
//: not under test to be 'false', then configure them all to be 'true'.
//:
//: 3 For each value '(x, y)' in the cross product S x S: (C-1)
//:
//: 1 Initialize two objects from 'x', a control object 'ZZ' using a
//: scratch allocator and an object 'X' using one of the allocators
//: from P-2.
//:
//: 2 Initialize two objects from 'y', a control object 'WW' using a
//: scratch allocator and an object 'Y' using the other allocator from
//: P-2.
//:
//: 3 Using both member 'swap' and free function 'swap', swap 'X' with
//: 'Y' and use 'operator==' to verify that 'X' and 'Y' have the
//: expected values.
//:
//: 4 Use the 'get_allocator' method to verify that the allocators of 'X'
//: and 'Y' are *not* exchanged. (C-1)
//:
//: 4 Repeat P-2..3 except that this time configure the allocator property
//: under test to 'true' and verify that the allocators of 'X' and 'Y'
//: *are* exchanged. Also verify that there is no additional allocation
//: from any allocator. (C-2..5)
//
// Testing:
// propagate_on_container_swap
// ------------------------------------------------------------------------
if (verbose)
printf("\nSWAP MEMBER AND FREE FUNCTIONS: ALLOCATOR PROPAGATION"
"\n=====================================================\n");
if (verbose) printf("\n'propagate_on_container_swap::value == false'\n");
testCase8_propagate_on_container_swap_dispatch<false, false>();
testCase8_propagate_on_container_swap_dispatch<false, true>();
if (verbose) printf("\n'propagate_on_container_swap::value == true'\n");
testCase8_propagate_on_container_swap_dispatch<true, false>();
testCase8_propagate_on_container_swap_dispatch<true, true>();
}
template <class CONTAINER>
void TestDriver<CONTAINER>::testCase8()
{
// ------------------------------------------------------------------------
// SWAP MEMBER AND FREE FUNCTIONS
// Ensure that, when member and free 'swap' are implemented, we can
// exchange the values of any two objects that use the same
// allocator.
//
// Concerns:
//: 1 Both functions exchange the values of the (two) supplied objects.
//:
//: 2 The common object allocator address held by both objects is
//: unchanged.
//:
    //: 3 If the two objects being swapped use the same allocator, neither
    //:   function allocates memory from any allocator.
//:
//: 4 Both functions have standard signatures and return types.
//:
    //: 5 Two objects with different allocators may be swapped; in that
    //:   case, memory may be allocated.
//:
//: 6 Using either function to swap an object with itself does not
//: affect the value of the object (alias-safety).
//:
//: 7 The free 'swap' function is discoverable through ADL (Argument
//: Dependent Lookup).
//:
//: 8 QoI: Asserted precondition violations are detected when enabled.
//
// Plan:
//: 1 Use the addresses of the 'swap' member and free functions defined
//: in this component to initialize, respectively, member-function
//: and free-function pointers having the appropriate signatures and
//: return types. (C-4)
//:
//: 2 Create a 'bslma::TestAllocator' object, and install it as the
//: default allocator (note that a ubiquitous test allocator is
//: already installed as the global allocator).
//:
//: 3 Using the table-driven technique:
//:
//: 1 Specify a set of (unique) valid object values (one per row) in
//: terms of their individual attributes, including (a) first, the
//: default value, (b) boundary values corresponding to every range
//: of values that each individual attribute can independently
//: attain, and (c) values that should require allocation from each
//: individual attribute that can independently allocate memory.
//:
//: 2 Additionally, provide a (tri-valued) column, 'MEM', indicating
//: the expectation of memory allocation for all typical
//: implementations of individual attribute types: ('Y') "Yes",
//: ('N') "No", or ('?') "implementation-dependent".
//:
//: 4 For each row 'R1' in the table of P-3: (C-1..2, 6)
//:
//: 1 Create a 'bslma::TestAllocator' object, 'oa'.
//:
//: 2 Use the value constructor and 'oa' to create a modifiable
//: 'Obj', 'mW', having the value described by 'R1'; also use the
//: copy constructor and a "scratch" allocator to create a 'const'
//: 'Obj' 'XX' from 'mW'.
//:
//: 3 Use the member and free 'swap' functions to swap the value of
//: 'mW' with itself; verify, after each swap, that: (C-6)
//:
//: 1 The value is unchanged. (C-6)
//:
//: 2 The allocator address held by the object is unchanged.
//:
//: 3 There was no additional object memory allocation.
//:
//: 4 For each row 'R2' in the table of P-3: (C-1..2)
//:
//: 1 Use the copy constructor and 'oa' to create a modifiable
//: 'Obj', 'mX', from 'XX' (P-4.2).
//:
//: 2 Use the value constructor and 'oa' to create a modifiable
//: 'Obj', 'mY', and having the value described by 'R2'; also use
//: the copy constructor to create, using a "scratch" allocator,
//: a 'const' 'Obj', 'YY', from 'Y'.
//:
//: 3 Use, in turn, the member and free 'swap' functions to swap
//: the values of 'mX' and 'mY'; verify, after each swap, that:
//: (C-1..2)
//:
//: 1 The values have been exchanged. (C-1)
//:
//: 2 The common object allocator address held by 'mX' and 'mY'
//: is unchanged in both objects. (C-2)
//:
//: 3 There was no additional object memory allocation.
//:
//: 5 Create a new object allocator, 'oaz'
//:
//: 6 Repeat P-4.4.2 with 'oaz' under the presence of exception.
//:
//: 5 Verify that the free 'swap' function is discoverable through ADL:
    //:   (C-7)
//:
//: 1 Create a set of attribute values, 'A', distinct from the values
//: corresponding to the default-constructed object, choosing
//: values that allocate memory if possible.
//:
//: 2 Create a 'bslma::TestAllocator' object, 'oa'.
//:
//: 3 Use the default constructor and 'oa' to create a modifiable
//: 'Obj' 'mX' (having default attribute values); also use the copy
//: constructor and a "scratch" allocator to create a 'const' 'Obj'
//: 'XX' from 'mX'.
//:
//: 4 Use the value constructor and 'oa' to create a modifiable 'Obj'
//: 'mY' having the value described by the 'Ai' attributes; also
//: use the copy constructor and a "scratch" allocator to create a
//: 'const' 'Obj' 'YY' from 'mY'.
//:
//: 5 Use the 'invokeAdlSwap' helper function template to swap the
//: values of 'mX' and 'mY', using the free 'swap' function defined
//: in this component, then verify that: (C-7)
//:
//: 1 The values have been exchanged.
//:
//: 2 There was no additional object memory allocation. (C-7)
//:
//: 6 Use the test allocator from P-2 to verify that no memory is ever
//: allocated from the default allocator. (C-3)
//:
//: 7 Verify that, in appropriate build modes, defensive checks are
//: triggered when an attempt is made to swap objects that do not
//: refer to the same allocator, but not when the allocators are the
    //:   same (using the 'BSLS_ASSERTTEST_*' macros).  (C-8)
//
// Testing:
    //   void swap(stack& other);
    //   void swap(stack<V, C>& a, stack<V, C>& b);
// ------------------------------------------------------------------------
if (verbose) printf("\nSWAP MEMBER AND FREE FUNCTIONS"
"\n==============================\n");
if (verbose) printf(
"\nAssign the address of each function to a variable.\n");
{
typedef void (Obj::*funcPtr)(Obj&);
typedef void (*freeFuncPtr)(Obj&, Obj&);
// Verify that the signatures and return types are standard.
funcPtr memberSwap = &Obj::swap;
freeFuncPtr freeSwap = bsl::swap;
(void) memberSwap; // quash potential compiler warnings
(void) freeSwap;
}
if (verbose) printf(
"\nCreate a test allocator and install it as the default.\n");
bslma::TestAllocator da("default", veryVeryVeryVerbose);
bslma::DefaultAllocatorGuard dag(&da);
if (verbose) printf(
"\nUse a table of distinct object values and expected memory usage.\n");
const int NUM_DATA = DEFAULT_NUM_DATA;
const DefaultDataRow (&DATA)[NUM_DATA] = DEFAULT_DATA;
for (int ti = 0; ti < NUM_DATA; ++ti) {
const int LINE1 = DATA[ti].d_line;
const char *const SPEC1 = DATA[ti].d_spec;
bslma::TestAllocator oa("object", veryVeryVeryVerbose);
bslma::TestAllocator scratch("scratch", veryVeryVeryVerbose);
Obj mW(&oa); const Obj& W = gg(&mW, SPEC1);
const Obj XX(W, &scratch);
// Ensure the first row of the table contains the
// default-constructed value.
static bool firstFlag = true;
if (firstFlag) {
ASSERTV(LINE1, Obj() == W);
firstFlag = false;
}
// member 'swap'
{
bslma::TestAllocatorMonitor oam(&oa);
mW.swap(mW);
ASSERTV(LINE1, XX == W);
// ASSERTV(LINE1, &oa == W.get_allocator());
ASSERTV(LINE1, oam.isTotalSame());
}
// free function 'swap'
{
bslma::TestAllocatorMonitor oam(&oa);
swap(mW, mW);
ASSERTV(LINE1, XX == W);
// ASSERTV(LINE1, &oa == W.get_allocator());
ASSERTV(LINE1, oam.isTotalSame());
}
for (int tj = 0; tj < NUM_DATA; ++tj) {
const int LINE2 = DATA[tj].d_line;
const char *const SPEC2 = DATA[tj].d_spec;
Obj mX(XX, &oa); const Obj& X = mX;
Obj mY(&oa); const Obj& Y = gg(&mY, SPEC2);
const Obj YY(Y, &scratch);
// member 'swap'
{
bslma::TestAllocatorMonitor oam(&oa);
mX.swap(mY);
ASSERTV(LINE1, LINE2, YY == X);
ASSERTV(LINE1, LINE2, XX == Y);
// ASSERTV(LINE1, LINE2, &oa == X.get_allocator());
// ASSERTV(LINE1, LINE2, &oa == Y.get_allocator());
ASSERTV(LINE1, LINE2, oam.isTotalSame());
}
// free function 'swap'
{
bslma::TestAllocatorMonitor oam(&oa);
swap(mX, mY);
ASSERTV(LINE1, LINE2, XX == X);
ASSERTV(LINE1, LINE2, YY == Y);
// ASSERTV(LINE1, LINE2, &oa == X.get_allocator());
// ASSERTV(LINE1, LINE2, &oa == Y.get_allocator());
ASSERTV(LINE1, LINE2, oam.isTotalSame());
}
bslma::TestAllocator oaz("z_object", veryVeryVeryVerbose);
Obj mZ(&oaz); const Obj& Z = gg(&mZ, SPEC2);
const Obj ZZ(Z, &scratch);
// member 'swap'
{
bslma::TestAllocatorMonitor oam(&oa);
bslma::TestAllocatorMonitor oazm(&oaz);
BSLMA_TESTALLOCATOR_EXCEPTION_TEST_BEGIN(oa) {
ExceptionGuard<Obj> guardX(&X, L_, &scratch);
ExceptionGuard<Obj> guardZ(&Z, L_, &scratch);
mX.swap(mZ);
guardX.release();
guardZ.release();
} BSLMA_TESTALLOCATOR_EXCEPTION_TEST_END
ASSERTV(LINE1, LINE2, ZZ == X);
ASSERTV(LINE1, LINE2, XX == Z);
// ASSERTV(LINE1, LINE2, &oa == X.get_allocator());
// ASSERTV(LINE1, LINE2, &oaz == Z.get_allocator());
if (0 == X.size()) {
ASSERTV(LINE1, LINE2, emptyWillAlloc()||oam.isTotalSame());
}
else {
ASSERTV(LINE1, LINE2, oam.isTotalUp());
}
if (0 == Z.size()) {
ASSERTV(LINE1, LINE2, emptyWillAlloc() ||
oazm.isTotalSame());
}
else {
ASSERTV(LINE1, LINE2, oazm.isTotalUp());
}
}
// free function 'swap'
{
bslma::TestAllocatorMonitor oam(&oa);
bslma::TestAllocatorMonitor oazm(&oaz);
BSLMA_TESTALLOCATOR_EXCEPTION_TEST_BEGIN(oa) {
ExceptionGuard<Obj> guardX(&X, L_, &scratch);
ExceptionGuard<Obj> guardZ(&Z, L_, &scratch);
swap(mX, mZ);
guardX.release();
guardZ.release();
} BSLMA_TESTALLOCATOR_EXCEPTION_TEST_END
ASSERTV(LINE1, LINE2, XX == X);
ASSERTV(LINE1, LINE2, ZZ == Z);
// ASSERTV(LINE1, LINE2, &oa == X.get_allocator());
// ASSERTV(LINE1, LINE2, &oaz == Z.get_allocator());
if (0 == X.size()) {
ASSERTV(LINE1, LINE2, emptyWillAlloc()||oam.isTotalSame());
}
else {
ASSERTV(LINE1, LINE2, oam.isTotalUp());
}
if (0 == Z.size()) {
ASSERTV(LINE1, LINE2, emptyWillAlloc() ||
oazm.isTotalSame());
}
else {
ASSERTV(LINE1, LINE2, oazm.isTotalUp());
}
}
}
}
if (verbose) printf(
"\nInvoke free 'swap' function in a context where ADL is used.\n");
{
// 'A' values: Should cause memory allocation if possible.
bslma::TestAllocator oa("object", veryVeryVeryVerbose);
bslma::TestAllocator scratch("scratch", veryVeryVeryVerbose);
Obj mX(&oa); const Obj& X = mX;
const Obj XX(X, &scratch);
Obj mY(&oa); const Obj& Y = gg(&mY, "ABC");
const Obj YY(Y, &scratch);
bslma::TestAllocatorMonitor oam(&oa);
invokeAdlSwap(mX, mY);
ASSERTV(YY == X);
ASSERTV(XX == Y);
ASSERT(oam.isTotalSame());
}
}
template <class CONTAINER>
template <bool SELECT_ON_CONTAINER_COPY_CONSTRUCTION_FLAG,
bool OTHER_FLAGS>
void TestDriver<CONTAINER>::
testCase7_select_on_container_copy_construction_dispatch()
{
typedef typename CONTAINER::value_type VALUE;
const int TYPE_ALLOC = bslma::UsesBslmaAllocator<VALUE>::value;
// Set the three properties of 'bsltf::StdStatefulAllocator' that are
// not under test in this test case to 'OTHER_FLAGS'.
typedef bsltf::StdStatefulAllocator<
VALUE,
SELECT_ON_CONTAINER_COPY_CONSTRUCTION_FLAG,
OTHER_FLAGS,
OTHER_FLAGS,
OTHER_FLAGS> StdAlloc;
typedef bsl::deque<VALUE, StdAlloc> CObj;
typedef bsl::stack<VALUE, CObj> Obj;
const bool PROPAGATE = SELECT_ON_CONTAINER_COPY_CONSTRUCTION_FLAG;
static const char *SPECS[] = {
"",
"A",
"BC",
"CDE",
};
const int NUM_SPECS = static_cast<const int>(sizeof SPECS / sizeof *SPECS);
for (int ti = 0; ti < NUM_SPECS; ++ti) {
const char *const SPEC = SPECS[ti];
const size_t LENGTH = strlen(SPEC);
TestValues VALUES(SPEC);
bslma::TestAllocator da("default", veryVeryVeryVerbose);
bslma::TestAllocator oa("object", veryVeryVeryVerbose);
bslma::DefaultAllocatorGuard dag(&da);
StdAlloc ma(&oa);
StdAlloc scratch(&da);
{
const CObj C(VALUES.begin(), VALUES.end(), scratch);
const Obj W(C, ma); // control
ASSERTV(ti, LENGTH == W.size()); // same lengths
if (veryVerbose) { printf("\tControl Obj: "); P(W); }
Obj mX(C, ma); const Obj& X = mX;
if (veryVerbose) { printf("\t\tDynamic Obj: "); P(X); }
bslma::TestAllocatorMonitor dam(&da);
bslma::TestAllocatorMonitor oam(&oa);
const Obj Y(X);
ASSERTV(SPEC, W == Y);
ASSERTV(SPEC, W == X);
// TBD no 'get_allocator' in 'stack'
#if 0
ASSERTV(SPEC, PROPAGATE, PROPAGATE == (ma == Y.get_allocator()));
ASSERTV(SPEC, PROPAGATE, ma == X.get_allocator());
#endif
if (PROPAGATE) {
ASSERTV(SPEC, 0 != TYPE_ALLOC || dam.isInUseSame());
ASSERTV(SPEC, 0 == LENGTH || oam.isInUseUp());
}
else {
ASSERTV(SPEC, 0 == LENGTH || dam.isInUseUp());
ASSERTV(SPEC, oam.isTotalSame());
}
}
ASSERTV(SPEC, 0 == da.numBlocksInUse());
ASSERTV(SPEC, 0 == oa.numBlocksInUse());
}
}
template <class CONTAINER>
void TestDriver<CONTAINER>::testCase7_select_on_container_copy_construction()
{
// ------------------------------------------------------------------------
// COPY CONSTRUCTOR: ALLOCATOR PROPAGATION
//
// Concerns:
//: 1 The allocator of a source object using a standard allocator is
//: propagated to the newly constructed object according to the
//: 'select_on_container_copy_construction' method of the allocator.
//:
//: 2 In the absence of a 'select_on_container_copy_construction' method,
//: the allocator of a source object using a standard allocator is always
//: propagated to the newly constructed object (C++03 semantics).
//:
//: 3 The effect of the 'select_on_container_copy_construction' trait is
//: independent of the other three allocator propagation traits.
//
// Plan:
//: 1 Specify a set S of object values with varied differences, ordered by
//: increasing length, to be used in the following tests.
//:
//: 2 Create a 'bsltf::StdStatefulAllocator' with its
//: 'select_on_container_copy_construction' property configured to
//: 'false'. In two successive iterations of P-3..5, first configure the
//: three properties not under test to be 'false', then configure them
//: all to be 'true'.
//:
//: 3 For each value in S, initialize objects 'W' (a control) and 'X' using
//: the allocator from P-2.
//:
//: 4 Copy construct 'Y' from 'X' and use 'operator==' to verify that both
//: 'X' and 'Y' subsequently have the same value as 'W'.
//:
//: 5 Use the 'get_allocator' method to verify that the allocator of 'X'
//: is *not* propagated to 'Y'.
//:
//: 6 Repeat P-2..5 except that this time configure the allocator property
//: under test to 'true' and verify that the allocator of 'X' *is*
//: propagated to 'Y'. (C-1)
//:
//: 7 Repeat P-2..5 except that this time use a 'StatefulStlAllocator',
//: which does not define a 'select_on_container_copy_construction'
//: method, and verify that the allocator of 'X' is *always* propagated
//: to 'Y'. (C-2..3)
//
// Testing:
// select_on_container_copy_construction
// ------------------------------------------------------------------------
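//
// For context, a stateful standard allocator opts out of propagation on
// copy construction roughly as follows (minimal sketch under the C++11
// allocator protocol; 'MyAllocator' is hypothetical):
//..
//  template <class TYPE>
//  struct MyAllocator {
//      // ... standard allocator boilerplate ...
//      MyAllocator select_on_container_copy_construction() const
//      {
//          return MyAllocator();  // copies get a default allocator
//      }
//  };
//..
// Returning '*this' instead propagates the source allocator to the copy;
// an allocator with no such method gets C++03 semantics (always
// propagate), which the 'StatefulStlAllocator' block below exercises.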
if (verbose) printf("\n'select_on_container_copy_construction' "
"propagates *default* allocator.\n");
testCase7_select_on_container_copy_construction_dispatch<false, false>();
testCase7_select_on_container_copy_construction_dispatch<false, true>();
if (verbose) printf("\n'select_on_container_copy_construction' "
"propagates allocator of source object.\n");
testCase7_select_on_container_copy_construction_dispatch<true, false>();
testCase7_select_on_container_copy_construction_dispatch<true, true>();
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if (verbose) printf("\nVerify C++03 semantics (allocator has no "
"'select_on_container_copy_construction' method).\n");
typedef typename CONTAINER::value_type VALUE;
typedef StatefulStlAllocator<VALUE> Allocator;
typedef bsl::deque<VALUE, Allocator> CObj;
typedef bsl::stack<VALUE, CObj> Obj;
{
static const char *SPECS[] = {
"",
"A",
"BC",
"CDE",
};
const int NUM_SPECS =
static_cast<const int>(sizeof SPECS / sizeof *SPECS);
for (int ti = 0; ti < NUM_SPECS; ++ti) {
const char *const SPEC = SPECS[ti];
const size_t LENGTH = strlen(SPEC);
TestValues VALUES(SPEC);
const int ALLOC_ID = ti + 73;
Allocator a; a.setId(ALLOC_ID);
const CObj C(VALUES.begin(), VALUES.end(), a);
const Obj W(C, a); // control
ASSERTV(ti, LENGTH == W.size()); // same lengths
if (veryVerbose) { printf("\tControl Obj: "); P(W); }
Obj mX(C, a); const Obj& X = mX;
if (veryVerbose) { printf("\t\tDynamic Obj: "); P(X); }
const Obj Y(X);
ASSERTV(SPEC, W == Y);
ASSERTV(SPEC, W == X);
// TBD no 'get_allocator' in 'stack'
#if 0
ASSERTV(SPEC, ALLOC_ID == Y.get_allocator().id());
#endif
}
}
}
template <class CONTAINER>
void TestDriver<CONTAINER>::testCase7()
{
// ------------------------------------------------------------------------
// TESTING COPY CONSTRUCTOR:
//
// Concerns:
//: 1 The new object's value is the same as that of the original object
//: (relying on the equality operator).
//:
//: 2 The value of the original object is left unaffected.
//:
//: 3 Subsequent changes in or destruction of the source object have no
//: effect on the copy-constructed object.
//:
//: 4 Subsequent changes ('push'es) on the created object have no
//: effect on the original.
//:
//: 5 The object has its internal memory management system hooked up
//: properly so that *all* internally allocated memory draws from a
//: user-supplied allocator whenever one is specified.
//:
//: 6 The function is exception neutral w.r.t. memory allocation.
//
// Plan:
//: 1 Specify a set S of object values with substantial and varied
//: differences, ordered by increasing length, to be used in the
//: following tests.
//:
//: 2 For each value in S, initialize objects w and x, copy construct y
//: from x and use 'operator==' to verify that both x and y subsequently
//: have the same value as w. Let x go out of scope and again verify
//: that w == y. (C-1..4)
//:
//: 3 For each value in S initialize objects w and x, and copy construct y
//: from x. Change the state of y by using the *primary* *manipulator*
//: 'push'. Use 'operator!=' to verify that y differs from both x and
//: w. (C-3..4)
//:
//: 4 Repeat the tests of P-2: (C-5)
//: 1 While passing a testAllocator as a parameter to the new object and
//: ascertaining that the new object gets its memory from the provided
//: testAllocator.
//: 2 Verify that neither the global nor the default allocator is used
//: to supply memory. (C-5)
//:
//: 5 Repeat the tests of P-2 in the presence of exceptions during memory
//: allocations, using a 'bslma::TestAllocator' and varying its
//: *allocation* *limit*. (C-6)
//
// Testing:
// stack(const stack& original);
// stack(const stack& original, const ALLOCATOR& basicAllocator);
// ------------------------------------------------------------------------
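//
// In outline, the behavior being verified (minimal sketch; the allocator
// names are illustrative):
//..
//  bslma::TestAllocator oa;
//  bsl::stack<int> original(&oa);
//  original.push(5);
//  bsl::stack<int> copy(original, &oa);   // allocator-extended copy
//  assert(copy == original);
//  copy.push(6);                          // changing the copy ...
//  assert(copy != original);              // ... leaves the source alone
//..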
const char *cont = ContainerName<container_type>::name();
const char *val = ValueName<value_type>::name();
if (verbose) { P_(cont); P_(val); P(typeAlloc()); }
const TestValues VALUES; // contains 52 distinct increasing values
bslma::TestAllocator da(veryVeryVerbose);
bslma::DefaultAllocatorGuard dag(&da);
bslma::TestAllocator oa(veryVeryVerbose);
{
static const char *SPECS[] = {
"",
"A",
"BC",
"CDE",
"DEAB",
"EABCD",
"ABCDEFG",
"HFGEDCBA",
"CFHEBIDGA",
"BENCKHGMALJDFOI",
"IDMLNEFHOPKGBCJA",
"OIQGDNPMLKBACHFEJ"
};
const int NUM_SPECS = sizeof SPECS / sizeof *SPECS;
for (int ti = 0; ti < NUM_SPECS; ++ti) {
const char *const SPEC = SPECS[ti];
const size_t LENGTH = strlen(SPEC);
if (verbose) {
printf("\nFor an object of length " ZU ":\n", LENGTH);
P(SPEC);
}
// Create control object 'W'.
Obj mW; const Obj& W = gg(&mW, SPEC);
ASSERTV(ti, LENGTH == W.size()); // same lengths
Obj mX(&oa); const Obj& X = gg(&mX, SPEC);
ASSERT(W == X);
{ // Testing concern 1..4.
if (veryVerbose) { printf("\t\t\tRegular Case :"); }
Obj *pX = new Obj(&oa);
gg(pX, SPEC);
const Obj Y0(*pX);
ASSERTV(SPEC, W == Y0);
ASSERTV(SPEC, W == X);
// ASSERTV(SPEC, Y0.get_allocator() ==
// bslma::Default::defaultAllocator());
delete pX;
ASSERTV(SPEC, W == Y0);
}
{ // Testing concern 5.
if (veryVerbose) printf("\t\t\tInsert into created obj, "
"without test allocator:\n");
Obj mY1(X); const Obj& Y1 = mY1;
mY1.push(VALUES['Z' - 'A']);
ASSERTV(SPEC, Y1.size() == LENGTH + 1);
ASSERTV(SPEC, W != Y1);
ASSERTV(SPEC, X != Y1);
ASSERTV(SPEC, W == X);
}
{ // Testing concern 5 with test allocator.
if (veryVerbose)
printf("\t\t\tInsert into created obj, "
"with test allocator:\n");
bslma::TestAllocatorMonitor dam(&da);
bslma::TestAllocatorMonitor oam(&oa);
Obj mY11(X, &oa); const Obj& Y11 = mY11;
ASSERT(dam.isTotalSame());
ASSERTV(cont, LENGTH, oam.isTotalSame(), emptyWillAlloc(),
oam.isTotalSame() == (!emptyWillAlloc() && 0 == LENGTH));
mY11.push(VALUES['Z' - 'A']);
ASSERT(dam.isTotalSame());
ASSERTV(SPEC, Y11.size() == LENGTH + 1);
ASSERTV(SPEC, W != Y11);
ASSERTV(SPEC, X != Y11);
// ASSERTV(SPEC, Y11.get_allocator() == X.get_allocator());
}
{ // Exception checking.
BSLMA_TESTALLOCATOR_EXCEPTION_TEST_BEGIN(oa) {
bslma::TestAllocatorMonitor dam(&da);
bslma::TestAllocatorMonitor oam(&oa);
const Obj Y2(X, &oa);
if (veryVerbose) {
printf("\t\t\tException Case :\n");
}
ASSERT(dam.isTotalSame());
ASSERT(oam.isTotalUp() || (!emptyWillAlloc() &&
0 == LENGTH));
ASSERTV(SPEC, W == Y2);
ASSERTV(SPEC, W == X);
// ASSERTV(SPEC, Y2.get_allocator() == X.get_allocator());
} BSLMA_TESTALLOCATOR_EXCEPTION_TEST_END
}
}
}
}
template <class CONTAINER>
void TestDriver<CONTAINER>::testCase6()
{
// ---------------------------------------------------------------------
// TESTING EQUALITY OPERATORS:
// Concerns:
//: 1 Two objects, 'X' and 'Y', compare equal if and only if they contain
//: the same values.
//:
//: 2 No non-salient attributes (i.e., 'allocator') participate.
//:
//: 3 'true == (X == X)' (i.e., identity)
//:
//: 4 'false == (X != X)' (i.e., identity)
//:
//: 5 'X == Y' if and only if 'Y == X' (i.e., commutativity)
//:
//: 6 'X != Y' if and only if 'Y != X' (i.e., commutativity)
//:
//: 7 'X != Y' if and only if '!(X == Y)'
//
// Plan:
//: 1 Use the respective addresses of 'operator==' and 'operator!=' to
//: initialize function pointers having the appropriate signatures and
//: return types for the two homogeneous, free equality-comparison
//: operators defined in this component.
//:
//: 2 Create a 'bslma::TestAllocator' object, and install it as the default
//: allocator (note that a ubiquitous test allocator is already installed
//: as the global allocator).
//:
//: 3 Using the table-driven technique, specify a set of distinct
//: specifications for the 'gg' function.
//:
//: 4 For each row 'R1' in the table of P-3: (C-1..7)
//:
//: 1 Create a single object, using a "scratch" allocator, and use it
//: to verify the reflexive (anti-reflexive) property of equality
//: (inequality) in the presence of aliasing. (C-3..4)
//:
//: 2 For each row 'R2' in the table of P-3: (C-1..2, 5..7)
//:
//: 1 Record, in 'EXP', whether or not distinct objects created from
//: 'R1' and 'R2', respectively, are expected to have the same value.
//:
//: 2 For each of two configurations, 'a' and 'b': (C-1..2, 5..7)
//:
//: 1 Create two (object) allocators, 'oax' and 'oay'.
//:
//: 2 Create an object 'X', using 'oax', having the value 'R1'.
//:
//: 3 Create an object 'Y', using 'oax' in configuration 'a' and
//: 'oay' in configuration 'b', having the value 'R2'.
//:
//: 4 Verify the commutativity property and expected return value for
//: both '==' and '!=', while monitoring both 'oax' and 'oay' to
//: ensure that no object memory is ever allocated by either
//: operator. (C-1..2, 5..7)
//:
//: 5 Use the test allocator from P-2 to verify that no memory is ever
//: allocated from the default allocator.
//
// Testing:
// bool operator==(const stack<V, C>& lhs, const stack<V, C>& rhs);
// bool operator!=(const stack<V, C>& lhs, const stack<V, C>& rhs);
// ------------------------------------------------------------------------
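//
// The salient point being verified, as a minimal sketch: equality depends
// only on the contained values, never on the allocators:
//..
//  bslma::TestAllocator oa1, oa2;
//  bsl::stack<int> x(&oa1);  x.push(7);
//  bsl::stack<int> y(&oa2);  y.push(7);
//  assert(x == y);               // same values, different allocators
//  y.push(8);
//  assert(x != y);  assert(!(x == y));
//..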
if (verbose) printf("\nEQUALITY-COMPARISON OPERATORS"
"\n=============================\n");
if (verbose)
printf("\nAssign the address of each operator to a variable.\n");
{
typedef bool (*operatorPtr)(const Obj&, const Obj&);
// Verify that the signatures and return types are standard.
operatorPtr operatorEq = operator==;
operatorPtr operatorNe = operator!=;
(void) operatorEq; // quash potential compiler warnings
(void) operatorNe;
}
const int NUM_DATA = DEFAULT_NUM_DATA;
const DefaultDataRow (&DATA)[NUM_DATA] = DEFAULT_DATA;
bslma::TestAllocator da("default", veryVeryVeryVerbose);
bslma::DefaultAllocatorGuard dag(&da);
bslma::TestAllocatorMonitor dam(&da);
if (verbose) printf("\nCompare every value with every value.\n");
{
// Create first object
for (int ti = 0; ti < NUM_DATA; ++ti) {
const char *const SPEC1 = DATA[ti].d_spec;
const size_t LENGTH1 = strlen(DATA[ti].d_spec);
if (veryVerbose) { T_ P(SPEC1) }
// Ensure an object compares correctly with itself (alias test).
{
bslma::TestAllocator scratch("scratch", veryVeryVeryVerbose);
Obj mX(&scratch); const Obj& X = gg(&mX, SPEC1);
ASSERTV(SPEC1, X == X);
ASSERTV(SPEC1, !(X != X));
}
for (int tj = 0; tj < NUM_DATA; ++tj) {
const char *const SPEC2 = DATA[tj].d_spec;
const size_t LENGTH2 = strlen(DATA[tj].d_spec);
if (veryVerbose) {
T_ T_ P(SPEC2) }
const bool EXP = ti == tj; // expected equality
for (char cfg = 'a'; cfg <= 'b'; ++cfg) {
const char CONFIG = cfg; // Determines 'Y's allocator.
// Create two distinct test allocators, 'oax' and 'oay'.
bslma::TestAllocator oax("objectx", veryVeryVeryVerbose);
bslma::TestAllocator oay("objecty", veryVeryVeryVerbose);
// Map allocators above to objects 'X' and 'Y' below.
bslma::TestAllocator& xa = oax;
bslma::TestAllocator& ya = 'a' == CONFIG ? oax : oay;
Obj mX(&xa); const Obj& X = gg(&mX, SPEC1);
Obj mY(&ya); const Obj& Y = gg(&mY, SPEC2);
ASSERTV(CONFIG, LENGTH1 == X.size());
ASSERTV(CONFIG, LENGTH2 == Y.size());
// Verify value, commutativity, and no memory allocation.
bslma::TestAllocatorMonitor oaxm(&xa);
bslma::TestAllocatorMonitor oaym(&ya);
ASSERTV(CONFIG, EXP == (X == Y));
ASSERTV(CONFIG, EXP == (Y == X));
ASSERTV(CONFIG, !EXP == (X != Y));
ASSERTV(CONFIG, !EXP == (Y != X));
ASSERTV(CONFIG, oaxm.isTotalSame());
ASSERTV(CONFIG, oaym.isTotalSame());
}
}
}
}
ASSERT(dam.isTotalSame());
}
template <class CONTAINER>
void TestDriver<CONTAINER>::testCase4()
{
// ------------------------------------------------------------------------
// BASIC ACCESSORS
// Ensure each basic accessor:
// - top
// - size
// properly interprets object state.
//
// Concerns:
//: 1 Each accessor returns the value of the correct property of the
//: object.
//:
//: 2 Each accessor method is declared 'const'.
//:
//: 3 No accessor allocates any memory.
//:
//
// Plan:
//: 1 For each set of 'SPEC' of different length:
//:
//: 1 Default construct the object with various configurations.
//:
//: 2 Add in a series of objects.
//:
//: 3 Verify 'top' yields the expected result.
//
// Testing:
// reference top();
// const_reference top() const;
// bool empty() const;
// size_type size() const;
// ------------------------------------------------------------------------
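//
// The accessor contract in brief (minimal sketch):
//..
//  bsl::stack<int> s;
//  assert(s.empty());   assert(0 == s.size());
//  s.push(42);
//  assert(!s.empty());  assert(1 == s.size());  assert(42 == s.top());
//..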
const char *cont = ContainerName<container_type>::name();
const char *val = ValueName<value_type>::name();
if (verbose) { P_(cont); P_(val); P(typeAlloc()); }
const TestValues VALUES; // contains 52 distinct increasing values
static const struct {
int d_line; // source line number
const char *d_spec; // specification string
const char *d_results; // expected results
} DATA[] = {
//line spec result
//---- -------- ------
{ L_, "", "" },
{ L_, "A", "A" },
{ L_, "AB", "AB" },
{ L_, "ABC", "ABC" },
{ L_, "ABCD", "ABCD" },
{ L_, "ABCDE", "ABCDE" }
};
const int NUM_DATA = sizeof DATA / sizeof *DATA;
if (verbose) { printf(
"\nCreate objects with various allocator configurations.\n"); }
{
for (int ti = 0; ti < NUM_DATA; ++ti) {
const int LINE = DATA[ti].d_line;
const char *const SPEC = DATA[ti].d_spec;
const size_t LENGTH = strlen(DATA[ti].d_results);
const TestValues EXP(DATA[ti].d_results);
if (verbose) { P_(LINE) P_(LENGTH) P(SPEC); }
for (char cfg = 'a'; cfg <= 'd'; ++cfg) {
const char CONFIG = cfg;
bslma::TestAllocator da("default", veryVeryVeryVerbose);
bslma::TestAllocator fa("footprint", veryVeryVeryVerbose);
bslma::TestAllocator sa1("supplied1", veryVeryVeryVerbose);
bslma::TestAllocator sa2("supplied2", veryVeryVeryVerbose);
bslma::DefaultAllocatorGuard dag(&da);
bslma::TestAllocator& oa = 'a' == CONFIG || 'b' == CONFIG
? da
: 'c' == CONFIG
? sa1
: sa2;
bslma::TestAllocator& noa = &oa != &da ? da : sa1;
bslma::TestAllocatorMonitor oam(&oa);
bslma::TestAllocatorMonitor noam(&noa);
Obj& mX = 'a' == CONFIG
? * new (fa) Obj()
: 'b' == CONFIG
? * new (fa) Obj((bslma::Allocator *) 0)
: 'c' == CONFIG
? * new (fa) Obj(&sa1)
: * new (fa) Obj(&sa2);
ASSERT( oam.isTotalUp() == emptyWillAlloc());
ASSERT(noam.isTotalSame());
const Obj& X = gg(&mX, SPEC);
ASSERT(&X == &mX);
oam.reset();
// --------------------------------------------------------
// Verify basic accessors
// ASSERTV(LINE, SPEC, CONFIG, &oa == X.get_allocator());
ASSERTV(LINE, SPEC, CONFIG, LENGTH == X.size());
if (LENGTH > 0) {
ASSERTV(LINE, SPEC, CONFIG, EXP[LENGTH - 1] == mX.top());
ASSERTV(LINE, SPEC, CONFIG, EXP[LENGTH - 1] == X.top());
}
else {
bsls::AssertTestHandlerGuard hG;
ASSERT_SAFE_FAIL(mX.top());
}
ASSERTV(LINE, LENGTH, X.empty(), (0 == LENGTH) == mX.empty());
ASSERTV(LINE, LENGTH, X.empty(), (0 == LENGTH) == X.empty());
ASSERT( oam.isTotalSame());
ASSERT(noam.isTotalSame());
fa.deleteObject(&mX);
}
}
}
}
template <class CONTAINER>
void TestDriver<CONTAINER>::testCase3()
{
// ------------------------------------------------------------------------
// TESTING PRIMITIVE GENERATOR FUNCTIONS gg AND ggg:
// Having demonstrated that our primary manipulators work as expected
// under normal conditions, verify that the generator functions 'gg' and
// 'ggg' correctly translate 'spec' strings into object values.
//
// Concerns:
//: 1 Valid generator syntax produces expected results
//:
//: 2 Invalid syntax is detected and reported.
//
// Plan:
//: 1 For each of an enumerated sequence of 'spec' values, ordered by
//: increasing 'spec' length:
//:
//: 1 Use the primitive generator function 'gg' to set the state of a
//: newly created object.
//:
//: 2 Verify that 'gg' returns a valid reference to the modified argument
//: object.
//:
//: 3 Use the basic accessors to verify that the value of the object is
//: as expected. (C-1)
//:
//: 2 For each of an enumerated sequence of 'spec' values, ordered by
//: increasing 'spec' length, use the primitive generator function 'ggg'
//: to set the state of a newly created object.
//:
//: 1 Verify that 'ggg' returns the expected value corresponding to the
//: location of the first invalid value of the 'spec'. (C-2)
//
// Testing:
// stack<V,C>& gg(stack<V,C> *object, const char *spec);
// int ggg(stack<V,C> *object, const char *spec, int verbose = 1);
// ------------------------------------------------------------------------
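//
// For reference, each uppercase letter in a 'spec' maps to a 'push' of
// the corresponding test value, so (sketch):
//..
//  Obj mX;  gg(&mX, "ABC");   // is equivalent to:
//  Obj mY;
//  mY.push(VALUES[0]);  mY.push(VALUES[1]);  mY.push(VALUES[2]);
//  assert(mX == mY);
//..
// 'ggg' additionally reports the index of the first invalid character in
// the 'spec', or -1 if the whole 'spec' is valid.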
const char *cont = ContainerName<container_type>::name();
const char *val = ValueName<value_type>::name();
if (verbose) { P_(cont); P(val); }
bslma::TestAllocator oa(veryVeryVerbose);
bslma::TestAllocator da(veryVeryVerbose);
bslma::DefaultAllocatorGuard dag(&da);
if (verbose) printf("\nTesting generator on valid specs.\n");
{
static const struct {
int d_line; // source line number
const char *d_spec; // specification string
const char *d_results; // expected element values
} DATA[] = {
//line spec results
//---- -------- -------
{ L_, "", "" },
{ L_, "A", "A" },
{ L_, "B", "B" },
{ L_, "AB", "AB" },
{ L_, "CD", "CD" },
{ L_, "ABC", "ABC" },
{ L_, "ABCD", "ABCD" },
{ L_, "ABCDE", "ABCDE" },
};
const int NUM_DATA = sizeof DATA / sizeof *DATA;
int oldLen = -1;
for (int ti = 0; ti < NUM_DATA ; ++ti) {
const int LINE = DATA[ti].d_line;
const char *const SPEC = DATA[ti].d_spec;
const size_t LENGTH = strlen(DATA[ti].d_results);
const TestValues EXP(DATA[ti].d_results);
const int curLen = (int)strlen(SPEC);
bslma::TestAllocatorMonitor oam(&oa);
bslma::TestAllocatorMonitor dam(&da);
Obj mX(&oa);
const Obj& X = gg(&mX, SPEC);
LOOP3_ASSERT(oam.isTotalUp(), emptyWillAlloc(), LENGTH,
oam.isTotalUp() == (emptyWillAlloc() || LENGTH > 0));
ASSERT(dam.isTotalSame());
const Obj& Y = g( SPEC);
ASSERT(&mX == &X);
ASSERT(Y == X);
if (curLen != oldLen) {
if (verbose) printf("\tof length %d:\n", curLen);
ASSERTV(LINE, oldLen <= curLen); // non-decreasing
oldLen = curLen;
}
ASSERTV(LINE, LENGTH == X.size());
ASSERTV(LINE, LENGTH == Y.size());
emptyNVerifyStack(&mX, EXP, LENGTH, L_);
emptyNVerifyStack(const_cast<Obj *>(&Y), EXP, LENGTH, L_);
}
}
if (verbose) printf("\nTesting generator on invalid specs.\n");
{
static const struct {
int d_line; // source line number
const char *d_spec; // specification string
int d_index; // offending character index
} DATA[] = {
//line spec index
//---- -------- -----
{ L_, "", -1, }, // control
{ L_, "A", -1, }, // control
{ L_, " ", 0, },
{ L_, ".", 0, },
{ L_, "E", -1, }, // control
{ L_, "a", 0, },
{ L_, "z", 0, },
{ L_, "AE", -1, }, // control
{ L_, "aE", 0, },
{ L_, "Ae", 1, },
{ L_, ".~", 0, },
{ L_, "~!", 0, },
{ L_, " ", 0, },
{ L_, "ABC", -1, }, // control
{ L_, " BC", 0, },
{ L_, "A C", 1, },
{ L_, "AB ", 2, },
{ L_, "?#:", 0, },
{ L_, " ", 0, },
{ L_, "ABCDE", -1, }, // control
{ L_, "aBCDE", 0, },
{ L_, "ABcDE", 2, },
{ L_, "ABCDe", 4, },
{ L_, "AbCdE", 1, }
};
const int NUM_DATA = sizeof DATA / sizeof *DATA;
int oldLen = -1;
for (int ti = 0; ti < NUM_DATA ; ++ti) {
const int LINE = DATA[ti].d_line;
const char *const SPEC = DATA[ti].d_spec;
const int INDEX = DATA[ti].d_index;
const int LENGTH = static_cast<int>(strlen(SPEC));
Obj mX(&oa);
if (LENGTH != oldLen) {
if (verbose) printf("\tof length %d:\n", LENGTH);
ASSERTV(LINE, oldLen <= LENGTH); // non-decreasing
oldLen = LENGTH;
}
if (veryVerbose) printf("\t\tSpec = \"%s\"\n", SPEC);
int RESULT = ggg(&mX, SPEC, veryVerbose);
ASSERTV(LINE, INDEX == RESULT);
}
}
}
template <class CONTAINER>
void TestDriver<CONTAINER>::testCase2()
{
// ------------------------------------------------------------------------
// TESTING CONSTRUCTORS AND PRIMARY MANIPULATORS (BOOTSTRAP):
// The basic concern is that the default constructor, the destructor,
// and, under normal conditions (i.e., no aliasing), the primary
// manipulators
// - push
// - pop
// operate as expected.
//
// Concerns:
//: 1 An object created with the default constructor (with or without a
//: supplied allocator) has the contractually specified default value.
//:
//: 2 If an allocator is NOT supplied to the default constructor, the
//: default allocator in effect at the time of construction becomes the
//: object allocator for the resulting object.
//:
//: 3 If an allocator IS supplied to the default constructor, that
//: allocator becomes the object allocator for the resulting object.
//:
//: 4 Supplying a null allocator address has the same effect as not
//: supplying an allocator.
//:
//: 5 Supplying an allocator to the default constructor has no effect on
//: subsequent object values.
//:
//: 6 Any memory allocation is from the object allocator.
//:
//: 7 There is no temporary allocation from any allocator.
//:
//: 8 Every object releases any allocated memory at destruction.
//:
//: 9 QoI: The default constructor allocates no memory (only true if the
//: underlying container is 'vector').
//:
//:10 'push' adds an additional element to the object.
//:
//:11 'push' pushes a sequence of objects into the stack, and 'pop' will
//: recover those same values in reverse order.
//:
//:12 Any argument can be 'const'.
//:
//:13 Any memory allocation is exception neutral.
//:
//:14 All versions of the copy c'tor produce an object with the same value
//: as the original, and allocate memory from the appropriate allocator.
//:
//:15 All versions of the c'tor from container produce an object with the
//: appropriate value, and allocate memory from the appropriate
//: allocator.
//
// Plan:
//: 1 For each value of increasing length, 'L':
//:
//: 2 Using a loop-based approach, default-construct three distinct
//: objects, in turn, but configured differently: (a) without passing
//: an allocator, (b) passing a null allocator address explicitly, and
//: (c) passing the address of a test allocator distinct from the
//: default. For each of these three iterations: (C-1..14)
//:
//: 1 Create three 'bslma::TestAllocator' objects, and install one as
//: the current default allocator (note that a ubiquitous test
//: allocator is already installed as the global allocator).
//:
//: 2 Use the default constructor to dynamically create an object
//: 'X', with its object allocator configured appropriately (see
//: P-2); use a distinct test allocator for the object's footprint.
//:
//: 3 Use the (as yet unproven) 'get_allocator' to ensure that its
//: object allocator is properly installed. (C-2..4)
//:
//: 4 Use the appropriate test allocators to verify that no memory is
//: allocated by the default constructor. (C-9)
//:
//: 5 Use the individual (as yet unproven) salient attribute accessors
//: to verify the default-constructed value. (C-1)
//:
//: 6 Insert 'L - 1' elements in order of increasing value into the
//: container.
//:
//: 7 Insert the 'L'th value in the presence of exceptions and use the
//: (as yet unproven) basic accessors to verify the container has the
//: expected values. Verify the number of allocations is as expected.
//: (C-5..6, 13..14)
//:
//: 8 Verify that no temporary memory is allocated from the object
//: allocator. (C-7)
//:
//: 9 Make a copy of the object using the appropriate copy c'tor.
//:
//: 10 Verify that all object memory is released when the object is
//: destroyed. (Implicit in test allocator.) (C-8)
//:
//: 11 Verify that calling 'pop' on an empty stack will fail an assert
//: in safe mode.
//
// Testing:
// stack();
// stack(bslma::Allocator *basicAllocator);
// ~stack();
// void push(const value_type& value);
// stack(const CONTAINER& container, bslma::Allocator *basicAllocator);
// stack(const stack& original, bslma::Allocator *basicAllocator);
// ------------------------------------------------------------------------
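//
// The three allocator configurations exercised below, in brief (minimal
// sketch; 'sa' is a supplied 'bslma::TestAllocator'):
//..
//  Obj a;                           // 'a': default allocator
//  Obj b((bslma::Allocator *) 0);   // 'b': null pointer, same effect
//  Obj c(&sa);                      // 'c': explicitly supplied
//..
// In configurations 'a' and 'b' the object allocator is the installed
// default; only in 'c' does 'sa' supply the object's memory.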
const char *cont = ContainerName<container_type>::name();
const char *val = ValueName<value_type>::name();
if (verbose) { P_(cont); P_(val); P(typeAlloc()); }
const TestValues VALUES; // contains 52 distinct increasing values
const size_t MAX_LENGTH = 9;
for (size_t ti = 0; ti < MAX_LENGTH; ++ti) {
const size_t LENGTH = ti;
for (char cfg = 'a'; cfg <= 'c'; ++cfg) {
const char CONFIG = cfg; // how we specify the allocator
bslma::TestAllocator da("default", veryVeryVeryVerbose);
bslma::TestAllocator fa("footprint", veryVeryVeryVerbose);
bslma::TestAllocator sa("supplied", veryVeryVeryVerbose);
bslma::DefaultAllocatorGuard dag(&da);
// ----------------------------------------------------------------
if (veryVerbose) {
printf("\n\tTesting default constructor.\n");
}
Obj *objPtr;
switch (CONFIG) {
case 'a': {
objPtr = new (fa) Obj();
} break;
case 'b': {
objPtr = new (fa) Obj((bslma::Allocator *) 0);
} break;
case 'c': {
objPtr = new (fa) Obj(&sa);
} break;
default: {
ASSERTV(CONFIG, !"Bad allocator config.");
return; // RETURN
} break;
}
Obj& mX = *objPtr; const Obj& X = mX;
bslma::TestAllocator& oa = 'c' == CONFIG ? sa : da;
// Verify any attribute allocators are installed properly.
// ASSERTV(LENGTH, CONFIG, &oa == X.get_allocator());
// Verify no allocation from the object/non-object allocators.
ASSERTV(CONFIG, emptyWillAlloc() == !!oa.numBlocksTotal());
ASSERTV(LENGTH, CONFIG, 0 == X.size());
ASSERTV(LENGTH, CONFIG, X.empty());
{
bsls::AssertTestHandlerGuard hG;
ASSERT_SAFE_FAIL(mX.pop());
}
// ----------------------------------------------------------------
if (veryVerbose) { printf("\n\tTesting 'push' (bootstrap).\n"); }
for (size_t tj = 0; tj + 1 < LENGTH; ++tj) {
mX.push(VALUES[tj]);
ASSERTV(LENGTH, tj, CONFIG, VALUES[tj] == mX.top());
ASSERTV(LENGTH, tj, CONFIG, VALUES[tj] == X.top());
}
if (LENGTH > 1) {
ASSERTV(CONFIG, oa.numBlocksTotal() > 0);
}
if (0 < LENGTH) {
ASSERTV(LENGTH, CONFIG, LENGTH - 1 == X.size());
bslma::TestAllocator scratch("scratch", veryVeryVeryVerbose);
// push the last element under an exception guard
BSLMA_TESTALLOCATOR_EXCEPTION_TEST_BEGIN(oa) {
ExceptionGuard<Obj> guard(&X, L_, &scratch);
mX.push(VALUES[LENGTH - 1]);
guard.release();
// Verify no temporary memory is allocated from the object
// allocator.
if (1 == LENGTH && !typeAlloc()) {
// If the vector grows, the old vector will be
// deallocated, so only do this test on '1 == LENGTH'.
ASSERTV(LENGTH, CONFIG, oa.numBlocksTotal(),
oa.numBlocksInUse(),
oa.numBlocksTotal() == oa.numBlocksInUse());
}
ASSERTV(LENGTH, CONFIG, VALUES[LENGTH - 1] == mX.top());
ASSERTV(LENGTH, CONFIG, VALUES[LENGTH - 1] == X.top());
} BSLMA_TESTALLOCATOR_EXCEPTION_TEST_END
ASSERTV(LENGTH, CONFIG, LENGTH == X.size());
}
// Test copy c'tors
{
bslma::TestAllocatorMonitor oaMonitor(&oa);
Obj *copyPtr;
switch (CONFIG) {
case 'a': {
copyPtr = new (fa) Obj(X);
} break;
case 'b': {
copyPtr = new (fa) Obj(X, (bslma::Allocator *) 0);
} break;
case 'c': {
copyPtr = new (fa) Obj(X, &sa);
} break;
default: {
ASSERTV(CONFIG, !"Bad allocator config.");
return; // RETURN
} break;
}
ASSERT(X == *copyPtr);
ASSERT((0 < LENGTH || emptyWillAlloc()) ==
oaMonitor.isTotalUp());
emptyAndVerify(copyPtr, VALUES, LENGTH, L_);
fa.deleteObject(copyPtr);
}
// Test container c'tors
{
bslma::TestAllocatorMonitor oaMonitor(&oa);
bslma::TestAllocator ca;
CONTAINER c(&ca); const CONTAINER& C = c;
// We have to insert the values one at a time; 'vector' has a
// problem with range inserts of function pointers.
for (size_t tk = 0; tk < LENGTH; ++tk) {
c.push_back(VALUES[tk]);
}
Obj *cCopyPtr;
switch (CONFIG) {
case 'a': {
cCopyPtr = new (fa) Obj(C);
} break;
case 'b': {
cCopyPtr = new (fa) Obj(C, (bslma::Allocator *) 0);
} break;
case 'c': {
cCopyPtr = new (fa) Obj(C, &sa);
} break;
default: {
ASSERTV(CONFIG, !"Bad allocator config.");
return; // RETURN
} break;
}
ASSERT(X == *cCopyPtr);
ASSERT((0 < LENGTH || emptyWillAlloc()) ==
oaMonitor.isTotalUp());
if ('a' == CONFIG) {
// Empty the object in this configuration only, so that we also
// test destroying a non-empty object (configs 'b' and 'c').
emptyAndVerify(cCopyPtr, VALUES, LENGTH, L_);
}
fa.deleteObject(cCopyPtr);
}
emptyAndVerify(&mX, VALUES, LENGTH, L_);
if (&oa != &da) {
ASSERT(0 == da.numBlocksTotal());
}
// Reclaim dynamically allocated object under test.
fa.deleteObject(objPtr);
}
}
}
template <class CONTAINER>
void TestDriver<CONTAINER>::testCase1_NoAlloc(int *testValues,
size_t numValues)
{
// ------------------------------------------------------------------------
// BREATHING TEST
// This case exercises (but does not fully test) basic functionality.
//
// Concerns:
//: 1 The class is sufficiently functional to enable comprehensive
//: testing in subsequent test cases.
//
// Plan:
//: 1 Execute each method to verify functionality for simple case.
//
// Testing:
// BREATHING TEST
// ------------------------------------------------------------------------
// Sanity check.
ASSERTV(0 < numValues);
ASSERTV(8 > numValues);
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if (veryVerbose) {
printf("Default construct an empty set.\n");
}
{
Obj x; const Obj& X = x;
ASSERTV(0 == X.size());
ASSERTV(true == X.empty());
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if (veryVerbose) {
printf("Test use of allocators.\n");
}
{
Obj o1; const Obj& O1 = o1;
for (size_t i = 0; i < numValues; ++i) {
o1.push(value_type(testValues[i]));
}
ASSERTV(numValues == O1.size());
Obj o2(O1); const Obj& O2 = o2;
ASSERTV(numValues == O1.size());
ASSERTV(numValues == O2.size());
Obj o3; const Obj& O3 = o3;
ASSERTV(numValues == O1.size());
ASSERTV(numValues == O2.size());
ASSERTV(0 == O3.size());
o1.swap(o3);
ASSERTV(0 == O1.size());
ASSERTV(numValues == O2.size());
ASSERTV(numValues == O3.size());
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if (veryVerbose) {
printf("Test primary manipulators/accessors on every permutation.\n");
}
native_std::sort(testValues, testValues + numValues);
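// ('sort' establishes the lexicographically first arrangement, so the
// 'do'/'while' loop below visits every permutation exactly once -- the
// standard 'next_permutation' idiom, sketched standalone:)
//..
//  int v[] = { 1, 2, 3 };
//  native_std::sort(v, v + 3);
//  do {
//      // exercise the container with the arrangement in 'v'
//  } while (native_std::next_permutation(v, v + 3));
//..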
do {
// For each possible permutation of values, push the values, verify
// the stack's state, then pop the values back off in reverse order.
Obj x; const Obj& X = x;
for (size_t i = 0; i < numValues; ++i) {
Obj y(X); const Obj& Y = y;
ASSERTV(X == Y);
ASSERTV(!(X != Y));
// Test 'push'.
value_type value(testValues[i]);
x.push(value);
ASSERTV(testValues[i] == x.top());
// Test size, empty.
ASSERTV(i + 1 == X.size());
ASSERTV(false == X.empty());
ASSERTV(X != Y);
ASSERTV(!(X == Y));
y = x;
ASSERTV(X == Y);
ASSERTV(!(X != Y));
}
ASSERTV(X.size() == numValues);
for (int i = static_cast<int>(numValues) - 1; i >= 0; --i) {
testValues[i] = X.top();
x.pop();
}
ASSERTV(X.size() == 0);
} while (native_std::next_permutation(testValues,
testValues + numValues));
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if (veryVerbose) {
printf("Test class comparison operators.\n");
}
{
// Iterate over possible selections of elements to add to two
// containers, 'X' and 'Y', then compare the results of the comparison
// operators to the comparison between two containers equivalent to
// the underlying containers in the stack objects.
for (size_t i = 0; i < numValues; ++i) {
for (size_t j = 0; j < numValues; ++j) {
for (size_t length = 0; length < numValues; ++length) {
for (size_t m = 0; m < j; ++m) {
Obj x; const Obj& X = x;
Obj y; const Obj& Y = y;
CONTAINER xx;
const CONTAINER& XX = xx;
CONTAINER yy;
const CONTAINER& YY = yy;
for (size_t k = 0; k < j; ++k) {
size_t xIndex = (i + length) % numValues;
size_t yIndex = (j + length) % numValues;
x.push( testValues[xIndex]);
xx.push_back(testValues[xIndex]);
if (k < m) {
y.push( testValues[yIndex]);
yy.push_back(testValues[yIndex]);
}
}
ASSERTV((X == Y) == (XX == YY));
ASSERTV((X != Y) == (XX != YY));
ASSERTV((X < Y) == (XX < YY));
ASSERTV((X > Y) == (XX > YY));
ASSERTV((X <= Y) == (XX <= YY));
ASSERTV((X >= Y) == (XX >= YY));
ASSERTV((X == Y) == !(X != Y));
ASSERTV((X != Y) == !(X == Y));
ASSERTV((X < Y) == !(X >= Y));
ASSERTV((X > Y) == !(X <= Y));
ASSERTV((X <= Y) == !(X > Y));
ASSERTV((X >= Y) == !(X < Y));
ASSERTV((Y == X) == (YY == XX));
ASSERTV((Y != X) == (YY != XX));
ASSERTV((Y < X) == (YY < XX));
ASSERTV((Y > X) == (YY > XX));
ASSERTV((Y <= X) == (YY <= XX));
ASSERTV((Y >= X) == (YY >= XX));
ASSERTV((Y == X) == !(Y != X));
ASSERTV((Y != X) == !(Y == X));
ASSERTV((Y < X) == !(Y >= X));
ASSERTV((Y > X) == !(Y <= X));
ASSERTV((Y <= X) == !(Y > X));
ASSERTV((Y >= X) == !(Y < X));
}
}
}
}
}
}
template <class CONTAINER>
void TestDriver<CONTAINER>::testCase1(int *testValues,
size_t numValues)
{
// ------------------------------------------------------------------------
// BREATHING TEST
// This case exercises (but does not fully test) basic functionality.
//
// Concerns:
//: 1 The class is sufficiently functional to enable comprehensive
//: testing in subsequent test cases.
//
// Plan:
//: 1 Execute each method to verify functionality for simple case.
//
// Testing:
// BREATHING TEST
// ------------------------------------------------------------------------
bslma::TestAllocator defaultAllocator("defaultAllocator");
bslma::DefaultAllocatorGuard defaultGuard(&defaultAllocator);
bslma::TestAllocator objectAllocator("objectAllocator");
// Sanity check.
ASSERTV(0 < numValues);
ASSERTV(8 > numValues);
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if (veryVerbose) {
printf("Default construct an empty set.\n");
}
{
Obj x(&objectAllocator); const Obj& X = x;
ASSERTV(0 == X.size());
ASSERTV(true == X.empty());
ASSERTV(0 == defaultAllocator.numBytesInUse());
ASSERTV(emptyWillAlloc() == (0 != objectAllocator.numBytesInUse()));
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if (veryVerbose) {
printf("Test use of allocators.\n");
}
{
bslma::TestAllocatorMonitor defaultMonitor(&defaultAllocator);
bslma::TestAllocator objectAllocator1("objectAllocator1");
bslma::TestAllocator objectAllocator2("objectAllocator2");
Obj o1(&objectAllocator1); const Obj& O1 = o1;
// ASSERTV(&objectAllocator1 == O1.get_allocator().mechanism());
for (size_t i = 0; i < numValues; ++i) {
o1.push(value_type(testValues[i]));
}
ASSERTV(numValues == O1.size());
ASSERTV(0 < objectAllocator1.numBytesInUse());
ASSERTV(0 == objectAllocator2.numBytesInUse());
Obj o2(O1, &objectAllocator2); const Obj& O2 = o2;
// ASSERTV(&objectAllocator2 == O2.get_allocator().mechanism());
ASSERTV(numValues == O1.size());
ASSERTV(numValues == O2.size());
ASSERTV(0 < objectAllocator1.numBytesInUse());
ASSERTV(0 < objectAllocator2.numBytesInUse());
Obj o3(&objectAllocator1); const Obj& O3 = o3;
// ASSERTV(&objectAllocator1 == O3.get_allocator().mechanism());
bslma::TestAllocatorMonitor monitor1(&objectAllocator1);
ASSERTV(numValues == O1.size());
ASSERTV(numValues == O2.size());
ASSERTV(0 == O3.size());
ASSERTV(monitor1.isInUseSame());
ASSERTV(monitor1.isTotalSame());
ASSERTV(0 < objectAllocator1.numBytesInUse());
ASSERTV(0 < objectAllocator2.numBytesInUse());
o1.swap(o3);
ASSERTV(0 == O1.size());
ASSERTV(numValues == O2.size());
ASSERTV(numValues == O3.size());
ASSERTV(monitor1.isInUseSame());
ASSERTV(monitor1.isTotalSame());
ASSERTV(0 < objectAllocator1.numBytesInUse());
ASSERTV(0 < objectAllocator2.numBytesInUse());
o3.swap(o2);
ASSERTV(0 == O1.size());
ASSERTV(numValues == O2.size());
ASSERTV(numValues == O3.size());
ASSERTV(!monitor1.isInUseUp()); // Memory usage may go down depending
// on implementation
ASSERTV(monitor1.isTotalUp());
ASSERTV(0 < objectAllocator1.numBytesInUse());
ASSERTV(0 < objectAllocator2.numBytesInUse());
// ASSERTV(&objectAllocator1 == O1.get_allocator().mechanism());
// ASSERTV(&objectAllocator2 == O2.get_allocator().mechanism());
// ASSERTV(&objectAllocator1 == O3.get_allocator().mechanism());
ASSERTV(! defaultMonitor.isTotalUp());
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if (veryVerbose) {
printf("Test primary manipulators/accessors on every permutation.\n");
}
native_std::sort(testValues, testValues + numValues);
do {
// For each possible permutation of values, push the values, verify
// the stack's state, then pop the values back off in reverse order.
bslma::TestAllocatorMonitor defaultMonitor(&defaultAllocator);
Obj x(&objectAllocator); const Obj& X = x;
for (size_t i = 0; i < numValues; ++i) {
Obj y(X, &objectAllocator); const Obj& Y = y;
ASSERTV(X == Y);
ASSERTV(!(X != Y));
// Test 'push'.
value_type value(testValues[i]);
x.push(value);
ASSERTV(testValues[i] == x.top());
// Test size, empty.
ASSERTV(i + 1 == X.size());
ASSERTV(false == X.empty());
ASSERTV(X != Y);
ASSERTV(!(X == Y));
y = x;
ASSERTV(X == Y);
ASSERTV(!(X != Y));
}
ASSERTV(X.size() == numValues);
ASSERTV(0 != objectAllocator.numBytesInUse());
ASSERTV(0 == defaultAllocator.numBytesInUse());
for (int i = static_cast<int>(numValues) - 1; i >= 0; --i) {
testValues[i] = (int) X.top();
x.pop();
}
ASSERTV(X.size() == 0);
ASSERTV(! defaultMonitor.isTotalUp());
} while (native_std::next_permutation(testValues,
testValues + numValues));
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if (veryVerbose) {
printf("Test class comparison operators.\n");
}
{
// Iterate over possible selections of elements to add to two
// containers, 'X' and 'Y', then compare the results of the comparison
// operators to the comparison between two containers equivalent to
// the underlying containers in the stack objects.
for (size_t i = 0; i < numValues; ++i) {
for (size_t j = 0; j < numValues; ++j) {
for (size_t length = 0; length < numValues; ++length) {
for (size_t m = 0; m < j; ++m) {
Obj x(&objectAllocator); const Obj& X = x;
Obj y(&objectAllocator); const Obj& Y = y;
CONTAINER xx(&objectAllocator);
const CONTAINER& XX = xx;
CONTAINER yy(&objectAllocator);
const CONTAINER& YY = yy;
for (size_t k = 0; k < j; ++k) {
size_t xIndex = (i + length) % numValues;
size_t yIndex = (j + length) % numValues;
x.push( testValues[xIndex]);
xx.push_back(testValues[xIndex]);
if (k < m) {
y.push( testValues[yIndex]);
yy.push_back(testValues[yIndex]);
}
}
ASSERTV((X == Y) == (XX == YY));
ASSERTV((X != Y) == (XX != YY));
ASSERTV((X < Y) == (XX < YY));
ASSERTV((X > Y) == (XX > YY));
ASSERTV((X <= Y) == (XX <= YY));
ASSERTV((X >= Y) == (XX >= YY));
ASSERTV((X == Y) == !(X != Y));
ASSERTV((X != Y) == !(X == Y));
ASSERTV((X < Y) == !(X >= Y));
ASSERTV((X > Y) == !(X <= Y));
ASSERTV((X <= Y) == !(X > Y));
ASSERTV((X >= Y) == !(X < Y));
ASSERTV((Y == X) == (YY == XX));
ASSERTV((Y != X) == (YY != XX));
ASSERTV((Y < X) == (YY < XX));
ASSERTV((Y > X) == (YY > XX));
ASSERTV((Y <= X) == (YY <= XX));
ASSERTV((Y >= X) == (YY >= XX));
ASSERTV((Y == X) == !(Y != X));
ASSERTV((Y != X) == !(Y == X));
ASSERTV((Y < X) == !(Y >= X));
ASSERTV((Y > X) == !(Y <= X));
ASSERTV((Y <= X) == !(Y > X));
ASSERTV((Y >= X) == !(Y < X));
}
}
}
}
}
}
// ============================================================================
// MAIN PROGRAM
// ----------------------------------------------------------------------------
bool intLessThan(int a, int b)
{
return a < b;
}
int main(int argc, char *argv[])
{
int test = argc > 1 ? atoi(argv[1]) : 0;
verbose = argc > 2;
veryVerbose = argc > 3;
veryVeryVerbose = argc > 4;
veryVeryVeryVerbose = argc > 5;
printf("TEST " __FILE__ " CASE %d\n", test);
bslma::TestAllocator globalAllocator("global", veryVeryVeryVerbose);
bslma::Default::setGlobalAllocator(&globalAllocator);
bslma::TestAllocator defaultAllocator("default", veryVeryVeryVerbose);
ASSERT(0 == bslma::Default::setDefaultAllocator(&defaultAllocator));
switch (test) { case 0:
case 19: {
// --------------------------------------------------------------------
// 'noexcept' SPECIFICATION
// --------------------------------------------------------------------
if (verbose) printf("\n" "'noexcept' SPECIFICATION" "\n"
"------------------------" "\n");
TestDriver<bsl::vector<int> >::testCase19();
} break;
case 18: {
// --------------------------------------------------------------------
// MOVE MANIPULATORS
// --------------------------------------------------------------------
if (verbose) printf("\n" "MOVE MANIPULATORS" "\n"
"-----------------" "\n");
// TestDriver< MovableVector<int> >::testCase18(true);
// TestDriver<NonMovableVector<int> >::testCase18(false);
typedef signed char SC;
typedef size_t SZ;
typedef bsltf::TemplateTestFacility::ObjectPtr TTF_OP;
// typedef bsltf::TemplateTestFacility::MethodPtr TTF_MP;
typedef bsltf::EnumeratedTestType::Enum ETT;
typedef bsltf::SimpleTestType STT;
typedef bsltf::AllocTestType ATT;
typedef bsltf::BitwiseMoveableTestType BMTT;
typedef bsltf::AllocBitwiseMoveableTestType ABMTT;
typedef bsltf::NonTypicalOverloadsTestType NTOTT;
TestDriver< MovableVector< int> >::testCase18(true );
TestDriver<NonMovableVector< int> >::testCase18(false);
TestDriver< MovableVector< SC> >::testCase18(true );
TestDriver<NonMovableVector< SC> >::testCase18(false);
TestDriver< MovableVector< SZ> >::testCase18(true );
TestDriver<NonMovableVector< SZ> >::testCase18(false);
TestDriver< MovableVector<TTF_OP> >::testCase18(true );
TestDriver<NonMovableVector<TTF_OP> >::testCase18(false);
// TestDriver< MovableVector<TTF_MP> >::testCase18(true );
// TestDriver<NonMovableVector<TTF_MP> >::testCase18(false);
TestDriver< MovableVector< ETT> >::testCase18(true );
TestDriver<NonMovableVector< ETT> >::testCase18(false);
TestDriver< MovableVector< STT> >::testCase18(true );
TestDriver<NonMovableVector< STT> >::testCase18(false);
TestDriver< MovableVector< ATT> >::testCase18(true );
TestDriver<NonMovableVector< ATT> >::testCase18(false);
TestDriver< MovableVector< BMTT> >::testCase18(true );
TestDriver<NonMovableVector< BMTT> >::testCase18(false);
TestDriver< MovableVector< ABMTT> >::testCase18(true );
TestDriver<NonMovableVector< ABMTT> >::testCase18(false);
TestDriver< MovableVector< NTOTT> >::testCase18(true );
TestDriver<NonMovableVector< NTOTT> >::testCase18(false);
#ifndef BSLS_PLATFORM_OS_WINDOWS
typedef bsltf::TemplateTestFacility::FunctionPtr TTF_FP;
TestDriver< MovableVector<TTF_FP> >::testCase18(true );
TestDriver<NonMovableVector<TTF_FP> >::testCase18(false);
#endif
#if !BSLS_COMPILERFEATURES_SIMULATE_CPP11_FEATURES
if (verbose) printf("\n" "Move Only Type" "\n"
"--------------" "\n");
// typedef bsltf::MoveOnlyAllocTestType MOATT;
// TestDriver<MOATT, MovableVector<MOATT> >::testCase18MoveOnlyType();
#endif // !BSLS_COMPILERFEATURES_SIMULATE_CPP11_FEATURES
// 'propagate_on_container_move_assignment' testing
// TBD enable this
#if 0
RUN_EACH_TYPE(TestDriver,
testCase18_propagate_on_container_move_assignment,
TEST_TYPES_REGULAR(deque));
RUN_EACH_TYPE(TestDriver,
testCase18_propagate_on_container_move_assignment,
TEST_TYPES_MOVABLE(deque));
#endif
} break;
case 17: {
// --------------------------------------------------------------------
// MOVE CONSTRUCTORS
// --------------------------------------------------------------------
if (verbose) printf("\n" "MOVE CONSTRUCTORS" "\n"
"-----------------" "\n");
// TestDriver< MovableVector<int> >::testCase17(true);
// TestDriver<NonMovableVector<int> >::testCase17(false);
typedef signed char SC;
typedef size_t SZ;
typedef bsltf::TemplateTestFacility::ObjectPtr TTF_OP;
typedef bsltf::TemplateTestFacility::MethodPtr TTF_MP;
typedef bsltf::EnumeratedTestType::Enum ETT;
typedef bsltf::SimpleTestType STT;
typedef bsltf::AllocTestType ATT;
typedef bsltf::BitwiseMoveableTestType BMTT;
typedef bsltf::AllocBitwiseMoveableTestType ABMTT;
typedef bsltf::NonTypicalOverloadsTestType NTOTT;
TestDriver< MovableVector< int> >::testCase17(true );
TestDriver<NonMovableVector< int> >::testCase17(false);
TestDriver< MovableVector< SC> >::testCase17(true );
TestDriver<NonMovableVector< SC> >::testCase17(false);
TestDriver< MovableVector< SZ> >::testCase17(true );
TestDriver<NonMovableVector< SZ> >::testCase17(false);
TestDriver< MovableVector<TTF_OP> >::testCase17(true );
TestDriver<NonMovableVector<TTF_OP> >::testCase17(false);
TestDriver< MovableVector<TTF_MP> >::testCase17(true );
TestDriver<NonMovableVector<TTF_MP> >::testCase17(false);
TestDriver< MovableVector< ETT> >::testCase17(true );
TestDriver<NonMovableVector< ETT> >::testCase17(false);
TestDriver< MovableVector< STT> >::testCase17(true );
TestDriver<NonMovableVector< STT> >::testCase17(false);
TestDriver< MovableVector< ATT> >::testCase17(true );
TestDriver<NonMovableVector< ATT> >::testCase17(false);
TestDriver< MovableVector< BMTT> >::testCase17(true );
TestDriver<NonMovableVector< BMTT> >::testCase17(false);
TestDriver< MovableVector< ABMTT> >::testCase17(true );
TestDriver<NonMovableVector< ABMTT> >::testCase17(false);
TestDriver< MovableVector< NTOTT> >::testCase17(true );
TestDriver<NonMovableVector< NTOTT> >::testCase17(false);
#ifndef BSLS_PLATFORM_OS_WINDOWS
typedef bsltf::TemplateTestFacility::FunctionPtr TTF_FP;
TestDriver< MovableVector<TTF_FP> >::testCase17(true );
TestDriver<NonMovableVector<TTF_FP> >::testCase17(false);
#endif
#if !BSLS_COMPILERFEATURES_SIMULATE_CPP11_FEATURES
if (verbose) printf("\n" "Move Only Type" "\n"
"--------------" "\n");
// typedef bsltf::MoveOnlyAllocTestType MOATT;
// TestDriver< MovableVector<MOATT> >::testCase17MoveOnlyType();
#endif // !BSLS_COMPILERFEATURES_SIMULATE_CPP11_FEATURES
} break;
case 16: {
// --------------------------------------------------------------------
// USAGE EXAMPLE
//
// Concern:
// Demonstrate the use of the stack.
//
// Plan:
// Create the class 'ToDoList', which implements a list of chores to
// be done, using the 'stack' container adapter, and demonstrate the
// use of 'ToDoList'.
// --------------------------------------------------------------------
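// Based on its use below, 'ToDoList' (defined earlier in this file) is
// assumed to provide roughly this interface (sketch only, not the actual
// definition):
//..
//  class ToDoList {
//      bsl::stack<bsl::string> d_stack;   // last request served first
//    public:
//      void enqueueTask(const char *task);
//      bool finishTask();                 // 'true' if list is now empty
//      const char *currentTask() const;   // "<EMPTY>" if none pending
//  };
//..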
// Then, create an object of type 'ToDoList'.
ToDoList toDoList;
// Next, a few tasks are requested:
toDoList.enqueueTask("Change the car's oil.");
toDoList.enqueueTask("Pay the bills.");
// Then, the husband watches the Yankees game on TV. Upon returning,
// he consults the list to see what task is up next:
ASSERT(!strcmp("Pay the bills.", toDoList.currentTask()));
// Next, he sees that he has to pay the bills. When the bills are
// finished, he flushes that task from the list:
ASSERT(false == toDoList.finishTask());
// Then, he consults the list for the next task.
ASSERT(!strcmp("Change the car's oil.", toDoList.currentTask()));
// Next, he sees he has to change the car's oil. Before he can get
// started, another request comes:
toDoList.enqueueTask("Get some hot dogs.");
ASSERT(!strcmp("Get some hot dogs.", toDoList.currentTask()));
// Then, he drives the car to the convenience store and picks up some
// hot dogs and buns. Upon returning home, he gives the hot dogs to
// his wife, updates the list, and consults it for the next task.
ASSERT(false == toDoList.finishTask());
ASSERT(!strcmp("Change the car's oil.", toDoList.currentTask()));
// Next, he finishes the oil change, updates the list, and consults it
// for the next task.
ASSERT(true == toDoList.finishTask());
ASSERT(!strcmp("<EMPTY>", toDoList.currentTask()));
// Finally, the wife has been informed that everything is done, and she
// makes another request:
toDoList.enqueueTask("Clean the rain gutters.");
} break;
case 15: {
// --------------------------------------------------------------------
// TESTING EMPTY, SIZE
//
// Concern:
// That the 'empty()' and 'size()' accessors work according to their
// specifications.
//
// Plan:
// Manipulate a 'stack' object, and observe that 'empty()' and
// 'size()' return the expected values.
// --------------------------------------------------------------------
stack<int> mX; const stack<int>& X = mX;
ASSERT(mX.empty()); ASSERT(X.empty());
ASSERT(0 == mX.size()); ASSERT(0 == X.size());
for (int i = 7; i < 22; ++i) {
mX.push(i);
ASSERT(! mX.empty()); ASSERT(! X.empty());
ASSERT(i - 6 == (int) mX.size()); ASSERT(i - 6 == (int) X.size());
ASSERT(i == X.top());
mX.top() = static_cast<int>(X.size()); // 'top()' returns a reference
// to a modifiable value
ASSERT((int) X.size() == X.top());
}
for (size_t i = X.size(); i > 0; --i, mX.pop()) {
ASSERT(! mX.empty()); ASSERT(! X.empty());
ASSERT(i == X.size());
ASSERT(X.top() == static_cast<int>(i));
}
ASSERT(mX.empty()); ASSERT(X.empty());
ASSERT(0 == mX.size()); ASSERT(0 == X.size());
} break;
case 14: {
// --------------------------------------------------------------------
// TESTING NON ALLOCATOR SUPPORTING TYPE
// --------------------------------------------------------------------
typedef stack<int, NonAllocCont<int> > IStack;
IStack mX; const IStack& X = mX;
ASSERT(X.empty());
mX.push(3);
mX.push(4);
mX.push(5);
ASSERT(! X.empty());
ASSERT(3 == X.size());
ASSERT(5 == X.top());
IStack mY(X); const IStack& Y = mY;
ASSERT(X == Y); ASSERT(!(X != Y));
ASSERT(X <= Y); ASSERT(!(X > Y));
ASSERT(X >= Y); ASSERT(!(X < Y));
mY.pop();
mY.push(6);
ASSERT(X != Y); ASSERT(!(X == Y));
ASSERT(X < Y); ASSERT(!(X >= Y));
ASSERT(X <= Y); ASSERT(!(X > Y));
} break;
case 13: {
// --------------------------------------------------------------------
// TESTING CONTAINER OVERRIDE
// --------------------------------------------------------------------
        // Verify that a stack with no container specified is the same as one
        // with 'deque' explicitly specified.
typedef stack<int> IStack;
typedef stack<int, deque<int> > IDStack;
BSLMF_ASSERT((bsl::is_same<IStack, IDStack>::value));
// Verify that if a container is specified, the first template
// argument is ignored.
typedef stack<void, vector<int> > VIVStack;
typedef stack<double, vector<int> > DIVStack;
BSLMF_ASSERT((bsl::is_same<VIVStack::value_type, int>::value));
BSLMF_ASSERT((bsl::is_same<DIVStack::value_type, int>::value));
VIVStack vivs; const VIVStack& VIVS = vivs;
vivs.push(4); ASSERT(4 == VIVS.top());
vivs.push(7); ASSERT(7 == VIVS.top());
ASSERT(2 == VIVS.size());
ASSERT(!VIVS.empty());
vivs.pop(); ASSERT(4 == VIVS.top());
} break;
case 12: {
// --------------------------------------------------------------------
// TESTING INEQUALITY OPERATORS
// --------------------------------------------------------------------
if (verbose) printf("\nTesting Inequality Operators\n"
"\n============================\n");
if (verbose) printf("deque ---------------------------------------\n");
RUN_EACH_TYPE(TestDriver, testCase12,
TEST_TYPES_INEQUAL_COMPARABLE(deque));
if (verbose) printf("vector --------------------------------------\n");
RUN_EACH_TYPE(TestDriver, testCase12,
TEST_TYPES_INEQUAL_COMPARABLE(vector));
} break;
case 11: {
// --------------------------------------------------------------------
// TESTING TYPE TRAITS
// --------------------------------------------------------------------
if (verbose) printf("\nTesting Type Traits\n"
"\n===================\n");
// Verify the bslma-allocator trait is not defined for non
// bslma-allocators.
typedef bsltf::StdTestAllocator<bsltf::AllocTestType> StlAlloc;
typedef bsltf::AllocTestType ATT;
typedef deque< ATT, StlAlloc> WeirdAllocDeque;
typedef vector<ATT, StlAlloc> WeirdAllocVector;
typedef bsl::stack<ATT, WeirdAllocDeque > WeirdAllocDequeStack;
typedef bsl::stack<ATT, WeirdAllocVector> WeirdAllocVectorStack;
typedef bsl::stack<int, NonAllocCont<int> > NonAllocStack;
if (verbose) printf("NonAllocCont --------------------------------\n");
BSLMF_ASSERT((0 == bslma::UsesBslmaAllocator<
NonAllocCont<int> >::value));
BSLMF_ASSERT((0 == bslma::UsesBslmaAllocator<
NonAllocStack>::value));
TestDriver<NonAllocCont<int> >::testCase11();
if (verbose) printf("deque ---------------------------------------\n");
BSLMF_ASSERT((0 == bslma::UsesBslmaAllocator<
WeirdAllocDeque>::value));
BSLMF_ASSERT((0 == bslma::UsesBslmaAllocator<
WeirdAllocDequeStack>::value));
RUN_EACH_TYPE(TestDriver, testCase11, TEST_TYPES_REGULAR(deque));
if (verbose) printf("vector --------------------------------------\n");
BSLMF_ASSERT((0 == bslma::UsesBslmaAllocator<
WeirdAllocVector>::value));
BSLMF_ASSERT((0 == bslma::UsesBslmaAllocator<
WeirdAllocVectorStack>::value));
RUN_EACH_TYPE(TestDriver, testCase11, TEST_TYPES_REGULAR(vector));
} break;
case 10: {
// --------------------------------------------------------------------
// TESTING STL ALLOCATOR
// --------------------------------------------------------------------
if (verbose) printf("\nTesting STL ALLOCTOR\n"
"\n====================\n");
if (verbose) printf("deque ---------------------------------------\n");
RUN_EACH_TYPE(TestDriver, testCase10,TEST_TYPES_REGULAR(deque));
if (verbose) printf("vector --------------------------------------\n");
RUN_EACH_TYPE(TestDriver, testCase10,TEST_TYPES_REGULAR(vector));
} break;
case 9: {
// --------------------------------------------------------------------
// ASSIGNMENT OPERATOR
// --------------------------------------------------------------------
if (verbose) printf("\nTesting Assignment Operator"
"\n===========================\n");
if (verbose) printf("deque ---------------------------------------\n");
RUN_EACH_TYPE(TestDriver, testCase9, TEST_TYPES_REGULAR(deque));
if (verbose) printf("vector --------------------------------------\n");
RUN_EACH_TYPE(TestDriver, testCase9, TEST_TYPES_REGULAR(vector));
// 'propagate_on_container_copy_assignment' testing
// TBD enable this
#if 0
RUN_EACH_TYPE(TestDriver,
testCase9_propagate_on_container_copy_assignment,
TEST_TYPES_REGULAR(deque));
RUN_EACH_TYPE(TestDriver,
testCase9_propagate_on_container_copy_assignment,
TEST_TYPES_MOVABLE(deque));
#endif
} break;
case 8: {
// --------------------------------------------------------------------
// MANIPULATOR AND FREE FUNCTION 'swap'
// --------------------------------------------------------------------
if (verbose) printf("\nMANIPULATOR AND FREE FUNCTION 'swap'"
"\n====================================\n");
if (verbose) printf("deque ---------------------------------------\n");
RUN_EACH_TYPE(TestDriver, testCase8, TEST_TYPES_REGULAR(deque));
if (verbose) printf("vector --------------------------------------\n");
RUN_EACH_TYPE(TestDriver, testCase8, TEST_TYPES_REGULAR(vector));
// 'propagate_on_container_swap' testing
// TBD enable this
#if 0
RUN_EACH_TYPE(TestDriver,
testCase8_propagate_on_container_swap,
TEST_TYPES_REGULAR(deque));
RUN_EACH_TYPE(TestDriver,
testCase8_propagate_on_container_swap,
TEST_TYPES_MOVABLE(deque));
#endif
} break;
case 7: {
// --------------------------------------------------------------------
// COPY CONSTRUCTOR
// --------------------------------------------------------------------
if (verbose) printf("\nTesting Copy Constructors"
"\n=========================\n");
if (verbose) printf("deque ---------------------------------------\n");
RUN_EACH_TYPE(TestDriver, testCase7, TEST_TYPES_REGULAR(deque));
if (verbose) printf("vector --------------------------------------\n");
RUN_EACH_TYPE(TestDriver, testCase7, TEST_TYPES_REGULAR(vector));
// 'select_on_container_copy_construction' testing
if (verbose) printf("\nCOPY CONSTRUCTOR: ALLOCATOR PROPAGATION"
"\n=======================================\n");
// TBD enable this
#if 0
RUN_EACH_TYPE(TestDriver,
testCase7_select_on_container_copy_construction,
TEST_TYPES_REGULAR(deque));
RUN_EACH_TYPE(TestDriver,
testCase7_select_on_container_copy_construction,
TEST_TYPES_MOVABLE(deque));
#endif
} break;
case 6: {
// --------------------------------------------------------------------
// EQUALITY OPERATORS
// --------------------------------------------------------------------
if (verbose) printf("\nTesting Equality Operators"
"\n==========================\n");
if (verbose) printf("deque ---------------------------------------\n");
RUN_EACH_TYPE(TestDriver, testCase6, TEST_TYPES_REGULAR(deque));
if (verbose) printf("vector --------------------------------------\n");
RUN_EACH_TYPE(TestDriver, testCase6, TEST_TYPES_REGULAR(vector));
} break;
case 5: {
// --------------------------------------------------------------------
// TESTING OUTPUT (<<) OPERATOR
// --------------------------------------------------------------------
if (verbose) printf("\nTesting Output (<<) Operator"
"\n============================\n");
if (verbose)
printf("There is no output operator for this component.\n");
} break;
case 4: {
// --------------------------------------------------------------------
// BASIC ACCESSORS
// --------------------------------------------------------------------
if (verbose) printf("\nTesting Basic Accessors"
"\n=======================\n");
if (verbose) printf("deque ---------------------------------------\n");
RUN_EACH_TYPE(TestDriver, testCase4, TEST_TYPES_REGULAR(deque));
if (verbose) printf("vector --------------------------------------\n");
RUN_EACH_TYPE(TestDriver, testCase4, TEST_TYPES_REGULAR(vector));
} break;
case 3: {
// --------------------------------------------------------------------
// GENERATOR FUNCTIONS 'gg' and 'ggg'
// --------------------------------------------------------------------
if (verbose) printf("\nTesting 'gg'"
"\n============\n");
if (verbose) printf("deque ---------------------------------------\n");
RUN_EACH_TYPE(TestDriver, testCase3, TEST_TYPES_REGULAR(deque));
if (verbose) printf("vector --------------------------------------\n");
RUN_EACH_TYPE(TestDriver, testCase3, TEST_TYPES_REGULAR(vector));
} break;
case 2: {
// --------------------------------------------------------------------
// PRIMARY MANIPULATORS
// --------------------------------------------------------------------
if (verbose) printf("\nTesting C'tors and Primary Manipulators\n"
"=======================================\n");
if (verbose) printf("deque ---------------------------------------\n");
RUN_EACH_TYPE(TestDriver, testCase2, TEST_TYPES_REGULAR(deque));
if (verbose) printf("vector --------------------------------------\n");
RUN_EACH_TYPE(TestDriver, testCase2, TEST_TYPES_REGULAR(vector));
if (verbose) printf("NonAllocCont --------------------------------\n");
// RUN_EACH_TYPE(TestDriver, testCase2, TEST_TYPES_REGULAR(NonAllocCont));
} break;
case 1: {
// --------------------------------------------------------------------
// BREATHING TEST
// This case exercises (but does not fully test) basic functionality.
//
// Concerns:
//: 1 The class is sufficiently functional to enable comprehensive
//: testing in subsequent test cases.
//
// Plan:
//: 1 Run each method with arbitrary inputs and verify the behavior is
//: as expected.
//
// Testing:
// BREATHING TEST
// --------------------------------------------------------------------
if (verbose) printf("\nBREATHING TEST"
"\n==============\n");
{
int INT_VALUES[] = { INT_MIN, -2, -1, 0, 1, 2, INT_MAX };
int NUM_INT_VALUES = sizeof(INT_VALUES) / sizeof(*INT_VALUES);
if (verbose) printf("deque:\n");
TestDriver<bsl::deque<int> >::testCase1(INT_VALUES,
NUM_INT_VALUES);
if (verbose) printf("vector:\n");
TestDriver<bsl::vector<int> >::testCase1(INT_VALUES,
NUM_INT_VALUES);
if (verbose) printf("deque<double>:\n");
TestDriver<bsl::deque<double> >::testCase1(INT_VALUES,
NUM_INT_VALUES);
if (verbose) printf("NonAllocCont<int>:\n");
TestDriver<NonAllocCont<int> >::testCase1_NoAlloc(INT_VALUES,
NUM_INT_VALUES);
#if 0
// add once 'list' is in bslstl, add it
if (verbose) printf("list:\n");
TestDriver<bsl::list<int> >::testCase1(INT_VALUES,
NUM_INT_VALUES);
#endif
}
} break;
default: {
fprintf(stderr, "WARNING: CASE `%d' NOT FOUND.\n", test);
testStatus = -1;
}
}
// CONCERN: In no case does memory come from the global allocator.
ASSERTV(globalAllocator.numBlocksTotal(),
0 == globalAllocator.numBlocksTotal());
if (testStatus > 0) {
fprintf(stderr, "Error, non-zero test status = %d.\n", testStatus);
}
return testStatus;
}
// ----------------------------------------------------------------------------
// Copyright 2013 Bloomberg Finance L.P.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------- END-OF-FILE ----------------------------------
| 37.389124 | 79 | 0.527254 | eddiepierce |
3353ff81d09048c05d9b81e88a697c3030074d58 | 7,859 | cc | C++ | source/common/http/codec_client.cc | spenceral/envoy | 5333b928d8bcffa26ab19bf018369a835f697585 | ["Apache-2.0"] | 1 | 2020-10-27T21:20:41.000Z | 2020-10-27T21:20:41.000Z | source/common/http/codec_client.cc | spenceral/envoy | 5333b928d8bcffa26ab19bf018369a835f697585 | ["Apache-2.0"] | 137 | 2020-11-19T03:32:36.000Z | 2022-03-28T21:14:14.000Z | source/common/http/codec_client.cc | baojr/envoy | fe62d976a26faa46efc590a48a734f11ee6545f9 | ["Apache-2.0"] | 1 | 2021-05-18T14:12:17.000Z | 2021-05-18T14:12:17.000Z |
#include "common/http/codec_client.h"
#include <cstdint>
#include <memory>
#include "envoy/http/codec.h"
#include "common/common/enum_to_int.h"
#include "common/config/utility.h"
#include "common/http/exception.h"
#include "common/http/http1/codec_impl.h"
#include "common/http/http2/codec_impl.h"
#include "common/http/status.h"
#include "common/http/utility.h"
#ifdef ENVOY_ENABLE_QUIC
#include "common/quic/codec_impl.h"
#endif
namespace Envoy {
namespace Http {
CodecClient::CodecClient(Type type, Network::ClientConnectionPtr&& connection,
Upstream::HostDescriptionConstSharedPtr host,
Event::Dispatcher& dispatcher)
: type_(type), host_(host), connection_(std::move(connection)),
idle_timeout_(host_->cluster().idleTimeout()) {
if (type_ != Type::HTTP3) {
// Make sure upstream connections process data and then the FIN, rather than processing
// TCP disconnects immediately. (see https://github.com/envoyproxy/envoy/issues/1679 for
// details)
connection_->detectEarlyCloseWhenReadDisabled(false);
}
connection_->addConnectionCallbacks(*this);
connection_->addReadFilter(Network::ReadFilterSharedPtr{new CodecReadFilter(*this)});
if (idle_timeout_) {
idle_timer_ = dispatcher.createTimer([this]() -> void { onIdleTimeout(); });
enableIdleTimer();
}
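  // Note: the idle timer runs only while no requests are active --
  // newStream() below disables it and deleteRequest() re-arms it once the
  // last active request is destroyed.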
// We just universally set no delay on connections. Theoretically we might at some point want
// to make this configurable.
connection_->noDelay(true);
}
CodecClient::~CodecClient() {
  ASSERT(connect_called_,
         "CodecClient::connect() was never called during the object's lifetime.");
}
void CodecClient::connect() {
connect_called_ = true;
ASSERT(codec_ != nullptr);
// In general, codecs are handed new not-yet-connected connections, but in the
// case of ALPN, the codec may be handed an already connected connection.
if (!connection_->connecting()) {
ASSERT(connection_->state() == Network::Connection::State::Open);
connected_ = true;
} else {
ENVOY_CONN_LOG(debug, "connecting", *connection_);
connection_->connect();
}
}
void CodecClient::close() { connection_->close(Network::ConnectionCloseType::NoFlush); }
void CodecClient::deleteRequest(ActiveRequest& request) {
connection_->dispatcher().deferredDelete(request.removeFromList(active_requests_));
if (codec_client_callbacks_) {
codec_client_callbacks_->onStreamDestroy();
}
if (numActiveRequests() == 0) {
enableIdleTimer();
}
}
RequestEncoder& CodecClient::newStream(ResponseDecoder& response_decoder) {
ActiveRequestPtr request(new ActiveRequest(*this, response_decoder));
request->encoder_ = &codec_->newStream(*request);
request->encoder_->getStream().addCallbacks(*request);
LinkedList::moveIntoList(std::move(request), active_requests_);
disableIdleTimer();
return *active_requests_.front()->encoder_;
}
void CodecClient::onEvent(Network::ConnectionEvent event) {
if (event == Network::ConnectionEvent::Connected) {
ENVOY_CONN_LOG(debug, "connected", *connection_);
connection_->streamInfo().setDownstreamSslConnection(connection_->ssl());
connected_ = true;
}
if (event == Network::ConnectionEvent::RemoteClose) {
remote_closed_ = true;
}
// HTTP/1 can signal end of response by disconnecting. We need to handle that case.
if (type_ == Type::HTTP1 && event == Network::ConnectionEvent::RemoteClose &&
!active_requests_.empty()) {
Buffer::OwnedImpl empty;
onData(empty);
}
if (event == Network::ConnectionEvent::RemoteClose ||
event == Network::ConnectionEvent::LocalClose) {
ENVOY_CONN_LOG(debug, "disconnect. resetting {} pending requests", *connection_,
active_requests_.size());
disableIdleTimer();
idle_timer_.reset();
StreamResetReason reason = StreamResetReason::ConnectionFailure;
if (connected_) {
reason = StreamResetReason::ConnectionTermination;
if (protocol_error_) {
if (Runtime::runtimeFeatureEnabled(
"envoy.reloadable_features.return_502_for_upstream_protocol_errors")) {
reason = StreamResetReason::ProtocolError;
connection_->streamInfo().setResponseFlag(
StreamInfo::ResponseFlag::UpstreamProtocolError);
}
}
}
while (!active_requests_.empty()) {
// Fake resetting all active streams so that reset() callbacks get invoked.
active_requests_.front()->encoder_->getStream().resetStream(reason);
}
}
}
void CodecClient::responsePreDecodeComplete(ActiveRequest& request) {
ENVOY_CONN_LOG(debug, "response complete", *connection_);
if (codec_client_callbacks_) {
codec_client_callbacks_->onStreamPreDecodeComplete();
}
deleteRequest(request);
// HTTP/2 can send us a reset after a complete response if the request was not complete. Users
// of CodecClient will deal with the premature response case and we should not handle any
// further reset notification.
request.encoder_->getStream().removeCallbacks(request);
}
void CodecClient::onReset(ActiveRequest& request, StreamResetReason reason) {
ENVOY_CONN_LOG(debug, "request reset", *connection_);
if (codec_client_callbacks_) {
codec_client_callbacks_->onStreamReset(reason);
}
deleteRequest(request);
}
void CodecClient::onData(Buffer::Instance& data) {
const Status status = codec_->dispatch(data);
if (!status.ok()) {
ENVOY_CONN_LOG(debug, "Error dispatching received data: {}", *connection_, status.message());
// Don't count 408 responses where we have no active requests as protocol errors
if (!isPrematureResponseError(status) ||
(!active_requests_.empty() ||
getPrematureResponseHttpCode(status) != Code::RequestTimeout)) {
host_->cluster().stats().upstream_cx_protocol_error_.inc();
protocol_error_ = true;
}
close();
}
// All data should be consumed at this point if the connection remains open.
ASSERT(data.length() == 0 || connection_->state() != Network::Connection::State::Open,
absl::StrCat("extraneous bytes after response complete: ", data.length()));
}
CodecClientProd::CodecClientProd(Type type, Network::ClientConnectionPtr&& connection,
Upstream::HostDescriptionConstSharedPtr host,
Event::Dispatcher& dispatcher,
Random::RandomGenerator& random_generator)
: CodecClient(type, std::move(connection), host, dispatcher) {
switch (type) {
case Type::HTTP1: {
codec_ = std::make_unique<Http1::ClientConnectionImpl>(
*connection_, host->cluster().http1CodecStats(), *this, host->cluster().http1Settings(),
host->cluster().maxResponseHeadersCount());
break;
}
case Type::HTTP2: {
codec_ = std::make_unique<Http2::ClientConnectionImpl>(
*connection_, *this, host->cluster().http2CodecStats(), random_generator,
host->cluster().http2Options(), Http::DEFAULT_MAX_REQUEST_HEADERS_KB,
host->cluster().maxResponseHeadersCount(), Http2::ProdNghttp2SessionFactory::get());
break;
}
case Type::HTTP3: {
#ifdef ENVOY_ENABLE_QUIC
auto& quic_session = dynamic_cast<Quic::EnvoyQuicClientSession&>(*connection_);
codec_ = std::make_unique<Quic::QuicHttpClientConnectionImpl>(
quic_session, *this, host->cluster().http3CodecStats(), host->cluster().http3Options(),
Http::DEFAULT_MAX_REQUEST_HEADERS_KB, host->cluster().maxResponseHeadersCount());
// Initialize the session after max request header size is changed in above http client
// connection creation.
quic_session.Initialize();
break;
#else
// Should be blocked by configuration checking at an earlier point.
NOT_REACHED_GCOVR_EXCL_LINE;
#endif
}
}
connect();
}
} // namespace Http
} // namespace Envoy
| 37.070755 | 97 | 0.706451 | spenceral |
3354a84bee3d35265cbe663d2405c9cfa1adef30 | 11,427 | hpp | C++ | libraries/boost/include/boost/hana/tuple.hpp | SheldonHH/eosio.cdt | 64448283307b2daebb7db4df947fafd3fc56a83c | ["MIT"] | 918 | 2016-12-22T02:53:08.000Z | 2022-03-22T06:21:35.000Z | libraries/boost/include/boost/hana/tuple.hpp | SheldonHH/eosio.cdt | 64448283307b2daebb7db4df947fafd3fc56a83c | ["MIT"] | 594 | 2018-09-06T02:03:01.000Z | 2022-03-23T19:18:26.000Z | libraries/boost/include/boost/hana/tuple.hpp | SheldonHH/eosio.cdt | 64448283307b2daebb7db4df947fafd3fc56a83c | ["MIT"] | 349 | 2018-09-06T05:02:09.000Z | 2022-03-12T11:07:17.000Z |
/*!
@file
Defines `boost::hana::tuple`.
@copyright Louis Dionne 2013-2017
@copyright Jason Rice 2017
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_HANA_TUPLE_HPP
#define BOOST_HANA_TUPLE_HPP
#include <boost/hana/fwd/tuple.hpp>
#include <boost/hana/basic_tuple.hpp>
#include <boost/hana/bool.hpp>
#include <boost/hana/config.hpp>
#include <boost/hana/detail/decay.hpp>
#include <boost/hana/detail/fast_and.hpp>
#include <boost/hana/detail/index_if.hpp>
#include <boost/hana/detail/intrinsics.hpp>
#include <boost/hana/detail/operators/adl.hpp>
#include <boost/hana/detail/operators/comparable.hpp>
#include <boost/hana/detail/operators/iterable.hpp>
#include <boost/hana/detail/operators/monad.hpp>
#include <boost/hana/detail/operators/orderable.hpp>
#include <boost/hana/fwd/at.hpp>
#include <boost/hana/fwd/core/make.hpp>
#include <boost/hana/fwd/drop_front.hpp>
#include <boost/hana/fwd/index_if.hpp>
#include <boost/hana/fwd/is_empty.hpp>
#include <boost/hana/fwd/length.hpp>
#include <boost/hana/fwd/optional.hpp>
#include <boost/hana/fwd/unpack.hpp>
#include <boost/hana/type.hpp> // required by fwd decl of tuple_t
#include <cstddef>
#include <type_traits>
#include <utility>
BOOST_HANA_NAMESPACE_BEGIN
namespace detail {
template <typename Xs, typename Ys, std::size_t ...n>
constexpr void assign(Xs& xs, Ys&& ys, std::index_sequence<n...>) {
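        // A pack-expansion idiom: each per-index assignment is evaluated as
        // a side effect inside a braced array initializer, which guarantees
        // left-to-right ordering without requiring C++17 fold expressions.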
int sequence[] = {int{}, ((void)(
hana::at_c<n>(xs) = hana::at_c<n>(static_cast<Ys&&>(ys))
), int{})...};
(void)sequence;
}
struct from_index_sequence_t { };
template <typename Tuple, typename ...Yn>
struct is_same_tuple : std::false_type { };
template <typename Tuple>
struct is_same_tuple<typename detail::decay<Tuple>::type, Tuple>
: std::true_type
{ };
template <bool SameTuple, bool SameNumberOfElements, typename Tuple, typename ...Yn>
struct enable_tuple_variadic_ctor;
template <typename ...Xn, typename ...Yn>
struct enable_tuple_variadic_ctor<false, true, hana::tuple<Xn...>, Yn...>
: std::enable_if<
detail::fast_and<BOOST_HANA_TT_IS_CONSTRUCTIBLE(Xn, Yn&&)...>::value
>
{ };
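    // Note: only the <false, true, ...> specialization is ever defined, so
    // the variadic constructor participates in overload resolution only when
    // the arguments are not the tuple's own type and the pack size matches
    // the element count exactly.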
}
//////////////////////////////////////////////////////////////////////////
// tuple
//////////////////////////////////////////////////////////////////////////
template <>
struct tuple<>
: detail::operators::adl<tuple<>>
, detail::iterable_operators<tuple<>>
{
constexpr tuple() { }
using hana_tag = tuple_tag;
};
template <typename ...Xn>
struct tuple
: detail::operators::adl<tuple<Xn...>>
, detail::iterable_operators<tuple<Xn...>>
{
basic_tuple<Xn...> storage_;
using hana_tag = tuple_tag;
private:
template <typename Other, std::size_t ...n>
explicit constexpr tuple(detail::from_index_sequence_t, std::index_sequence<n...>, Other&& other)
: storage_(hana::at_c<n>(static_cast<Other&&>(other))...)
{ }
public:
template <typename ...dummy, typename = typename std::enable_if<
detail::fast_and<BOOST_HANA_TT_IS_CONSTRUCTIBLE(Xn, dummy...)...>::value
>::type>
constexpr tuple()
: storage_()
{ }
template <typename ...dummy, typename = typename std::enable_if<
detail::fast_and<BOOST_HANA_TT_IS_CONSTRUCTIBLE(Xn, Xn const&, dummy...)...>::value
>::type>
constexpr tuple(Xn const& ...xn)
: storage_(xn...)
{ }
template <typename ...Yn, typename = typename detail::enable_tuple_variadic_ctor<
detail::is_same_tuple<tuple, Yn...>::value,
sizeof...(Xn) == sizeof...(Yn), tuple, Yn...
>::type>
constexpr tuple(Yn&& ...yn)
: storage_(static_cast<Yn&&>(yn)...)
{ }
template <typename ...Yn, typename = typename std::enable_if<
detail::fast_and<BOOST_HANA_TT_IS_CONSTRUCTIBLE(Xn, Yn const&)...>::value
>::type>
constexpr tuple(tuple<Yn...> const& other)
: tuple(detail::from_index_sequence_t{},
std::make_index_sequence<sizeof...(Xn)>{},
other.storage_)
{ }
template <typename ...Yn, typename = typename std::enable_if<
detail::fast_and<BOOST_HANA_TT_IS_CONSTRUCTIBLE(Xn, Yn&&)...>::value
>::type>
constexpr tuple(tuple<Yn...>&& other)
: tuple(detail::from_index_sequence_t{},
std::make_index_sequence<sizeof...(Xn)>{},
static_cast<tuple<Yn...>&&>(other).storage_)
{ }
// The three following constructors are required to make sure that
// the tuple(Yn&&...) constructor is _not_ preferred over the copy
// constructor for unary tuples containing a type that is constructible
// from tuple<...>. See test/tuple/cnstr.trap.cpp
template <typename ...dummy, typename = typename std::enable_if<
detail::fast_and<BOOST_HANA_TT_IS_CONSTRUCTIBLE(Xn, Xn const&, dummy...)...>::value
>::type>
constexpr tuple(tuple const& other)
: tuple(detail::from_index_sequence_t{},
std::make_index_sequence<sizeof...(Xn)>{},
other.storage_)
{ }
template <typename ...dummy, typename = typename std::enable_if<
detail::fast_and<BOOST_HANA_TT_IS_CONSTRUCTIBLE(Xn, Xn const&, dummy...)...>::value
>::type>
constexpr tuple(tuple& other)
: tuple(const_cast<tuple const&>(other))
{ }
template <typename ...dummy, typename = typename std::enable_if<
detail::fast_and<BOOST_HANA_TT_IS_CONSTRUCTIBLE(Xn, Xn&&, dummy...)...>::value
>::type>
constexpr tuple(tuple&& other)
: tuple(detail::from_index_sequence_t{},
std::make_index_sequence<sizeof...(Xn)>{},
static_cast<tuple&&>(other).storage_)
{ }
template <typename ...Yn, typename = typename std::enable_if<
detail::fast_and<BOOST_HANA_TT_IS_ASSIGNABLE(Xn&, Yn const&)...>::value
>::type>
constexpr tuple& operator=(tuple<Yn...> const& other) {
detail::assign(this->storage_, other.storage_,
std::make_index_sequence<sizeof...(Xn)>{});
return *this;
}
template <typename ...Yn, typename = typename std::enable_if<
detail::fast_and<BOOST_HANA_TT_IS_ASSIGNABLE(Xn&, Yn&&)...>::value
>::type>
constexpr tuple& operator=(tuple<Yn...>&& other) {
detail::assign(this->storage_, static_cast<tuple<Yn...>&&>(other).storage_,
std::make_index_sequence<sizeof...(Xn)>{});
return *this;
}
};
//////////////////////////////////////////////////////////////////////////
// Operators
//////////////////////////////////////////////////////////////////////////
namespace detail {
template <>
struct comparable_operators<tuple_tag> {
static constexpr bool value = true;
};
template <>
struct orderable_operators<tuple_tag> {
static constexpr bool value = true;
};
template <>
struct monad_operators<tuple_tag> {
static constexpr bool value = true;
};
}
//////////////////////////////////////////////////////////////////////////
// Foldable
//////////////////////////////////////////////////////////////////////////
template <>
struct unpack_impl<tuple_tag> {
template <typename F>
static constexpr decltype(auto) apply(tuple<>&&, F&& f)
{ return static_cast<F&&>(f)(); }
template <typename F>
static constexpr decltype(auto) apply(tuple<>&, F&& f)
{ return static_cast<F&&>(f)(); }
template <typename F>
static constexpr decltype(auto) apply(tuple<> const&, F&& f)
{ return static_cast<F&&>(f)(); }
template <typename Xs, typename F>
static constexpr decltype(auto) apply(Xs&& xs, F&& f) {
return hana::unpack(static_cast<Xs&&>(xs).storage_, static_cast<F&&>(f));
}
};
template <>
struct length_impl<tuple_tag> {
template <typename ...Xs>
static constexpr auto apply(tuple<Xs...> const&)
{ return hana::size_c<sizeof...(Xs)>; }
};
//////////////////////////////////////////////////////////////////////////
// Iterable
//////////////////////////////////////////////////////////////////////////
template <>
struct at_impl<tuple_tag> {
template <typename Xs, typename N>
static constexpr decltype(auto) apply(Xs&& xs, N const&) {
constexpr std::size_t index = N::value;
return hana::at_c<index>(static_cast<Xs&&>(xs).storage_);
}
};
template <>
struct drop_front_impl<tuple_tag> {
template <std::size_t N, typename Xs, std::size_t ...i>
static constexpr auto helper(Xs&& xs, std::index_sequence<i...>) {
return hana::make<tuple_tag>(hana::at_c<i+N>(static_cast<Xs&&>(xs))...);
}
template <typename Xs, typename N>
static constexpr auto apply(Xs&& xs, N const&) {
constexpr std::size_t len = decltype(hana::length(xs))::value;
return helper<N::value>(static_cast<Xs&&>(xs), std::make_index_sequence<
N::value < len ? len - N::value : 0
>{});
}
};
template <>
struct is_empty_impl<tuple_tag> {
template <typename ...Xs>
static constexpr auto apply(tuple<Xs...> const&)
{ return hana::bool_c<sizeof...(Xs) == 0>; }
};
// compile-time optimizations (to reduce the # of function instantiations)
template <std::size_t n, typename ...Xs>
constexpr decltype(auto) at_c(tuple<Xs...> const& xs) {
return hana::at_c<n>(xs.storage_);
}
template <std::size_t n, typename ...Xs>
constexpr decltype(auto) at_c(tuple<Xs...>& xs) {
return hana::at_c<n>(xs.storage_);
}
template <std::size_t n, typename ...Xs>
constexpr decltype(auto) at_c(tuple<Xs...>&& xs) {
return hana::at_c<n>(static_cast<tuple<Xs...>&&>(xs).storage_);
}
template <>
struct index_if_impl<tuple_tag> {
template <typename ...Xs, typename Pred>
static constexpr auto apply(tuple<Xs...> const&, Pred const&)
-> typename detail::index_if<Pred, Xs...>::type
{ return {}; }
};
//////////////////////////////////////////////////////////////////////////
// Sequence
//////////////////////////////////////////////////////////////////////////
template <>
struct Sequence<tuple_tag> {
static constexpr bool value = true;
};
template <>
struct make_impl<tuple_tag> {
template <typename ...Xs>
static constexpr
tuple<typename detail::decay<Xs>::type...> apply(Xs&& ...xs)
{ return {static_cast<Xs&&>(xs)...}; }
};
BOOST_HANA_NAMESPACE_END
#endif // !BOOST_HANA_TUPLE_HPP
| 36.507987 | 105 | 0.548613 | SheldonHH |
3359a6ae3816d07eb1feae546979c1f85c2c3098 | 578 | cpp | C++ | src/RE/BSShader/BSShaderMaterial/BSLightingShaderMaterialLandscape.cpp | fireundubh/CommonLibSSE-po3 | cf30265f3cd3aa70a8eeff4d598754439e983ddd | ["MIT"] | 1 | 2021-08-30T20:33:43.000Z | 2021-08-30T20:33:43.000Z | src/RE/BSShader/BSShaderMaterial/BSLightingShaderMaterialLandscape.cpp | fireundubh/CommonLibSSE-po3 | cf30265f3cd3aa70a8eeff4d598754439e983ddd | ["MIT"] | null | null | null | src/RE/BSShader/BSShaderMaterial/BSLightingShaderMaterialLandscape.cpp | fireundubh/CommonLibSSE-po3 | cf30265f3cd3aa70a8eeff4d598754439e983ddd | ["MIT"] | null | null | null |
#include "RE/BSShader/BSShaderMaterial/BSLightingShaderMaterialBase/BSLightingShaderMaterialLandscape.h"
namespace RE
{
BSLightingShaderMaterialLandscape* BSLightingShaderMaterialLandscape::CreateMaterial()
{
auto material = malloc<BSLightingShaderMaterialLandscape>();
material->ctor();
return material;
}
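	// Note: allocation and construction are split on purpose -- 'ctor' below
	// forwards to the game's own constructor through a relocated address
	// instead of running an ordinary C++ constructor.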
BSLightingShaderMaterialLandscape* BSLightingShaderMaterialLandscape::ctor()
{
using func_t = decltype(&BSLightingShaderMaterialLandscape::ctor);
REL::Relocation<func_t> func{ Offset::BSLightingShaderMaterialLandscape::Ctor };
return func(this);
}
}
| 28.9 | 104 | 0.811419 | fireundubh |
335acd0f7e0fe3cd4c6f95b7aea5810746f8e5b3 | 31,089 | cc | C++ | src/gb/base/flags_test.cc | jpursey/game-bits | 2daefa2cef5601939dbea50a755b8470e38656ae | ["MIT"] | 1 | 2020-07-11T17:03:19.000Z | 2020-07-11T17:03:19.000Z | src/gb/base/flags_test.cc | jpursey/gbits | 4dfedd1297ca368ad1d80a03308fc4da0241f948 | ["MIT"] | 2 | 2021-12-10T13:38:51.000Z | 2022-02-22T16:02:24.000Z | src/gb/base/flags_test.cc | jpursey/game-bits | 2daefa2cef5601939dbea50a755b8470e38656ae | ["MIT"] | null | null | null |
// Copyright (c) 2020 John Pursey
//
// Use of this source code is governed by an MIT-style License that can be found
// in the LICENSE file or at https://opensource.org/licenses/MIT.
#include "gb/base/flags.h"
#include "gtest/gtest.h"
namespace gb {
namespace {
enum BasicEnum {
kBasicEnum_Zero,
kBasicEnum_One,
kBasicEnum_Two,
kBasicEnum_Three,
kBasicEnum_Big = 63,
};
enum SizedEnum : int8_t {
kSizedEnum_Zero,
kSizedEnum_One,
kSizedEnum_Two,
kSizedEnum_Three,
kSizedEnum_Big = 63,
};
enum class ClassEnum : int8_t {
kZero,
kOne,
kTwo,
kThree,
kBig = 63,
};
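// The three enum flavors above exercise Flags<> with an unscoped enum using
// the default underlying type, an unscoped enum with an explicit narrow
// underlying type, and a scoped enum class.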
// Helpers to test parameter passing and implicit conversion.
Flags<BasicEnum> BasicIdentity(Flags<BasicEnum> flags) { return flags; }
Flags<SizedEnum> SizedIdentity(Flags<SizedEnum> flags) { return flags; }
Flags<ClassEnum> ClassIdentity(Flags<ClassEnum> flags) { return flags; }
// Static assert is used to ensure constexpr-ness.
static_assert(Flags<BasicEnum>().IsEmpty(),
"BasicEnum default flags not empty");
static_assert(Flags<BasicEnum>().GetMask() == 0,
"BasicEnum default mask is not zero");
static_assert(Flags<BasicEnum>(1).GetMask() == 1, "BasicEnum 1 is not 1");
static_assert(Flags<BasicEnum>(kBasicEnum_Zero).GetMask() == 1,
"BasicEnum Zero is not 1");
static_assert(Flags<BasicEnum>(kBasicEnum_Big).GetMask() == 1ULL << 63,
"BasicEnum Big is not 1 << 63");
static_assert(Flags<BasicEnum>(kBasicEnum_Zero).IsSet(kBasicEnum_Zero),
"BasicEnum Zero does not have Zero set");
static_assert(!Flags<BasicEnum>(kBasicEnum_Zero).IsSet(kBasicEnum_One),
"BasicEnum Zero has One set");
static_assert(Flags<BasicEnum>({}).GetMask() == 0, "BasicEnum {} is not 0");
static_assert(Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}).GetMask() ==
3,
"BasicEnum {Zero,One} is not 3");
static_assert(
Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}).IsSet(kBasicEnum_One),
"BasicEnum {Zero,One} does not have One set");
static_assert(Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One,
kBasicEnum_Two})
.IsSet({kBasicEnum_Zero, kBasicEnum_Two}),
"BasicEnum {Zero,One,Two} does not have {Zero,Two} set");
static_assert(!Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One})
.IsSet({kBasicEnum_Zero, kBasicEnum_Two}),
"BasicEnum {Zero,One} does have {Zero,Two} set");
static_assert(Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One})
.Intersects({kBasicEnum_Zero, kBasicEnum_Two}),
"BasicEnum {Zero,One} does not intersect {Zero,Two}");
static_assert(!Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One})
.Intersects({kBasicEnum_Two, kBasicEnum_Three}),
"BasicEnum {Zero,One} intersects {Two, Three}");
static_assert(Flags<BasicEnum>(kBasicEnum_Zero) ==
Flags<BasicEnum>(kBasicEnum_Zero),
"BasicEnum Zero is not equal to Zero");
static_assert(!(Flags<BasicEnum>(kBasicEnum_Zero) ==
Flags<BasicEnum>(kBasicEnum_One)),
"BasicEnum Zero is equal to One");
static_assert(Flags<BasicEnum>(kBasicEnum_Zero) !=
Flags<BasicEnum>(kBasicEnum_One),
"BasicEnum Zero is equal to One");
static_assert(!(Flags<BasicEnum>(kBasicEnum_Zero) !=
Flags<BasicEnum>(kBasicEnum_Zero)),
"BasicEnum Zero is not equal to Zero");
static_assert(Flags<BasicEnum>(kBasicEnum_Zero) <
Flags<BasicEnum>(kBasicEnum_One),
"BasicEnum Zero is not less than One");
static_assert(!(Flags<BasicEnum>(kBasicEnum_Zero) <
Flags<BasicEnum>(kBasicEnum_Zero)),
"BasicEnum Zero is less than Zero");
static_assert(Flags<BasicEnum>(kBasicEnum_Zero) <=
Flags<BasicEnum>(kBasicEnum_One),
"BasicEnum Zero is not less or equal to One");
static_assert(Flags<BasicEnum>(kBasicEnum_Zero) <=
Flags<BasicEnum>(kBasicEnum_Zero),
"BasicEnum Zero is not less or equal to Zero");
static_assert(!(Flags<BasicEnum>(kBasicEnum_One) <=
Flags<BasicEnum>(kBasicEnum_Zero)),
"BasicEnum One is less or equal to Zero");
static_assert(Flags<BasicEnum>(kBasicEnum_One) >
Flags<BasicEnum>(kBasicEnum_Zero),
"BasicEnum One is not greater than Zero");
static_assert(!(Flags<BasicEnum>(kBasicEnum_One) >
Flags<BasicEnum>(kBasicEnum_One)),
"BasicEnum One is greater than One");
static_assert(Flags<BasicEnum>(kBasicEnum_One) >=
Flags<BasicEnum>(kBasicEnum_Zero),
"BasicEnum One is not greater or equal to Zero");
static_assert(Flags<BasicEnum>(kBasicEnum_One) >=
Flags<BasicEnum>(kBasicEnum_One),
"BasicEnum One is not greater or equal to One");
static_assert(!(Flags<BasicEnum>(kBasicEnum_Zero) >=
Flags<BasicEnum>(kBasicEnum_One)),
"BasicEnum Zero is greater or equal to One");
static_assert(Flags<BasicEnum>(kBasicEnum_Zero) + kBasicEnum_One ==
Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}),
"BasicEnum Zero + One is not equal to {Zero, One}");
static_assert(kBasicEnum_Zero + Flags<BasicEnum>(kBasicEnum_One) ==
Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}),
"BasicEnum Zero + One is not equal to {Zero, One}");
static_assert(
Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}) +
Flags<BasicEnum>({kBasicEnum_One, kBasicEnum_Two}) ==
Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One, kBasicEnum_Two}),
"BasicEnum {Zero,One} + {One,Two} is not equal to {Zero,One,Two}");
static_assert(Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}) -
Flags<BasicEnum>(kBasicEnum_Zero) ==
Flags<BasicEnum>(kBasicEnum_One),
"BasicEnum {Zero,One} - Zero is not equal to One");
static_assert(Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}) -
kBasicEnum_Zero ==
Flags<BasicEnum>(kBasicEnum_One),
"BasicEnum {Zero,One} - Zero is not equal to One");
static_assert(Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}) -
Flags<BasicEnum>(kBasicEnum_Two) ==
Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}),
"BasicEnum {Zero,One} - Two is not equal to {Zero,One}");
static_assert(Union(kBasicEnum_Zero, Flags<BasicEnum>(kBasicEnum_One)) ==
Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}),
"BasicEnum Zero union One is not equal to {Zero, One}");
static_assert(
Union(Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}),
Flags<BasicEnum>({kBasicEnum_One, kBasicEnum_Two})) ==
Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One, kBasicEnum_Two}),
"BasicEnum {Zero,One} union {One,Two} is not equal to {Zero,One,Two}");
static_assert(Intersect(Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}),
Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_Two})) ==
Flags<BasicEnum>(kBasicEnum_Zero),
"BasicEnum {Zero,One} intersect {Zero,Two} is not equal to Zero");
static_assert(Intersect(Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}),
Flags<BasicEnum>(kBasicEnum_Two))
.IsEmpty(),
"BasicEnum {Zero,One} intersect Two is not empty");
static_assert(Flags<BasicEnum>({kBasicEnum_One, kBasicEnum_Two}) ==
Flags<BasicEnum>(kBasicEnum_One, kBasicEnum_Two),
"BasicEnum {One,Two} is not equal to (One,Two)");
static_assert(
Flags<BasicEnum>({kBasicEnum_One, kBasicEnum_Two, kBasicEnum_Three}) ==
Flags<BasicEnum>(kBasicEnum_One, kBasicEnum_Two, kBasicEnum_Three),
"BasicEnum {One,Two,Three} is not equal to (One,Two,Three)");
static_assert(
Flags<BasicEnum>({Flags<BasicEnum>{kBasicEnum_Zero, kBasicEnum_One},
kBasicEnum_Two,
Flags<BasicEnum>{kBasicEnum_Three, kBasicEnum_Big}}) ==
Flags<BasicEnum>(Flags<BasicEnum>{kBasicEnum_Zero, kBasicEnum_One},
kBasicEnum_Two,
Flags<BasicEnum>{kBasicEnum_Three, kBasicEnum_Big}),
"BasicEnum {{Zero,One},Two,{Three,Big}} is not equal to "
"({Zero,One},Two,{Three,Big})");
static_assert(Flags<SizedEnum>().IsEmpty(),
"SizedEnum default flags not empty");
static_assert(Flags<SizedEnum>().GetMask() == 0,
"SizedEnum default mask is not zero");
static_assert(Flags<SizedEnum>(1).GetMask() == 1, "SizedEnum 1 is not 1");
static_assert(Flags<SizedEnum>(kSizedEnum_Zero).GetMask() == 1,
"SizedEnum Zero is not 1");
static_assert(Flags<SizedEnum>(kSizedEnum_Big).GetMask() == 1ULL << 63,
"SizedEnum Big is not 1 << 63");
static_assert(Flags<SizedEnum>(kSizedEnum_Zero).IsSet(kSizedEnum_Zero),
"SizedEnum Zero does not have Zero set");
static_assert(!Flags<SizedEnum>(kSizedEnum_Zero).IsSet(kSizedEnum_One),
"SizedEnum Zero has One set");
static_assert(Flags<SizedEnum>({}).GetMask() == 0, "SizedEnum {} is not 0");
static_assert(Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}).GetMask() ==
3,
"SizedEnum {Zero,One} is not 3");
static_assert(
Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}).IsSet(kSizedEnum_One),
"SizedEnum {Zero,One} does not have One set");
static_assert(Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One,
kSizedEnum_Two})
.IsSet({kSizedEnum_Zero, kSizedEnum_Two}),
"SizedEnum {Zero,One,Two} does not have {Zero,Two} set");
static_assert(!Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One})
.IsSet({kSizedEnum_Zero, kSizedEnum_Two}),
"SizedEnum {Zero,One} does have {Zero,Two} set");
static_assert(Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One})
.Intersects({kSizedEnum_Zero, kSizedEnum_Two}),
"SizedEnum {Zero,One} does not intersect {Zero,Two}");
static_assert(!Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One})
.Intersects({kSizedEnum_Two, kSizedEnum_Three}),
"SizedEnum {Zero,One} intersects {Two, Three}");
static_assert(Flags<SizedEnum>(kSizedEnum_Zero) ==
Flags<SizedEnum>(kSizedEnum_Zero),
"SizedEnum Zero is not equal to Zero");
static_assert(!(Flags<SizedEnum>(kSizedEnum_Zero) ==
Flags<SizedEnum>(kSizedEnum_One)),
"SizedEnum Zero is equal to One");
static_assert(Flags<SizedEnum>(kSizedEnum_Zero) !=
Flags<SizedEnum>(kSizedEnum_One),
"SizedEnum Zero is equal to One");
static_assert(!(Flags<SizedEnum>(kSizedEnum_Zero) !=
Flags<SizedEnum>(kSizedEnum_Zero)),
"SizedEnum Zero is not equal to Zero");
static_assert(Flags<SizedEnum>(kSizedEnum_Zero) <
Flags<SizedEnum>(kSizedEnum_One),
"SizedEnum Zero is not less than One");
static_assert(!(Flags<SizedEnum>(kSizedEnum_Zero) <
Flags<SizedEnum>(kSizedEnum_Zero)),
"SizedEnum Zero is less than Zero");
static_assert(Flags<SizedEnum>(kSizedEnum_Zero) <=
Flags<SizedEnum>(kSizedEnum_One),
"SizedEnum Zero is not less or equal to One");
static_assert(Flags<SizedEnum>(kSizedEnum_Zero) <=
Flags<SizedEnum>(kSizedEnum_Zero),
"SizedEnum Zero is not less or equal to Zero");
static_assert(!(Flags<SizedEnum>(kSizedEnum_One) <=
Flags<SizedEnum>(kSizedEnum_Zero)),
"SizedEnum One is less or equal to Zero");
static_assert(Flags<SizedEnum>(kSizedEnum_One) >
Flags<SizedEnum>(kSizedEnum_Zero),
"SizedEnum One is not greater than Zero");
static_assert(!(Flags<SizedEnum>(kSizedEnum_One) >
Flags<SizedEnum>(kSizedEnum_One)),
"SizedEnum One is greater than One");
static_assert(Flags<SizedEnum>(kSizedEnum_One) >=
Flags<SizedEnum>(kSizedEnum_Zero),
"SizedEnum One is not greater or equal to Zero");
static_assert(Flags<SizedEnum>(kSizedEnum_One) >=
Flags<SizedEnum>(kSizedEnum_One),
"SizedEnum One is not greater or equal to One");
static_assert(!(Flags<SizedEnum>(kSizedEnum_Zero) >=
Flags<SizedEnum>(kSizedEnum_One)),
"SizedEnum Zero is greater or equal to One");
static_assert(Flags<SizedEnum>(kSizedEnum_Zero) + kSizedEnum_One ==
Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}),
"SizedEnum Zero + One is not equal to {Zero, One}");
static_assert(kSizedEnum_Zero + Flags<SizedEnum>(kSizedEnum_One) ==
Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}),
"SizedEnum Zero + One is not equal to {Zero, One}");
static_assert(
Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}) +
Flags<SizedEnum>({kSizedEnum_One, kSizedEnum_Two}) ==
Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One, kSizedEnum_Two}),
"SizedEnum {Zero,One} + {One,Two} is not equal to {Zero,One,Two}");
static_assert(Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}) -
Flags<SizedEnum>(kSizedEnum_Zero) ==
Flags<SizedEnum>(kSizedEnum_One),
"SizedEnum {Zero,One} - Zero is not equal to One");
static_assert(Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}) -
kSizedEnum_Zero ==
Flags<SizedEnum>(kSizedEnum_One),
"SizedEnum {Zero,One} - Zero is not equal to One");
static_assert(Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}) -
Flags<SizedEnum>(kSizedEnum_Two) ==
Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}),
"SizedEnum {Zero,One} - Two is not equal to {Zero,One}");
static_assert(Union(kSizedEnum_Zero, Flags<SizedEnum>(kSizedEnum_One)) ==
Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}),
"SizedEnum Zero union One is not equal to {Zero, One}");
static_assert(
Union(Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}),
Flags<SizedEnum>({kSizedEnum_One, kSizedEnum_Two})) ==
Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One, kSizedEnum_Two}),
"SizedEnum {Zero,One} union {One,Two} is not equal to {Zero,One,Two}");
static_assert(Intersect(Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}),
Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_Two})) ==
Flags<SizedEnum>(kSizedEnum_Zero),
"SizedEnum {Zero,One} intersect {Zero,Two} is not equal to Zero");
static_assert(Intersect(Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}),
Flags<SizedEnum>(kSizedEnum_Two))
.IsEmpty(),
"SizedEnum {Zero,One} intersect Two is not empty");
static_assert(Flags<SizedEnum>({kSizedEnum_One, kSizedEnum_Two}) ==
Flags<SizedEnum>(kSizedEnum_One, kSizedEnum_Two),
"SizedEnum {One,Two} is not equal to (One,Two)");
static_assert(
Flags<SizedEnum>({kSizedEnum_One, kSizedEnum_Two, kSizedEnum_Three}) ==
Flags<SizedEnum>(kSizedEnum_One, kSizedEnum_Two, kSizedEnum_Three),
"SizedEnum {One,Two,Three} is not equal to (One,Two,Three)");
static_assert(
Flags<SizedEnum>({Flags<SizedEnum>{kSizedEnum_Zero, kSizedEnum_One},
kSizedEnum_Two,
Flags<SizedEnum>{kSizedEnum_Three, kSizedEnum_Big}}) ==
Flags<SizedEnum>(Flags<SizedEnum>{kSizedEnum_Zero, kSizedEnum_One},
kSizedEnum_Two,
Flags<SizedEnum>{kSizedEnum_Three, kSizedEnum_Big}),
"SizedEnum {{Zero,One},Two,{Three,Big}} is not equal to "
"({Zero,One},Two,{Three,Big})");
static_assert(Flags<ClassEnum>().IsEmpty(),
"ClassEnum default flags not empty");
static_assert(Flags<ClassEnum>().GetMask() == 0,
"ClassEnum default mask is not zero");
static_assert(Flags<ClassEnum>(1).GetMask() == 1, "ClassEnum 1 is not 1");
static_assert(Flags<ClassEnum>(ClassEnum::kZero).GetMask() == 1,
"ClassEnum Zero is not 1");
static_assert(Flags<ClassEnum>(ClassEnum::kBig).GetMask() == 1ULL << 63,
"ClassEnum Big is not 1 << 63");
static_assert(Flags<ClassEnum>(ClassEnum::kZero).IsSet(ClassEnum::kZero),
"ClassEnum Zero does not have Zero set");
static_assert(!Flags<ClassEnum>(ClassEnum::kZero).IsSet(ClassEnum::kOne),
"ClassEnum Zero has One set");
static_assert(Flags<ClassEnum>({}).GetMask() == 0, "ClassEnum {} is not 0");
static_assert(Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}).GetMask() ==
3,
"ClassEnum {Zero,One} is not 3");
static_assert(Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne})
.IsSet(ClassEnum::kOne),
"ClassEnum {Zero,One} does not have One set");
static_assert(Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne,
ClassEnum::kTwo})
.IsSet({ClassEnum::kZero, ClassEnum::kTwo}),
"ClassEnum {Zero,One,Two} does not have {Zero,Two} set");
static_assert(!Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne})
.IsSet({ClassEnum::kZero, ClassEnum::kTwo}),
"ClassEnum {Zero,One} does have {Zero,Two} set");
static_assert(Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne})
.Intersects({ClassEnum::kZero, ClassEnum::kTwo}),
"ClassEnum {Zero,One} does not intersect {Zero,Two}");
static_assert(!Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne})
.Intersects({ClassEnum::kTwo, ClassEnum::kThree}),
"ClassEnum {Zero,One} intersects {Two, Three}");
static_assert(Flags<ClassEnum>(ClassEnum::kZero) ==
Flags<ClassEnum>(ClassEnum::kZero),
"ClassEnum Zero is not equal to Zero");
static_assert(!(Flags<ClassEnum>(ClassEnum::kZero) ==
Flags<ClassEnum>(ClassEnum::kOne)),
"ClassEnum Zero is equal to One");
static_assert(Flags<ClassEnum>(ClassEnum::kZero) == ClassEnum::kZero,
"ClassEnum Zero is not equal to Zero");
static_assert(Flags<ClassEnum>(ClassEnum::kZero) !=
Flags<ClassEnum>(ClassEnum::kOne),
"ClassEnum Zero is equal to One");
static_assert(!(Flags<ClassEnum>(ClassEnum::kZero) !=
Flags<ClassEnum>(ClassEnum::kZero)),
"ClassEnum Zero is not equal to Zero");
static_assert(Flags<ClassEnum>(ClassEnum::kZero) != ClassEnum::kOne,
"ClassEnum Zero is equal to One");
static_assert(Flags<ClassEnum>(ClassEnum::kZero) <
Flags<ClassEnum>(ClassEnum::kOne),
"ClassEnum Zero is not less than One");
static_assert(!(Flags<ClassEnum>(ClassEnum::kZero) <
Flags<ClassEnum>(ClassEnum::kZero)),
"ClassEnum Zero is less than Zero");
static_assert(Flags<ClassEnum>(ClassEnum::kZero) <=
Flags<ClassEnum>(ClassEnum::kOne),
"ClassEnum Zero is not less or equal to One");
static_assert(Flags<ClassEnum>(ClassEnum::kZero) <=
Flags<ClassEnum>(ClassEnum::kZero),
"ClassEnum Zero is not less or equal to Zero");
static_assert(!(Flags<ClassEnum>(ClassEnum::kOne) <=
Flags<ClassEnum>(ClassEnum::kZero)),
"ClassEnum One is less or equal to Zero");
static_assert(Flags<ClassEnum>(ClassEnum::kOne) >
Flags<ClassEnum>(ClassEnum::kZero),
"ClassEnum One is not greater than Zero");
static_assert(!(Flags<ClassEnum>(ClassEnum::kOne) >
Flags<ClassEnum>(ClassEnum::kOne)),
"ClassEnum One is greater than One");
static_assert(Flags<ClassEnum>(ClassEnum::kOne) >=
Flags<ClassEnum>(ClassEnum::kZero),
"ClassEnum One is not greater or equal to Zero");
static_assert(Flags<ClassEnum>(ClassEnum::kOne) >=
Flags<ClassEnum>(ClassEnum::kOne),
"ClassEnum One is not greater or equal to One");
static_assert(!(Flags<ClassEnum>(ClassEnum::kZero) >=
Flags<ClassEnum>(ClassEnum::kOne)),
"ClassEnum Zero is greater or equal to One");
static_assert(Flags<ClassEnum>(ClassEnum::kZero) + ClassEnum::kOne ==
Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}),
"ClassEnum Zero + One is not equal to {Zero, One}");
static_assert(ClassEnum::kZero + Flags<ClassEnum>(ClassEnum::kOne) ==
Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}),
"ClassEnum Zero + One is not equal to {Zero, One}");
static_assert(
Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}) +
Flags<ClassEnum>({ClassEnum::kOne, ClassEnum::kTwo}) ==
Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne, ClassEnum::kTwo}),
"ClassEnum {Zero,One} + {One,Two} is not equal to {Zero,One,Two}");
static_assert(Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}) -
Flags<ClassEnum>(ClassEnum::kZero) ==
Flags<ClassEnum>(ClassEnum::kOne),
"ClassEnum {Zero,One} - Zero is not equal to One");
static_assert(Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}) -
ClassEnum::kZero ==
Flags<ClassEnum>(ClassEnum::kOne),
"ClassEnum {Zero,One} - Zero is not equal to One");
static_assert(Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}) -
Flags<ClassEnum>(ClassEnum::kTwo) ==
Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}),
"ClassEnum {Zero,One} - Two is not equal to {Zero,One}");
static_assert(Union(Flags<ClassEnum>(ClassEnum::kZero),
Flags<ClassEnum>(ClassEnum::kOne)) ==
Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}),
"ClassEnum Zero union One is not equal to {Zero, One}");
static_assert(
Union(Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}),
Flags<ClassEnum>({ClassEnum::kOne, ClassEnum::kTwo})) ==
Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne, ClassEnum::kTwo}),
"ClassEnum {Zero,One} union {One,Two} is not equal to {Zero,One,Two}");
static_assert(Intersect(Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}),
Flags<ClassEnum>({ClassEnum::kZero,
ClassEnum::kTwo})) ==
Flags<ClassEnum>(ClassEnum::kZero),
"ClassEnum {Zero,One} intersect {Zero,Two} is not equal to Zero");
static_assert(Intersect(Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}),
Flags<ClassEnum>(ClassEnum::kTwo))
.IsEmpty(),
"ClassEnum {Zero,One} intersect Two is not empty");
static_assert(Flags<ClassEnum>({ClassEnum::kOne, ClassEnum::kTwo}) ==
Flags<ClassEnum>(ClassEnum::kOne, ClassEnum::kTwo),
"ClassEnum {One,Two} is not equal to (One,Two)");
static_assert(
Flags<ClassEnum>({ClassEnum::kOne, ClassEnum::kTwo, ClassEnum::kThree}) ==
Flags<ClassEnum>(ClassEnum::kOne, ClassEnum::kTwo, ClassEnum::kThree),
"ClassEnum {One,Two,Three} is not equal to (One,Two,Three)");
static_assert(
Flags<ClassEnum>({Flags<ClassEnum>{ClassEnum::kZero, ClassEnum::kOne},
ClassEnum::kTwo,
Flags<ClassEnum>{ClassEnum::kThree, ClassEnum::kBig}}) ==
Flags<ClassEnum>(Flags<ClassEnum>{ClassEnum::kZero, ClassEnum::kOne},
ClassEnum::kTwo,
Flags<ClassEnum>{ClassEnum::kThree, ClassEnum::kBig}),
"ClassEnum {{Zero,One},Two,{Three,Big}} is not equal to "
"({Zero,One},Two,{Three,Big})");
TEST(FlagsTest, BasicImplicitParameterConversions) {
EXPECT_EQ(BasicIdentity({}), Flags<BasicEnum>({}));
EXPECT_EQ(BasicIdentity(kBasicEnum_One), Flags<BasicEnum>(kBasicEnum_One));
EXPECT_EQ(BasicIdentity({kBasicEnum_One, kBasicEnum_Two}),
Flags<BasicEnum>(kBasicEnum_One, kBasicEnum_Two));
}
TEST(FlagsTest, BasicSet) {
Flags<BasicEnum> flags;
flags.Set(kBasicEnum_Zero);
EXPECT_EQ(flags, Flags<BasicEnum>(kBasicEnum_Zero));
flags.Set({kBasicEnum_One, kBasicEnum_Two});
EXPECT_EQ(flags,
Flags<BasicEnum>(kBasicEnum_Zero, kBasicEnum_One, kBasicEnum_Two));
flags.Set({kBasicEnum_One, kBasicEnum_Three});
EXPECT_EQ(flags, Flags<BasicEnum>(kBasicEnum_Zero, kBasicEnum_One,
kBasicEnum_Two, kBasicEnum_Three));
}
TEST(FlagsTest, BasicClear) {
Flags<BasicEnum> flags(kBasicEnum_Zero);
flags.Clear();
EXPECT_TRUE(flags.IsEmpty());
flags.Set({kBasicEnum_One, kBasicEnum_Two});
flags.Clear(kBasicEnum_One);
EXPECT_EQ(flags, kBasicEnum_Two);
}
TEST(FlagsTest, BasicAssign) {
Flags<BasicEnum> flags;
flags = kBasicEnum_Zero;
EXPECT_EQ(flags, Flags<BasicEnum>(kBasicEnum_Zero));
flags = {kBasicEnum_One, kBasicEnum_Two};
EXPECT_EQ(flags, Flags<BasicEnum>(kBasicEnum_One, kBasicEnum_Two));
}
TEST(FlagsTest, BasicAddAssign) {
Flags<BasicEnum> flags;
flags += kBasicEnum_Zero;
EXPECT_EQ(flags, Flags<BasicEnum>(kBasicEnum_Zero));
flags += {kBasicEnum_One, kBasicEnum_Two};
EXPECT_EQ(flags,
Flags<BasicEnum>(kBasicEnum_Zero, kBasicEnum_One, kBasicEnum_Two));
flags += {kBasicEnum_One, kBasicEnum_Three};
EXPECT_EQ(flags, Flags<BasicEnum>(kBasicEnum_Zero, kBasicEnum_One,
kBasicEnum_Two, kBasicEnum_Three));
flags += {};
EXPECT_EQ(flags, Flags<BasicEnum>(kBasicEnum_Zero, kBasicEnum_One,
kBasicEnum_Two, kBasicEnum_Three));
}
TEST(FlagsTest, BasicSubAssign) {
Flags<BasicEnum> flags(kBasicEnum_Zero);
flags -= kBasicEnum_Zero;
EXPECT_TRUE(flags.IsEmpty());
flags.Set({kBasicEnum_One, kBasicEnum_Two, kBasicEnum_Three});
flags -= {kBasicEnum_One, kBasicEnum_Three};
EXPECT_EQ(flags, kBasicEnum_Two);
flags -= {};
EXPECT_EQ(flags, kBasicEnum_Two);
}
TEST(FlagsTest, SizedImplicitParameterConversions) {
EXPECT_EQ(SizedIdentity({}), Flags<SizedEnum>({}));
EXPECT_EQ(SizedIdentity(kSizedEnum_One), Flags<SizedEnum>(kSizedEnum_One));
EXPECT_EQ(SizedIdentity({kSizedEnum_One, kSizedEnum_Two}),
Flags<SizedEnum>(kSizedEnum_One, kSizedEnum_Two));
}
TEST(FlagsTest, SizedSet) {
Flags<SizedEnum> flags;
flags.Set(kSizedEnum_Zero);
EXPECT_EQ(flags, Flags<SizedEnum>(kSizedEnum_Zero));
flags.Set({kSizedEnum_One, kSizedEnum_Two});
EXPECT_EQ(flags,
Flags<SizedEnum>(kSizedEnum_Zero, kSizedEnum_One, kSizedEnum_Two));
flags.Set({kSizedEnum_One, kSizedEnum_Three});
EXPECT_EQ(flags, Flags<SizedEnum>(kSizedEnum_Zero, kSizedEnum_One,
kSizedEnum_Two, kSizedEnum_Three));
}
TEST(FlagsTest, SizedClear) {
Flags<SizedEnum> flags(kSizedEnum_Zero);
flags.Clear();
EXPECT_TRUE(flags.IsEmpty());
flags.Set({kSizedEnum_One, kSizedEnum_Two});
flags.Clear(kSizedEnum_One);
EXPECT_EQ(flags, kSizedEnum_Two);
}
TEST(FlagsTest, SizedAssign) {
Flags<SizedEnum> flags;
flags = kSizedEnum_Zero;
EXPECT_EQ(flags, Flags<SizedEnum>(kSizedEnum_Zero));
flags = {kSizedEnum_One, kSizedEnum_Two};
EXPECT_EQ(flags, Flags<SizedEnum>(kSizedEnum_One, kSizedEnum_Two));
}
TEST(FlagsTest, SizedAddAssign) {
Flags<SizedEnum> flags;
flags += kSizedEnum_Zero;
EXPECT_EQ(flags, Flags<SizedEnum>(kSizedEnum_Zero));
flags += {kSizedEnum_One, kSizedEnum_Two};
EXPECT_EQ(flags,
Flags<SizedEnum>(kSizedEnum_Zero, kSizedEnum_One, kSizedEnum_Two));
flags += {kSizedEnum_One, kSizedEnum_Three};
EXPECT_EQ(flags, Flags<SizedEnum>(kSizedEnum_Zero, kSizedEnum_One,
kSizedEnum_Two, kSizedEnum_Three));
flags += {};
EXPECT_EQ(flags, Flags<SizedEnum>(kSizedEnum_Zero, kSizedEnum_One,
kSizedEnum_Two, kSizedEnum_Three));
}
TEST(FlagsTest, SizedSubAssign) {
Flags<SizedEnum> flags(kSizedEnum_Zero);
flags -= kSizedEnum_Zero;
EXPECT_TRUE(flags.IsEmpty());
flags.Set({kSizedEnum_One, kSizedEnum_Two, kSizedEnum_Three});
flags -= {kSizedEnum_One, kSizedEnum_Three};
EXPECT_EQ(flags, kSizedEnum_Two);
flags -= {};
EXPECT_EQ(flags, kSizedEnum_Two);
}
TEST(FlagsTest, ClassImplicitParameterConversions) {
EXPECT_EQ(ClassIdentity({}), Flags<ClassEnum>({}));
EXPECT_EQ(ClassIdentity(ClassEnum::kOne), Flags<ClassEnum>(ClassEnum::kOne));
EXPECT_EQ(ClassIdentity({ClassEnum::kOne, ClassEnum::kTwo}),
Flags<ClassEnum>(ClassEnum::kOne, ClassEnum::kTwo));
}
TEST(FlagsTest, ClassSet) {
Flags<ClassEnum> flags;
flags.Set(ClassEnum::kZero);
EXPECT_EQ(flags, Flags<ClassEnum>(ClassEnum::kZero));
flags.Set({ClassEnum::kOne, ClassEnum::kTwo});
EXPECT_EQ(flags, Flags<ClassEnum>(ClassEnum::kZero, ClassEnum::kOne,
ClassEnum::kTwo));
flags.Set({ClassEnum::kOne, ClassEnum::kThree});
EXPECT_EQ(flags, Flags<ClassEnum>(ClassEnum::kZero, ClassEnum::kOne,
ClassEnum::kTwo, ClassEnum::kThree));
}
TEST(FlagsTest, ClassClear) {
Flags<ClassEnum> flags(ClassEnum::kZero);
flags.Clear();
EXPECT_TRUE(flags.IsEmpty());
flags.Set({ClassEnum::kOne, ClassEnum::kTwo});
flags.Clear(ClassEnum::kOne);
EXPECT_EQ(flags, ClassEnum::kTwo);
}
TEST(FlagsTest, ClassAssign) {
Flags<ClassEnum> flags;
flags = ClassEnum::kZero;
EXPECT_EQ(flags, Flags<ClassEnum>(ClassEnum::kZero));
flags = {ClassEnum::kOne, ClassEnum::kTwo};
EXPECT_EQ(flags, Flags<ClassEnum>(ClassEnum::kOne, ClassEnum::kTwo));
}
TEST(FlagsTest, ClassAddAssign) {
Flags<ClassEnum> flags;
flags += ClassEnum::kZero;
EXPECT_EQ(flags, Flags<ClassEnum>(ClassEnum::kZero));
flags += {ClassEnum::kOne, ClassEnum::kTwo};
EXPECT_EQ(flags, Flags<ClassEnum>(ClassEnum::kZero, ClassEnum::kOne,
ClassEnum::kTwo));
flags += {ClassEnum::kOne, ClassEnum::kThree};
EXPECT_EQ(flags, Flags<ClassEnum>(ClassEnum::kZero, ClassEnum::kOne,
ClassEnum::kTwo, ClassEnum::kThree));
flags += {};
EXPECT_EQ(flags, Flags<ClassEnum>(ClassEnum::kZero, ClassEnum::kOne,
ClassEnum::kTwo, ClassEnum::kThree));
}
TEST(FlagsTest, ClassSubAssign) {
Flags<ClassEnum> flags(ClassEnum::kZero);
flags -= ClassEnum::kZero;
EXPECT_TRUE(flags.IsEmpty());
flags.Set({ClassEnum::kOne, ClassEnum::kTwo, ClassEnum::kThree});
flags -= {ClassEnum::kOne, ClassEnum::kThree};
EXPECT_EQ(flags, ClassEnum::kTwo);
flags -= {};
EXPECT_EQ(flags, ClassEnum::kTwo);
}
} // namespace
} // namespace gb
| 49.347619 | 80 | 0.650455 | jpursey |
335ca6fb8a6661b107a94f833bbb91c34917de01 | 482 | cpp | C++ | vox.render/profiling/profiler_spy.cpp | ArcheGraphics/Arche-cpp | da6770edd4556a920b3f7298f38176107caf7e3a | [
"MIT"
] | 8 | 2022-02-15T12:54:57.000Z | 2022-03-30T16:35:58.000Z | vox.render/profiling/profiler_spy.cpp | yangfengzzz/DigitalArche | da6770edd4556a920b3f7298f38176107caf7e3a | [
"MIT"
] | null | null | null | vox.render/profiling/profiler_spy.cpp | yangfengzzz/DigitalArche | da6770edd4556a920b3f7298f38176107caf7e3a | [
"MIT"
] | null | null | null | // Copyright (c) 2022 Feng Yang
//
// I am making my contributions/submissions to this project solely in my
// personal capacity and am not conveying any rights to any intellectual
// property of any third parties.
#include "profiler_spy.h"
namespace vox {
ProfilerSpy::ProfilerSpy(const std::string &p_name) :
name(p_name),
start(std::chrono::steady_clock::now()) {
}
ProfilerSpy::~ProfilerSpy() {
end = std::chrono::steady_clock::now();
Profiler::save(*this);
}
}
| 22.952381 | 73 | 0.709544 | ArcheGraphics |
335f2459bf433f0f369bf98d4226422b110deaf2 | 2,805 | cpp | C++ | src/utilities/Convertible.cpp | gle8098/succotash | 7f4189418301b4f9322a4cfa6dc205fcbe999d40 | [
"MIT"
] | 2 | 2020-05-19T10:52:20.000Z | 2020-10-26T18:39:22.000Z | src/utilities/Convertible.cpp | gle8098/succotash | 7f4189418301b4f9322a4cfa6dc205fcbe999d40 | [
"MIT"
] | 2 | 2020-04-27T08:27:22.000Z | 2020-05-06T17:27:06.000Z | src/utilities/Convertible.cpp | gle8098/succotash | 7f4189418301b4f9322a4cfa6dc205fcbe999d40 | [
"MIT"
] | null | null | null | #include "Convertible.hpp"
#include <functional>
#include <stdexcept>
namespace succotash::utilities {
//------------------------------------------------------------------------------
// Local functions which are not for export
//------------------------------------------------------------------------------
inline void ThrowErrorImpl(const char* value, const char* desc) {
char buffer[1024];
snprintf(buffer, sizeof(buffer), "Value '%s' exception, %s", value, desc);
throw std::runtime_error(buffer);
}
template <typename T, typename Func>
inline T ConvertStdlib(const char* value, Func func, const char* error_desc) {
char* error;
  // Use the parser's own return type: funnelling the result through `long`
  // would silently truncate ToFloat()/ToDouble() values.
  T result = func(value, &error);
if (*value == '\0' || *error != '\0') {
ThrowErrorImpl(value, error_desc);
}
return result;
}
//------------------------------------------------------------------------------
// Private methods
//------------------------------------------------------------------------------
void Convertible::ThrowError(const char* desc) const {
ThrowErrorImpl(this->value_, desc);
}
//------------------------------------------------------------------------------
// Public methods
//------------------------------------------------------------------------------
Convertible::Convertible(const char* value)
: value_(value) {
}
// String
std::string Convertible::ToString() const {
return std::string(value_);
}
// Basic numbers
int Convertible::ToInt() const {
return ToLong();
}
unsigned int Convertible::ToUInt() const {
return ToULong();
}
long Convertible::ToLong() const {
auto parser = std::bind(&strtol, std::placeholders::_1,
std::placeholders::_2, 0);
  return ConvertStdlib<long>(value_, parser, "couldn't convert to int/long");
}
unsigned long Convertible::ToULong() const {
auto parser = std::bind(&strtoul, std::placeholders::_1,
std::placeholders::_2, 0);
return ConvertStdlib<unsigned long>(value_, parser,
"couldn't convert to uint/ulong");
}
// Numbers with floating precision
float Convertible::ToFloat() const {
auto parser = &strtof;
return ConvertStdlib<float>(value_, parser, "couldn't convert to float");
}
double Convertible::ToDouble() const {
auto parser = &strtod;
return ConvertStdlib<double>(value_, parser, "couldn't convert to double");
}
// Bool
bool Convertible::ToBool() const {
std::string_view value(value_);
if (value == "true" || value == "True") {
return true;
} else if (value == "false" || value == "False") {
return false;
} else {
ThrowError("couldn't convert to bool");
return false;
}
}
// Operators
bool Convertible::operator==(const std::string& rhs) const {
return rhs == value_;
}
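// Usage sketch (illustrative only, not part of the original file):
//   Convertible("42").ToInt();    // -> 42
//   Convertible("true").ToBool(); // -> true
//   Convertible("oops").ToInt();  // throws std::runtime_error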
} // namespace succotash::utilities | 26.214953 | 80 | 0.547594 | gle8098 |
3362d8f4beef1376eb5dba1aaad9fe6cb430e9fe | 872 | hpp | C++ | modules/classes/HoldQueue1.hpp | PercyPanJX/Scheduling-and-Deadlock-Avoidance | d51d6a7b3da7333559592e7381add1c0ef25fb2e | [
"Apache-2.0"
] | null | null | null | modules/classes/HoldQueue1.hpp | PercyPanJX/Scheduling-and-Deadlock-Avoidance | d51d6a7b3da7333559592e7381add1c0ef25fb2e | [
"Apache-2.0"
] | null | null | null | modules/classes/HoldQueue1.hpp | PercyPanJX/Scheduling-and-Deadlock-Avoidance | d51d6a7b3da7333559592e7381add1c0ef25fb2e | [
"Apache-2.0"
] | 1 | 2020-07-24T20:07:23.000Z | 2020-07-24T20:07:23.000Z | /*
* HoldQueue1.hpp
*
* Created on: 2018/5/13
* Author: Qun Cheng
* Author: Jiaxuan(Percy) Pan
*/
#ifndef HOLDQUEUE1_HPP_
#define HOLDQUEUE1_HPP_
#include "HoldQueue2.hpp"
class HoldQueue1{
std::list<Job> q;
public:
HoldQueue1(){}
bool empty(){
return q.empty();
}
Job front(){
return q.front();
}
void pop(){
q.pop_front();
}
void print(){
for(Job j : q){
j.print();
}
}
	// Keeps the queue sorted by ascending runtime, so front() is always the
	// shortest remaining job.
	void push(Job in){
		std::list<Job>::iterator it = q.begin();
		while(it != q.end()){
			if(in.getRuntime() < it->getRuntime()){
				q.insert(it, in);
				return;
			}
			it++;
		}
		// No job with a longer runtime was found: append at the end.
		q.push_back(in);
	}
};
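// Illustrative behavior (not part of the original header): pushing jobs with
// runtimes 5, 2 and 9 leaves the queue ordered 2, 5, 9, so front() always
// yields the shortest remaining job.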
#endif /* HOLDQUEUE1_HPP_ */
| 12.823529 | 43 | 0.516055 | PercyPanJX |
33644ce440a5f006d9304c5e93ae4520c2eb71f8 | 4,892 | cc | C++ | validator/cpp/htmlparser/renderer.cc | li-cai/amphtml | 78fed1a5551cae5486717acbd45878a1e36343a0 | [
"Apache-2.0"
] | 3 | 2016-02-25T15:32:53.000Z | 2021-01-21T16:11:38.000Z | validator/cpp/htmlparser/renderer.cc | ColombiaOnline/amphtml | 92f8b2681933d3904c64f1d162f6e25dc8fb617c | [
"Apache-2.0"
] | 93 | 2020-03-05T19:09:47.000Z | 2021-05-13T15:12:03.000Z | validator/cpp/htmlparser/renderer.cc | ColombiaOnline/amphtml | 92f8b2681933d3904c64f1d162f6e25dc8fb617c | [
"Apache-2.0"
] | 1 | 2018-04-03T08:10:10.000Z | 2018-04-03T08:10:10.000Z | #include <algorithm>
#include "atomutil.h"
#include "elements.h"
#include "renderer.h"
#include "strings.h"
namespace htmlparser {
namespace {
inline void WriteToBuffer(const std::string& str, std::stringbuf* buf) {
buf->sputn(str.c_str(), str.size());
}
// Writes str surrounded by quotes to buf. Normally it will use double quotes,
// but if str contains a double quote, it will use single quotes.
// It is used for writing the identifiers in a doctype declaration.
// In valid HTML, they can't contain both types of quotes.
inline void WriteQuoted(const std::string& str, std::stringbuf* buf) {
char quote = '"';
if (str.find('\"') != std::string::npos) {
quote = '\'';
}
buf->sputc(quote);
WriteToBuffer(str, buf);
buf->sputc(quote);
}
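// Illustrative examples (not in the original source):
//   WriteQuoted("-//W3C//DTD HTML 4.01//EN", &buf)  =>  "-//W3C//DTD HTML 4.01//EN"
//   WriteQuoted("has a \" inside", &buf)            =>  'has a " inside'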
} // namespace.
RenderError Renderer::Render(Node* node, std::stringbuf* buf) {
switch (node->Type()) {
case NodeType::ERROR_NODE:
return RenderError::ERROR_NODE_NO_RENDER;
case NodeType::TEXT_NODE:
Strings::Escape(node->Data().data(), buf);
return RenderError::NO_ERROR;
case NodeType::DOCUMENT_NODE:
for (Node* c = node->FirstChild(); c; c = c->NextSibling()) {
auto err = Render(c, buf);
if (err != RenderError::NO_ERROR) {
return err;
}
}
return RenderError::NO_ERROR;
case NodeType::ELEMENT_NODE:
// No-op.
break;
case NodeType::COMMENT_NODE:
WriteToBuffer("<!--", buf);
WriteToBuffer(node->Data().data(), buf);
WriteToBuffer("-->", buf);
return RenderError::NO_ERROR;
case NodeType::DOCTYPE_NODE: {
WriteToBuffer("<!DOCTYPE ", buf);
WriteToBuffer(node->Data().data(), buf);
std::string p;
std::string s;
for (auto& attr : node->Attributes()) {
std::string key = attr.key;
std::string value = attr.value;
if (key == "public") {
p = value;
} else if (key == "system") {
s = value;
}
}
if (!p.empty()) {
WriteToBuffer(" PUBLIC ", buf);
WriteQuoted(p, buf);
if (!s.empty()) {
buf->sputc(' ');
WriteQuoted(s, buf);
}
} else if (!s.empty()) {
WriteToBuffer(" SYSTEM ", buf);
WriteQuoted(s, buf);
}
buf->sputc('>');
return RenderError::NO_ERROR;
}
default:
return RenderError::UNKNOWN_NODE_TYPE;
}
// Render the <xxx> opening tag.
buf->sputc('<');
WriteToBuffer(node->DataAtom() == Atom::UNKNOWN ?
node->Data().data() : AtomUtil::ToString(node->DataAtom()),
buf);
for (auto& attr : node->Attributes()) {
std::string ns = attr.name_space;
std::string k = attr.key;
std::string v = attr.value;
buf->sputc(' ');
if (!ns.empty()) {
WriteToBuffer(ns, buf);
buf->sputc(':');
}
WriteToBuffer(k, buf);
if (!v.empty()) {
WriteToBuffer("=\"", buf);
Strings::Escape(v, buf);
buf->sputc('"');
}
}
if (auto ve = std::find(kVoidElements.begin(),
kVoidElements.end(),
node->DataAtom());
ve != kVoidElements.end()) {
if (node->FirstChild()) {
return RenderError::VOID_ELEMENT_CHILD_NODE;
}
WriteToBuffer(">", buf);
return RenderError::NO_ERROR;
}
buf->sputc('>');
// Add initial newline where there is danger of a newline being ignored.
if (Node* c = node->FirstChild();
c && c->Type() == NodeType::TEXT_NODE && Strings::StartsWith(
c->Data(), "\n")) {
if (node->DataAtom() == Atom::PRE ||
node->DataAtom() == Atom::LISTING ||
node->DataAtom() == Atom::TEXTAREA) {
buf->sputc('\n');
}
}
// Render any child nodes.
if (std::find(kRawTextNodes.begin(),
kRawTextNodes.end(),
node->DataAtom()) != kRawTextNodes.end()) {
for (Node* c = node->FirstChild(); c; c = c->NextSibling()) {
if (c->Type() == NodeType::TEXT_NODE) {
WriteToBuffer(c->Data().data(), buf);
} else {
auto err = Render(c, buf);
if (err != RenderError::NO_ERROR) {
return err;
}
}
}
if (node->DataAtom() == Atom::PLAINTEXT) {
// Don't render anything else. <plaintext> must be the last element
// in the file, with no closing tag.
return RenderError::PLAIN_TEXT_ABORT;
}
} else {
for (Node* c = node->FirstChild(); c; c = c->NextSibling()) {
auto err = Render(c, buf);
if (err != RenderError::NO_ERROR) {
return err;
}
}
}
// Render the </xxx> closing tag.
WriteToBuffer("</", buf);
WriteToBuffer(node->DataAtom() == Atom::UNKNOWN ?
node->Data().data() : AtomUtil::ToString(node->DataAtom()),
buf);
buf->sputc('>');
return RenderError::NO_ERROR;
}
} // namespace htmlparser.
| 28.44186 | 78 | 0.55601 | li-cai |
3364b3d7bf5e8c34701a087abd3890bbd3554559 | 5,878 | cpp | C++ | src/rpg_setup.cpp | MarianoGnu/liblcf | 02c640335ad13f2d815409a171045c42b23f7b86 | [
"MIT"
] | null | null | null | src/rpg_setup.cpp | MarianoGnu/liblcf | 02c640335ad13f2d815409a171045c42b23f7b86 | [
"MIT"
] | null | null | null | src/rpg_setup.cpp | MarianoGnu/liblcf | 02c640335ad13f2d815409a171045c42b23f7b86 | [
"MIT"
] | null | null | null | /*
* This file is part of liblcf. Copyright (c) 2018 liblcf authors.
* https://github.com/EasyRPG/liblcf - https://easyrpg.org
*
* liblcf is Free/Libre Open Source Software, released under the MIT License.
* For the full copyright and license information, please view the COPYING
* file that was distributed with this source code.
*/
#include "lcf_options.h"
#include "rpg_actor.h"
#include "rpg_event.h"
#include "rpg_map.h"
#include "rpg_mapinfo.h"
#include "rpg_system.h"
#include "rpg_save.h"
#include "rpg_chipset.h"
#include "rpg_parameters.h"
#include "data.h"
void RPG::SaveActor::Setup(int actor_id) {
const RPG::Actor& actor = Data::actors[actor_id - 1];
ID = actor.ID;
name = actor.name;
title = actor.title;
sprite_name = actor.character_name;
sprite_id = actor.character_index;
sprite_flags = actor.transparent ? 3 : 0;
face_name = actor.face_name;
face_id = actor.face_index;
level = actor.initial_level;
exp = 0;
hp_mod = 0;
sp_mod = 0;
attack_mod = 0;
defense_mod = 0;
spirit_mod = 0;
agility_mod = 0;
skills_size = 0;
skills.clear();
equipped.clear();
equipped.push_back(actor.initial_equipment.weapon_id);
equipped.push_back(actor.initial_equipment.shield_id);
equipped.push_back(actor.initial_equipment.armor_id);
equipped.push_back(actor.initial_equipment.helmet_id);
equipped.push_back(actor.initial_equipment.accessory_id);
current_hp = 0;
current_sp = 0;
battle_commands.resize(7, -1);
status.resize(Data::states.size());
changed_battle_commands = false;
class_id = -1;
two_weapon = actor.two_weapon;
lock_equipment = actor.lock_equipment;
auto_battle = actor.auto_battle;
super_guard = actor.super_guard;
}
void RPG::SaveInventory::Setup() {
party = Data::system.party;
party_size = party.size();
}
void RPG::SaveMapEvent::Setup(const RPG::Event& event) {
ID = event.ID;
position_x = event.x;
position_y = event.y;
}
void RPG::SaveMapInfo::Setup() {
position_x = 0;
position_y = 0;
lower_tiles.resize(144);
upper_tiles.resize(144);
for (int i = 0; i < 144; i++) {
lower_tiles[i] = i;
upper_tiles[i] = i;
}
}
void RPG::SaveMapInfo::Setup(const RPG::Map& map) {
chipset_id = map.chipset_id;
parallax_name = map.parallax_name;
parallax_horz = map.parallax_loop_x;
parallax_vert = map.parallax_loop_y;
parallax_horz_auto = map.parallax_auto_loop_x;
parallax_vert_auto = map.parallax_auto_loop_y;
parallax_horz_speed = map.parallax_sx;
parallax_vert_speed = map.parallax_sy;
}
void RPG::SaveSystem::Setup() {
const RPG::System& system = Data::system;
frame_count = 0;
graphics_name = system.system_name;
face_name = "";
face_id = -1;
face_right = false;
face_flip = false;
transparent = false;
music_stopping = false;
title_music = system.title_music;
battle_music = system.battle_music;
battle_end_music = system.battle_end_music;
inn_music = system.inn_music;
// current_music
// unknown1_music FIXME
// unknown2_music FIXME
// stored_music
boat_music = system.boat_music;
ship_music = system.ship_music;
airship_music = system.airship_music;
gameover_music = system.gameover_music;
cursor_se = system.cursor_se;
decision_se = system.decision_se;
cancel_se = system.cancel_se;
buzzer_se = system.buzzer_se;
battle_se = system.battle_se;
escape_se = system.escape_se;
enemy_attack_se = system.enemy_attack_se;
enemy_damaged_se = system.enemy_damaged_se;
actor_damaged_se = system.actor_damaged_se;
dodge_se = system.dodge_se;
enemy_death_se = system.enemy_death_se;
item_se = system.item_se;
transition_out = system.transition_out;
transition_in = system.transition_in;
battle_start_fadeout = system.battle_start_fadeout;
battle_start_fadein = system.battle_start_fadein;
battle_end_fadeout = system.battle_end_fadeout;
battle_end_fadein = system.battle_end_fadein;
message_stretch = system.message_stretch;
font_id = system.font_id;
teleport_allowed = true;
escape_allowed = true;
save_allowed = true;
menu_allowed = true;
background = "";
save_count = 0;
save_slot = -1;
}
void RPG::Save::Setup() {
system.Setup();
screen = RPG::SaveScreen();
pictures.clear();
pictures.resize(50);
for (int i = 1; i <= (int)pictures.size(); i++) {
pictures[i - 1].ID = i;
}
actors.clear();
actors.resize(Data::actors.size());
for (int i = 1; i <= (int) actors.size(); i++)
actors[i - 1].Setup(i);
map_info.Setup();
party_location.move_speed = 4;
boat_location.vehicle = RPG::SaveVehicleLocation::VehicleType_skiff;
ship_location.vehicle = RPG::SaveVehicleLocation::VehicleType_ship;
airship_location.vehicle = RPG::SaveVehicleLocation::VehicleType_airship;
if (targets.empty()) {
targets.resize(1);
}
}
void RPG::Actor::Setup() {
int max_final_level = 0;
if (Data::system.ldb_id == 2003) {
max_final_level = 99;
if (final_level == -1) {
final_level = max_final_level;
}
exp_base = exp_base == -1 ? 300 : exp_base;
exp_inflation = exp_inflation == -1 ? 300 : exp_inflation;
}
else {
max_final_level = 50;
if (final_level == -1) {
final_level = max_final_level;
}
exp_base = exp_base == -1 ? 30 : exp_base;
exp_inflation = exp_inflation == -1 ? 30 : exp_inflation;
}
parameters.Setup(max_final_level);
}
void RPG::Chipset::Init() {
terrain_data.resize(162, 1);
passable_data_lower.resize(162, 15);
passable_data_upper.resize(144, 15);
passable_data_upper.front() = 31;
}
void RPG::System::Init() {
party.resize(1, 1);
menu_commands.resize(1, 1);
}
void RPG::Parameters::Setup(int final_level) {
if (maxhp.size() < final_level) maxhp.resize(final_level, 1);
if (maxsp.size() < final_level) maxsp.resize(final_level, 0);
if (attack.size() < final_level) attack.resize(final_level, 1);
if (defense.size() < final_level) defense.resize(final_level, 1);
if (spirit.size() < final_level) spirit.resize(final_level, 1);
if (agility.size() < final_level) agility.resize(final_level, 1);
}
| 28.259615 | 77 | 0.732052 | MarianoGnu |
3365a5ad57e6c45f7114f0b79dd29353013c7b01 | 2,490 | cpp | C++ | Codeforces/915C/greedy.cpp | codgician/ACM | 391f3ce9b89b0a4bbbe3ff60eb2369fef57460d4 | [
"MIT"
] | 2 | 2018-02-14T01:59:31.000Z | 2018-03-28T03:30:45.000Z | Codeforces/915C/greedy.cpp | codgician/ACM | 391f3ce9b89b0a4bbbe3ff60eb2369fef57460d4 | [
"MIT"
] | null | null | null | Codeforces/915C/greedy.cpp | codgician/ACM | 391f3ce9b89b0a4bbbe3ff60eb2369fef57460d4 | [
"MIT"
] | 2 | 2017-12-30T02:46:35.000Z | 2018-03-28T03:30:49.000Z | #include <iostream>
#include <cstdio>
#include <cmath>
#include <algorithm>
#include <cstring>
#include <string>
#include <iomanip>
#include <climits>
#include <vector>
#include <set>
#include <queue>
#define SIZE 10
using namespace std;
int arr[SIZE];
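// Greedy approach: arr[] counts the digits of origNum. When the lengths of
// origNum and maxNum match, digits of maxNum are taken greedily as long as
// the ascending arrangement of the remaining digits can still stay within
// the bound; the leftover digits (and the whole answer when origNum is
// shorter) are printed in descending order to maximize the result.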
int main()
{
ios::sync_with_stdio(false);
string origNum, maxNum;
cin >> origNum >> maxNum;
memset(arr, 0, sizeof(arr));
for (int i = 0; i < origNum.length(); i++)
{
arr[origNum[i] - '0']++;
}
/*
if (origNum.length() > maxNum.length())
{
int delta = origNum.length() - maxNum.length();
string tmp = "";
while (delta--)
{
tmp += '0';
}
maxNum = tmp + maxNum;
}
*/
if (maxNum.length() == origNum.length())
{
for (int i = 0; i < origNum.length(); i++)
{
if (arr[maxNum[i] - '0'] > 0)
{
arr[maxNum[i] - '0']--;
string tmp = "";
for (int j = 0; j <= 9; j++)
{
for (int k = 0; k < arr[j]; k++)
{
tmp += (char)(j + '0');
}
}
bool canSelect = true;
for (int j = i + 1; j < origNum.length(); j++)
{
if (maxNum[j] > tmp[j - i - 1])
{
canSelect = true;
break;
}
else if (maxNum[j] < tmp[j - i - 1])
{
canSelect = false;
break;
}
}
if (canSelect)
{
cout << maxNum[i];
continue;
}
else
{
arr[maxNum[i] - '0']++;
}
}
bool quitFlag = false;
for (int j = maxNum[i] - '0' - 1; j >= 0; j--)
{
if (arr[j] > 0)
{
cout << j;
arr[j]--;
quitFlag = true;
break;
}
}
if (quitFlag)
break;
}
}
for (int i = 9; i >= 0; i--)
{
while (arr[i])
{
cout << i;
arr[i]--;
}
}
cout << endl;
return 0;
}
| 22.035398 | 62 | 0.323293 | codgician |
3365b0a0467138b16194c86b4580da019d4859d4 | 703 | cpp | C++ | backend/tests/test-helib-fp-int8_t-multByConst.cpp | vkurilin/SHEEP | 2ccaef32c16efcf5dbc8eefd1dc243bed4ac2fbb | [
"MIT"
] | 40 | 2018-12-03T13:01:06.000Z | 2022-02-23T13:04:12.000Z | backend/tests/test-helib-fp-int8_t-multByConst.cpp | vkurilin/SHEEP | 2ccaef32c16efcf5dbc8eefd1dc243bed4ac2fbb | [
"MIT"
] | 63 | 2018-09-11T14:13:31.000Z | 2020-01-14T16:12:39.000Z | backend/tests/test-helib-fp-int8_t-multByConst.cpp | vkurilin/SHEEP | 2ccaef32c16efcf5dbc8eefd1dc243bed4ac2fbb | [
"MIT"
] | 7 | 2019-07-10T14:48:31.000Z | 2022-03-23T09:12:11.000Z | #include <memory>
#include <algorithm>
#include <cassert>
#include <cstdint>
#include "circuit-repo.hpp"
#include "circuit-test-util.hpp"
#include "context-helib.hpp"
int main(void) {
using namespace SHEEP;
ContextHElib_Fp<int8_t> ctx;
std::vector<ContextHElib_Fp<int8_t>::Plaintext> pt_input = {55, -42, 120};
ContextHElib_Fp<int8_t>::Ciphertext ct = ctx.encrypt(pt_input);
long const_val = 2;
// Perform operation
ContextHElib_Fp<int8_t>::Ciphertext ct_out =
ctx.MultByConstant(ct, const_val);
// Decrypt
std::vector<ContextHElib_Fp<int8_t>::Plaintext> pt_out = ctx.decrypt(ct_out);
assert(pt_out[0] == 110);
assert(pt_out[1] == -84);
assert(pt_out[2] == -16);
}
| 22.677419 | 79 | 0.70128 | vkurilin |
3365bc9ea0ff7a332d8d478b2da52f7d8acda705 | 11,408 | cpp | C++ | wallet/swaps/swap_offers_board.cpp | DavidBurkett/beam | c352f54fb18136188d4470d2178a95b1deb335c2 | [
"Apache-2.0"
] | null | null | null | wallet/swaps/swap_offers_board.cpp | DavidBurkett/beam | c352f54fb18136188d4470d2178a95b1deb335c2 | [
"Apache-2.0"
] | null | null | null | wallet/swaps/swap_offers_board.cpp | DavidBurkett/beam | c352f54fb18136188d4470d2178a95b1deb335c2 | [
"Apache-2.0"
] | null | null | null | // Copyright 2019 The Beam Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "swap_offers_board.h"
#include "p2p/protocol_base.h"
namespace beam::wallet
{
SwapOffersBoard::SwapOffersBoard(FlyClient::INetwork& network, IWalletMessageEndpoint& messageEndpoint)
: m_network(network),
m_messageEndpoint(messageEndpoint)
{
for (auto channel : m_channelsMap)
{
m_network.BbsSubscribe(channel.second, m_lastTimestamp, this);
}
}
const std::map<AtomicSwapCoin, BbsChannel> SwapOffersBoard::m_channelsMap =
{
{AtomicSwapCoin::Bitcoin, proto::Bbs::s_MaxChannels},
{AtomicSwapCoin::Litecoin, proto::Bbs::s_MaxChannels + 1},
{AtomicSwapCoin::Qtum, proto::Bbs::s_MaxChannels + 2}
};
void SwapOffersBoard::OnMsg(proto::BbsMsg &&msg)
{
if (msg.m_Message.empty() || msg.m_Message.size() < MsgHeader::SIZE)
return;
SwapOfferToken token;
SwapOfferConfirmation confirmation;
try
{
MsgHeader header(msg.m_Message.data());
if (header.V0 != 0 ||
header.V1 != 0 ||
header.V2 != m_protocolVersion ||
header.type != 0)
{
LOG_WARNING() << "offer board message version unsupported";
return;
}
// message body
Deserializer d;
d.reset(msg.m_Message.data() + header.SIZE, header.size);
d & token;
d & confirmation.m_Signature;
}
catch(...)
{
LOG_WARNING() << "offer board message deserialization exception";
return;
}
auto newOffer = token.Unpack();
confirmation.m_offerData = toByteBuffer(token);
if (!confirmation.IsValid(newOffer.m_publisherId.m_Pk))
{
LOG_WARNING() << "offer board message signature is invalid";
return;
}
if (newOffer.m_coin >= AtomicSwapCoin::Unknown || newOffer.m_status > SwapOfferStatus::Failed)
{
LOG_WARNING() << "offer board message is invalid";
return;
}
auto it = m_offersCache.find(newOffer.m_txId);
// New offer
if (it == m_offersCache.end())
{
m_offersCache[newOffer.m_txId] = newOffer;
if (newOffer.m_status == SwapOfferStatus::Pending)
{
notifySubscribers(ChangeAction::Added, std::vector<SwapOffer>{newOffer});
}
else
{
// Don't push irrelevant offers to subscribers
}
}
// Existing offer update
else
{
SwapOfferStatus existingStatus = m_offersCache[newOffer.m_txId].m_status;
// Normal case
if (existingStatus == SwapOfferStatus::Pending)
{
if (newOffer.m_status != SwapOfferStatus::Pending)
{
m_offersCache[newOffer.m_txId].m_status = newOffer.m_status;
notifySubscribers(ChangeAction::Removed, std::vector<SwapOffer>{newOffer});
}
}
            // The transaction state changed asynchronously while the board was offline,
            // so an incomplete offer with SwapOfferStatus != Pending was created.
            // If an offer with SwapOfferStatus::Pending still exists in the network,
            // it needs to be updated to the latest status.
else
{
if (newOffer.m_status == SwapOfferStatus::Pending)
{
sendUpdateToNetwork(newOffer.m_txId, newOffer.m_publisherId, newOffer.m_coin, existingStatus);
}
}
}
}
/**
     * Watches the system state to remove stuck expired offers from the board.
     * Notifies subscribers only; it doesn't push any updates to the network.
*/
void SwapOffersBoard::onSystemStateChanged(const Block::SystemState::ID& stateID)
{
Height currentHeight = stateID.m_Height;
for (auto& pair : m_offersCache)
{
if (pair.second.m_status != SwapOfferStatus::Pending) continue; // has to be already removed from board
auto peerResponseTime = pair.second.GetParameter<Height>(TxParameterID::PeerResponseTime);
auto minHeight = pair.second.GetParameter<Height>(TxParameterID::MinHeight);
if (peerResponseTime && minHeight)
{
auto expiresHeight = *minHeight + *peerResponseTime;
if (expiresHeight <= currentHeight)
{
pair.second.m_status = SwapOfferStatus::Expired;
notifySubscribers(ChangeAction::Removed, std::vector<SwapOffer>{pair.second});
}
}
}
}
void SwapOffersBoard::onTransactionChanged(ChangeAction action, const std::vector<TxDescription>& items)
{
if (action != ChangeAction::Removed)
{
for (const auto& item : items)
{
if (item.m_txType != TxType::AtomicSwap) continue;
switch (item.m_status)
{
case TxStatus::InProgress:
updateOffer(item.m_txId, SwapOfferStatus::InProgress);
break;
case TxStatus::Failed:
{
auto reason = item.GetParameter<TxFailureReason>(TxParameterID::InternalFailureReason);
SwapOfferStatus status = SwapOfferStatus::Failed;
if (reason && *reason == TxFailureReason::TransactionExpired)
{
status = SwapOfferStatus::Expired;
}
updateOffer(item.m_txId, status);
break;
}
case TxStatus::Canceled:
updateOffer(item.m_txId, SwapOfferStatus::Canceled);
break;
default:
// ignore
break;
}
}
}
}
void SwapOffersBoard::updateOffer(const TxID& offerTxID, SwapOfferStatus newStatus)
{
if (newStatus == SwapOfferStatus::Pending) return;
auto offerIt = m_offersCache.find(offerTxID);
if (offerIt != m_offersCache.end())
{
AtomicSwapCoin coin = offerIt->second.m_coin;
WalletID publisherId = offerIt->second.m_publisherId;
SwapOfferStatus currentStatus = offerIt->second.m_status;
if (currentStatus == SwapOfferStatus::Pending)
{
m_offersCache[offerTxID].m_status = newStatus;
notifySubscribers(ChangeAction::Removed, std::vector<SwapOffer>{m_offersCache[offerTxID]});
sendUpdateToNetwork(offerTxID, publisherId, coin, newStatus);
}
}
else
{
        // Case: updateOffer() was called before the offer appeared on the board.
        // Here we don't know whether the offer exists in the network at all, so the board sends no update.
        // The board stores the incomplete offer so it can notify the network once the original Pending offer arrives.
SwapOffer incompleteOffer(offerTxID);
incompleteOffer.m_status = newStatus;
m_offersCache[offerTxID] = incompleteOffer;
}
}
auto SwapOffersBoard::getOffersList() const -> std::vector<SwapOffer>
{
std::vector<SwapOffer> offers;
for (auto offer : m_offersCache)
{
SwapOfferStatus status = offer.second.m_status;
if (status == SwapOfferStatus::Pending)
{
offers.push_back(offer.second);
}
}
return offers;
}
auto SwapOffersBoard::getChannel(AtomicSwapCoin coin) const -> BbsChannel
{
auto it = m_channelsMap.find(coin);
assert(it != std::cend(m_channelsMap));
return it->second;
}
void SwapOffersBoard::publishOffer(const SwapOffer& offer) const
{
auto swapCoin = offer.GetParameter<AtomicSwapCoin>(TxParameterID::AtomicSwapCoin);
auto isBeamSide = offer.GetParameter<bool>(TxParameterID::AtomicSwapIsBeamSide);
auto amount = offer.GetParameter<Amount>(TxParameterID::Amount);
auto swapAmount = offer.GetParameter<Amount>(TxParameterID::AtomicSwapAmount);
auto responseTime = offer.GetParameter<Height>(TxParameterID::PeerResponseTime);
auto minimalHeight = offer.GetParameter<Height>(TxParameterID::MinHeight);
if (!swapCoin || !isBeamSide || !amount || !swapAmount || !responseTime || !minimalHeight)
{
LOG_WARNING() << offer.m_txId << " Can't publish invalid offer.\n\t";
return;
}
LOG_INFO() << offer.m_txId << " Publish offer.\n\t"
<< "isBeamSide: " << (*isBeamSide ? "false" : "true") << "\n\t"
<< "swapCoin: " << std::to_string(*swapCoin) << "\n\t"
<< "amount: " << *amount << "\n\t"
<< "swapAmount: " << *swapAmount << "\n\t"
<< "responseTime: " << *responseTime << "\n\t"
<< "minimalHeight: " << *minimalHeight;
beam::wallet::SwapOfferToken token(offer);
m_messageEndpoint.SendAndSign(toByteBuffer(token), getChannel(*swapCoin), offer.m_publisherId, m_protocolVersion);
}
void SwapOffersBoard::sendUpdateToNetwork(const TxID& offerID, const WalletID& publisherID, AtomicSwapCoin coin, SwapOfferStatus newStatus) const
{
LOG_INFO() << offerID << " Update offer status to " << std::to_string(newStatus);
beam::wallet::SwapOfferToken token(SwapOffer(offerID, newStatus, publisherID, coin));
m_messageEndpoint.SendAndSign(toByteBuffer(token), getChannel(coin), publisherID, m_protocolVersion);
}
void SwapOffersBoard::Subscribe(ISwapOffersObserver* observer)
{
assert(std::find(m_subscribers.begin(), m_subscribers.end(), observer) == m_subscribers.end());
m_subscribers.push_back(observer);
}
void SwapOffersBoard::Unsubscribe(ISwapOffersObserver* observer)
{
auto it = std::find(m_subscribers.begin(), m_subscribers.end(), observer);
assert(it != m_subscribers.end());
m_subscribers.erase(it);
}
void SwapOffersBoard::notifySubscribers(ChangeAction action, const std::vector<SwapOffer>& offers) const
{
for (auto sub : m_subscribers)
{
sub->onSwapOffersChanged(action, std::vector<SwapOffer>{offers});
}
}
} // namespace beam::wallet
| 37.526316 | 149 | 0.579856 | DavidBurkett |
3365dfa70b4ebb775aa8effd904b37fe9c221358 | 3,005 | hpp | C++ | libHCore/inc/time.hpp | adeliktas/Hayha3 | a505b6e79e6cabd8ef8d899eeb9f7e39251b58b5 | [
"MIT"
] | 15 | 2021-11-22T07:31:22.000Z | 2022-02-22T22:53:51.000Z | libHCore/inc/time.hpp | adeliktas/Hayha3 | a505b6e79e6cabd8ef8d899eeb9f7e39251b58b5 | [
"MIT"
] | 1 | 2021-11-26T19:27:40.000Z | 2021-11-26T19:27:40.000Z | libHCore/inc/time.hpp | adeliktas/Hayha3 | a505b6e79e6cabd8ef8d899eeb9f7e39251b58b5 | [
"MIT"
] | 5 | 2021-11-20T18:21:24.000Z | 2021-12-26T12:32:47.000Z |
#ifndef TIME_HPP
#define TIME_HPP
#include <chrono>
using namespace std::chrono;
using timeStamp = time_point<steady_clock,microseconds>;
using timeStampSeconds = time_point<steady_clock,seconds>;
extern timeStamp programStart;
timeStamp getCurrentTimeMicro();
timeStamp getTimeInFuture(uint64_t usec);
int64_t timeSince(timeStamp t0);
int64_t timeTo(timeStamp t0);
int64_t getTimeDifference(timeStamp t0, timeStamp t1);
int64_t timeSinceStart(timeStamp t0);
int64_t unixTime(timeStamp t0);
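// Usage sketch (illustrative only; timeStamp is a microsecond-based
// steady_clock time point):
//   timeStamp t0 = getCurrentTimeMicro();
//   /* ... work ... */
//   int64_t elapsed = timeSince(t0);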
#endif
/*
#ifndef TIME_HPP
#define TIME_HPP
#include <chrono>
#include <time.h>
using namespace std::chrono;
static inline int fastfloor(float fp) {
int i = static_cast<int>(fp);
return (fp < i) ? (i - 1) : (i);
}
nanoseconds timespecToDuration(timespec ts);
time_point<system_clock, nanoseconds>timespecToTimePoint(timespec ts);
struct timeStamp{
timespec time;
timeStamp operator-(timeStamp &t1){
if(t1.time.tv_nsec > time.tv_nsec){
time.tv_sec--;
time.tv_nsec = 999999999 + time.tv_nsec - t1.time.tv_nsec;
}
else{
time.tv_nsec -= t1.time.tv_nsec;
}
time.tv_sec -= t1.time.tv_sec;
return *this;
}
timeStamp operator+(timeStamp &t1){
if(t1.time.tv_nsec + time.tv_nsec > 999999999){
}
else{
time.tv_sec += t1.time.tv_sec;
time.tv_nsec += t1.time.tv_nsec;
}
return *this;
}
timeStamp operator-(uint64_t usec){
long nsec = usec * 1000;
int secx = fastfloor(usec / 1000000);
time.tv_sec -= secx;
nsec -= secx * 1000000000;
if(nsec > time.tv_nsec){
time.tv_sec--;
time.tv_nsec = 999999999 + time.tv_nsec - nsec;
}
else{
time.tv_nsec -= nsec;
}
return *this;
}
timeStamp operator+(uint64_t usec){
long nsec = usec * 1000;
int secx = fastfloor(usec / 1000000);
time.tv_sec -= secx;
nsec -= secx * 1000000000;
if(nsec + time.tv_nsec > 999999999){
time.tv_sec++;
time.tv_nsec = (nsec + time.tv_nsec) - 999999999;
}
else{
time.tv_nsec += nsec;
}
return *this;
}
friend bool operator> (const timeStamp t1, const timeStamp t2){
if(t1.time.tv_sec > t2.time.tv_sec)
return true;
else
return false;
if(t1.time.tv_nsec > t2.time.tv_nsec)
return true;
else
return false;
}
int64_t micros(){
return time.tv_sec * 1000000 + time.tv_nsec / 1000;
}
};
extern timeStamp programStart;
timeStamp getCurrentTimeMicro();
timeStamp getTimeInFuture(uint64_t usec);
int64_t timeSince(timeStamp t0);
int64_t timeTo(timeStamp t0);
int64_t getTimeDifference(timeStamp t0, timeStamp t1);
int64_t timeSinceStart(timeStamp t0);
int64_t unixTime(timeStamp t0);
#endif
*/ | 22.095588 | 70 | 0.607654 | adeliktas |
336b462ccee7e5caea92825ff3340f8f06560b62 | 1,868 | hpp | C++ | include/rba/RBASoundContent.hpp | NaohiroNISHIGUCHI/RBA | ac86e4ffa643b8050b25161c951bb43f4e36235a | [
"Apache-2.0"
] | 2 | 2020-07-17T11:13:48.000Z | 2020-07-30T09:37:08.000Z | include/rba/RBASoundContent.hpp | NaohiroNISHIGUCHI/RBA | ac86e4ffa643b8050b25161c951bb43f4e36235a | [
"Apache-2.0"
] | null | null | null | include/rba/RBASoundContent.hpp | NaohiroNISHIGUCHI/RBA | ac86e4ffa643b8050b25161c951bb43f4e36235a | [
"Apache-2.0"
] | 3 | 2020-06-25T07:19:19.000Z | 2020-06-26T13:06:13.000Z | // Copyright (c) 2018 DENSO CORPORATION. All rights reserved.
/**
* Sound content class
*/
#ifndef RBASOUNDCONTENT_HPP
#define RBASOUNDCONTENT_HPP
#ifdef _MSC_VER
#ifdef _WINDLL
#define DLL_EXPORT __declspec(dllexport)
#else
#define DLL_EXPORT __declspec(dllimport)
#endif
#else
#define DLL_EXPORT
#endif
#include <list>
#include <string>
#include "RBAContentLoserType.hpp"
namespace rba
{
class RBASoundContentState;
class RBAZone;
/**
* @class RBASoundContent
 * Defines the sound content object.<br>
 * A sound content has multiple states.
 * When a sound content is connected to a zone, its active state is output.
 * The object holds definitions of the zones that can output it;
 * each object may define multiple such zones.
*/
class DLL_EXPORT RBASoundContent
{
protected:
RBASoundContent()=default;
RBASoundContent(const RBASoundContent&)=delete;
RBASoundContent(RBASoundContent&&)=delete;
RBASoundContent& operator=(const RBASoundContent&)=delete;
RBASoundContent& operator=(RBASoundContent&&)=delete;
~RBASoundContent()=default;
public:
/**
* @brief Returns the name of the sound content.
* @return Sound content name
*/
virtual std::string getName() const=0;
/**
* @brief Returns the state of the sound content.
* @return List of the sound content state
*/
virtual const std::list<const RBASoundContentState*>& getContentStates() const=0;
/**
* @brief Returns the zone of the sound content.
* @return List of the zone
*/
virtual const std::list<const RBAZone*>& getZones() const=0;
/**
* @brief Returns the loser type.
* @return Loser type
*/
virtual RBAContentLoserType getLoserType() const=0;
public:
/**
* @brief Defines the default loser type.
*/
const static RBAContentLoserType LOSER_TYPE_EDEFAULT =
RBAContentLoserType::NEVER_GIVEUP;
};
}
#endif
| 22.238095 | 83 | 0.728587 | NaohiroNISHIGUCHI |
336ef2fb6bfaec758b0a786ded531b6e095dcf91 | 3,858 | cc | C++ | obj/pillar.cc | vinijabes/simutrans | cb90d7e29e8f7910936d98733fe9fe5f24893535 | [
"Artistic-1.0"
] | null | null | null | obj/pillar.cc | vinijabes/simutrans | cb90d7e29e8f7910936d98733fe9fe5f24893535 | [
"Artistic-1.0"
] | null | null | null | obj/pillar.cc | vinijabes/simutrans | cb90d7e29e8f7910936d98733fe9fe5f24893535 | [
"Artistic-1.0"
] | null | null | null | /*
* This file is part of the Simutrans project under the Artistic License.
* (see LICENSE.txt)
*/
#include <string.h>
#include "../simworld.h"
#include "../simobj.h"
#include "../simmem.h"
#include "../display/simimg.h"
#include "../bauer/brueckenbauer.h"
#include "../descriptor/bridge_desc.h"
#include "../boden/grund.h"
#include "../dataobj/loadsave.h"
#include "../obj/pillar.h"
#include "../obj/bruecke.h"
#include "../dataobj/environment.h"
pillar_t::pillar_t(loadsave_t *file) : obj_t()
{
desc = NULL;
asymmetric = false;
rdwr(file);
}
pillar_t::pillar_t(koord3d pos, player_t *player, const bridge_desc_t *desc, bridge_desc_t::img_t img, int hoehe) : obj_t(pos)
{
this->desc = desc;
this->dir = (uint8)img;
set_yoff(-hoehe);
set_owner( player );
asymmetric = desc->has_pillar_asymmetric();
calc_image();
}
void pillar_t::calc_image()
{
bool hide = false;
int height = get_yoff();
if( grund_t *gr = welt->lookup(get_pos()) ) {
slope_t::type slope = gr->get_grund_hang();
if( desc->has_pillar_asymmetric() ) {
if( dir == bridge_desc_t::NS_Pillar ) {
height += ( (corner_sw(slope) + corner_se(slope) ) * TILE_HEIGHT_STEP )/2;
}
else {
height += ( ( corner_se(slope) + corner_ne(slope) ) * TILE_HEIGHT_STEP ) / 2;
}
if( height > 0 ) {
hide = true;
}
}
else {
// on slope use mean height ...
height += ( ( corner_se(slope) + corner_ne(slope) + corner_sw(slope) + corner_se(slope) ) * TILE_HEIGHT_STEP ) / 4;
}
}
image = hide ? IMG_EMPTY : desc->get_background( (bridge_desc_t::img_t)dir, get_pos().z-height/TILE_HEIGHT_STEP >= welt->get_snowline() || welt->get_climate( get_pos().get_2d() ) == arctic_climate );
}
/**
 * Shows the description for the object, e.g. as displayed in an
 * observation window.
*/
void pillar_t::show_info()
{
planquadrat_t *plan=welt->access(get_pos().get_2d());
for(unsigned i=0; i<plan->get_boden_count(); i++ ) {
grund_t *bd=plan->get_boden_bei(i);
if(bd->ist_bruecke()) {
bruecke_t* br = bd->find<bruecke_t>();
if(br && br->get_desc()==desc) {
br->show_info();
}
}
}
}
void pillar_t::rdwr(loadsave_t *file)
{
xml_tag_t p( file, "pillar_t" );
obj_t::rdwr(file);
if(file->is_saving()) {
const char *s = desc->get_name();
file->rdwr_str(s);
file->rdwr_byte(dir);
}
else {
char s[256];
file->rdwr_str(s, lengthof(s));
file->rdwr_byte(dir);
desc = bridge_builder_t::get_desc(s);
if(desc==0) {
if(strstr(s,"ail")) {
desc = bridge_builder_t::get_desc("ClassicRail");
dbg->warning("pillar_t::rdwr()","Unknown bridge %s replaced by ClassicRail",s);
}
else if(strstr(s,"oad")) {
desc = bridge_builder_t::get_desc("ClassicRoad");
dbg->warning("pillar_t::rdwr()","Unknown bridge %s replaced by ClassicRoad",s);
}
}
asymmetric = desc && desc->has_pillar_asymmetric();
if( file->is_version_less(112, 7) && env_t::pak_height_conversion_factor==2 ) {
switch(dir) {
case bridge_desc_t::OW_Pillar: dir = bridge_desc_t::OW_Pillar2; break;
case bridge_desc_t::NS_Pillar: dir = bridge_desc_t::NS_Pillar2; break;
}
}
}
}
void pillar_t::rotate90()
{
obj_t::rotate90();
// may need to hide/show asymmetric pillars
// this is done now in calc_image, which is called after karte_t::rotate anyway
// we cannot decide this here, since welt->lookup(get_pos())->get_grund_hang() cannot be called
// since we are in the middle of the rotation process
// the rotated image parameter is just one in front/back
switch(dir) {
case bridge_desc_t::NS_Pillar: dir=bridge_desc_t::OW_Pillar ; break;
case bridge_desc_t::OW_Pillar: dir=bridge_desc_t::NS_Pillar ; break;
case bridge_desc_t::NS_Pillar2: dir=bridge_desc_t::OW_Pillar2 ; break;
case bridge_desc_t::OW_Pillar2: dir=bridge_desc_t::NS_Pillar2 ; break;
}
}
| 26.606897 | 202 | 0.666407 | vinijabes |
336f1b15eae0ec9e0b76c2b529b9a3c57d7e2b3e | 2,988 | hpp | C++ | include/veriblock/signutil.hpp | overcookedpanda/alt-integration-cpp | 7932e79a77d9514ca0e0354636e77fba1845d707 | [
"MIT"
] | null | null | null | include/veriblock/signutil.hpp | overcookedpanda/alt-integration-cpp | 7932e79a77d9514ca0e0354636e77fba1845d707 | [
"MIT"
] | null | null | null | include/veriblock/signutil.hpp | overcookedpanda/alt-integration-cpp | 7932e79a77d9514ca0e0354636e77fba1845d707 | [
"MIT"
] | null | null | null | // Copyright (c) 2019-2020 Xenios SEZC
// https://www.veriblock.org
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef __SIGNUTIL__HPP__
#define __SIGNUTIL__HPP__
#include <stdexcept>
#include <vector>
#include "slice.hpp"
#include "blob.hpp"
namespace altintegration {
static const size_t PRIVATE_KEY_SIZE = 32;
static const size_t PUBLIC_KEY_COMPRESSED_SIZE = 33;
static const size_t PUBLIC_KEY_UNCOMPRESSED_SIZE = 65;
using PrivateKey = Blob<PRIVATE_KEY_SIZE>;
using PublicKey = Blob<PUBLIC_KEY_UNCOMPRESSED_SIZE>;
using Signature = std::vector<uint8_t>;
// VBK encoded keys are plain byte arrays
using PrivateKeyVbk = std::vector<uint8_t>;
using PublicKeyVbk = std::vector<uint8_t>;
/**
* Convert VBK encoded private key to the PrivateKey type.
* @param key VBK encoded private key
* @throws std::out_of_range if key is malformed
* @return PrivateKey for inner use
*/
PrivateKey privateKeyFromVbk(PrivateKeyVbk key);
/**
* Convert VBK encoded public key to the PublicKey type.
* @param key VBK encoded public key
* @throws std::out_of_range if key is malformed
* @return PublicKey for inner use
*/
PublicKey publicKeyFromVbk(PublicKeyVbk key);
/**
* Convert PublicKey type to VBK encoding.
* @param key PublicKey format public key
* @throws std::out_of_range if key is malformed
* @return byte array with VBK encoded public key
*/
PublicKeyVbk publicKeyToVbk(PublicKey key);
/**
* Derive public key from the private key.
* @param privateKey use this private key to generate public key
* @throws std::out_of_range if privateKey is malformed
* @return PublicKey type generated public key
*/
PublicKey derivePublicKey(PrivateKey privateKey);
/**
* Sign message for VBK usage.
* This function calculates SHA256 of the message and applies
* secp256k1 signature. Result is encoded in VBK format.
* @param message message to sign
* @param privateKey sign with this private key
* @throws std::out_of_range if privateKey is malformed
* @return byte array with VBK encoded signature
*/
Signature veriBlockSign(Slice<const uint8_t> message,
PrivateKey privateKey);
/**
* Verify message previously signed with veriBlockSign.
* This function calculates SHA256 of the message, decodes
* signature from VBK format and verifies signature
* using provided public key. Signature should be formed
* with secp256k1 algorithm. Public key should be derived
* from signer's private key.
* @param message message to verify with
* @param signature VBK encoded signature to verify
* @param publicKey verify signature with this public key
* @throws std::out_of_range if publicKey is malformed
* @return 1 if signature is valid, 0 - otherwise
*/
int veriBlockVerify(Slice<const uint8_t> message,
Signature signature,
PublicKey publicKey);
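// Round-trip usage sketch (illustrative only; `rawKey` and `message` are
// assumed caller-provided inputs, not part of this header):
//   PrivateKey priv = privateKeyFromVbk(rawKey);
//   PublicKey pub = derivePublicKey(priv);
//   Signature sig = veriBlockSign(message, priv);
//   bool ok = (veriBlockVerify(message, sig, pub) == 1);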
} // namespace altintegration
#endif //__SIGNUTIL__HPP__
| 32.478261 | 70 | 0.754351 | overcookedpanda |
33727831f41b819ff16ff7f11aef09eb0e2644a4 | 587 | cpp | C++ | Quick-Sort.cpp | cirno99/Algorithms | 6425b143f406693caf8f882bdfe5497c81df255a | [
"Unlicense"
] | 1,210 | 2016-08-07T13:32:12.000Z | 2022-03-21T01:01:57.000Z | Quick-Sort.cpp | NeilQingqing/Algorithms-2 | c10d4c212fa1fbf8b9fb3c781d61f41e75e96aaa | [
"Unlicense"
] | 7 | 2016-09-11T11:41:03.000Z | 2017-10-29T02:12:57.000Z | Quick-Sort.cpp | NeilQingqing/Algorithms-2 | c10d4c212fa1fbf8b9fb3c781d61f41e75e96aaa | [
"Unlicense"
] | 514 | 2016-10-17T03:52:16.000Z | 2022-03-19T16:23:33.000Z | #include <cstdio>
#include <cstdlib>
#define MAX_ELEMENT_COUNT 1000000
using namespace std;
int d[MAX_ELEMENT_COUNT];
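// Lomuto-style partition: d[r] serves as the pivot, elements <= pivot are
// swapped toward the front, the pivot ends up at index j, and the two
// halves around it are sorted recursively.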
void qsort(int l, int r)
{
if (l < r)
{
int x = d[r];
int j = l - 1;
for (int i = l; i <= r; i++)
{
if (d[i] <= x)
{
j++;
int temp = d[i];
d[i] = d[j];
d[j] = temp;
}
}
qsort(l, j - 1);
qsort(j + 1, r);
}
}
int main()
{
for (int i = 0; i < MAX_ELEMENT_COUNT; i++)
{
d[i] = rand();
}
qsort(0, MAX_ELEMENT_COUNT - 1);
for (int i = 0; i < MAX_ELEMENT_COUNT; i++)
{
printf("%d\n", d[i]);
}
return 0;
}
| 11.979592 | 44 | 0.49063 | cirno99 |
33755a50eb1476a6e77c3e9b7b8f35bbc38bf95a | 3,532 | cpp | C++ | tests/distance_test.cpp | xjdr/VectorSearch | ab2c14c8f49d840a69ee1c93ec2c334704f59153 | [
"MIT"
] | null | null | null | tests/distance_test.cpp | xjdr/VectorSearch | ab2c14c8f49d840a69ee1c93ec2c334704f59153 | [
"MIT"
] | null | null | null | tests/distance_test.cpp | xjdr/VectorSearch | ab2c14c8f49d840a69ee1c93ec2c334704f59153 | [
"MIT"
] | null | null | null | #include "gtest/gtest.h"
#include "../vsearch/distance.h"
#include "../vsearch/vsearch.pb.h"
#include <iostream>
#include <fstream>
#include <string>
#include <cstring>  // std::memcpy
#include <vector>
class DistanceTest : public testing::Test {
};
TEST(DistanceTest, DistanceTest_L2_Test_Far) {
float pX[] = {1.3, 1.3};
float pY[] = {6.6, 6.2};
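  // Expected: (6.6 - 1.3)^2 + (6.2 - 1.3)^2 = 28.09 + 24.01 = 52.1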
float r = vsearch::Distance::ComputeL2Distance(pX, pY, 2);
EXPECT_FLOAT_EQ(r, 52.1);
}
TEST(DistanceTest, DistanceTest_L2_Test_Close) {
float pX[] = {1.3, 1.3};
float pY[] = {1.29, 1.28};
float r = vsearch::Distance::ComputeL2Distance(pX, pY, 2);
EXPECT_FLOAT_EQ(r, 0.00049999903);
}
TEST(DistanceTest, DistanceTest_Cosine_Test_Far) {
float pX[] = {1.3, 1.3};
float pY[] = {6.6, 6.2};
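  // The expected value matches 1 - dot(pX, pY) = 1 - 16.64 = -15.64.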
float r = vsearch::Distance::ComputeCosineDistance(pX, pY, 2);
EXPECT_FLOAT_EQ(r, -15.639999);
}
TEST(DistanceTest, DistanceTest_Cosine_Test_Close) {
float pX[] = {1.3, 1.3};
float pY[] = {1.29, 1.28};
float r = vsearch::Distance::ComputeCosineDistance(pX, pY, 2);
EXPECT_FLOAT_EQ(r, -2.3409998);
}
TEST(DistanceTest, DistanceTest_Cosine_Test_S2D_Far) {
float pX[] = {1.3, 1.3};
float pY[] = {6.6, 6.2};
float r = vsearch::Distance::ComputeCosineDistance(pX, pY, 2);
EXPECT_FLOAT_EQ(vsearch::Distance::ConvertCosineSimilarityToDistance(r), 16.639999);
}
TEST(DistanceTest, DistanceTest_Cosine_Test_S2D_Close) {
float pX[] = {1.3, 1.3};
float pY[] = {1.29, 1.28};
float r = vsearch::Distance::ComputeCosineDistance(pX, pY, 2);
EXPECT_FLOAT_EQ(vsearch::Distance::ConvertCosineSimilarityToDistance(r), 3.3409998);
}
TEST(DistanceTest, DistanceTest_L2_Test_Tensor) {
vsearch::Index i;
std::ifstream file;
std::string s = std::string("/home/xjdr/src/xjdr/vectorsearch/tests/tensor.txt");
file.open(s.c_str(), std::fstream::in);
if(!i.ParseFromIstream(&file)) {
std::cerr << "Boooo" << std::endl;
}
  // Variable-length arrays are a compiler extension in C++; use std::vector.
  std::vector<float> pX(i.index(0).size());
  std::vector<float> pY(i.index(1).size());
  std::memcpy(pX.data(), i.index(0).data().c_str(), sizeof(float) * i.index(0).size());
  std::memcpy(pY.data(), i.index(1).data().c_str(), sizeof(float) * i.index(1).size());
  float r = vsearch::Distance::ComputeL2Distance(pX.data(), pY.data(), i.index(0).size());
EXPECT_FLOAT_EQ(r, 369.71799);
}
TEST(DistanceTest, DistanceTest_Cosine_Test_Tensor) {
vsearch::Index i;
std::ifstream file;
std::string s = std::string("/home/xjdr/src/xjdr/vectorsearch/tests/tensor.txt");
file.open(s.c_str(), std::fstream::in);
if(!i.ParseFromIstream(&file)) {
std::cerr << "Boooo" << std::endl;
}
  std::vector<float> pX(i.index(0).size());
  std::vector<float> pY(i.index(1).size());
  std::memcpy(pX.data(), i.index(0).data().c_str(), sizeof(float) * i.index(0).size());
  std::memcpy(pY.data(), i.index(1).data().c_str(), sizeof(float) * i.index(1).size());
  float r = vsearch::Distance::ComputeCosineDistance(pX.data(), pY.data(), i.index(0).size());
EXPECT_FLOAT_EQ(r, -1435.8535);
}
TEST(DistanceTest, DistanceTest_CosineDistance_Test_Tensor) {
vsearch::Index i;
std::ifstream file;
std::string s = std::string("/home/xjdr/src/xjdr/vectorsearch/tests/tensor.txt");
file.open(s.c_str(), std::fstream::in);
if (!i.ParseFromIstream(&file)) {
std::cerr << "Boooo" << std::endl;
}
  std::vector<float> pX(i.index(0).size());
  std::vector<float> pY(i.index(1).size());
  std::memcpy(pX.data(), i.index(0).data().c_str(), sizeof(float) * i.index(0).size());
  std::memcpy(pY.data(), i.index(1).data().c_str(), sizeof(float) * i.index(1).size());
  float r = vsearch::Distance::ComputeCosineDistance(pX.data(), pY.data(), i.index(0).size());
EXPECT_FLOAT_EQ(vsearch::Distance::ConvertCosineSimilarityToDistance(r), 1436.8535);
}
| 33.320755 | 86 | 0.669309 | xjdr |
337677706c3aa68ffd78265ced8be2125a993e25 | 1,892 | cpp | C++ | src/util/BalanceTests.cpp | fonero-project/fonero-core | 75d6bcadf36df2bfe10c4b7777281540566f1f34 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSL-1.0",
"BSD-3-Clause"
] | null | null | null | src/util/BalanceTests.cpp | fonero-project/fonero-core | 75d6bcadf36df2bfe10c4b7777281540566f1f34 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSL-1.0",
"BSD-3-Clause"
] | null | null | null | src/util/BalanceTests.cpp | fonero-project/fonero-core | 75d6bcadf36df2bfe10c4b7777281540566f1f34 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSL-1.0",
"BSD-3-Clause"
] | null | null | null | // Copyright 2017 Fonero Development Foundation and contributors. Licensed
// under the Apache License, Version 2.0. See the COPYING file at the root
// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
#include "lib/catch.hpp"
#include "util/types.h"
using namespace fonero;
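// Test helper: fonero::addBalance updates `balance` in place and reports
// whether the addition stayed within [0, maxBalance]; on failure the
// balance is left unchanged, and the wrapper asserts the final value
// against the expected resultBalance.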
bool
addBalance(int64_t balance, int64_t delta, int64_t resultBalance,
int64_t maxBalance = std::numeric_limits<int64_t>::max())
{
auto r = fonero::addBalance(balance, delta, maxBalance);
REQUIRE(balance == resultBalance);
return r;
}
TEST_CASE("balance", "[balance]")
{
auto const max = std::numeric_limits<int64_t>::max();
auto const min = std::numeric_limits<int64_t>::min();
REQUIRE(addBalance(0, 0, 0));
REQUIRE(addBalance(0, 10, 10));
REQUIRE(addBalance(10, 0, 10));
REQUIRE(addBalance(10, 10, 20));
REQUIRE(!addBalance(0, -5, 0));
REQUIRE(addBalance(10, -10, 0));
REQUIRE(addBalance(10, -9, 1));
REQUIRE(!addBalance(10, -11, 10));
REQUIRE(!addBalance(5, 5, 5, 9));
REQUIRE(!addBalance(0, 1, 0, 0));
REQUIRE(addBalance(0, 1, 1, max));
REQUIRE(!addBalance(0, max, 0, 0));
REQUIRE(addBalance(0, max, max, max));
REQUIRE(!addBalance(max, 1, max, 0));
REQUIRE(!addBalance(max, 1, max, max));
REQUIRE(!addBalance(max, max, max, 0));
REQUIRE(!addBalance(max, max, max, max));
REQUIRE(!addBalance(0, -1, 0, 0));
REQUIRE(!addBalance(0, -1, 0, max));
REQUIRE(!addBalance(0, min, 0, 0));
REQUIRE(!addBalance(0, -max, 0, 0));
REQUIRE(!addBalance(0, min, 0, max));
REQUIRE(!addBalance(0, -max, 0, max));
REQUIRE(!addBalance(max, -1, max, 0));
REQUIRE(addBalance(max, -1, max - 1, max));
REQUIRE(!addBalance(max, min, max, 0));
REQUIRE(addBalance(max, -max, 0, 0));
REQUIRE(!addBalance(max, min, max, max));
REQUIRE(addBalance(max, -max, 0, max));
}
| 34.4 | 74 | 0.638478 | fonero-project |
3377c4fe87b370c2b62636485d384f0a7bfa2d26 | 2,764 | hpp | C++ | include/sprout/range/adaptor/reversed.hpp | thinkoid/Sprout | a5a5944bb1779d3bb685087c58c20a4e18df2f39 | [
"BSL-1.0"
] | 4 | 2021-12-29T22:17:40.000Z | 2022-03-23T11:53:44.000Z | dsp/lib/sprout/sprout/range/adaptor/reversed.hpp | TheSlowGrowth/TapeLooper | ee8d8dccc27e39a6f6f6f435847e4d5e1b97c264 | [
"MIT"
] | 16 | 2021-10-31T21:41:09.000Z | 2022-01-22T10:51:34.000Z | include/sprout/range/adaptor/reversed.hpp | thinkoid/Sprout | a5a5944bb1779d3bb685087c58c20a4e18df2f39 | [
"BSL-1.0"
] | null | null | null | /*=============================================================================
Copyright (c) 2011-2019 Bolero MURAKAMI
https://github.com/bolero-MURAKAMI/Sprout
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/
#ifndef SPROUT_RANGE_ADAPTOR_REVERSED_HPP
#define SPROUT_RANGE_ADAPTOR_REVERSED_HPP
#include <type_traits>
#include <sprout/config.hpp>
#include <sprout/container/traits.hpp>
#include <sprout/container/functions.hpp>
#include <sprout/iterator/reverse_iterator.hpp>
#include <sprout/range/adaptor/detail/adapted_range_default.hpp>
#include <sprout/type_traits/lvalue_reference.hpp>
#include <sprout/utility/forward.hpp>
#include <sprout/utility/lvalue_forward.hpp>
namespace sprout {
namespace adaptors {
//
// reversed_range
//
template<typename Range>
class reversed_range
: public sprout::adaptors::detail::adapted_range_default<
Range,
sprout::reverse_iterator<typename sprout::container_traits<Range>::iterator>
>
{
public:
typedef sprout::adaptors::detail::adapted_range_default<
Range,
sprout::reverse_iterator<typename sprout::container_traits<Range>::iterator>
> base_type;
typedef typename base_type::range_type range_type;
typedef typename base_type::iterator iterator;
public:
SPROUT_CONSTEXPR reversed_range() SPROUT_DEFAULTED_DEFAULT_CONSTRUCTOR_DECL
reversed_range(reversed_range const&) = default;
explicit SPROUT_CONSTEXPR reversed_range(range_type& range)
: base_type(
iterator(sprout::end(range)),
iterator(sprout::begin(range))
)
{}
};
//
// reversed_forwarder
//
class reversed_forwarder {};
//
// reversed
//
namespace {
SPROUT_STATIC_CONSTEXPR sprout::adaptors::reversed_forwarder reversed = {};
} // anonymous-namespace
//
// operator|
//
template<typename Range>
inline SPROUT_CONSTEXPR sprout::adaptors::reversed_range<
typename std::remove_reference<typename sprout::lvalue_reference<Range>::type>::type
>
operator|(Range&& lhs, sprout::adaptors::reversed_forwarder) {
return sprout::adaptors::reversed_range<
typename std::remove_reference<typename sprout::lvalue_reference<Range>::type>::type
>(
sprout::lvalue_forward<Range>(lhs)
);
}
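		// Usage sketch (illustrative only, not part of the original header):
		//   sprout::array<int, 3> arr{{1, 2, 3}};
		//   for (auto x : arr | sprout::adaptors::reversed) { /* 3, 2, 1 */ }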
} // namespace adaptors
//
// container_construct_traits
//
template<typename Range>
struct container_construct_traits<sprout::adaptors::reversed_range<Range> >
: public sprout::container_construct_traits<typename sprout::adaptors::reversed_range<Range>::base_type>
{};
} // namespace sprout
#endif // #ifndef SPROUT_RANGE_ADAPTOR_REVERSED_HPP
| 31.05618 | 106 | 0.709479 | thinkoid |
33782bfb7cfbdce14ac6dbc52b9be958ce0ed8dc | 181 | cpp | C++ | VisceralCombatEngine/src/VCE/Renderer/RenderCommand.cpp | celestialkey/VisceralCombatEngine | b8021218401be5504ff07b087d9562c8c8ddbfb4 | [
"Apache-2.0"
] | null | null | null | VisceralCombatEngine/src/VCE/Renderer/RenderCommand.cpp | celestialkey/VisceralCombatEngine | b8021218401be5504ff07b087d9562c8c8ddbfb4 | [
"Apache-2.0"
] | null | null | null | VisceralCombatEngine/src/VCE/Renderer/RenderCommand.cpp | celestialkey/VisceralCombatEngine | b8021218401be5504ff07b087d9562c8c8ddbfb4 | [
"Apache-2.0"
] | null | null | null | #include "vcepch.h"
#include "RenderCommand.h"
#include "Platform/OpenGL/OpenGLRendererAPI.h"
namespace VCE {
RendererAPI* RenderCommand::s_RendererAPI = new OpenGLRendererAPI;
} | 22.625 | 67 | 0.790055 | celestialkey |
3378ca8ab9cff2eb2f2ba1cbcba5f8f9e8d4bc8d | 797 | cpp | C++ | 0139 Word Break/solution.cpp | Aden-Tao/LeetCode | c34019520b5808c4251cb76f69ca2befa820401d | [
"MIT"
] | 1 | 2019-12-19T04:13:15.000Z | 2019-12-19T04:13:15.000Z | 0139 Word Break/solution.cpp | Aden-Tao/LeetCode | c34019520b5808c4251cb76f69ca2befa820401d | [
"MIT"
] | null | null | null | 0139 Word Break/solution.cpp | Aden-Tao/LeetCode | c34019520b5808c4251cb76f69ca2befa820401d | [
"MIT"
] | null | null | null | #include<bits/stdc++.h>
using namespace std;
class Solution {
public:
bool wordBreak(string s, vector<string>& wordDict) {
unordered_set<string> dict(wordDict.begin(), wordDict.end());
        vector<bool> dp(s.size() + 1, false); // dp marks the dividers between characters; n characters have n + 1 dividers
        dp[0] = true; // dp[0] is the divider before s[0]
for (int i = 1; i <= s.size(); i ++)
{
for (int j = i; j >= 0; j --)
{
if(dict.count(s.substr(j,i - j)) && dp[j])
{
dp[i] = true;
break;
}
}
}
return dp[s.size()];
}
};
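// Illustrative trace (not in the original): for s = "leetcode" and
// dict = {"leet", "code"}: dp[0] = true, dp[4] becomes true via "leet",
// then dp[8] via "code", so wordBreak returns true.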
int main(){
string s = "leetcode";
vector<string> wordDict{"leet", "code"};
cout << Solution().wordBreak(s, wordDict) << endl;
return 0;
} | 25.709677 | 70 | 0.460477 | Aden-Tao |
337a5ca724a532b719ac2574692b24f9b9005041 | 1,236 | cpp | C++ | joins/src/generators/uniform_generator.cpp | wagjamin/HashJoins | 143ef7a90d8226ce26b8a5e2ec1be33af0f00d74 | [
"MIT"
] | 1 | 2022-01-08T05:55:07.000Z | 2022-01-08T05:55:07.000Z | joins/src/generators/uniform_generator.cpp | wagjamin/HashJoins | 143ef7a90d8226ce26b8a5e2ec1be33af0f00d74 | [
"MIT"
] | null | null | null | joins/src/generators/uniform_generator.cpp | wagjamin/HashJoins | 143ef7a90d8226ce26b8a5e2ec1be33af0f00d74 | [
"MIT"
] | 1 | 2020-05-08T03:58:17.000Z | 2020-05-08T03:58:17.000Z | //
// Benjamin Wagner 2018
//
#include "generators/uniform_generator.h"
#include <random>
namespace generators {
uniform_generator::uniform_generator(size_t min, uint64_t max, uint64_t count):
built(false), min(min), max(max), count (count), data() {
// Generate a pseudo random seed value
std::random_device rd;
seed = rd();
}
uniform_generator::uniform_generator(size_t min, uint64_t max, uint64_t count, uint64_t seed):
built(false), min(min), max(max), count (count), seed(seed), data()
{}
void uniform_generator::build() {
std::mt19937 gen(seed);
std::uniform_int_distribution<uint64_t> dis(min, max);
data.reserve(count);
built = true;
for(uint64_t i = 0; i < count; ++i){
uint64_t val = dis(gen);
data.emplace_back(val, i);
}
}
std::vector<std::tuple<uint64_t, uint64_t>> uniform_generator::get_vec_copy() {
if(!built){
throw std::logic_error("copying may not be called before distribution has been built.");
}
return data;
}
uint64_t uniform_generator::get_count() {
return count;
}
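// Usage sketch (illustrative only):
//   generators::uniform_generator gen(0, 10000, 1 << 20);
//   gen.build();
//   auto rows = gen.get_vec_copy();  // (value, row-id) tuples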
} // namespace generators
| 24.235294 | 100 | 0.604369 | wagjamin |
337b718539ddfe0f1fc0481bc8ceb14308fa1609 | 29,280 | cpp | C++ | include/aegis/impl/guild.cpp | willem640/aegis.cpp | 8b560dc641c5c941c9474bec171999ca4b2c4108 | [
"X11"
] | null | null | null | include/aegis/impl/guild.cpp | willem640/aegis.cpp | 8b560dc641c5c941c9474bec171999ca4b2c4108 | [
"X11"
] | null | null | null | include/aegis/impl/guild.cpp | willem640/aegis.cpp | 8b560dc641c5c941c9474bec171999ca4b2c4108 | [
"X11"
] | null | null | null | //
// guild.cpp
// *********
//
// Copyright (c) 2019 Sharon W (sharon at aegis dot gg)
//
// Distributed under the MIT License. (See accompanying file LICENSE)
//
#include "aegis/guild.hpp"
#include <string>
#include <memory>
#include "aegis/core.hpp"
#include "aegis/member.hpp"
#include "aegis/channel.hpp"
#include "aegis/error.hpp"
#include "aegis/shards/shard.hpp"
#include "aegis/ratelimit/ratelimit.hpp"
namespace aegis
{
using json = nlohmann::json;
AEGIS_DECL guild::guild(const int32_t _shard_id, const snowflake _id, core * _bot, asio::io_context & _io)
: shard_id(_shard_id)
, guild_id(_id)
, _bot(_bot)
, _io_context(_io)
{
}
AEGIS_DECL guild::~guild()
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
//TODO: remove guilds from members elsewhere when bot is removed from guild
// if (get_bot().get_state() != Shutdown)
// for (auto & v : members)
// v.second->leave(guild_id);
#endif
}
AEGIS_DECL core & guild::get_bot() const noexcept
{
return *_bot;
}
#if !defined(AEGIS_DISABLE_ALL_CACHE)
AEGIS_DECL member * guild::self() const
{
return get_bot().self();
}
AEGIS_DECL void guild::add_member(member * _member) noexcept
{
members.emplace(_member->_member_id, _member);
}
AEGIS_DECL void guild::remove_member(snowflake member_id) noexcept
{
auto _member = members.find(member_id);
if (_member == members.end())
{
AEGIS_DEBUG(get_bot().log, "Unable to remove member [{}] from guild [{}] (does not exist)", member_id, guild_id);
return;
}
_member->second->leave(guild_id);
members.erase(member_id);
}
AEGIS_DECL bool guild::member_has_role(snowflake member_id, snowflake role_id) const noexcept
{
std::shared_lock<shared_mutex> l(_m);
auto _member = find_member(member_id);
if (_member == nullptr)
return false;
auto & gi = _member->get_guild_info(guild_id);
auto it = std::find_if(std::begin(gi.roles), std::end(gi.roles), [&](const snowflake & id)
{
if (id == role_id)
return true;
return false;
});
if (it != std::end(gi.roles))
return true;
return false;
}
AEGIS_DECL void guild::load_presence(const json & obj) noexcept
{
json user = obj["user"];
auto _member = _find_member(user["id"]);
if (_member == nullptr)
return;
using user_status = aegis::gateway::objects::presence::user_status;
const std::string & sts = obj["status"];
if (sts == "idle")
_member->_status = user_status::Idle;
else if (sts == "dnd")
_member->_status = user_status::DoNotDisturb;
else if (sts == "online")
_member->_status = user_status::Online;
else
_member->_status = user_status::Offline;
}
AEGIS_DECL void guild::load_role(const json & obj) noexcept
{
snowflake role_id = obj["id"];
if (!roles.count(role_id))
roles.emplace(role_id, gateway::objects::role());
auto & _role = roles[role_id];
_role.role_id = role_id;
_role.hoist = obj["hoist"];
_role.managed = obj["managed"];
_role.mentionable = obj["mentionable"];
_role._permission = permission(obj["permissions"].get<uint64_t>());
_role.position = obj["position"];
if (!obj["name"].is_null()) _role.name = obj["name"].get<std::string>();
_role.color = obj["color"];
}
AEGIS_DECL const snowflake guild::get_owner() const noexcept
{
return owner_id;
}
AEGIS_DECL member * guild::find_member(snowflake member_id) const noexcept
{
std::shared_lock<shared_mutex> l(_m);
auto m = members.find(member_id);
if (m == members.end())
return nullptr;
return m->second;
}
AEGIS_DECL member * guild::_find_member(snowflake member_id) const noexcept
{
auto m = members.find(member_id);
if (m == members.end())
return nullptr;
return m->second;
}
AEGIS_DECL channel * guild::find_channel(snowflake channel_id) const noexcept
{
std::shared_lock<shared_mutex> l(_m);
auto m = channels.find(channel_id);
if (m == channels.end())
return nullptr;
return m->second;
}
AEGIS_DECL channel * guild::_find_channel(snowflake channel_id) const noexcept
{
auto m = channels.find(channel_id);
if (m == channels.end())
return nullptr;
return m->second;
}
AEGIS_DECL permission guild::get_permissions(snowflake member_id, snowflake channel_id) noexcept
{
if (!members.count(member_id) || !channels.count(channel_id))
return 0;
return get_permissions(find_member(member_id), find_channel(channel_id));
}
AEGIS_DECL permission guild::get_permissions(member * _member, channel * _channel) noexcept
{
if (_member == nullptr || _channel == nullptr)
return 0;
int64_t _base_permissions = base_permissions(_member);
return compute_overwrites(_base_permissions, *_member, *_channel);
}
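// Permission resolution mirrors Discord's documented order: start from the
// @everyone role, OR in each of the member's role permissions (the
// ADMINISTRATOR bit 0x8 short-circuits to all permissions), then
// compute_overwrites() layers on the channel overwrites for @everyone, the
// member's roles, and finally the member itself.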
AEGIS_DECL int64_t guild::base_permissions(member & _member) const noexcept
{
try
{
if (owner_id == _member._member_id)
return ~0;
auto & role_everyone = get_role(guild_id);
int64_t permissions = role_everyone._permission.get_allow_perms();
auto g = _member.get_guild_info(guild_id);
for (auto & rl : g.roles)
permissions |= get_role(rl)._permission.get_allow_perms();
if (permissions & 0x8)//admin
return ~0;
return permissions;
}
catch (std::out_of_range &)
{
return 0;
}
catch (std::exception & e)
{
_bot->log->error(fmt::format("guild::base_permissions() [{}]", e.what()));
return 0;
}
catch (...)
{
_bot->log->error("guild::base_permissions uncaught");
return 0;
}
}
AEGIS_DECL int64_t guild::compute_overwrites(int64_t _base_permissions, member & _member, channel & _channel) const noexcept
{
try
{
if (_base_permissions & 0x8)//admin
return ~0;
int64_t permissions = _base_permissions;
if (_channel.overrides.count(guild_id))
{
auto & overwrite_everyone = _channel.overrides[guild_id];
permissions &= ~overwrite_everyone.deny;
permissions |= overwrite_everyone.allow;
}
auto & overwrites = _channel.overrides;
int64_t allow = 0;
int64_t deny = 0;
auto g = _member.get_guild_info(guild_id);
for (auto & rl : g.roles)
{
if (rl == guild_id)
continue;
if (overwrites.count(rl))
{
auto & ow_role = overwrites[rl];
allow |= ow_role.allow;
deny |= ow_role.deny;
}
}
permissions &= ~deny;
permissions |= allow;
if (overwrites.count(_member._member_id))
{
auto & ow_role = overwrites[_member._member_id];
permissions &= ~ow_role.deny;
permissions |= ow_role.allow;
}
return permissions;
}
catch (std::exception &)
{
return 0;
}
}
AEGIS_DECL const gateway::objects::role & guild::get_role(int64_t r) const
{
std::shared_lock<shared_mutex> l(_m);
for (auto & kv : roles)
if (kv.second.role_id == r)
return kv.second;
throw std::out_of_range(fmt::format("G: {} role:[{}] does not exist", guild_id, r));
}
AEGIS_DECL void guild::remove_role(snowflake role_id)
{
std::unique_lock<shared_mutex> l(_m);
try
{
for (auto & kv : members)
{
auto g = kv.second->get_guild_info(guild_id);
for (auto & rl : g.roles)
{
if (rl == role_id)
{
auto it = std::find(g.roles.begin(), g.roles.end(), role_id);
if (it != g.roles.end())
g.roles.erase(it);
break;
}
}
}
roles.erase(role_id);
}
catch (std::out_of_range &)
{
}
}
AEGIS_DECL int32_t guild::get_member_count() const noexcept
{
return static_cast<int32_t>(members.size());
}
AEGIS_DECL void guild::load(const json & obj, shards::shard * _shard) noexcept
{
//uint64_t application_id = obj->get("application_id").convert<uint64_t>();
snowflake g_id = obj["id"];
shard_id = _shard->get_id();
is_init = false;
core & bot = get_bot();
try
{
json voice_states;
if (!obj["name"].is_null()) name = obj["name"].get<std::string>();
if (!obj["icon"].is_null()) icon = obj["icon"].get<std::string>();
if (!obj["splash"].is_null()) splash = obj["splash"].get<std::string>();
owner_id = obj["owner_id"];
region = obj["region"].get<std::string>();
if (!obj["afk_channel_id"].is_null()) afk_channel_id = obj["afk_channel_id"];
afk_timeout = obj["afk_timeout"];//in seconds
if (obj.count("embed_enabled") && !obj["embed_enabled"].is_null()) embed_enabled = obj["embed_enabled"];
//_guild.embed_channel_id = obj->get("embed_channel_id").convert<uint64_t>();
verification_level = obj["verification_level"];
default_message_notifications = obj["default_message_notifications"];
mfa_level = obj["mfa_level"];
if (obj.count("joined_at") && !obj["joined_at"].is_null()) joined_at = obj["joined_at"].get<std::string>();
if (obj.count("large") && !obj["large"].is_null()) large = obj["large"];
if (obj.count("unavailable") && !obj["unavailable"].is_null())
unavailable = obj["unavailable"];
else
unavailable = false;
if (obj.count("member_count") && !obj["member_count"].is_null()) member_count = obj["member_count"];
if (obj.count("voice_states") && !obj["voice_states"].is_null()) voice_states = obj["voice_states"];
if (obj.count("roles"))
{
const json & roles = obj["roles"];
for (auto & role : roles)
{
load_role(role);
}
}
if (obj.count("members"))
{
const json & members = obj["members"];
for (auto & member : members)
{
snowflake member_id = member["user"]["id"];
auto _member = bot.member_create(member_id);
std::unique_lock<shared_mutex> l(_member->mtx());
_member->load(this, member, _shard);
this->members.emplace(member_id, _member);
}
}
if (obj.count("channels"))
{
const json & channels = obj["channels"];
for (auto & channel_obj : channels)
{
snowflake channel_id = channel_obj["id"];
auto _channel = bot.channel_create(channel_id);
_channel->load_with_guild(*this, channel_obj, _shard);
_channel->guild_id = guild_id;
_channel->_guild = this;
this->channels.emplace(channel_id, _channel);
}
}
if (obj.count("presences"))
{
const json & presences = obj["presences"];
for (auto & presence : presences)
{
load_presence(presence);
}
}
if (obj.count("emojis"))
{
const json & emojis = obj["emojis"];
/*for (auto & emoji : emojis)
{
//loadEmoji(emoji, _guild);
}*/
}
if (obj.count("features"))
{
const json & features = obj["features"];
}
/*
for (auto & feature : features)
{
//??
}
for (auto & voicestate : voice_states)
{
//no voice yet
}*/
}
catch (std::exception&e)
{
spdlog::get("aegis")->error("Shard#{} : Error processing guild[{}] {}", _shard->get_id(), g_id, (std::string)e.what());
}
}
#else
AEGIS_DECL void guild::load(const json & obj, shards::shard * _shard) noexcept
{
//uint64_t application_id = obj->get("application_id").convert<uint64_t>();
snowflake g_id = obj["id"];
shard_id = _shard->get_id();
core & bot = get_bot();
try
{
if (obj.count("channels"))
{
const json & channels = obj["channels"];
for (auto & channel_obj : channels)
{
snowflake channel_id = channel_obj["id"];
auto _channel = bot.channel_create(channel_id);
_channel->load_with_guild(*this, channel_obj, _shard);
_channel->guild_id = guild_id;
_channel->_guild = this;
this->channels.emplace(channel_id, _channel);
}
}
}
catch (std::exception&e)
{
spdlog::get("aegis")->error("Shard#{} : Error processing guild[{}] {}", _shard->get_id(), g_id, (std::string)e.what());
}
}
#endif
AEGIS_DECL void guild::remove_channel(snowflake channel_id) noexcept
{
auto it = channels.find(channel_id);
if (it == channels.end())
{
AEGIS_DEBUG(get_bot().log, "Unable to remove channel [{}] from guild [{}] (does not exist)", channel_id, guild_id);
return;
}
channels.erase(it);
}
AEGIS_DECL channel * guild::get_channel(snowflake id) const noexcept
{
std::shared_lock<shared_mutex> l(_m);
auto it = channels.find(id);
if (it == channels.end())
return nullptr;
return it->second;
}
/**\todo Incomplete. Signature may change. Location may change.
*/
AEGIS_DECL aegis::future<gateway::objects::guild> guild::get_guild()
{
return _bot->get_ratelimit().post_task<gateway::objects::guild>({ fmt::format("/guilds/{}", guild_id), rest::Get });
}
AEGIS_DECL aegis::future<gateway::objects::guild> guild::modify_guild(lib::optional<std::string> name, lib::optional<std::string> voice_region, lib::optional<int> verification_level,
lib::optional<int> default_message_notifications, lib::optional<int> explicit_content_filter, lib::optional<snowflake> afk_channel_id, lib::optional<int> afk_timeout,
lib::optional<std::string> icon, lib::optional<snowflake> owner_id, lib::optional<std::string> splash)
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if ((!perms().can_manage_guild()) || (owner_id.has_value() && owner_id != self()->_member_id))
return aegis::make_exception_future<gateway::objects::guild>(error::no_permission);
#endif
json obj;
if (name.has_value())
obj["name"] = name.value();
if (voice_region.has_value())
obj["region"] = voice_region.value();
if (verification_level.has_value())
obj["verification_level"] = verification_level.value();
if (default_message_notifications.has_value())
obj["default_message_notifications"] = default_message_notifications.value();
    if (explicit_content_filter.has_value())
        obj["explicit_content_filter"] = explicit_content_filter.value();
if (afk_channel_id.has_value())
obj["afk_channel_id"] = afk_channel_id.value();
if (afk_timeout.has_value())
obj["afk_timeout"] = afk_timeout.value();
if (icon.has_value())
obj["icon"] = icon.value();
if (owner_id.has_value())//requires OWNER
obj["owner_id"] = owner_id.value();
if (splash.has_value())//VIP only
obj["splash"] = splash.value();
return _bot->get_ratelimit().post_task<gateway::objects::guild>({ fmt::format("/guilds/{}", guild_id), rest::Patch, obj.dump() });
}
AEGIS_DECL aegis::future<rest::rest_reply> guild::delete_guild()
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
//requires OWNER
if (owner_id != self()->_member_id)
return aegis::make_exception_future(error::no_permission);
#endif
return _bot->get_ratelimit().post_task({ fmt::format("/guilds/{}", guild_id), rest::Delete });
}
AEGIS_DECL aegis::future<gateway::objects::channel> guild::create_text_channel(const std::string & name,
int64_t parent_id, bool nsfw, const std::vector<gateway::objects::permission_overwrite> & permission_overwrites)
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
//requires MANAGE_CHANNELS
if (!perms().can_manage_channels())
return aegis::make_exception_future<gateway::objects::channel>(error::no_permission);
#endif
json obj;
obj["name"] = name;
obj["type"] = 0;
obj["parent_id"] = parent_id;
obj["nsfw"] = nsfw;
obj["permission_overwrites"] = json::array();
for (auto & p_ow : permission_overwrites)
{
obj["permission_overwrites"].push_back(p_ow);
}
return _bot->get_ratelimit().post_task<gateway::objects::channel>({ fmt::format("/guilds/{}/channels", guild_id), rest::Post, obj.dump() });
}
AEGIS_DECL aegis::future<gateway::objects::channel> guild::create_voice_channel(const std::string & name,
int32_t bitrate, int32_t user_limit, int64_t parent_id,
const std::vector<gateway::objects::permission_overwrite> & permission_overwrites)
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if (!perms().can_manage_channels())
return aegis::make_exception_future<gateway::objects::channel>(error::no_permission);
#endif
json obj;
obj["name"] = name;
obj["type"] = 2;
obj["bitrate"] = bitrate;
obj["user_limit"] = user_limit;
obj["parent_id"] = parent_id;
obj["permission_overwrites"] = json::array();
for (auto & p_ow : permission_overwrites)
{
obj["permission_overwrites"].push_back(p_ow);
}
return _bot->get_ratelimit().post_task<gateway::objects::channel>({ fmt::format("/guilds/{}/channels", guild_id), rest::Post, obj.dump() });
}
AEGIS_DECL aegis::future<gateway::objects::channel> guild::create_category_channel(const std::string & name,
int64_t parent_id, const std::vector<gateway::objects::permission_overwrite> & permission_overwrites)
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if (!perms().can_manage_channels())
return aegis::make_exception_future<gateway::objects::channel>(error::no_permission);
#endif
json obj;
obj["name"] = name;
obj["type"] = 4;
obj["permission_overwrites"] = json::array();
for (auto & p_ow : permission_overwrites)
{
obj["permission_overwrites"].push_back(p_ow);
}
return _bot->get_ratelimit().post_task<gateway::objects::channel>({ fmt::format("/guilds/{}/channels", guild_id), rest::Post, obj.dump() });
}
/**\todo Incomplete. Signature may change
*/
AEGIS_DECL aegis::future<rest::rest_reply> guild::modify_channel_positions()
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if (!perms().can_manage_channels())
return aegis::make_exception_future(error::no_permission);
#endif
return aegis::make_exception_future(error::not_implemented);
}
AEGIS_DECL aegis::future<gateway::objects::member> guild::modify_guild_member(snowflake user_id, lib::optional<std::string> nick, lib::optional<bool> mute,
lib::optional<bool> deaf, lib::optional<std::vector<snowflake>> roles, lib::optional<snowflake> channel_id)
{
json obj;
#if !defined(AEGIS_DISABLE_ALL_CACHE)
permission perm = perms();
if (nick.has_value())
{
if (!perm.can_manage_names())
return aegis::make_exception_future<gateway::objects::member>(error::no_permission);
obj["nick"] = nick.value();//requires MANAGE_NICKNAMES
}
if (mute.has_value())
{
if (!perm.can_voice_mute())
return aegis::make_exception_future<gateway::objects::member>(error::no_permission);
obj["mute"] = mute.value();//requires MUTE_MEMBERS
}
if (deaf.has_value())
{
if (!perm.can_voice_deafen())
return aegis::make_exception_future<gateway::objects::member>(error::no_permission);
obj["deaf"] = deaf.value();//requires DEAFEN_MEMBERS
}
if (roles.has_value())
{
if (!perm.can_manage_roles())
return aegis::make_exception_future<gateway::objects::member>(error::no_permission);
obj["roles"] = roles.value();//requires MANAGE_ROLES
}
if (channel_id.has_value())
{
//TODO: This needs to calculate whether or not the bot has access to the voice channel as well
if (!perm.can_voice_move())
return aegis::make_exception_future<gateway::objects::member>(error::no_permission);
obj["channel_id"] = channel_id.value();//requires MOVE_MEMBERS
}
#else
if (nick.has_value())
obj["nick"] = nick.value();//requires MANAGE_NICKNAMES
if (mute.has_value())
obj["mute"] = mute.value();//requires MUTE_MEMBERS
if (deaf.has_value())
obj["deaf"] = deaf.value();//requires DEAFEN_MEMBERS
if (roles.has_value())
obj["roles"] = roles.value();//requires MANAGE_ROLES
if (channel_id.has_value())
obj["channel_id"] = channel_id.value();//requires MOVE_MEMBERS
#endif
return _bot->get_ratelimit().post_task<gateway::objects::member>({ fmt::format("/guilds/{}/members/{}", guild_id, user_id), rest::Patch, obj.dump() });
}
AEGIS_DECL aegis::future<rest::rest_reply> guild::modify_my_nick(const std::string & newname)
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if (!perms().can_change_name())
return aegis::make_exception_future(error::no_permission);
#endif
json obj = { { "nick", newname } };
return _bot->get_ratelimit().post_task({ fmt::format("/guilds/{}/members/@me/nick", guild_id), rest::Patch, obj.dump() });
}
AEGIS_DECL aegis::future<rest::rest_reply> guild::add_guild_member_role(snowflake user_id, snowflake role_id)
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if (!perms().can_manage_roles())
return aegis::make_exception_future(error::no_permission);
#endif
return _bot->get_ratelimit().post_task({ fmt::format("/guilds/{}/members/{}/roles/{}", guild_id, user_id, role_id), rest::Put });
}
AEGIS_DECL aegis::future<rest::rest_reply> guild::remove_guild_member_role(snowflake user_id, snowflake role_id)
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if (!perms().can_manage_roles())
return aegis::make_exception_future(error::no_permission);
#endif
return _bot->get_ratelimit().post_task({ fmt::format("/guilds/{}/members/{}/roles/{}", guild_id, user_id, role_id), rest::Delete });
}
AEGIS_DECL aegis::future<rest::rest_reply> guild::remove_guild_member(snowflake user_id)
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if (!perms().can_kick())
return aegis::make_exception_future(error::no_permission);
#endif
return _bot->get_ratelimit().post_task({ fmt::format("/guilds/{}/members/{}", guild_id, user_id), rest::Delete });
}
AEGIS_DECL aegis::future<rest::rest_reply> guild::create_guild_ban(snowflake user_id, int8_t delete_message_days, const std::string & reason)
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if (!perms().can_ban())
return aegis::make_exception_future(error::no_permission);
#endif
std::string query_params = fmt::format("?delete-message-days={}", delete_message_days);
if (!reason.empty())
query_params += fmt::format("&reason={}", utility::url_encode(reason));
return _bot->get_ratelimit().post_task({ fmt::format("/guilds/{}/bans/{}", guild_id, user_id), rest::Put, {}, {}, {}, {}, query_params });
}
AEGIS_DECL aegis::future<rest::rest_reply> guild::remove_guild_ban(snowflake user_id)
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if (!perms().can_ban())
return aegis::make_exception_future(error::no_permission);
#endif
return _bot->get_ratelimit().post_task({ fmt::format("/guilds/{}/bans/{}", guild_id, user_id), rest::Delete });
}
AEGIS_DECL aegis::future<gateway::objects::role> guild::create_guild_role(const std::string & name, permission _perms, int32_t color, bool hoist, bool mentionable)
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if (!perms().can_manage_roles())
return aegis::make_exception_future<gateway::objects::role>(error::no_permission);
#endif
json obj = { { "name", name },{ "permissions", _perms },{ "color", color },{ "hoist", hoist },{ "mentionable", mentionable } };
return _bot->get_ratelimit().post_task<gateway::objects::role>({ fmt::format("/guilds/{}/roles", guild_id), rest::Post, obj.dump() });
}
AEGIS_DECL aegis::future<rest::rest_reply> guild::modify_guild_role_positions(snowflake role_id, int16_t position)
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if (!perms().can_manage_roles())
return aegis::make_exception_future(error::no_permission);
#endif
json obj = { { "id", role_id },{ "position", position } };
return _bot->get_ratelimit().post_task({ fmt::format("/guilds/{}/roles", guild_id), rest::Patch, obj.dump() });
}
AEGIS_DECL aegis::future<gateway::objects::role> guild::modify_guild_role(snowflake role_id, const std::string & name, permission _perms, int32_t color, bool hoist, bool mentionable)
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if (!perms().can_manage_roles())
return aegis::make_exception_future<gateway::objects::role>(error::no_permission);
#endif
json obj = { { "name", name },{ "permissions", _perms },{ "color", color },{ "hoist", hoist },{ "mentionable", mentionable } };
    return _bot->get_ratelimit().post_task<gateway::objects::role>({ fmt::format("/guilds/{}/roles/{}", guild_id, role_id), rest::Patch, obj.dump() });
}
AEGIS_DECL aegis::future<rest::rest_reply> guild::delete_guild_role(snowflake role_id)
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if (!perms().can_manage_roles())
return aegis::make_exception_future(error::no_permission);
#endif
return _bot->get_ratelimit().post_task({ fmt::format("/guilds/{}/roles/{}", guild_id, role_id), rest::Delete });
}
/**\todo Incomplete. Signature may change
*/
AEGIS_DECL aegis::future<rest::rest_reply> guild::get_guild_prune_count(int16_t days)
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if (!perms().can_kick())
return aegis::make_exception_future(error::no_permission);
#endif
return aegis::make_exception_future(error::not_implemented);
}
/**\todo Incomplete. Signature may change
*/
AEGIS_DECL aegis::future<rest::rest_reply> guild::begin_guild_prune(int16_t days)
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if (!perms().can_kick())
return aegis::make_exception_future(error::no_permission);
#endif
return aegis::make_exception_future(error::not_implemented);
}
/**\todo Incomplete. Signature may change
*/
AEGIS_DECL aegis::future<rest::rest_reply> guild::get_guild_invites()
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if (!perms().can_manage_guild())
return aegis::make_exception_future(error::no_permission);
#endif
return aegis::make_exception_future(error::not_implemented);
}
/**\todo Incomplete. Signature may change
*/
AEGIS_DECL aegis::future<rest::rest_reply> guild::get_guild_integrations()
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if (!perms().can_manage_guild())
return aegis::make_exception_future(error::no_permission);
#endif
return aegis::make_exception_future(error::not_implemented);
}
/**\todo Incomplete. Signature may change
*/
AEGIS_DECL aegis::future<rest::rest_reply> guild::create_guild_integration()
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if (!perms().can_manage_guild())
return aegis::make_exception_future(error::no_permission);
#endif
return aegis::make_exception_future(error::not_implemented);
}
/**\todo Incomplete. Signature may change
*/
AEGIS_DECL aegis::future<rest::rest_reply> guild::modify_guild_integration()
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if (!perms().can_manage_guild())
return aegis::make_exception_future(error::no_permission);
#endif
return aegis::make_exception_future(error::not_implemented);
}
/**\todo Incomplete. Signature may change
*/
AEGIS_DECL aegis::future<rest::rest_reply> guild::delete_guild_integration()
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if (!perms().can_manage_guild())
return aegis::make_exception_future(error::no_permission);
#endif
return aegis::make_exception_future(error::not_implemented);
}
/**\todo Incomplete. Signature may change
*/
AEGIS_DECL aegis::future<rest::rest_reply> guild::sync_guild_integration()
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if (!perms().can_manage_guild())
return aegis::make_exception_future(error::no_permission);
#endif
return aegis::make_exception_future(error::not_implemented);
}
/**\todo Incomplete. Signature may change
*/
AEGIS_DECL aegis::future<rest::rest_reply> guild::get_guild_embed()
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if (!perms().can_manage_guild())
return aegis::make_exception_future(error::no_permission);
#endif
return aegis::make_exception_future(error::not_implemented);
}
/**\todo Incomplete. Signature may change
*/
AEGIS_DECL aegis::future<rest::rest_reply> guild::modify_guild_embed()
{
#if !defined(AEGIS_DISABLE_ALL_CACHE)
if (!perms().can_manage_guild())
return aegis::make_exception_future(error::no_permission);
#endif
return aegis::make_exception_future(error::not_implemented);
}
AEGIS_DECL aegis::future<rest::rest_reply> guild::leave()
{
return _bot->get_ratelimit().post_task({ fmt::format("/users/@me/guilds/{0}", guild_id), rest::Delete });
}
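// --- Usage sketch (illustrative only; assumes aegis::future exposes then(),
// as elsewhere in this library) ---
//
//     _guild->create_text_channel("general")
//         .then([](gateway::objects::channel ch) { /* use the new channel */ });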
}
| 32.533333 | 186 | 0.645048 | willem640 |
337d99d360949b5d94b5b3af654da0c7c8ce1e4b | 1,597 | hpp | C++ | src/core/level/graph.hpp | guillaume-haerinck/imac-tower-defense | 365a32642ea0d3ad8b2b7d63347d585c44d9f670 | [
"MIT"
] | 44 | 2019-06-06T21:33:30.000Z | 2022-03-26T06:18:23.000Z | src/core/level/graph.hpp | guillaume-haerinck/imac-tower-defense | 365a32642ea0d3ad8b2b7d63347d585c44d9f670 | [
"MIT"
] | 1 | 2019-09-27T12:04:52.000Z | 2019-09-29T13:30:42.000Z | src/core/level/graph.hpp | guillaume-haerinck/imac-tower-defense | 365a32642ea0d3ad8b2b7d63347d585c44d9f670 | [
"MIT"
] | 8 | 2019-07-26T16:44:26.000Z | 2020-11-24T17:56:18.000Z | #pragma once
#include <vector>
#include <spdlog/spdlog.h>
struct graphNode {
graphNode(int x, int y) : x(x), y(y) {}
int x;
int y;
};
struct graphEdge {
graphEdge(int neighbourIndex, float dist) : neighbourIndex(neighbourIndex), dist(dist) {}
int neighbourIndex;
float dist;
};
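// Directed weighted graph: nodes are addressed by their index in the node
// vector, and each node owns an adjacency list of graphEdge entries carrying
// the neighbour's index and the edge weight/distance.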
class Graph {
public:
Graph();
~Graph();
//Getters
int getNodesCount();
graphNode getNode(int nodeIndex);
int nodeIndex(int x, int y);
std::vector<int> getStartNodes();
int getStartNodeRandom();
int getEndNode();
std::vector<graphEdge>* getNeighbours(int nodeIndex);
//Setters
int addNode(int x, int y); //Returns the index at which the node was inserted
void addStartNode(int nodeIndex);
void addEndNode(int nodeIndex);
void addNeighbourTo(int node, int neighbour, float dist);
void addNeighbourTo(int node, int neighbour, float dist, bool checkRepetitions);
void addNeighbouring(int node1, int node2, float dist);
void addNeighbouring(int node1, int node2, float dist, bool checkRepetitions);
bool isNeighbourOf(int node, int potentialNeighbour);
float distEstimator(int node1);
float distEstimator(int node1, int node2);
    int pickNextNode(int currentNode, int previousNode); //WARNING: should only be used if it is a stochastic graph! (i.e. weights of edges starting from a given node always add up to 1)
//std::vector<int> trajectory(int startNode, int endNode);
private:
std::vector<graphNode> nodes;
std::vector<int> startNodeIndexes;
int endNodeIndex;
std::vector<std::vector<graphEdge>*> adjencyLists;
//void addNodeToList(graphEdgeList** list, int value, float dist);
}; | 32.591837 | 186 | 0.745773 | guillaume-haerinck |
33815308bd8d30f2771ee2020bb087d87e69c96d | 22,259 | cpp | C++ | extras/jbcoin-libpp/extras/jbcoind/src/jbcoin/json/impl/json_reader.cpp | trongnmchainos/validator-keys-tool | cae131d6ab46051c0f47509b79b6efc47a70eec0 | [
"BSL-1.0"
] | 2 | 2020-03-03T12:46:29.000Z | 2020-11-14T09:52:14.000Z | extras/jbcoin-libpp/extras/jbcoind/src/jbcoin/json/impl/json_reader.cpp | trongnmchainos/validator-keys-tool | cae131d6ab46051c0f47509b79b6efc47a70eec0 | [
"BSL-1.0"
] | null | null | null | extras/jbcoin-libpp/extras/jbcoind/src/jbcoin/json/impl/json_reader.cpp | trongnmchainos/validator-keys-tool | cae131d6ab46051c0f47509b79b6efc47a70eec0 | [
"BSL-1.0"
] | 1 | 2020-03-03T12:46:30.000Z | 2020-03-03T12:46:30.000Z | //------------------------------------------------------------------------------
/*
This file is part of jbcoind: https://github.com/jbcoin/jbcoind
Copyright (c) 2012, 2013 Jbcoin Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <BeastConfig.h>
#include <jbcoin/basics/contract.h>
#include <jbcoin/json/json_reader.h>
#include <algorithm>
#include <string>
#include <cctype>
namespace Json
{
// Implementation of class Reader
// ////////////////////////////////
static
std::string
codePointToUTF8 (unsigned int cp)
{
std::string result;
// based on description from http://en.wikipedia.org/wiki/UTF-8
if (cp <= 0x7f)
{
result.resize (1);
result[0] = static_cast<char> (cp);
}
else if (cp <= 0x7FF)
{
result.resize (2);
result[1] = static_cast<char> (0x80 | (0x3f & cp));
result[0] = static_cast<char> (0xC0 | (0x1f & (cp >> 6)));
}
else if (cp <= 0xFFFF)
{
result.resize (3);
result[2] = static_cast<char> (0x80 | (0x3f & cp));
result[1] = 0x80 | static_cast<char> ((0x3f & (cp >> 6)));
result[0] = 0xE0 | static_cast<char> ((0xf & (cp >> 12)));
}
else if (cp <= 0x10FFFF)
{
result.resize (4);
result[3] = static_cast<char> (0x80 | (0x3f & cp));
result[2] = static_cast<char> (0x80 | (0x3f & (cp >> 6)));
result[1] = static_cast<char> (0x80 | (0x3f & (cp >> 12)));
result[0] = static_cast<char> (0xF0 | (0x7 & (cp >> 18)));
}
return result;
}
// Class Reader
// //////////////////////////////////////////////////////////////////
Reader::Reader ()
{
}
bool
Reader::parse ( std::string const& document,
Value& root)
{
document_ = document;
const char* begin = document_.c_str ();
const char* end = begin + document_.length ();
return parse ( begin, end, root );
}
bool
Reader::parse ( std::istream& sin,
Value& root)
{
//std::istream_iterator<char> begin(sin);
//std::istream_iterator<char> end;
// Those would allow streamed input from a file, if parse() were a
// template function.
// Since std::string is reference-counted, this at least does not
// create an extra copy.
std::string doc;
std::getline (sin, doc, (char)EOF);
return parse ( doc, root );
}
bool
Reader::parse ( const char* beginDoc, const char* endDoc,
Value& root)
{
begin_ = beginDoc;
end_ = endDoc;
current_ = begin_;
lastValueEnd_ = 0;
lastValue_ = 0;
errors_.clear ();
while ( !nodes_.empty () )
nodes_.pop ();
nodes_.push ( &root );
bool successful = readValue ();
Token token;
skipCommentTokens ( token );
if ( !root.isArray () && !root.isObject () )
{
// Set error location to start of doc, ideally should be first token found in doc
token.type_ = tokenError;
token.start_ = beginDoc;
token.end_ = endDoc;
addError ( "A valid JSON document must be either an array or an object value.",
token );
return false;
}
return successful;
}
bool
Reader::readValue ()
{
Token token;
skipCommentTokens ( token );
bool successful = true;
switch ( token.type_ )
{
case tokenObjectBegin:
successful = readObject ( token );
break;
case tokenArrayBegin:
successful = readArray ( token );
break;
case tokenInteger:
successful = decodeNumber ( token );
break;
case tokenDouble:
successful = decodeDouble ( token );
break;
case tokenString:
successful = decodeString ( token );
break;
case tokenTrue:
currentValue () = true;
break;
case tokenFalse:
currentValue () = false;
break;
case tokenNull:
currentValue () = Value ();
break;
default:
return addError ( "Syntax error: value, object or array expected.", token );
}
return successful;
}
void
Reader::skipCommentTokens ( Token& token )
{
do
{
readToken ( token );
}
while ( token.type_ == tokenComment );
}
bool
Reader::expectToken ( TokenType type, Token& token, const char* message )
{
readToken ( token );
if ( token.type_ != type )
return addError ( message, token );
return true;
}
bool
Reader::readToken ( Token& token )
{
skipSpaces ();
token.start_ = current_;
Char c = getNextChar ();
bool ok = true;
switch ( c )
{
case '{':
token.type_ = tokenObjectBegin;
break;
case '}':
token.type_ = tokenObjectEnd;
break;
case '[':
token.type_ = tokenArrayBegin;
break;
case ']':
token.type_ = tokenArrayEnd;
break;
case '"':
token.type_ = tokenString;
ok = readString ();
break;
case '/':
token.type_ = tokenComment;
ok = readComment ();
break;
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
case '-':
token.type_ = readNumber ();
break;
case 't':
token.type_ = tokenTrue;
ok = match ( "rue", 3 );
break;
case 'f':
token.type_ = tokenFalse;
ok = match ( "alse", 4 );
break;
case 'n':
token.type_ = tokenNull;
ok = match ( "ull", 3 );
break;
case ',':
token.type_ = tokenArraySeparator;
break;
case ':':
token.type_ = tokenMemberSeparator;
break;
case 0:
token.type_ = tokenEndOfStream;
break;
default:
ok = false;
break;
}
if ( !ok )
token.type_ = tokenError;
token.end_ = current_;
return true;
}
void
Reader::skipSpaces ()
{
while ( current_ != end_ )
{
Char c = *current_;
if ( c == ' ' || c == '\t' || c == '\r' || c == '\n' )
++current_;
else
break;
}
}
bool
Reader::match ( Location pattern,
int patternLength )
{
if ( end_ - current_ < patternLength )
return false;
int index = patternLength;
while ( index-- )
if ( current_[index] != pattern[index] )
return false;
current_ += patternLength;
return true;
}
bool
Reader::readComment ()
{
Char c = getNextChar ();
if ( c == '*' )
return readCStyleComment ();
if ( c == '/' )
return readCppStyleComment ();
return false;
}
bool
Reader::readCStyleComment ()
{
while ( current_ != end_ )
{
Char c = getNextChar ();
if ( c == '*' && *current_ == '/' )
break;
}
return getNextChar () == '/';
}
bool
Reader::readCppStyleComment ()
{
while ( current_ != end_ )
{
Char c = getNextChar ();
if ( c == '\r' || c == '\n' )
break;
}
return true;
}
Reader::TokenType
Reader::readNumber ()
{
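    // Consume a numeric literal: plain digit runs stay an integer token, while
    // a decimal point, exponent marker, or embedded sign promotes it to double.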
static char const extended_tokens[] = { '.', 'e', 'E', '+', '-' };
TokenType type = tokenInteger;
if ( current_ != end_ )
{
if (*current_ == '-')
++current_;
while ( current_ != end_ )
{
if (!std::isdigit (*current_))
{
auto ret = std::find (std::begin (extended_tokens),
std::end (extended_tokens), *current_);
if (ret == std::end (extended_tokens))
break;
type = tokenDouble;
}
++current_;
}
}
return type;
}
bool
Reader::readString ()
{
Char c = 0;
while ( current_ != end_ )
{
c = getNextChar ();
if ( c == '\\' )
getNextChar ();
else if ( c == '"' )
break;
}
return c == '"';
}
bool
Reader::readObject ( Token& tokenStart )
{
Token tokenName;
std::string name;
currentValue () = Value ( objectValue );
while ( readToken ( tokenName ) )
{
bool initialTokenOk = true;
while ( tokenName.type_ == tokenComment && initialTokenOk )
initialTokenOk = readToken ( tokenName );
if ( !initialTokenOk )
break;
if ( tokenName.type_ == tokenObjectEnd && name.empty () ) // empty object
return true;
if ( tokenName.type_ != tokenString )
break;
name = "";
if ( !decodeString ( tokenName, name ) )
return recoverFromError ( tokenObjectEnd );
Token colon;
if ( !readToken ( colon ) || colon.type_ != tokenMemberSeparator )
{
return addErrorAndRecover ( "Missing ':' after object member name",
colon,
tokenObjectEnd );
}
// Reject duplicate names
if (currentValue ().isMember (name))
return addError ( "Key '" + name + "' appears twice.", tokenName );
Value& value = currentValue ()[ name ];
nodes_.push ( &value );
bool ok = readValue ();
nodes_.pop ();
if ( !ok ) // error already set
return recoverFromError ( tokenObjectEnd );
Token comma;
if ( !readToken ( comma )
|| ( comma.type_ != tokenObjectEnd &&
comma.type_ != tokenArraySeparator &&
comma.type_ != tokenComment ) )
{
return addErrorAndRecover ( "Missing ',' or '}' in object declaration",
comma,
tokenObjectEnd );
}
bool finalizeTokenOk = true;
while ( comma.type_ == tokenComment &&
finalizeTokenOk )
finalizeTokenOk = readToken ( comma );
if ( comma.type_ == tokenObjectEnd )
return true;
}
return addErrorAndRecover ( "Missing '}' or object member name",
tokenName,
tokenObjectEnd );
}
bool
Reader::readArray ( Token& tokenStart )
{
currentValue () = Value ( arrayValue );
skipSpaces ();
if ( *current_ == ']' ) // empty array
{
Token endArray;
readToken ( endArray );
return true;
}
int index = 0;
while ( true )
{
Value& value = currentValue ()[ index++ ];
nodes_.push ( &value );
bool ok = readValue ();
nodes_.pop ();
if ( !ok ) // error already set
return recoverFromError ( tokenArrayEnd );
Token token;
// Accept Comment after last item in the array.
ok = readToken ( token );
while ( token.type_ == tokenComment && ok )
{
ok = readToken ( token );
}
bool badTokenType = ( token.type_ != tokenArraySeparator &&
token.type_ != tokenArrayEnd );
if ( !ok || badTokenType )
{
return addErrorAndRecover ( "Missing ',' or ']' in array declaration",
token,
tokenArrayEnd );
}
if ( token.type_ == tokenArrayEnd )
break;
}
return true;
}
bool
Reader::decodeNumber ( Token& token )
{
Location current = token.start_;
bool isNegative = *current == '-';
if ( isNegative )
++current;
if (current == token.end_)
{
return addError ( "'" + std::string ( token.start_, token.end_ ) +
"' is not a valid number.", token );
}
// The existing Json integers are 32-bit so using a 64-bit value here avoids
// overflows in the conversion code below.
std::int64_t value = 0;
static_assert(sizeof(value) > sizeof(Value::maxUInt),
"The JSON integer overflow logic will need to be reworked.");
while (current < token.end_ && (value <= Value::maxUInt))
{
Char c = *current++;
if ( c < '0' || c > '9' )
{
return addError ( "'" + std::string ( token.start_, token.end_ ) +
"' is not a number.", token );
}
value = (value * 10) + (c - '0');
}
// More tokens left -> input is larger than largest possible return value
if (current != token.end_)
{
return addError ( "'" + std::string ( token.start_, token.end_ ) +
"' exceeds the allowable range.", token );
}
if ( isNegative )
{
value = -value;
if (value < Value::minInt || value > Value::maxInt)
{
return addError ( "'" + std::string ( token.start_, token.end_ ) +
"' exceeds the allowable range.", token );
}
currentValue () = static_cast<Value::Int>( value );
}
else
{
if (value > Value::maxUInt)
{
return addError ( "'" + std::string ( token.start_, token.end_ ) +
"' exceeds the allowable range.", token );
}
// If it's representable as a signed integer, construct it as one.
if ( value <= Value::maxInt )
currentValue () = static_cast<Value::Int>( value );
else
currentValue () = static_cast<Value::UInt>( value );
}
return true;
}
bool
Reader::decodeDouble( Token &token )
{
double value = 0;
const int bufferSize = 32;
int count;
int length = int(token.end_ - token.start_);
// Sanity check to avoid buffer overflow exploits.
if (length < 0) {
return addError( "Unable to parse token length", token );
}
// Avoid using a string constant for the format control string given to
// sscanf, as this can cause hard to debug crashes on OS X. See here for more
// info:
//
// http://developer.apple.com/library/mac/#DOCUMENTATION/DeveloperTools/gcc-4.0.1/gcc/Incompatibilities.html
char format[] = "%lf";
if ( length <= bufferSize )
{
Char buffer[bufferSize+1];
memcpy( buffer, token.start_, length );
buffer[length] = 0;
count = sscanf( buffer, format, &value );
}
else
{
std::string buffer( token.start_, token.end_ );
count = sscanf( buffer.c_str(), format, &value );
}
if ( count != 1 )
return addError( "'" + std::string( token.start_, token.end_ ) + "' is not a number.", token );
currentValue() = value;
return true;
}
bool
Reader::decodeString ( Token& token )
{
std::string decoded;
if ( !decodeString ( token, decoded ) )
return false;
currentValue () = decoded;
return true;
}
bool
Reader::decodeString ( Token& token, std::string& decoded )
{
decoded.reserve ( token.end_ - token.start_ - 2 );
Location current = token.start_ + 1; // skip '"'
Location end = token.end_ - 1; // do not include '"'
while ( current != end )
{
Char c = *current++;
if ( c == '"' )
break;
else if ( c == '\\' )
{
if ( current == end )
return addError ( "Empty escape sequence in string", token, current );
Char escape = *current++;
switch ( escape )
{
case '"':
decoded += '"';
break;
case '/':
decoded += '/';
break;
case '\\':
decoded += '\\';
break;
case 'b':
decoded += '\b';
break;
case 'f':
decoded += '\f';
break;
case 'n':
decoded += '\n';
break;
case 'r':
decoded += '\r';
break;
case 't':
decoded += '\t';
break;
case 'u':
{
unsigned int unicode;
if ( !decodeUnicodeCodePoint ( token, current, end, unicode ) )
return false;
decoded += codePointToUTF8 (unicode);
}
break;
default:
return addError ( "Bad escape sequence in string", token, current );
}
}
else
{
decoded += c;
}
}
return true;
}
bool
Reader::decodeUnicodeCodePoint ( Token& token,
Location& current,
Location end,
unsigned int& unicode )
{
if ( !decodeUnicodeEscapeSequence ( token, current, end, unicode ) )
return false;
if (unicode >= 0xD800 && unicode <= 0xDBFF)
{
// surrogate pairs
if (end - current < 6)
return addError ( "additional six characters expected to parse unicode surrogate pair.", token, current );
unsigned int surrogatePair;
if (* (current++) == '\\' && * (current++) == 'u')
{
if (decodeUnicodeEscapeSequence ( token, current, end, surrogatePair ))
{
unicode = 0x10000 + ((unicode & 0x3FF) << 10) + (surrogatePair & 0x3FF);
}
else
return false;
}
else
return addError ( "expecting another \\u token to begin the second half of a unicode surrogate pair", token, current );
}
return true;
}
bool
Reader::decodeUnicodeEscapeSequence ( Token& token,
Location& current,
Location end,
unsigned int& unicode )
{
if ( end - current < 4 )
return addError ( "Bad unicode escape sequence in string: four digits expected.", token, current );
unicode = 0;
for ( int index = 0; index < 4; ++index )
{
Char c = *current++;
unicode *= 16;
if ( c >= '0' && c <= '9' )
unicode += c - '0';
else if ( c >= 'a' && c <= 'f' )
unicode += c - 'a' + 10;
else if ( c >= 'A' && c <= 'F' )
unicode += c - 'A' + 10;
else
return addError ( "Bad unicode escape sequence in string: hexadecimal digit expected.", token, current );
}
return true;
}
bool
Reader::addError ( std::string const& message,
Token& token,
Location extra )
{
ErrorInfo info;
info.token_ = token;
info.message_ = message;
info.extra_ = extra;
errors_.push_back ( info );
return false;
}
bool
Reader::recoverFromError ( TokenType skipUntilToken )
{
int errorCount = int (errors_.size ());
Token skip;
while ( true )
{
if ( !readToken (skip) )
errors_.resize ( errorCount ); // discard errors caused by recovery
if ( skip.type_ == skipUntilToken || skip.type_ == tokenEndOfStream )
break;
}
errors_.resize ( errorCount );
return false;
}
bool
Reader::addErrorAndRecover ( std::string const& message,
Token& token,
TokenType skipUntilToken )
{
addError ( message, token );
return recoverFromError ( skipUntilToken );
}
Value&
Reader::currentValue ()
{
return * (nodes_.top ());
}
Reader::Char
Reader::getNextChar ()
{
if ( current_ == end_ )
return 0;
return *current_++;
}
void
Reader::getLocationLineAndColumn ( Location location,
int& line,
int& column ) const
{
Location current = begin_;
Location lastLineStart = current;
line = 0;
while ( current < location && current != end_ )
{
Char c = *current++;
if ( c == '\r' )
{
if ( *current == '\n' )
++current;
lastLineStart = current;
++line;
}
else if ( c == '\n' )
{
lastLineStart = current;
++line;
}
}
// column & line start at 1
column = int (location - lastLineStart) + 1;
++line;
}
std::string
Reader::getLocationLineAndColumn ( Location location ) const
{
int line, column;
getLocationLineAndColumn ( location, line, column );
char buffer[18 + 16 + 16 + 1];
sprintf ( buffer, "Line %d, Column %d", line, column );
return buffer;
}
std::string
Reader::getFormatedErrorMessages () const
{
std::string formattedMessage;
for ( Errors::const_iterator itError = errors_.begin ();
itError != errors_.end ();
++itError )
{
const ErrorInfo& error = *itError;
formattedMessage += "* " + getLocationLineAndColumn ( error.token_.start_ ) + "\n";
formattedMessage += " " + error.message_ + "\n";
if ( error.extra_ )
formattedMessage += "See " + getLocationLineAndColumn ( error.extra_ ) + " for detail.\n";
}
return formattedMessage;
}
std::istream& operator>> ( std::istream& sin, Value& root )
{
Json::Reader reader;
bool ok = reader.parse (sin, root);
//JSON_ASSERT( ok );
if (! ok)
jbcoin::Throw<std::runtime_error> (reader.getFormatedErrorMessages ());
return sin;
}
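// --- Usage sketch (illustrative only, not part of the original source) ---
//
//     Json::Value root;
//     Json::Reader reader;
//     if ( !reader.parse ( "{ \"key\": 42 }", root ) )
//         std::cerr << reader.getFormatedErrorMessages ();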
} // namespace Json
| 23.138254 | 131 | 0.508334 | trongnmchainos |
3384423eb091f293f99d66f52a09831939616fde | 32,676 | cpp | C++ | applications/mne_scan/plugins/ssvepbci/ssvepbci.cpp | 13grife37/mne-cpp-swpold | 9b89b3d7fe273d9f4ffd69b504e17f284eaba263 | [
"BSD-3-Clause"
] | 2 | 2017-04-20T20:21:16.000Z | 2017-04-26T16:30:25.000Z | applications/mne_scan/plugins/ssvepbci/ssvepbci.cpp | 13grife37/mne-cpp-swpold | 9b89b3d7fe273d9f4ffd69b504e17f284eaba263 | [
"BSD-3-Clause"
] | null | null | null | applications/mne_scan/plugins/ssvepbci/ssvepbci.cpp | 13grife37/mne-cpp-swpold | 9b89b3d7fe273d9f4ffd69b504e17f284eaba263 | [
"BSD-3-Clause"
] | 1 | 2017-04-23T15:55:31.000Z | 2017-04-23T15:55:31.000Z | //=============================================================================================================
/**
* @file ssvepbci.cpp
* @author Viktor Klüber <[email protected]>;
* Lorenz Esch <[email protected]>;
* Matti Hamalainen <[email protected]>
* @version 1.0
* @date May, 2016
*
* @section LICENSE
*
* Copyright (C) 2016, Lorenz Esch and Matti Hamalainen. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that
* the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of conditions and the
* following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
* the following disclaimer in the documentation and/or other materials provided with the distribution.
* * Neither the name of the Massachusetts General Hospital nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL MASSACHUSETTS GENERAL HOSPITAL BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*
* @brief Contains the implementation of the BCI class.
*
*/
//*************************************************************************************************************
//=============================================================================================================
// INCLUDES
//=============================================================================================================
#include "ssvepbci.h"
#include <iostream>
#include <Eigen/Dense>
#include <utils/ioutils.h>
//*************************************************************************************************************
//=============================================================================================================
// QT INCLUDES
//=============================================================================================================
#include <QtCore/QtPlugin>
#include <QtCore/QTextStream>
#include <QDebug>
//*************************************************************************************************************
//=============================================================================================================
// USED NAMESPACES
//=============================================================================================================
using namespace SSVEPBCIPLUGIN;
using namespace SCSHAREDLIB;
using namespace SCMEASLIB;
using namespace IOBUFFER;
using namespace FSLIB;
using namespace std;
//*************************************************************************************************************
//=============================================================================================================
// DEFINE MEMBER METHODS
//=============================================================================================================
SsvepBci::SsvepBci()
: m_qStringResourcePath(qApp->applicationDirPath()+"/mne_scan_plugins/resources/ssvepbci/")
, m_bProcessData(false)
, m_dAlpha(0.25)
, m_iNumberOfHarmonics(2)
, m_bUseMEC(true)
, m_bRemovePowerLine(false)
, m_iPowerLine(50)
, m_bChangeSSVEPParameterFlag(false)
, m_bInitializeSource(true)
, m_iNumberOfClassHits(15)
, m_iClassListSize(20)
, m_iNumberOfClassBreaks(30)
{
// Create configuration action bar item/button
m_pActionBCIConfiguration = new QAction(QIcon(":/images/configuration.png"),tr("BCI configuration feature"),this);
m_pActionBCIConfiguration->setStatusTip(tr("BCI configuration feature"));
connect(m_pActionBCIConfiguration, &QAction::triggered, this, &SsvepBci::showBCIConfiguration);
addPluginAction(m_pActionBCIConfiguration);
// Create start Stimuli action bar item/button
m_pActionSetupStimulus = new QAction(QIcon(":/images/stimulus.png"),tr("setup stimulus feature"),this);
m_pActionSetupStimulus->setStatusTip(tr("Setup stimulus feature"));
connect(m_pActionSetupStimulus, &QAction::triggered, this, &SsvepBci::showSetupStimulus);
addPluginAction(m_pActionSetupStimulus);
    // Initialise BCI data
m_slChosenChannelsSensor << "9Z" << "8Z" << "7Z" << "6Z" << "9L" << "8L" << "9R" << "8R"; //<< "TEST";
//m_slChosenChannelsSensor << "24" << "25" << "26" << "28" << "29" << "30" << "31" << "32";
m_lElectrodeNumbers << 33 << 34 << 35 << 36 << 40 << 41 << 42 << 43;
//m_lElectrodeNumbers << 24 << 25 << 26 << 28 << 29 << 30 << 31 << 32;
m_lDesFrequencies << 6.66 << 7.5 <<8.57 << 10 << 12;
m_lThresholdValues << 0.12 << 0.12 << 0.12 << 0.12 << 0.12;
setFrequencyList(m_lDesFrequencies);
}
//*************************************************************************************************************
SsvepBci::~SsvepBci()
{
//If the program is closed while the sampling is in process
if(this->isRunning()){
this->stop();
}
}
//*************************************************************************************************************
QSharedPointer<IPlugin> SsvepBci::clone() const
{
QSharedPointer<SsvepBci> pSSVEPClone(new SsvepBci());
return pSSVEPClone;
}
//*************************************************************************************************************
void SsvepBci::init()
{
m_bIsRunning = false;
// Inputs - Source estimates and sensor level
m_pRTSEInput = PluginInputData<RealTimeSourceEstimate>::create(this, "BCIInSource", "BCI source input data");
connect(m_pRTSEInput.data(), &PluginInputConnector::notify, this, &SsvepBci::updateSource, Qt::DirectConnection);
m_inputConnectors.append(m_pRTSEInput);
m_pRTMSAInput = PluginInputData<NewRealTimeMultiSampleArray>::create(this, "BCIInSensor", "SourceLab sensor input data");
connect(m_pRTMSAInput.data(), &PluginInputConnector::notify, this, &SsvepBci::updateSensor, Qt::DirectConnection);
m_inputConnectors.append(m_pRTMSAInput);
// // Output streams
// m_pBCIOutputOne = PluginOutputData<NewRealTimeSampleArray>::create(this, "ControlSignal", "BCI output data One");
// m_pBCIOutputOne->data()->setArraySize(1);
// m_pBCIOutputOne->data()->setName("Boundary");
// m_outputConnectors.append(m_pBCIOutputOne);
// m_pBCIOutputTwo = PluginOutputData<NewRealTimeSampleArray>::create(this, "ControlSignal", "BCI output data Two");
// m_pBCIOutputTwo->data()->setArraySize(1);
// m_pBCIOutputTwo->data()->setName("Left electrode var");
// m_outputConnectors.append(m_pBCIOutputTwo);
// m_pBCIOutputThree = PluginOutputData<NewRealTimeSampleArray>::create(this, "ControlSignal", "BCI output data Three");
// m_pBCIOutputThree->data()->setArraySize(1);
// m_pBCIOutputThree->data()->setName("Right electrode var");
// m_outputConnectors.append(m_pBCIOutputThree);
// m_pBCIOutputFour = PluginOutputData<NewRealTimeSampleArray>::create(this, "ControlSignal", "BCI output data Four");
// m_pBCIOutputFour->data()->setArraySize(1);
// m_pBCIOutputFour->data()->setName("Left electrode");
// m_outputConnectors.append(m_pBCIOutputFour);
// m_pBCIOutputFive = PluginOutputData<NewRealTimeSampleArray>::create(this, "ControlSignal", "BCI output data Five");
// m_pBCIOutputFive->data()->setArraySize(1);
// m_pBCIOutputFive->data()->setName("Right electrode");
// m_outputConnectors.append(m_pBCIOutputFive);
    // Delete Buffer - will be initialized with first incoming data
m_pBCIBuffer_Sensor = CircularMatrixBuffer<double>::SPtr();
m_pBCIBuffer_Source = CircularMatrixBuffer<double>::SPtr();
// Delete fiff info because the initialisation of the fiff info is seen as the first data acquisition from the input stream
m_pFiffInfo_Sensor = FiffInfo::SPtr();
    // Initialise GUI stuff
m_bUseSensorData = true;
// // Init BCIFeatureWindow for visualization
// m_BCIFeatureWindow = QSharedPointer<BCIFeatureWindow>(new BCIFeatureWindow(this));
}
//*************************************************************************************************************
void SsvepBci::unload()
{
}
//*************************************************************************************************************
bool SsvepBci::start()
{
// Init debug output stream
QString path("BCIDebugFile.txt");
path.prepend(m_qStringResourcePath);
m_outStreamDebug.open(path.toStdString(), ios::trunc);
m_pFiffInfo_Sensor = FiffInfo::SPtr();
// initialize time window parameters
m_iWriteIndex = 0;
m_iReadIndex = 0;
m_iCounter = 0;
m_iReadToWriteBuffer = 0;
m_iDownSampleIndex = 0;
m_iFormerDownSampleIndex = 0;
m_iWindowSize = 8;
m_bIsRunning = true;
// starting the thread for data processing
QThread::start();
return true;
}
//*************************************************************************************************************
bool SsvepBci::stop()
{
m_bIsRunning = false;
// Get data buffers out of idle state if they froze in the acquire or release function
//In case the semaphore blocks the thread -> Release the QSemaphore and let it exit from the pop function (acquire statement)
if(m_bProcessData) // Only clear if buffers have been initialised
{
m_pBCIBuffer_Sensor->releaseFromPop();
m_pBCIBuffer_Sensor->releaseFromPush();
// m_pBCIBuffer_Source->releaseFromPop();
// m_pBCIBuffer_Source->releaseFromPush();
}
// Stop filling buffers with data from the inputs
m_bProcessData = false;
// Delete all features and classification results
clearClassifications();
return true;
}
//*************************************************************************************************************
IPlugin::PluginType SsvepBci::getType() const
{
return _IAlgorithm;
}
//*************************************************************************************************************
QString SsvepBci::getName() const
{
return "SSVEP-BCI-EEG";
}
//*************************************************************************************************************
QWidget* SsvepBci::setupWidget()
{
    SsvepBciWidget* setupWidget = new SsvepBciWidget(this); // widget is later destroyed by CentralWidget - so it has to be created anew every time
//init properties dialog
setupWidget->initGui();
return setupWidget;
}
//*************************************************************************************************************
void SsvepBci::updateSensor(SCMEASLIB::NewMeasurement::SPtr pMeasurement)
{
// initialize the sample array which will be filled with raw data
QSharedPointer<NewRealTimeMultiSampleArray> pRTMSA = pMeasurement.dynamicCast<NewRealTimeMultiSampleArray>();
// bail out early if the measurement is not a NewRealTimeMultiSampleArray;
// otherwise the dereferences and the mutex unlock below would be unbalanced
if(!pRTMSA){
return;
}
//Check if buffer initialized
m_qMutex.lock();
if(!m_pBCIBuffer_Sensor)
m_pBCIBuffer_Sensor = CircularMatrixBuffer<double>::SPtr(new CircularMatrixBuffer<double>(64, pRTMSA->getNumChannels(), pRTMSA->getMultiSampleArray()[0].cols()));
//Fiff information
if(!m_pFiffInfo_Sensor)
{
m_pFiffInfo_Sensor = pRTMSA->info();
//emit fiffInfoAvailable();
// QStringList chs = m_pFiffInfo_Sensor->ch_names;
//calculating downsampling parameter for incoming data
m_iDownSampleIncrement = m_pFiffInfo_Sensor->sfreq/100;
m_dSampleFrequency = m_pFiffInfo_Sensor->sfreq/m_iDownSampleIncrement;//m_pFiffInfo_Sensor->sfreq;
// determine sliding time window parameters
m_iReadSampleSize = 0.1*m_dSampleFrequency; // about 0.1 second long time segment as basic read increment
m_iWriteSampleSize = pRTMSA->getMultiSampleArray()[0].cols();
m_iTimeWindowLength = int(5*m_dSampleFrequency) + int(pRTMSA->getMultiSampleArray()[0].cols()/m_iDownSampleIncrement) + 1 ;
//m_iTimeWindowSegmentSize = int(5*m_dSampleFrequency / m_iWriteSampleSize) + 1; // 4 seconds long maximal sized window
m_matSlidingTimeWindow.resize(m_lElectrodeNumbers.size(), m_iTimeWindowLength);//m_matSlidingTimeWindow.resize(rows, m_iTimeWindowSegmentSize*pRTMSA->getMultiSampleArray()[0].cols());
cout << "Down Sample Increment:" << m_iDownSampleIncrement << endl;
cout << "Read Sample Size:" << m_iReadSampleSize << endl;
cout << "Downsampled Frequency:" << m_dSampleFrequency << endl;
cout << "Write Sample SIze :" << m_iWriteSampleSize<< endl;
cout << "Length of the time window:" << m_iTimeWindowLength << endl;
}
m_qMutex.unlock();
// filling the matrix buffer
if(m_bProcessData){
MatrixXd t_mat;
for(qint32 i = 0; i < pRTMSA->getMultiArraySize(); ++i){
t_mat = pRTMSA->getMultiSampleArray()[i];
m_pBCIBuffer_Sensor->push(&t_mat);
}
}
}
//*************************************************************************************************************
void SsvepBci::updateSource(SCMEASLIB::NewMeasurement::SPtr pMeasurement)
{
QSharedPointer<RealTimeSourceEstimate> pRTSE = pMeasurement.dynamicCast<RealTimeSourceEstimate>();
if(pRTSE)
{
//Check if buffer initialized
if(!m_pBCIBuffer_Source){
m_pBCIBuffer_Source = CircularMatrixBuffer<double>::SPtr(new CircularMatrixBuffer<double>(64, pRTSE->getValue()->data.rows(), pRTSE->getValue()->data.cols()));
}
if(m_bProcessData)
{
MatrixXd t_mat(pRTSE->getValue()->data.rows(), pRTSE->getValue()->data.cols());
for(unsigned char i = 0; i < pRTSE->getValue()->data.cols(); ++i)
t_mat.col(i) = pRTSE->getValue()->data.col(i);
m_pBCIBuffer_Source->push(&t_mat);
}
}
// bail out if the measurement was not a RealTimeSourceEstimate - the label
// handling below dereferences pRTSE unconditionally
if(!pRTSE){
return;
}
// Initialize parameters for processing BCI on source level
if(m_bInitializeSource){
m_bInitializeSource = false;
}
QList<Label> labels;
QList<RowVector4i> labelRGBAs;
QSharedPointer<SurfaceSet> SPtrSurfSet = pRTSE->getSurfSet();
SurfaceSet *pSurf = SPtrSurfSet.data();
SurfaceSet surf = *pSurf;
qDebug() << "label acquisation successful:" << pRTSE->getAnnotSet()->toLabels(surf, labels, labelRGBAs);
foreach(Label label, labels){
qDebug() << "label IDs: " << label.label_id << "\v" << "label name: " << label.name;
}
QList<VectorXi> vertNo = pRTSE->getFwdSolution()->src.get_vertno();
foreach(VectorXi vector, vertNo){
cout << "vertNo:" << vector << endl;
}
}
//*************************************************************************************************************
void SsvepBci::clearClassifications()
{
m_qMutex.lock();
m_lIndexOfClassResultSensor.clear();
m_qMutex.unlock();
}
//*************************************************************************************************************
void SsvepBci::setNumClassHits(int numClassHits){
m_iNumberOfClassHits = numClassHits;
}
//*************************************************************************************************************
void SsvepBci::setNumClassBreaks(int numClassBreaks){
m_iNumberOfClassBreaks = numClassBreaks;
}
//*************************************************************************************************************
void SsvepBci::setChangeSSVEPParameterFlag(){
m_bChangeSSVEPParameterFlag = true;
}
//*************************************************************************************************************
void SsvepBci::setSizeClassList(int classListSize){
m_iClassListSize = classListSize;
}
//*************************************************************************************************************
QString SsvepBci::getSsvepBciResourcePath(){
return m_qStringResourcePath;
}
//*************************************************************************************************************
void SsvepBci::showSetupStimulus()
{
QDesktopWidget Desktop; // Desktop Widget for getting the number of accessible screens
if(Desktop.numScreens()> 1){
// Open setup stimulus widget
if(m_pSsvepBciSetupStimulusWidget == NULL)
m_pSsvepBciSetupStimulusWidget = QSharedPointer<SsvepBciSetupStimulusWidget>(new SsvepBciSetupStimulusWidget(this));
if(!m_pSsvepBciSetupStimulusWidget->isVisible()){
m_pSsvepBciSetupStimulusWidget->setWindowTitle("ssvepBCI - Setup Stimulus");
//m_pSsvepBciSetupStimulusWidget->initGui();
m_pSsvepBciSetupStimulusWidget->show();
m_pSsvepBciSetupStimulusWidget->raise();
}
//sets Window to the foreground and activates it for editing
m_pSsvepBciSetupStimulusWidget->activateWindow();
}
else{
QMessageBox msgBox;
msgBox.setText("Only one screen detected!\nFor stimulus visualization attach one more.");
msgBox.exec();
return;
}
}
//*************************************************************************************************************
void SsvepBci::showBCIConfiguration()
{
// Open setup stimulus widget
if(m_pSsvepBciConfigurationWidget == NULL)
m_pSsvepBciConfigurationWidget = QSharedPointer<SsvepBciConfigurationWidget>(new SsvepBciConfigurationWidget(this));
if(!m_pSsvepBciConfigurationWidget->isVisible()){
m_pSsvepBciConfigurationWidget->setWindowTitle("ssvepBCI - Configuration");
m_pSsvepBciConfigurationWidget->show();
m_pSsvepBciConfigurationWidget->raise();
}
//sets Window to the foreground and activates it for editing
m_pSsvepBciConfigurationWidget->activateWindow();
}
//*************************************************************************************************************
void SsvepBci::removePowerLine(bool removePowerLine)
{
m_qMutex.lock();
m_bRemovePowerLine = removePowerLine;
m_qMutex.unlock();
}
//*************************************************************************************************************
void SsvepBci::setPowerLine(int powerLine)
{
m_qMutex.lock();
m_iPowerLine = powerLine;
m_qMutex.unlock();
}
//*************************************************************************************************************
void SsvepBci::setFeatureExtractionMethod(bool useMEC)
{
m_qMutex.lock();
m_bUseMEC = useMEC;
m_qMutex.unlock();
}
//*************************************************************************************************************
void SsvepBci::changeSSVEPParameter(){
// update frequency list from setup stimulus widget if activated
if(m_pSsvepBciSetupStimulusWidget){
setFrequencyList(m_pSsvepBciSetupStimulusWidget->getFrequencies());
}
if(m_pSsvepBciConfigurationWidget){
// update number of harmonics of reference signal
m_iNumberOfHarmonics = 1 + m_pSsvepBciConfigurationWidget->getNumOfHarmonics();
// update channel select
QStringList channelSelectSensor = m_pSsvepBciConfigurationWidget->getSensorChannelSelection();
if(channelSelectSensor.size() > 0){
// update the list of selected channels
m_slChosenChannelsSensor = channelSelectSensor;
// get new list of electrode numbers
m_lElectrodeNumbers.clear();
foreach(const QString &str, m_slChosenChannelsSensor){
m_lElectrodeNumbers << m_mapElectrodePinningScheme.value(str);
}
// reset sliding time window parameter
m_iWriteIndex = 0;
m_iReadIndex = 0;
m_iCounter = 0;
m_iReadToWriteBuffer = 0;
m_iDownSampleIndex = 0;
m_iFormerDownSampleIndex = 0;
// resize the time window with new electrode numbers
m_matSlidingTimeWindow.resize(m_lElectrodeNumbers.size(), m_iTimeWindowLength);
}
}
// reset flag for changing SSVEP parameter
m_bChangeSSVEPParameterFlag = false;
}
//*************************************************************************************************************
void SsvepBci::setThresholdValues(MyQList thresholds){
m_lThresholdValues = thresholds;
}
//*************************************************************************************************************
void SsvepBci::run(){
while(m_bIsRunning){
if(m_bUseSensorData){
ssvepBciOnSensor();
}
else{
ssvepBciOnSource();
}
}
}
//*************************************************************************************************************
void SsvepBci::setFrequencyList(QList<double> frequencyList)
{
if(!frequencyList.isEmpty()){
// update list of desired frequencies
m_lDesFrequencies.clear();
m_lDesFrequencies = frequencyList;
// update the list of all frequencies
m_lAllFrequencies.clear();
m_lAllFrequencies = m_lDesFrequencies;
for(int i = 0; i < m_lDesFrequencies.size() - 1; i++){
m_lAllFrequencies.append((m_lDesFrequencies.at(i) + m_lDesFrequencies.at(i + 1) ) / 2);
}
// emit novel frequency list
emit getFrequencyLabels(m_lDesFrequencies);
}
}
//*************************************************************************************************************
QList<double> SsvepBci::getCurrentListOfFrequencies(){
return m_lDesFrequencies;
}
//*************************************************************************************************************
double SsvepBci::MEC(MatrixXd &Y, MatrixXd &X)
{
// Remove SSVEP harmonic frequencies
MatrixXd X_help = X.transpose()*X;
MatrixXd Ytilde = Y - X*X_help.inverse()*X.transpose()*Y;
// Find eigenvalues and eigenvectors
SelfAdjointEigenSolver<MatrixXd> eigensolver(Ytilde.transpose()*Ytilde);
// Determine number of channels Ns
int Ns;
VectorXd cumsum = eigensolver.eigenvalues();
for(int j = 1; j < eigensolver.eigenvalues().size(); j++){
cumsum(j) += cumsum(j - 1);
}
for(Ns = 0; Ns < eigensolver.eigenvalues().size() ; Ns++){
if(cumsum(Ns)/eigensolver.eigenvalues().sum() > 0.1){
break;
}
}
Ns += 1;
// Determine spatial filter matrix W
MatrixXd W = eigensolver.eigenvectors().block(0, 0, eigensolver.eigenvectors().rows(), Ns);
for(int k = 0; k < Ns; k++){
W.col(k) = W.col(k)*(1/sqrt(eigensolver.eigenvalues()(k)));
}
// Calculate channel signals
MatrixXd S = Y*W;
// Calculate signal energy
MatrixXd P(2, Ns);
double power = 0;
for(int k = 0; k < m_iNumberOfHarmonics; k++){
P = X.block(0, 2*k, X.rows(), 2).transpose()*S;
P = P.array()*P.array();
power += 1 / double(m_iNumberOfHarmonics*Ns) * P.sum();
}
return power;
}
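// Added note (illustrative, not part of the original plugin code): MEC first
// projects the SSVEP reference out of Y (Ytilde), keeps the weakest Ns
// eigendirections of Ytilde'*Ytilde as the spatial filter W, and then sums the
// energy of the reference frequency in the filtered signals S = Y*W. A
// hypothetical call, assuming a 4 s window at 100 Hz, 8 channels, one harmonic:
// MatrixXd Y(400, 8); // time samples x channels
// MatrixXd X(400, 2); // columns sin(2*pi*f*t) and cos(2*pi*f*t)
// double power = MEC(Y, X); // larger power => stronger SSVEP response at f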
//*************************************************************************************************************
double SsvepBci::CCA(MatrixXd &Y, MatrixXd &X)
{
// CCA parameter
int n = X.rows();
int p1 = X.cols();
int p2 = Y.cols();
// center data sets
MatrixXd X_center(n, p1);
MatrixXd Y_center(n, p2);
for(int i = 0; i < p1; i++){
X_center.col(i) = X.col(i).array() - X.col(i).mean();
}
for(int i = 0; i < p2; i++){
Y_center.col(i) = Y.col(i).array() - Y.col(i).mean();
}
// QR decomposition
MatrixXd Q1, Q2;
ColPivHouseholderQR<MatrixXd> qr1(X_center), qr2(Y_center);
Q1 = qr1.householderQ() * MatrixXd::Identity(n, p1);
Q2 = qr2.householderQ() * MatrixXd::Identity(n, p2);
// SVD decomposition, determine max correlation
JacobiSVD<MatrixXd> svd(Q1.transpose()*Q2); // ComputeThinU | ComputeThinV
return svd.singularValues().maxCoeff();
}
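// Added note: after centering, the QR factors Q1 and Q2 are orthonormal bases
// of the column spaces of X and Y, so the largest singular value of Q1'*Q2 is
// the maximum canonical correlation, a value in [0,1]. A hypothetical call
// double rho = CCA(Y, X); returns values near 1 when Y is strongly
// correlated with the reference signals in X.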
//*************************************************************************************************************
void SsvepBci::readFromSlidingTimeWindow(MatrixXd &data)
{
data.resize(m_matSlidingTimeWindow.rows(), m_iWindowSize*m_iReadSampleSize);
// consider matrix overflow case
if(data.cols() > m_iReadIndex + 1){
int width = data.cols() - (m_iReadIndex + 1);
data.block(0, 0, data.rows(), width) = m_matSlidingTimeWindow.block(0, m_matSlidingTimeWindow.cols() - width , data.rows(), width );
data.block(0, width, data.rows(), m_iReadIndex + 1) = m_matSlidingTimeWindow.block(0, 0, data.rows(), m_iReadIndex + 1);
}
else{
data = m_matSlidingTimeWindow.block(0, m_iReadIndex - (data.cols() - 1), data.rows(), data.cols()); // consider case without matrix overflow
}
// transpose in place within the same data space, avoiding aliasing
data.transposeInPlace();
}
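// Added worked example: with a 10-column ring buffer, m_iReadIndex == 2 and a
// requested window of 5 columns, the overflow branch above copies columns 8..9
// first and columns 0..2 behind them, so the block is in chronological order
// before it is transposed to samples x channels.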
//*************************************************************************************************************
void SsvepBci::ssvepBciOnSensor()
{
// Wait for the fiff info if not yet received - needed because the buffers are first initialized in the update functions
while(!m_pFiffInfo_Sensor){
msleep(10);
}
// matrix collecting the SSVEP probabilities of every evaluated window in this call (one column per window)
MatrixXd m_matSSVEPProbabilities(m_lDesFrequencies.size(), 0);
// Start filling buffers with data from the inputs
m_bProcessData = true;
MatrixXd t_mat = m_pBCIBuffer_Sensor->pop();
// write the selected feature channels to the time window storage and increase the segment index
int writtenSamples = 0;
while(m_iDownSampleIndex >= m_iFormerDownSampleIndex){
// write from t_mat to the sliding time window while doing channel select and downsampling
m_iFormerDownSampleIndex = m_iDownSampleIndex;
for(int i = 0; i < m_lElectrodeNumbers.size(); i++){
m_matSlidingTimeWindow(i, m_iWriteIndex) = t_mat(m_lElectrodeNumbers.at(i), m_iDownSampleIndex);
}
writtenSamples++;
// update counter variables
m_iWriteIndex = (m_iWriteIndex + 1) % m_iTimeWindowLength;
m_iDownSampleIndex = (m_iDownSampleIndex + m_iDownSampleIncrement ) % m_iWriteSampleSize;
}
m_iFormerDownSampleIndex = m_iDownSampleIndex;
// update the backlog of unread samples between read and write index
m_iReadToWriteBuffer = m_iReadToWriteBuffer + writtenSamples;
// execute the processing loop as long as there is new data to be read from the time window
while(m_iReadToWriteBuffer >= m_iReadSampleSize)
{
if(m_iCounter > m_iNumberOfClassBreaks)
{
// determine window size according to the number of previously counted misclassifications
m_iWindowSize = 10;
if(m_iCounter <= 50 && m_iCounter > 40){
m_iWindowSize = 20;
}
if(m_iCounter > 50){
m_iWindowSize = 40;
}
// create current data matrix Y
MatrixXd Y;
readFromSlidingTimeWindow(Y);
// create relative timeline according to Y
int samples = Y.rows();
ArrayXd t = 2*M_PI/m_dSampleFrequency * ArrayXd::LinSpaced(samples, 1, samples);
// Remove 50 Hz Power line signal
if(m_bRemovePowerLine){
MatrixXd Zp(samples,2);
ArrayXd t_PL = t*m_iPowerLine;
Zp.col(0) = t_PL.sin();
Zp.col(1) = t_PL.cos();
MatrixXd Zp_help = Zp.transpose()*Zp;
Y = Y - Zp*Zp_help.inverse()*Zp.transpose()*Y;
}
qDebug() << "size of Matrix:" << Y.rows() << Y.cols();
// apply feature extraction for all frequencies of interest
VectorXd ssvepProbabilities(m_lAllFrequencies.size());
for(int i = 0; i < m_lAllFrequencies.size(); i++)
{
// create reference signal matrix X
MatrixXd X(samples, 2*m_iNumberOfHarmonics);
for(int k = 0; k < m_iNumberOfHarmonics; k++){
ArrayXd t_k = t*(k+1)*m_lAllFrequencies.at(i);
X.col(2*k) = t_k.sin();
X.col(2*k+1) = t_k.cos();
}
// extracting the features from the data Y with the reference signal X
if(m_bUseMEC){
ssvepProbabilities(i) = MEC(Y, X); // using Minimum Energy Combination as feature-extraction tool
}
else{
ssvepProbabilities(i) = CCA(Y, X); // using Canonical Correlation Analysis as feature-extraction tool
}
}
// normalize features to probabilities and pass them through a softmax function
ssvepProbabilities = m_dAlpha / ssvepProbabilities.sum() * ssvepProbabilities;
ssvepProbabilities = ssvepProbabilities.array().exp(); // softmax function for better distinguishability between the probabilities
ssvepProbabilities = 1 / ssvepProbabilities.sum() * ssvepProbabilities;
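// Added note: the three lines above compute p_i = exp(alpha*f_i/sum(f)) /
// sum_j exp(alpha*f_j/sum(f)); scaling by m_dAlpha sharpens the softmax so
// that close feature values are easier to separate by the thresholds below.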
// classify probabilites
int index = 0;
double maxProbability = ssvepProbabilities.maxCoeff(&index);
if(index < m_lDesFrequencies.size()){
//qDebug()<< "index:" << index;
if(m_lThresholdValues[index] < maxProbability){
//qDebug() << "comparison: "<< m_lThresholdValues[index] << "and" << maxProbability;
m_lIndexOfClassResultSensor.append(index+1);
}
else{
m_lIndexOfClassResultSensor.append(0);
}
}
else{
m_lIndexOfClassResultSensor.append(0);
}
// bound the classification list: drop the oldest result once the list exceeds its maximum size
if(m_lIndexOfClassResultSensor.size() > m_iClassListSize){
m_lIndexOfClassResultSensor.pop_front();
}
// transfer values to the matrix containing all SSVEP probabilities of the desired frequencies for one calculation step
m_matSSVEPProbabilities.conservativeResize(m_lDesFrequencies.size(), m_matSSVEPProbabilities.cols() + 1);
m_matSSVEPProbabilities.col( m_matSSVEPProbabilities.cols() - 1) = ssvepProbabilities.head(m_lDesFrequencies.size());
}
// update counter and index variables
m_iCounter++;
m_iReadToWriteBuffer = m_iReadToWriteBuffer - m_iReadSampleSize;
m_iReadIndex = (m_iReadIndex + m_iReadSampleSize) % (m_iTimeWindowLength);
}
// emit classification results if any classification has been done
if(!m_lIndexOfClassResultSensor.isEmpty()){
// find a classification result that satisfies the required number of classification hits
for(int i = 1; (i <= m_lDesFrequencies.size()) && (!m_lIndexOfClassResultSensor.isEmpty() ); i++){
if(m_lIndexOfClassResultSensor.count(i) >= m_iNumberOfClassHits){
emit classificationResult(m_lDesFrequencies[i - 1]);
m_lIndexOfClassResultSensor.clear();
m_iCounter = 0;
break;
}
else{
emit classificationResult(0);
}
}
}
// calculate and emit signal of mean probabilities
if(m_matSSVEPProbabilities.cols() != 0){
QList<double> meanSSVEPProbabilities;
for(int i = 0; i < m_lDesFrequencies.size(); i++){
meanSSVEPProbabilities << m_matSSVEPProbabilities.row(i).mean();
}
emit SSVEPprob(meanSSVEPProbabilities);
//qDebug() << "emit ssvep:" << meanSSVEPProbabilities;
}
// change parameter and reset the time window if the change flag has been set
if(m_bChangeSSVEPParameterFlag){
changeSSVEPParameter();
}
}
//*************************************************************************************************************
void SsvepBci::ssvepBciOnSource()
{
}
| 37.344 | 191 | 0.562095 | 13grife37 |
33858aa8773c010d80a42ebadd564f73db9b6324 | 1,549 | hpp | C++ | include/wtengine/mnu/apply.hpp | wtfsystems/wtengine | 0fb56d6eb2ac6359509e7a52876c8656da6b3ce0 | [
"MIT"
] | 7 | 2020-06-16T18:47:35.000Z | 2021-08-25T13:41:13.000Z | include/wtengine/mnu/apply.hpp | wtfsystems/wtengine | 0fb56d6eb2ac6359509e7a52876c8656da6b3ce0 | [
"MIT"
] | 15 | 2020-07-23T14:03:39.000Z | 2022-01-28T02:32:07.000Z | include/wtengine/mnu/apply.hpp | wtfsystems/wtengine | 0fb56d6eb2ac6359509e7a52876c8656da6b3ce0 | [
"MIT"
] | null | null | null | /*!
* WTEngine | File: apply.hpp
*
* \author Matthew Evans
* \version 0.7
* \copyright See LICENSE.md for copyright information.
* \date 2019-2021
*/
#ifndef WTE_MNU_ITEM_APPLY_HPP
#define WTE_MNU_ITEM_APPLY_HPP
#include <string>
#include <vector>
#include "wtengine/mnu/item.hpp"
#include "wtengine/mgr/menus.hpp"
#include "wtengine/mgr/messages.hpp"
namespace wte::mnu {
/*!
* \class apply
* \brief An apply option for the menus.
*/
class apply final : public item {
public:
/*!
* \brief Menu Item Apply constructor.
*/
apply();
~apply() = default; //!< Default destructor.
private:
/*
* Set the apply item to cancel.
*/
void on_left(void) override;
/*
* Set the apply item to apply.
*/
void on_right(void) override;
/*
* On select trigger.
*/
void on_select(void) override;
/*
* Return display text for the menu item when rendering.
*/
const std::vector<std::string> get_text(void) const override;
/*
* Reset the apply item to the canceled state.
*/
void reset_to_default(void) override;
/*
* Set the apply item's default state to canceled.
*/
void set_default(void) override;
/*
* Apply the item's current apply/cancel choice.
*/
void apply_setting(void) override;
bool do_apply;
};
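/*
 * Added note: the handlers above are private and driven by the menu system
 * rather than called directly - conceptually on_left arms "cancel", on_right
 * arms "apply" (tracked in do_apply), and on_select commits the armed choice
 * via apply_setting().
 */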
} // end namespace wte::mnu
#endif
| 19.858974 | 69 | 0.564881 | wtfsystems |
3387f7cceec9f7b22b107e00570175c4f67730f5 | 921 | cpp | C++ | dev/TreeView/TreeViewDragItemsCompletedEventArgs.cpp | riverar/microsoft-ui-xaml | ef3a0fcd85d200c98514e765eea94323b943cf1e | [
"MIT"
] | 3,788 | 2019-05-07T02:41:36.000Z | 2022-03-30T12:34:15.000Z | dev/TreeView/TreeViewDragItemsCompletedEventArgs.cpp | riverar/microsoft-ui-xaml | ef3a0fcd85d200c98514e765eea94323b943cf1e | [
"MIT"
] | 6,170 | 2019-05-06T21:32:43.000Z | 2022-03-31T23:46:55.000Z | dev/TreeView/TreeViewDragItemsCompletedEventArgs.cpp | riverar/microsoft-ui-xaml | ef3a0fcd85d200c98514e765eea94323b943cf1e | [
"MIT"
] | 532 | 2019-05-07T12:15:58.000Z | 2022-03-31T11:36:26.000Z | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.
#include "pch.h"
#include "common.h"
#include "Vector.h"
#include "TreeViewDragItemsCompletedEventArgs.h"
TreeViewDragItemsCompletedEventArgs::TreeViewDragItemsCompletedEventArgs(const winrt::DragItemsCompletedEventArgs& args, const winrt::IInspectable& newParentItem)
{
m_dragItemsCompletedEventArgs = args;
m_newParentItem = newParentItem;
}
DataPackageOperation TreeViewDragItemsCompletedEventArgs::DropResult() const
{
return m_dragItemsCompletedEventArgs.DropResult();
}
winrt::IVectorView<winrt::IInspectable> TreeViewDragItemsCompletedEventArgs::Items()
{
return m_dragItemsCompletedEventArgs.Items();
}
winrt::IInspectable TreeViewDragItemsCompletedEventArgs::NewParentItem()
{
return m_newParentItem;
}
| 30.7 | 163 | 0.785016 | riverar |
338935672ccf113b972a0343825e98e27b4f3d91 | 13,245 | cc | C++ | cpp/tests/test_five_round_distinguisher_present_sbox_small.cc | medsec/expectation-cryptanalysis-on-round-reduced-aes | 52a4d5b4dd7555a1b4badafd95c5aeed92a70872 | [
"Unlicense"
] | 2 | 2019-06-04T00:52:17.000Z | 2022-01-19T08:06:44.000Z | cpp/tests/test_five_round_distinguisher_present_sbox_small.cc | medsec/expectation-cryptanalysis-on-round-reduced-aes | 52a4d5b4dd7555a1b4badafd95c5aeed92a70872 | [
"Unlicense"
] | null | null | null | cpp/tests/test_five_round_distinguisher_present_sbox_small.cc | medsec/expectation-cryptanalysis-on-round-reduced-aes | 52a4d5b4dd7555a1b4badafd95c5aeed92a70872 | [
"Unlicense"
] | 2 | 2019-06-03T08:12:29.000Z | 2019-06-03T12:53:26.000Z | /**
* __author__ = anonymized
* __date__ = 2019-05
* __copyright__ = Creative Commons CC0
*/
#include <array>
#include <vector>
#include <stdint.h>
#include <stdlib.h>
#include "ciphers/random_function.h"
#include "ciphers/small_aes_present_sbox.h"
#include "ciphers/small_state.h"
#include "ciphers/speck64.h"
#include "utils/argparse.h"
#include "utils/utils.h"
#include "utils/xorshift1024.h"
using ciphers::small_aes_ctx_t;
using ciphers::small_aes_state_t;
using ciphers::small_aes_key_t;
using ciphers::SmallState;
using ciphers::speck64_context_t;
using ciphers::speck64_96_key_t;
using ciphers::speck64_state_t;
using utils::assert_equal;
using utils::compute_mean;
using utils::compute_variance;
using utils::xor_arrays;
using utils::ArgumentParser;
// ---------------------------------------------------------
static const size_t NUM_CONSIDERED_ROUNDS = 5;
static const size_t NUM_TEXTS_IN_DELTA_SET = 16;
// ---------------------------------------------------------
typedef struct {
small_aes_key_t key;
small_aes_ctx_t cipher_ctx;
size_t num_keys;
size_t num_sets_per_key;
std::vector<size_t> num_matches;
bool use_prp = false;
bool use_all_delta_sets_from_diagonal = false;
} ExperimentContext;
typedef struct {
std::vector<size_t> num_collisions_per_set;
size_t num_collisions;
} ExperimentResult;
typedef size_t (*experiment_function_t)(ExperimentContext *);
typedef std::vector<SmallState> SmallStatesVector;
// ---------------------------------------------------------
static void
generate_base_plaintext(small_aes_state_t plaintext) {
utils::get_random_bytes(plaintext, SMALL_AES_NUM_STATE_BYTES);
}
// ---------------------------------------------------------
static void
get_text_from_delta_set(small_aes_state_t base_text, const size_t i) {
base_text[0] = (uint8_t) ((i << 4) & 0xF0);
}
// ---------------------------------------------------------
static void
generate_base_plaintext_in_diagonal(small_aes_state_t plaintext,
const size_t set_index_in_diagonal,
const size_t byte_index_in_diagonal) {
if (byte_index_in_diagonal == 0) {
plaintext[2] = (uint8_t) (((set_index_in_diagonal >> 8) & 0x0F) |
(plaintext[2] & 0xF0));
plaintext[5] = (uint8_t) ((set_index_in_diagonal & 0xF0) |
(plaintext[5] & 0x0F));
plaintext[7] = (uint8_t) ((set_index_in_diagonal & 0x0F) |
(plaintext[7] & 0xF0));
} else if (byte_index_in_diagonal == 1) {
plaintext[0] = (uint8_t) (((set_index_in_diagonal >> 4) & 0xF0) |
(plaintext[0] & 0x0F));
plaintext[5] = (uint8_t) ((set_index_in_diagonal & 0xF0) |
(plaintext[5] & 0x0F));
plaintext[7] = (uint8_t) ((set_index_in_diagonal & 0x0F) |
(plaintext[7] & 0xF0));
} else if (byte_index_in_diagonal == 2) {
plaintext[0] = (uint8_t) (((set_index_in_diagonal >> 4) & 0xF0) |
(plaintext[0] & 0x0F));
plaintext[2] = (uint8_t) (((set_index_in_diagonal >> 4) & 0x0F) |
(plaintext[2] & 0xF0));
plaintext[7] = (uint8_t) ((set_index_in_diagonal & 0x0F) |
(plaintext[7] & 0xF0));
} else if (byte_index_in_diagonal == 3) {
plaintext[0] = (uint8_t) (((set_index_in_diagonal >> 4) & 0xF0) |
(plaintext[0] & 0x0F));
plaintext[2] = (uint8_t) (((set_index_in_diagonal >> 4) & 0x0F) |
(plaintext[2] & 0xF0));
plaintext[5] = (uint8_t) ((set_index_in_diagonal & 0xF0) |
(plaintext[5] & 0x0F));
}
}
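// Added worked example: for byte_index_in_diagonal == 0 the 12-bit
// set_index_in_diagonal is spread over the three remaining diagonal nibbles -
// bits 11..8 into the low nibble of byte 2, bits 7..4 into the high nibble of
// byte 5, and bits 3..0 into the low nibble of byte 7 - while the active
// nibble (high nibble of byte 0) is iterated by
// get_text_from_diagonal_delta_set() below.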
// ---------------------------------------------------------
static void
get_text_from_diagonal_delta_set(small_aes_state_t plaintext,
const size_t byte_index_in_diagonal,
const size_t index_in_delta_set) {
if (byte_index_in_diagonal == 0) {
plaintext[0] = (uint8_t) ((plaintext[0] & 0x0F) |
((index_in_delta_set << 4) & 0xF0));
} else if (byte_index_in_diagonal == 1) {
plaintext[2] = (uint8_t) ((plaintext[2] & 0xF0) |
(index_in_delta_set & 0x0F));
} else if (byte_index_in_diagonal == 2) {
plaintext[5] = (uint8_t) ((plaintext[5] & 0x0F) |
((index_in_delta_set << 4) & 0xF0));
} else if (byte_index_in_diagonal == 3) {
plaintext[7] = (uint8_t) ((plaintext[7] & 0xF0) |
(index_in_delta_set & 0x0F));
}
}
// ---------------------------------------------------------
static void encrypt(const small_aes_ctx_t *aes_context,
small_aes_state_t plaintext,
SmallState &ciphertext) {
small_aes_present_sbox_encrypt_rounds_only_sbox_in_final(
aes_context, plaintext, ciphertext.state, NUM_CONSIDERED_ROUNDS
);
}
// ---------------------------------------------------------
bool has_zero_column(const small_aes_state_t state) {
return ((state[0] == 0) && (state[1] == 0))
|| ((state[2] == 0) && (state[3] == 0))
|| ((state[4] == 0) && (state[5] == 0))
|| ((state[6] == 0) && (state[7] == 0));
}
// ---------------------------------------------------------
static size_t find_num_collisions(SmallStatesVector &ciphertexts) {
const size_t num_texts = ciphertexts.size();
size_t num_collisions = 0;
small_aes_state_t temp;
for (size_t i = 0; i != num_texts; ++i) {
const SmallState left = ciphertexts[i];
for (size_t j = i + 1; j != num_texts; ++j) {
const SmallState right = ciphertexts[j];
xor_arrays(temp, left.state, right.state,
SMALL_AES_NUM_STATE_BYTES);
if (has_zero_column(temp)) {
num_collisions++;
}
}
}
return num_collisions;
}
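// Added back-of-envelope check: for an ideal permutation a fixed ciphertext
// pair has a zero 16-bit column with probability roughly 4 * 2^-16 (union
// bound over the four columns), so a delta-set of 16 texts with
// binom(16,2) = 120 pairs yields about 120 * 4 / 65536 ~ 0.0073 collisions on
// average; the distinguisher looks for a deviation from this baseline.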
// ---------------------------------------------------------
static size_t perform_experiment(ExperimentContext *context) {
small_aes_ctx_t cipher_ctx = context->cipher_ctx;
small_aes_key_t key;
utils::get_random_bytes(key, SMALL_AES_NUM_KEY_BYTES);
small_aes_key_setup(&cipher_ctx, key);
utils::print_hex("# Key", key, SMALL_AES_NUM_KEY_BYTES);
size_t num_collisions = 0;
for (size_t i = 0; i < context->num_sets_per_key; ++i) {
SmallStatesVector ciphertexts;
small_aes_state_t plaintext;
generate_base_plaintext(plaintext);
for (size_t j = 0; j < NUM_TEXTS_IN_DELTA_SET; ++j) {
SmallState ciphertext;
get_text_from_delta_set(plaintext, j);
encrypt(&cipher_ctx, plaintext, ciphertext);
ciphertexts.push_back(ciphertext);
}
num_collisions += find_num_collisions(ciphertexts);
if (i > 0) {
if ((i & 0xFFFFF) == 0) {
printf("# Tested %8zu sets. Collisions: %8zu\n", i,
num_collisions);
}
}
}
return num_collisions;
}
// ---------------------------------------------------------
static size_t perform_experiment_from_diagonal(ExperimentContext *context) {
small_aes_ctx_t cipher_ctx = context->cipher_ctx;
small_aes_key_t key;
utils::get_random_bytes(key, SMALL_AES_NUM_KEY_BYTES);
small_aes_key_setup(&cipher_ctx, key);
utils::print_hex("# Key", key, SMALL_AES_NUM_KEY_BYTES);
size_t num_collisions = 0;
const size_t num_sets_in_diagonal = 1L << 12;
const size_t num_bytes_in_diagonal = 4;
small_aes_state_t plaintext;
generate_base_plaintext(plaintext);
for (size_t i = 0; i < num_sets_in_diagonal; ++i) {
for (size_t m = 0; m < num_bytes_in_diagonal; ++m) {
generate_base_plaintext_in_diagonal(plaintext, i, m);
SmallStatesVector ciphertexts;
for (size_t j = 0; j < NUM_TEXTS_IN_DELTA_SET; ++j) {
SmallState ciphertext;
get_text_from_diagonal_delta_set(plaintext, m, j);
encrypt(&cipher_ctx, plaintext, ciphertext);
ciphertexts.push_back(ciphertext);
}
num_collisions += find_num_collisions(ciphertexts);
}
if (i > 0) {
if ((i & 0xFFFFF) == 0) {
printf("# Tested %8zu sets. Collisions: %8zu\n", i,
num_collisions);
}
}
}
return num_collisions;
}
// ---------------------------------------------------------
static size_t perform_experiment_with_prp(ExperimentContext *context) {
speck64_context_t cipher_ctx;
speck64_96_key_t key;
utils::get_random_bytes(key, SPECK_64_96_NUM_KEY_BYTES);
utils::print_hex("# Key", key, SPECK_64_96_NUM_KEY_BYTES);
speck64_96_key_schedule(&cipher_ctx, key);
size_t num_collisions = 0;
for (size_t i = 0; i < context->num_sets_per_key; ++i) {
SmallStatesVector ciphertexts;
speck64_state_t plaintext;
generate_base_plaintext(plaintext);
for (size_t j = 0; j < NUM_TEXTS_IN_DELTA_SET; ++j) {
SmallState ciphertext;
get_text_from_delta_set(plaintext, j);
speck64_encrypt(&cipher_ctx, plaintext, ciphertext.state);
ciphertexts.push_back(ciphertext);
}
num_collisions += find_num_collisions(ciphertexts);
if (i > 0) {
if ((i & 0xFFFFF) == 0) {
printf("# Tested %8zu sets. Collisions: %8zu\n", i,
num_collisions);
}
}
}
return num_collisions;
}
// ---------------------------------------------------------
static void perform_experiments(ExperimentContext *context) {
experiment_function_t experiment_function = nullptr;
if (context->use_prp) {
experiment_function = &perform_experiment_with_prp;
} else if (!context->use_prp &&
!context->use_all_delta_sets_from_diagonal) {
experiment_function = &perform_experiment;
} else if (!context->use_prp && context->use_all_delta_sets_from_diagonal) {
experiment_function = &perform_experiment_from_diagonal;
}
ExperimentResult all_results;
all_results.num_collisions = 0;
printf("#%8zu Experiments\n", context->num_keys);
printf("#%8zu Sets/key\n", context->num_sets_per_key);
printf("# Key Collisions Mean Variance \n");
for (size_t i = 0; i < context->num_keys; ++i) {
const size_t num_collisions = experiment_function(context);
const double mean =
(double) num_collisions / (double) context->num_sets_per_key;
all_results.num_collisions += num_collisions;
all_results.num_collisions_per_set.push_back(num_collisions);
printf("%4zu %8zu %8.4f\n", i + 1, num_collisions, mean);
}
const double mean = compute_mean(all_results.num_collisions_per_set);
const double variance = compute_variance(
all_results.num_collisions_per_set);
printf("# Total Keys Collisions Mean Variance \n");
printf("# %4zu %8zu %8.4f %8.8f\n",
context->num_keys,
all_results.num_collisions,
mean,
variance);
}
// ---------------------------------------------------------
// Argument parsing
// ---------------------------------------------------------
static void
parse_args(ExperimentContext *context, int argc, const char **argv) {
ArgumentParser parser;
parser.appName("Test for the Small-AES five-round distinguisher."
"If -d 1 -r 0 is set, uses all 4 * 2^12 * binom(16, 2) "
"delta-sets from diagonals, but only for the Small-AES, "
"not for the PRP.");
parser.addArgument("-k", "--num_keys", 1, false);
parser.addArgument("-s", "--num_sets_per_key", 1, false);
parser.addArgument("-r", "--use_random_function", 1, false);
parser.addArgument("-d", "--use_diagonals", 1, false);
try {
parser.parse((size_t) argc, argv);
context->num_sets_per_key = static_cast<const size_t>(1L
<< parser.retrieveAsLong("s"));
context->num_keys = parser.retrieveAsLong("k");
context->use_prp = (bool) parser.retrieveAsInt("r");
context->use_all_delta_sets_from_diagonal = (bool) parser.retrieveAsInt(
"d");
} catch (...) {
fprintf(stderr, "%s\n", parser.usage().c_str());
exit(EXIT_FAILURE);
}
printf("#Keys %8zu\n", context->num_keys);
printf("#Sets/Key (log) %8zu\n", context->num_sets_per_key);
printf("#Uses PRP %8d\n", context->use_prp);
printf("#Uses Diagonal %8d\n", context->use_all_delta_sets_from_diagonal);
}
// ---------------------------------------------------------
int main(int argc, const char **argv) {
ExperimentContext context;
parse_args(&context, argc, argv);
perform_experiments(&context);
return EXIT_SUCCESS;
}
| 34.672775 | 80 | 0.564137 | medsec |
3391d4ec4e8475f0efde936f6982ebe7ad0db7af | 4,624 | cc | C++ | EventGenerator/src/PrimaryProtonGunR_module.cc | pavel1murat/Offline | 729840761324a4d9c49c037114dd3a3ec38e3358 | [
"Apache-2.0"
] | null | null | null | EventGenerator/src/PrimaryProtonGunR_module.cc | pavel1murat/Offline | 729840761324a4d9c49c037114dd3a3ec38e3358 | [
"Apache-2.0"
] | null | null | null | EventGenerator/src/PrimaryProtonGunR_module.cc | pavel1murat/Offline | 729840761324a4d9c49c037114dd3a3ec38e3358 | [
"Apache-2.0"
] | null | null | null |
/*
This is a Replicated Module.
A plug-in event generator based on the PrimaryProtonGun, for running in multi-threaded art.
It produces a GenParticleCollection of primary protons using the PrimaryProtonGun.
These Collections are used in Mu2eG4_module.cc.
Original author Lisa Goodenough
*/
// Mu2e includes.
#include "ConfigTools/inc/SimpleConfig.hh"
#include "MCDataProducts/inc/GenId.hh"
#include "MCDataProducts/inc/GenParticleCollection.hh"
// Particular generators that this code knows about.
#include "EventGenerator/inc/PrimaryProtonGun.hh"
#include "SeedService/inc/SeedService.hh"
// Includes from art and its toolchain.
#include "art/Framework/Core/ReplicatedProducer.h"
#include "art/Framework/Principal/Event.h"
#include "art/Framework/Core/ModuleMacros.h"
#include "art/Framework/Services/Registry/ServiceHandle.h"
#include "art/Framework/Principal/Handle.h"
#include "fhiclcpp/ParameterSet.h"
#include "messagefacility/MessageLogger/MessageLogger.h"
// C++ includes.
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
using namespace std;
namespace mu2e {
class PrimaryProtonGunR : public art::ReplicatedProducer {
public:
explicit PrimaryProtonGunR(fhicl::ParameterSet const& pS, art::ProcessingFrame const& pF);
// Accept compiler written d'tor. Modules are never moved or copied.
virtual void produce (art::Event& e, art::ProcessingFrame const& pF) override;
virtual void beginRun(art::Run const& r, art::ProcessingFrame const& pF) override;
private:
// Name of the run-time configuration file.
string _configfile;
bool _allowReplacement;
bool _messageOnReplacement;
bool _messageOnDefault;
int _configStatsVerbosity;
// Print final config file after all replacements.
bool _printConfig;
CLHEP::HepJamesRandom _engine;
std::unique_ptr<PrimaryProtonGun> _primaryProtonGunGenerator;
// Number of times BeginRun is called on this module
int ncalls = 0;
};
PrimaryProtonGunR::PrimaryProtonGunR(fhicl::ParameterSet const& pSet, art::ProcessingFrame const& procFrame):
art::ReplicatedProducer{pSet,procFrame},
_configfile( pSet.get<std::string> ("inputfile")),
_allowReplacement( pSet.get<bool> ("allowReplacement", true)),
_messageOnReplacement( pSet.get<bool> ("messageOnReplacement", false)),
_messageOnDefault( pSet.get<bool> ("messageOnDefault", false)),
_configStatsVerbosity( pSet.get<int> ("configStatsVerbosity", 0)),
_printConfig( pSet.get<bool> ("printConfig", false)),
_engine{art::ServiceHandle<SeedService>{}->getSeed()}
{
produces<GenParticleCollection>();
}
void PrimaryProtonGunR::beginRun(art::Run const& run, art::ProcessingFrame const& procFrame){
// The configuration of the PPG Generator does not change within a job.
if ( ++ncalls > 1){
mf::LogInfo("PrimaryProtonGunR")
<< "For Schedule: " << procFrame.scheduleID()
<< ", PrimaryProtonGunR Generator does not change state at beginRun. Hope that's OK.";
return;
}
// We don't want to print this out more than once,
// regardless of the number of instances/schedules running.
std::string schedID = std::to_string(procFrame.scheduleID().id());
if ( schedID == "0"){
cout << "Event generator configuration file: "
<< _configfile
<< "\n"
<< endl;
}
// Load the configuration, make modifications if required, and print if desired.
SimpleConfig config(_configfile, _allowReplacement, _messageOnReplacement, _messageOnDefault );
if ( _printConfig ){
config.print(cout,"PrimaryProtonGunR: ");
}
config.printAllSummaries( cout, _configStatsVerbosity, "PrimaryProtonGunR: ");
// Instantiate generator for this run.
_primaryProtonGunGenerator = std::make_unique <PrimaryProtonGun>( _engine, run, config);
}//beginRun
void PrimaryProtonGunR::produce(art::Event& evt, art::ProcessingFrame const& procFrame) {
// Make the collections to hold the output.
unique_ptr<GenParticleCollection> genParticles(new GenParticleCollection);
// Run the generator and put the generated particles into the event.
_primaryProtonGunGenerator->generate(*genParticles);
evt.put(std::move(genParticles));
}//produce()
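// Added hypothetical FHiCL sketch (parameter names taken from the pSet.get
// calls above; the module label and file name are invented):
// generate: {
// module_type: PrimaryProtonGunR
// inputfile: "genconfig_PrimaryProtons.txt" // required
// printConfig: false // the remaining flags default as in the constructor
// }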
}
DEFINE_ART_MODULE(mu2e::PrimaryProtonGunR);
| 34.766917 | 113 | 0.683824 | pavel1murat |
3392c25bc82629445d57429d1aaac874c7306995 | 635 | hpp | C++ | pythran/pythonic/numpy/ones_like.hpp | rfiischer/pythran | a580245b1b45e5eb0df01c518e442041b89afa21 | [
"BSD-3-Clause"
] | null | null | null | pythran/pythonic/numpy/ones_like.hpp | rfiischer/pythran | a580245b1b45e5eb0df01c518e442041b89afa21 | [
"BSD-3-Clause"
] | null | null | null | pythran/pythonic/numpy/ones_like.hpp | rfiischer/pythran | a580245b1b45e5eb0df01c518e442041b89afa21 | [
"BSD-3-Clause"
] | null | null | null | #ifndef PYTHONIC_NUMPY_ONESLIKE_HPP
#define PYTHONIC_NUMPY_ONESLIKE_HPP
#include "pythonic/include/numpy/ones_like.hpp"
#include "pythonic/utils/functor.hpp"
#include "pythonic/numpy/ones.hpp"
PYTHONIC_NS_BEGIN
namespace numpy
{
template <class E, class dtype>
auto ones_like(E const &expr, dtype d) -> decltype(ones(expr.shape(), d))
{
return ones(expr.shape(), d);
}
template <class E>
auto ones_like(E const &expr, types::none_type)
-> decltype(ones(expr.shape(), types::dtype_t<typename E::dtype>()))
{
return ones(expr.shape(), types::dtype_t<typename E::dtype>());
}
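// Added note: the two overloads mirror numpy.ones_like(a[, dtype]) - the
// first builds the result with the explicitly requested dtype, while the
// none_type overload derives the element type from the expression via
// E::dtype, so the result keeps both the shape and the scalar type of the
// argument.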
}
PYTHONIC_NS_END
#endif
| 21.166667 | 75 | 0.710236 | rfiischer |
33945075e34bacee54c2f5c3effe95f5674874e5 | 3,225 | cpp | C++ | src/MushGame/MushGameMessageControlInfo.cpp | quimnuss/adanaxis | a04eb945fe808aeb9be5da305a37ff47e04dd006 | [
"MIT"
] | null | null | null | src/MushGame/MushGameMessageControlInfo.cpp | quimnuss/adanaxis | a04eb945fe808aeb9be5da305a37ff47e04dd006 | [
"MIT"
] | null | null | null | src/MushGame/MushGameMessageControlInfo.cpp | quimnuss/adanaxis | a04eb945fe808aeb9be5da305a37ff47e04dd006 | [
"MIT"
] | null | null | null | //%Header {
/*****************************************************************************
*
* File: src/MushGame/MushGameMessageControlInfo.cpp
*
* Author: Andy Southgate 2002-2005
*
* This file contains original work by Andy Southgate. The author and his
* employer (Mushware Limited) irrevocably waive all of their copyright rights
* vested in this particular version of this file to the furthest extent
* permitted. The author and Mushware Limited also irrevocably waive any and
* all of their intellectual property rights arising from said file and its
* creation that would otherwise restrict the rights of any party to use and/or
* distribute the use of, the techniques and methods used herein. A written
* waiver can be obtained via http://www.mushware.com/.
*
* This software carries NO WARRANTY of any kind.
*
****************************************************************************/
//%Header } w1/s8yA5XXvqhMRSZQYCrg
/*
* $Id: MushGameMessageControlInfo.cpp,v 1.1 2005/07/06 19:08:27 southa Exp $
* $Log: MushGameMessageControlInfo.cpp,v $
* Revision 1.1 2005/07/06 19:08:27 southa
* Adanaxis control work
*
*/
#include "MushGameMessageControlInfo.h"
//%outOfLineFunctions {
const char *MushGameMessageControlInfo::AutoName(void) const
{
return "MushGameMessageControlInfo";
}
MushcoreVirtualObject *MushGameMessageControlInfo::AutoClone(void) const
{
return new MushGameMessageControlInfo(*this);
}
MushcoreVirtualObject *MushGameMessageControlInfo::AutoCreate(void) const
{
return new MushGameMessageControlInfo;
}
MushcoreVirtualObject *MushGameMessageControlInfo::AutoVirtualFactory(void)
{
return new MushGameMessageControlInfo;
}
namespace
{
void AutoInstall(void)
{
MushcoreFactory::Sgl().FactoryAdd("MushGameMessageControlInfo", MushGameMessageControlInfo::AutoVirtualFactory);
}
MushcoreInstaller AutoInstaller(AutoInstall);
} // end anonymous namespace
void
MushGameMessageControlInfo::AutoPrint(std::ostream& ioOut) const
{
ioOut << "[";
MushGameMessage::AutoPrint(ioOut);
ioOut << "timestamp=" << m_timestamp << ", ";
ioOut << "axisEvents=" << m_axisEvents << ", ";
ioOut << "keyEvents=" << m_keyEvents;
ioOut << "]";
}
bool
MushGameMessageControlInfo::AutoXMLDataProcess(MushcoreXMLIStream& ioIn, const std::string& inTagStr)
{
if (inTagStr == "obj")
{
AutoInputPrologue(ioIn);
ioIn >> *this;
AutoInputEpilogue(ioIn);
}
else if (inTagStr == "timestamp")
{
ioIn >> m_timestamp;
}
else if (inTagStr == "axisEvents")
{
ioIn >> m_axisEvents;
}
else if (inTagStr == "keyEvents")
{
ioIn >> m_keyEvents;
}
else if (MushGameMessage::AutoXMLDataProcess(ioIn, inTagStr))
{
// Tag consumed by base class
}
else
{
return false;
}
return true;
}
void
MushGameMessageControlInfo::AutoXMLPrint(MushcoreXMLOStream& ioOut) const
{
MushGameMessage::AutoXMLPrint(ioOut);
ioOut.TagSet("timestamp");
ioOut << m_timestamp;
ioOut.TagSet("axisEvents");
ioOut << m_axisEvents;
ioOut.TagSet("keyEvents");
ioOut << m_keyEvents;
}
//%outOfLineFunctions } 2pzNQJVGzbzqSBE8BGC1KA
| 28.794643 | 116 | 0.674419 | quimnuss |
33949f698540e0c307de44ebe94117d7d98436bd | 95,213 | cpp | C++ | HotSpot1.7-JVM-Linux-x86/src/share/vm/adlc/output_h.cpp | codefollower/Open-Source-Research | b9f2aed9d0f060b80be45f713c3d48fe91f247b2 | [
"Apache-2.0"
] | 184 | 2015-01-04T03:38:20.000Z | 2022-03-30T05:47:21.000Z | HotSpot1.7/src/share/vm/adlc/output_h.cpp | doczyw/Open-Source-Research | b9f2aed9d0f060b80be45f713c3d48fe91f247b2 | [
"Apache-2.0"
] | 1 | 2016-01-17T09:18:17.000Z | 2016-01-17T09:18:17.000Z | HotSpot1.7/src/share/vm/adlc/output_h.cpp | doczyw/Open-Source-Research | b9f2aed9d0f060b80be45f713c3d48fe91f247b2 | [
"Apache-2.0"
] | 101 | 2015-01-16T23:46:31.000Z | 2022-03-30T05:47:06.000Z | /*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
// output_h.cpp - Class HPP file output routines for architecture definition
#include "adlc.hpp"
// The comment delimiter used in format statements after assembler instructions.
#define commentSeperator "!"
// Generate the #define that describes the number of registers.
static void defineRegCount(FILE *fp, RegisterForm *registers) {
if (registers) {
int regCount = AdlcVMDeps::Physical + registers->_rdefs.count();
fprintf(fp,"\n");
fprintf(fp,"// the number of reserved registers + machine registers.\n");
fprintf(fp,"#define REG_COUNT %d\n", regCount);
}
}
// Output enumeration of machine register numbers
// (1)
// // Enumerate machine registers starting after reserved regs.
// // in the order of occurrence in the register block.
// enum MachRegisterNumbers {
// EAX_num = 0,
// ...
// _last_Mach_Reg
// }
void ArchDesc::buildMachRegisterNumbers(FILE *fp_hpp) {
if (_register) {
RegDef *reg_def = NULL;
// Output a #define for the number of machine registers
defineRegCount(fp_hpp, _register);
// Count all the Save_On_Entry and Always_Save registers
int saved_on_entry = 0;
int c_saved_on_entry = 0;
_register->reset_RegDefs();
while( (reg_def = _register->iter_RegDefs()) != NULL ) {
if( strcmp(reg_def->_callconv,"SOE") == 0 ||
strcmp(reg_def->_callconv,"AS") == 0 ) ++saved_on_entry;
if( strcmp(reg_def->_c_conv,"SOE") == 0 ||
strcmp(reg_def->_c_conv,"AS") == 0 ) ++c_saved_on_entry;
}
fprintf(fp_hpp, "\n");
fprintf(fp_hpp, "// the number of save_on_entry + always_saved registers.\n");
fprintf(fp_hpp, "#define MAX_SAVED_ON_ENTRY_REG_COUNT %d\n", max(saved_on_entry,c_saved_on_entry));
fprintf(fp_hpp, "#define SAVED_ON_ENTRY_REG_COUNT %d\n", saved_on_entry);
fprintf(fp_hpp, "#define C_SAVED_ON_ENTRY_REG_COUNT %d\n", c_saved_on_entry);
// (1)
// Build definition for enumeration of register numbers
fprintf(fp_hpp, "\n");
fprintf(fp_hpp, "// Enumerate machine register numbers starting after reserved regs.\n");
fprintf(fp_hpp, "// in the order of occurrence in the register block.\n");
fprintf(fp_hpp, "enum MachRegisterNumbers {\n");
// Output the register number for each register in the allocation classes
_register->reset_RegDefs();
int i = 0;
while( (reg_def = _register->iter_RegDefs()) != NULL ) {
fprintf(fp_hpp," %s_num,", reg_def->_regname);
for (int j = 0; j < 20-(int)strlen(reg_def->_regname); j++) fprintf(fp_hpp, " ");
fprintf(fp_hpp," // enum %3d, regnum %3d, reg encode %3s\n",
i++,
reg_def->register_num(),
reg_def->register_encode());
}
// Finish defining enumeration
fprintf(fp_hpp, " _last_Mach_Reg // %d\n", i);
fprintf(fp_hpp, "};\n");
}
fprintf(fp_hpp, "\n// Size of register-mask in ints\n");
fprintf(fp_hpp, "#define RM_SIZE %d\n",RegisterForm::RegMask_Size());
fprintf(fp_hpp, "// Unroll factor for loops over the data in a RegMask\n");
fprintf(fp_hpp, "#define FORALL_BODY ");
int len = RegisterForm::RegMask_Size();
for( int i = 0; i < len; i++ )
fprintf(fp_hpp, "BODY(%d) ",i);
fprintf(fp_hpp, "\n\n");
fprintf(fp_hpp,"class RegMask;\n");
// All RegMasks are declared "extern const ..." in ad_<arch>.hpp
// fprintf(fp_hpp,"extern RegMask STACK_OR_STACK_SLOTS_mask;\n\n");
}
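// Added example: with RegisterForm::RegMask_Size() == 4 the loop above emits
// #define FORALL_BODY BODY(0) BODY(1) BODY(2) BODY(3)
// so RegMask code can unroll a "for each word of the mask" loop by defining
// BODY(I) locally and expanding FORALL_BODY.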
// Output enumeration of machine register encodings
// (2)
// // Enumerate machine registers starting after reserved regs.
// // in the order of occurrence in the alloc_class(es).
// enum MachRegisterEncodes {
// EAX_enc = 0x00,
// ...
// }
void ArchDesc::buildMachRegisterEncodes(FILE *fp_hpp) {
if (_register) {
RegDef *reg_def = NULL;
RegDef *reg_def_next = NULL;
// (2)
// Build definition for enumeration of encode values
fprintf(fp_hpp, "\n");
fprintf(fp_hpp, "// Enumerate machine registers starting after reserved regs.\n");
fprintf(fp_hpp, "// in the order of occurrence in the alloc_class(es).\n");
fprintf(fp_hpp, "enum MachRegisterEncodes {\n");
// Find max enum string length.
size_t maxlen = 0;
_register->reset_RegDefs();
reg_def = _register->iter_RegDefs();
while (reg_def != NULL) {
size_t len = strlen(reg_def->_regname);
if (len > maxlen) maxlen = len;
reg_def = _register->iter_RegDefs();
}
// Output the register encoding for each register in the allocation classes
_register->reset_RegDefs();
reg_def_next = _register->iter_RegDefs();
while( (reg_def = reg_def_next) != NULL ) {
reg_def_next = _register->iter_RegDefs();
fprintf(fp_hpp," %s_enc", reg_def->_regname);
for (size_t i = strlen(reg_def->_regname); i < maxlen; i++) fprintf(fp_hpp, " ");
fprintf(fp_hpp," = %3s%s\n", reg_def->register_encode(), reg_def_next == NULL? "" : "," );
}
// Finish defining enumeration
fprintf(fp_hpp, "};\n");
} // Done with register form
}
// Declare an array containing the machine register names, strings.
static void declareRegNames(FILE *fp, RegisterForm *registers) {
if (registers) {
// fprintf(fp,"\n");
// fprintf(fp,"// An array of character pointers to machine register names.\n");
// fprintf(fp,"extern const char *regName[];\n");
}
}
// Declare an array containing the machine register sizes in 32-bit words.
void ArchDesc::declareRegSizes(FILE *fp) {
// regSize[] is not used
}
// Declare an array containing the machine register encoding values
static void declareRegEncodes(FILE *fp, RegisterForm *registers) {
if (registers) {
// // //
// fprintf(fp,"\n");
// fprintf(fp,"// An array containing the machine register encode values\n");
// fprintf(fp,"extern const char regEncode[];\n");
}
}
// ---------------------------------------------------------------------------
//------------------------------Utilities to build Instruction Classes--------
// ---------------------------------------------------------------------------
static void out_RegMask(FILE *fp) {
fprintf(fp," virtual const RegMask &out_RegMask() const;\n");
}
// ---------------------------------------------------------------------------
//--------Utilities to build MachOper and MachNode derived Classes------------
// ---------------------------------------------------------------------------
//------------------------------Utilities to build Operand Classes------------
static void in_RegMask(FILE *fp) {
fprintf(fp," virtual const RegMask *in_RegMask(int index) const;\n");
}
static void declareConstStorage(FILE *fp, FormDict &globals, OperandForm *oper) {
int i = 0;
Component *comp;
if (oper->num_consts(globals) == 0) return;
// Iterate over the component list looking for constants
oper->_components.reset();
if ((comp = oper->_components.iter()) == NULL) {
assert(oper->num_consts(globals) == 1, "Bad component list detected.\n");
const char *type = oper->ideal_type(globals);
if (!strcmp(type, "ConI")) {
if (i > 0) fprintf(fp,", ");
fprintf(fp," int32 _c%d;\n", i);
}
else if (!strcmp(type, "ConP")) {
if (i > 0) fprintf(fp,", ");
fprintf(fp," const TypePtr *_c%d;\n", i);
}
else if (!strcmp(type, "ConN")) {
if (i > 0) fprintf(fp,", ");
fprintf(fp," const TypeNarrowOop *_c%d;\n", i);
}
else if (!strcmp(type, "ConL")) {
if (i > 0) fprintf(fp,", ");
fprintf(fp," jlong _c%d;\n", i);
}
else if (!strcmp(type, "ConF")) {
if (i > 0) fprintf(fp,", ");
fprintf(fp," jfloat _c%d;\n", i);
}
else if (!strcmp(type, "ConD")) {
if (i > 0) fprintf(fp,", ");
fprintf(fp," jdouble _c%d;\n", i);
}
else if (!strcmp(type, "Bool")) {
fprintf(fp,"private:\n");
fprintf(fp," BoolTest::mask _c%d;\n", i);
fprintf(fp,"public:\n");
}
else {
assert(0, "Non-constant operand lacks component list.");
}
} // end if NULL
else {
oper->_components.reset();
while ((comp = oper->_components.iter()) != NULL) {
if (!strcmp(comp->base_type(globals), "ConI")) {
fprintf(fp," jint _c%d;\n", i);
i++;
}
else if (!strcmp(comp->base_type(globals), "ConP")) {
fprintf(fp," const TypePtr *_c%d;\n", i);
i++;
}
else if (!strcmp(comp->base_type(globals), "ConN")) {
fprintf(fp," const TypePtr *_c%d;\n", i);
i++;
}
else if (!strcmp(comp->base_type(globals), "ConL")) {
fprintf(fp," jlong _c%d;\n", i);
i++;
}
else if (!strcmp(comp->base_type(globals), "ConF")) {
fprintf(fp," jfloat _c%d;\n", i);
i++;
}
else if (!strcmp(comp->base_type(globals), "ConD")) {
fprintf(fp," jdouble _c%d;\n", i);
i++;
}
}
}
}
// Declare constructor.
// Parameters start with condition code, then all other constants
//
// (0) public:
// (1) MachXOper(int32 ccode, int32 c0, int32 c1, ..., int32 cn)
// (2) : _ccode(ccode), _c0(c0), _c1(c1), ..., _cn(cn) { }
//
static void defineConstructor(FILE *fp, const char *name, uint num_consts,
ComponentList &lst, bool is_ideal_bool,
Form::DataType constant_type, FormDict &globals) {
fprintf(fp,"public:\n");
// generate line (1)
fprintf(fp," %sOper(", name);
if( num_consts == 0 ) {
fprintf(fp,") {}\n");
return;
}
// generate parameters for constants
uint i = 0;
Component *comp;
lst.reset();
if ((comp = lst.iter()) == NULL) {
assert(num_consts == 1, "Bad component list detected.\n");
switch( constant_type ) {
case Form::idealI : {
fprintf(fp,is_ideal_bool ? "BoolTest::mask c%d" : "int32 c%d", i);
break;
}
case Form::idealN : { fprintf(fp,"const TypeNarrowOop *c%d", i); break; }
case Form::idealP : { fprintf(fp,"const TypePtr *c%d", i); break; }
case Form::idealL : { fprintf(fp,"jlong c%d", i); break; }
case Form::idealF : { fprintf(fp,"jfloat c%d", i); break; }
case Form::idealD : { fprintf(fp,"jdouble c%d", i); break; }
default:
assert(!is_ideal_bool, "Non-constant operand lacks component list.");
break;
}
} // end if NULL
else {
lst.reset();
while((comp = lst.iter()) != NULL) {
if (!strcmp(comp->base_type(globals), "ConI")) {
if (i > 0) fprintf(fp,", ");
fprintf(fp,"int32 c%d", i);
i++;
}
else if (!strcmp(comp->base_type(globals), "ConP")) {
if (i > 0) fprintf(fp,", ");
fprintf(fp,"const TypePtr *c%d", i);
i++;
}
else if (!strcmp(comp->base_type(globals), "ConN")) {
if (i > 0) fprintf(fp,", ");
fprintf(fp,"const TypePtr *c%d", i);
i++;
}
else if (!strcmp(comp->base_type(globals), "ConL")) {
if (i > 0) fprintf(fp,", ");
fprintf(fp,"jlong c%d", i);
i++;
}
else if (!strcmp(comp->base_type(globals), "ConF")) {
if (i > 0) fprintf(fp,", ");
fprintf(fp,"jfloat c%d", i);
i++;
}
else if (!strcmp(comp->base_type(globals), "ConD")) {
if (i > 0) fprintf(fp,", ");
fprintf(fp,"jdouble c%d", i);
i++;
}
else if (!strcmp(comp->base_type(globals), "Bool")) {
if (i > 0) fprintf(fp,", ");
fprintf(fp,"BoolTest::mask c%d", i);
i++;
}
}
}
// finish line (1) and start line (2)
fprintf(fp,") : ");
// generate initializers for constants
i = 0;
fprintf(fp,"_c%d(c%d)", i, i);
for( i = 1; i < num_consts; ++i) {
fprintf(fp,", _c%d(c%d)", i, i);
}
// The body for the constructor is empty
fprintf(fp," {}\n");
}
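// Added example: for a (hypothetical) operand indOffset with two ConI
// constants, defineConstructor above generates roughly
// public:
// indOffsetOper(int32 c0, int32 c1) : _c0(c0), _c1(c1) {}
// matching the (0)-(2) template sketched in the comment before the function.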
// ---------------------------------------------------------------------------
// Utilities to generate format rules for machine operands and instructions
// ---------------------------------------------------------------------------
// Generate the format rule for condition codes
static void defineCCodeDump(OperandForm* oper, FILE *fp, int i) {
assert(oper != NULL, "what");
CondInterface* cond = oper->_interface->is_CondInterface();
fprintf(fp, " if( _c%d == BoolTest::eq ) st->print(\"%s\");\n",i,cond->_equal_format);
fprintf(fp, " else if( _c%d == BoolTest::ne ) st->print(\"%s\");\n",i,cond->_not_equal_format);
fprintf(fp, " else if( _c%d == BoolTest::le ) st->print(\"%s\");\n",i,cond->_less_equal_format);
fprintf(fp, " else if( _c%d == BoolTest::ge ) st->print(\"%s\");\n",i,cond->_greater_equal_format);
fprintf(fp, " else if( _c%d == BoolTest::lt ) st->print(\"%s\");\n",i,cond->_less_format);
fprintf(fp, " else if( _c%d == BoolTest::gt ) st->print(\"%s\");\n",i,cond->_greater_format);
}
// Output code that dumps constant values, increment "i" if type is constant
static uint dump_spec_constant(FILE *fp, const char *ideal_type, uint i, OperandForm* oper) {
if (!strcmp(ideal_type, "ConI")) {
fprintf(fp," st->print(\"#%%d\", _c%d);\n", i);
fprintf(fp," st->print(\"/0x%%08x\", _c%d);\n", i);
++i;
}
else if (!strcmp(ideal_type, "ConP")) {
fprintf(fp," _c%d->dump_on(st);\n", i);
++i;
}
else if (!strcmp(ideal_type, "ConN")) {
fprintf(fp," _c%d->dump_on(st);\n", i);
++i;
}
else if (!strcmp(ideal_type, "ConL")) {
fprintf(fp," st->print(\"#\" INT64_FORMAT, _c%d);\n", i);
fprintf(fp," st->print(\"/\" PTR64_FORMAT, _c%d);\n", i);
++i;
}
else if (!strcmp(ideal_type, "ConF")) {
fprintf(fp," st->print(\"#%%f\", _c%d);\n", i);
fprintf(fp," jint _c%di = JavaValue(_c%d).get_jint();\n", i, i);
fprintf(fp," st->print(\"/0x%%x/\", _c%di);\n", i);
++i;
}
else if (!strcmp(ideal_type, "ConD")) {
fprintf(fp," st->print(\"#%%f\", _c%d);\n", i);
fprintf(fp," jlong _c%dl = JavaValue(_c%d).get_jlong();\n", i, i);
fprintf(fp," st->print(\"/\" PTR64_FORMAT, _c%dl);\n", i);
++i;
}
else if (!strcmp(ideal_type, "Bool")) {
defineCCodeDump(oper, fp,i);
++i;
}
return i;
}
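// Added example: for a ConI constant at index 0 the branch above emits
// st->print("#%d", _c0);
// st->print("/0x%08x", _c0);
// into the generated dump_spec body, printing the value in both decimal and
// hex.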
// Generate the format rule for an operand
void gen_oper_format(FILE *fp, FormDict &globals, OperandForm &oper, bool for_c_file = false) {
if (!for_c_file) {
// invoked after output #ifndef PRODUCT to ad_<arch>.hpp
// compile the bodies separately, to cut down on recompilations
fprintf(fp," virtual void int_format(PhaseRegAlloc *ra, const MachNode *node, outputStream *st) const;\n");
fprintf(fp," virtual void ext_format(PhaseRegAlloc *ra, const MachNode *node, int idx, outputStream *st) const;\n");
return;
}
  int idx = 0;                      // position of operand in match rule
// Generate internal format function, used when stored locally
fprintf(fp, "\n#ifndef PRODUCT\n");
fprintf(fp,"void %sOper::int_format(PhaseRegAlloc *ra, const MachNode *node, outputStream *st) const {\n", oper._ident);
// Generate the user-defined portion of the format
if (oper._format) {
if ( oper._format->_strings.count() != 0 ) {
// No initialization code for int_format
// Build the format from the entries in strings and rep_vars
const char *string = NULL;
oper._format->_rep_vars.reset();
oper._format->_strings.reset();
while ( (string = oper._format->_strings.iter()) != NULL ) {
// Check if this is a standard string or a replacement variable
if ( string != NameList::_signal ) {
// Normal string
// Pass through to st->print
fprintf(fp," st->print(\"%s\");\n", string);
} else {
// Replacement variable
const char *rep_var = oper._format->_rep_vars.iter();
// Check that it is a local name, and an operand
const Form* form = oper._localNames[rep_var];
if (form == NULL) {
globalAD->syntax_err(oper._linenum,
"\'%s\' not found in format for %s\n", rep_var, oper._ident);
assert(form, "replacement variable was not found in local names");
}
OperandForm *op = form->is_operand();
// Get index if register or constant
if ( op->_matrule && op->_matrule->is_base_register(globals) ) {
idx = oper.register_position( globals, rep_var);
}
else if (op->_matrule && op->_matrule->is_base_constant(globals)) {
idx = oper.constant_position( globals, rep_var);
} else {
idx = 0;
}
          // Output the invocation of this replacement variable's format function
if ( op != NULL ) op->int_format(fp, globals, idx);
if ( idx == -1 ) {
fprintf(stderr,
"Using a name, %s, that isn't in match rule\n", rep_var);
assert( strcmp(op->_ident,"label")==0, "Unimplemented");
}
} // Done with a replacement variable
} // Done with all format strings
} else {
// Default formats for base operands (RegI, RegP, ConI, ConP, ...)
oper.int_format(fp, globals, 0);
}
} else { // oper._format == NULL
// Provide a few special case formats where the AD writer cannot.
if ( strcmp(oper._ident,"Universe")==0 ) {
fprintf(fp, " st->print(\"$$univ\");\n");
}
// labelOper::int_format is defined in ad_<...>.cpp
}
// ALWAYS! Provide a special case output for condition codes.
if( oper.is_ideal_bool() ) {
defineCCodeDump(&oper, fp,0);
}
fprintf(fp,"}\n");
// Generate external format function, when data is stored externally
fprintf(fp,"void %sOper::ext_format(PhaseRegAlloc *ra, const MachNode *node, int idx, outputStream *st) const {\n", oper._ident);
// Generate the user-defined portion of the format
if (oper._format) {
if ( oper._format->_strings.count() != 0 ) {
// Check for a replacement string "$..."
if ( oper._format->_rep_vars.count() != 0 ) {
// Initialization code for ext_format
}
// Build the format from the entries in strings and rep_vars
const char *string = NULL;
oper._format->_rep_vars.reset();
oper._format->_strings.reset();
while ( (string = oper._format->_strings.iter()) != NULL ) {
// Check if this is a standard string or a replacement variable
if ( string != NameList::_signal ) {
// Normal string
// Pass through to st->print
fprintf(fp," st->print(\"%s\");\n", string);
} else {
// Replacement variable
const char *rep_var = oper._format->_rep_vars.iter();
// Check that it is a local name, and an operand
const Form* form = oper._localNames[rep_var];
if (form == NULL) {
globalAD->syntax_err(oper._linenum,
"\'%s\' not found in format for %s\n", rep_var, oper._ident);
assert(form, "replacement variable was not found in local names");
}
OperandForm *op = form->is_operand();
// Get index if register or constant
if ( op->_matrule && op->_matrule->is_base_register(globals) ) {
idx = oper.register_position( globals, rep_var);
}
else if (op->_matrule && op->_matrule->is_base_constant(globals)) {
idx = oper.constant_position( globals, rep_var);
} else {
idx = 0;
}
          // Output the invocation of this replacement variable's format function
if ( op != NULL ) op->ext_format(fp, globals, idx);
// Lookup the index position of the replacement variable
idx = oper._components.operand_position_format(rep_var, &oper);
if ( idx == -1 ) {
fprintf(stderr,
"Using a name, %s, that isn't in match rule\n", rep_var);
assert( strcmp(op->_ident,"label")==0, "Unimplemented");
}
} // Done with a replacement variable
} // Done with all format strings
} else {
// Default formats for base operands (RegI, RegP, ConI, ConP, ...)
oper.ext_format(fp, globals, 0);
}
} else { // oper._format == NULL
// Provide a few special case formats where the AD writer cannot.
if ( strcmp(oper._ident,"Universe")==0 ) {
fprintf(fp, " st->print(\"$$univ\");\n");
}
// labelOper::ext_format is defined in ad_<...>.cpp
}
// ALWAYS! Provide a special case output for condition codes.
if( oper.is_ideal_bool() ) {
defineCCodeDump(&oper, fp,0);
}
fprintf(fp, "}\n");
fprintf(fp, "#endif\n");
}
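// For illustration only: for a hypothetical operand "foo" whose .ad format
// rule is the plain string "FOO", the pair of functions emitted above is
// roughly
//
//   #ifndef PRODUCT
//   void fooOper::int_format(PhaseRegAlloc *ra, const MachNode *node, outputStream *st) const {
//     st->print("FOO");
//   }
//   void fooOper::ext_format(PhaseRegAlloc *ra, const MachNode *node, int idx, outputStream *st) const {
//     st->print("FOO");
//   }
//   #endif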
// Generate the format rule for an instruction
void gen_inst_format(FILE *fp, FormDict &globals, InstructForm &inst, bool for_c_file = false) {
if (!for_c_file) {
// compile the bodies separately, to cut down on recompilations
// #ifndef PRODUCT region generated by caller
fprintf(fp," virtual void format(PhaseRegAlloc *ra, outputStream *st) const;\n");
return;
}
// Define the format function
fprintf(fp, "#ifndef PRODUCT\n");
fprintf(fp, "void %sNode::format(PhaseRegAlloc *ra, outputStream *st) const {\n", inst._ident);
// Generate the user-defined portion of the format
if( inst._format ) {
// If there are replacement variables,
// Generate index values needed for determining the operand position
if( inst._format->_rep_vars.count() )
inst.index_temps(fp, globals);
// Build the format from the entries in strings and rep_vars
const char *string = NULL;
inst._format->_rep_vars.reset();
inst._format->_strings.reset();
while( (string = inst._format->_strings.iter()) != NULL ) {
fprintf(fp," ");
// Check if this is a standard string or a replacement variable
if( string == NameList::_signal ) { // Replacement variable
const char* rep_var = inst._format->_rep_vars.iter();
inst.rep_var_format( fp, rep_var);
} else if( string == NameList::_signal3 ) { // Replacement variable in raw text
const char* rep_var = inst._format->_rep_vars.iter();
const Form *form = inst._localNames[rep_var];
if (form == NULL) {
fprintf(stderr, "unknown replacement variable in format statement: '%s'\n", rep_var);
assert(false, "ShouldNotReachHere()");
}
OpClassForm *opc = form->is_opclass();
assert( opc, "replacement variable was not found in local names");
// Lookup the index position of the replacement variable
int idx = inst.operand_position_format(rep_var);
if ( idx == -1 ) {
assert( strcmp(opc->_ident,"label")==0, "Unimplemented");
assert( false, "ShouldNotReachHere()");
}
if (inst.is_noninput_operand(idx)) {
assert( false, "ShouldNotReachHere()");
} else {
// Output the format call for this operand
fprintf(fp,"opnd_array(%d)",idx);
}
rep_var = inst._format->_rep_vars.iter();
inst._format->_strings.iter();
if ( strcmp(rep_var,"$constant") == 0 && opc->is_operand()) {
Form::DataType constant_type = form->is_operand()->is_base_constant(globals);
if ( constant_type == Form::idealD ) {
fprintf(fp,"->constantD()");
} else if ( constant_type == Form::idealF ) {
fprintf(fp,"->constantF()");
} else if ( constant_type == Form::idealL ) {
fprintf(fp,"->constantL()");
} else {
fprintf(fp,"->constant()");
}
} else if ( strcmp(rep_var,"$cmpcode") == 0) {
fprintf(fp,"->ccode()");
} else {
assert( false, "ShouldNotReachHere()");
}
} else if( string == NameList::_signal2 ) // Raw program text
fputs(inst._format->_strings.iter(), fp);
else
fprintf(fp,"st->print(\"%s\");\n", string);
} // Done with all format strings
} // Done generating the user-defined portion of the format
// Add call debug info automatically
Form::CallType call_type = inst.is_ideal_call();
if( call_type != Form::invalid_type ) {
switch( call_type ) {
case Form::JAVA_DYNAMIC:
fprintf(fp," _method->print_short_name(st);\n");
break;
case Form::JAVA_STATIC:
fprintf(fp," if( _method ) _method->print_short_name(st);\n");
fprintf(fp," else st->print(\" wrapper for: %%s\", _name);\n");
fprintf(fp," if( !_method ) dump_trap_args(st);\n");
break;
case Form::JAVA_COMPILED:
case Form::JAVA_INTERP:
break;
case Form::JAVA_RUNTIME:
case Form::JAVA_LEAF:
case Form::JAVA_NATIVE:
fprintf(fp," st->print(\" %%s\", _name);");
break;
default:
assert(0,"ShouldNotReachHere");
}
fprintf(fp, " st->print_cr(\"\");\n" );
fprintf(fp, " if (_jvms) _jvms->format(ra, this, st); else st->print_cr(\" No JVM State Info\");\n" );
fprintf(fp, " st->print(\" # \");\n" );
fprintf(fp, " if( _jvms && _oop_map ) _oop_map->print_on(st);\n");
}
else if(inst.is_ideal_safepoint()) {
fprintf(fp, " st->print(\"\");\n" );
fprintf(fp, " if (_jvms) _jvms->format(ra, this, st); else st->print_cr(\" No JVM State Info\");\n" );
fprintf(fp, " st->print(\" # \");\n" );
fprintf(fp, " if( _jvms && _oop_map ) _oop_map->print_on(st);\n");
}
else if( inst.is_ideal_if() ) {
fprintf(fp, " st->print(\" P=%%f C=%%f\",_prob,_fcnt);\n" );
}
else if( inst.is_ideal_mem() ) {
// Print out the field name if available to improve readability
fprintf(fp, " if (ra->C->alias_type(adr_type())->field() != NULL) {\n");
fprintf(fp, " ciField* f = ra->C->alias_type(adr_type())->field();\n");
fprintf(fp, " st->print(\" %s Field: \");\n", commentSeperator);
fprintf(fp, " if (f->is_volatile())\n");
fprintf(fp, " st->print(\"volatile \");\n");
fprintf(fp, " f->holder()->name()->print_symbol_on(st);\n");
fprintf(fp, " st->print(\".\");\n");
fprintf(fp, " f->name()->print_symbol_on(st);\n");
fprintf(fp, " if (f->is_constant())\n");
fprintf(fp, " st->print(\" (constant)\");\n");
fprintf(fp, " } else {\n");
// Make sure 'Volatile' gets printed out
fprintf(fp, " if (ra->C->alias_type(adr_type())->is_volatile())\n");
fprintf(fp, " st->print(\" volatile!\");\n");
fprintf(fp, " }\n");
}
// Complete the definition of the format function
fprintf(fp, "}\n#endif\n");
}
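// For illustration only: for a hypothetical instruction "fooI" whose format
// rule is a single plain string, the function emitted above is roughly
//
//   #ifndef PRODUCT
//   void fooINode::format(PhaseRegAlloc *ra, outputStream *st) const {
//     st->print("foo     ...");
//   }
//   #endif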
void ArchDesc::declare_pipe_classes(FILE *fp_hpp) {
if (!_pipeline)
return;
fprintf(fp_hpp, "\n");
fprintf(fp_hpp, "// Pipeline_Use_Cycle_Mask Class\n");
fprintf(fp_hpp, "class Pipeline_Use_Cycle_Mask {\n");
if (_pipeline->_maxcycleused <=
#ifdef SPARC
64
#else
32
#endif
) {
fprintf(fp_hpp, "protected:\n");
fprintf(fp_hpp, " %s _mask;\n\n", _pipeline->_maxcycleused <= 32 ? "uint" : "uint64_t" );
fprintf(fp_hpp, "public:\n");
fprintf(fp_hpp, " Pipeline_Use_Cycle_Mask() : _mask(0) {}\n\n");
if (_pipeline->_maxcycleused <= 32)
fprintf(fp_hpp, " Pipeline_Use_Cycle_Mask(uint mask) : _mask(mask) {}\n\n");
else {
fprintf(fp_hpp, " Pipeline_Use_Cycle_Mask(uint mask1, uint mask2) : _mask((((uint64_t)mask1) << 32) | mask2) {}\n\n");
fprintf(fp_hpp, " Pipeline_Use_Cycle_Mask(uint64_t mask) : _mask(mask) {}\n\n");
}
fprintf(fp_hpp, " Pipeline_Use_Cycle_Mask& operator=(const Pipeline_Use_Cycle_Mask &in) {\n");
fprintf(fp_hpp, " _mask = in._mask;\n");
fprintf(fp_hpp, " return *this;\n");
fprintf(fp_hpp, " }\n\n");
fprintf(fp_hpp, " bool overlaps(const Pipeline_Use_Cycle_Mask &in2) const {\n");
fprintf(fp_hpp, " return ((_mask & in2._mask) != 0);\n");
fprintf(fp_hpp, " }\n\n");
fprintf(fp_hpp, " Pipeline_Use_Cycle_Mask& operator<<=(int n) {\n");
fprintf(fp_hpp, " _mask <<= n;\n");
fprintf(fp_hpp, " return *this;\n");
fprintf(fp_hpp, " }\n\n");
fprintf(fp_hpp, " void Or(const Pipeline_Use_Cycle_Mask &in2) {\n");
fprintf(fp_hpp, " _mask |= in2._mask;\n");
fprintf(fp_hpp, " }\n\n");
fprintf(fp_hpp, " friend Pipeline_Use_Cycle_Mask operator&(const Pipeline_Use_Cycle_Mask &, const Pipeline_Use_Cycle_Mask &);\n");
fprintf(fp_hpp, " friend Pipeline_Use_Cycle_Mask operator|(const Pipeline_Use_Cycle_Mask &, const Pipeline_Use_Cycle_Mask &);\n\n");
}
else {
fprintf(fp_hpp, "protected:\n");
uint masklen = (_pipeline->_maxcycleused + 31) >> 5;
uint l;
fprintf(fp_hpp, " uint ");
for (l = 1; l <= masklen; l++)
fprintf(fp_hpp, "_mask%d%s", l, l < masklen ? ", " : ";\n\n");
fprintf(fp_hpp, "public:\n");
fprintf(fp_hpp, " Pipeline_Use_Cycle_Mask() : ");
for (l = 1; l <= masklen; l++)
fprintf(fp_hpp, "_mask%d(0)%s", l, l < masklen ? ", " : " {}\n\n");
fprintf(fp_hpp, " Pipeline_Use_Cycle_Mask(");
for (l = 1; l <= masklen; l++)
fprintf(fp_hpp, "uint mask%d%s", l, l < masklen ? ", " : ") : ");
for (l = 1; l <= masklen; l++)
fprintf(fp_hpp, "_mask%d(mask%d)%s", l, l, l < masklen ? ", " : " {}\n\n");
fprintf(fp_hpp, " Pipeline_Use_Cycle_Mask& operator=(const Pipeline_Use_Cycle_Mask &in) {\n");
for (l = 1; l <= masklen; l++)
fprintf(fp_hpp, " _mask%d = in._mask%d;\n", l, l);
fprintf(fp_hpp, " return *this;\n");
fprintf(fp_hpp, " }\n\n");
fprintf(fp_hpp, " Pipeline_Use_Cycle_Mask intersect(const Pipeline_Use_Cycle_Mask &in2) {\n");
fprintf(fp_hpp, " Pipeline_Use_Cycle_Mask out;\n");
for (l = 1; l <= masklen; l++)
fprintf(fp_hpp, " out._mask%d = _mask%d & in2._mask%d;\n", l, l, l);
fprintf(fp_hpp, " return out;\n");
fprintf(fp_hpp, " }\n\n");
fprintf(fp_hpp, " bool overlaps(const Pipeline_Use_Cycle_Mask &in2) const {\n");
fprintf(fp_hpp, " return (");
for (l = 1; l <= masklen; l++)
fprintf(fp_hpp, "((_mask%d & in2._mask%d) != 0)%s", l, l, l < masklen ? " || " : "");
fprintf(fp_hpp, ") ? true : false;\n");
fprintf(fp_hpp, " }\n\n");
fprintf(fp_hpp, " Pipeline_Use_Cycle_Mask& operator<<=(int n) {\n");
fprintf(fp_hpp, " if (n >= 32)\n");
fprintf(fp_hpp, " do {\n ");
for (l = masklen; l > 1; l--)
fprintf(fp_hpp, " _mask%d = _mask%d;", l, l-1);
fprintf(fp_hpp, " _mask%d = 0;\n", 1);
fprintf(fp_hpp, " } while ((n -= 32) >= 32);\n\n");
fprintf(fp_hpp, " if (n > 0) {\n");
fprintf(fp_hpp, " uint m = 32 - n;\n");
fprintf(fp_hpp, " uint mask = (1 << n) - 1;\n");
fprintf(fp_hpp, " uint temp%d = mask & (_mask%d >> m); _mask%d <<= n;\n", 2, 1, 1);
for (l = 2; l < masklen; l++) {
fprintf(fp_hpp, " uint temp%d = mask & (_mask%d >> m); _mask%d <<= n; _mask%d |= temp%d;\n", l+1, l, l, l, l);
}
fprintf(fp_hpp, " _mask%d <<= n; _mask%d |= temp%d;\n", masklen, masklen, masklen);
fprintf(fp_hpp, " }\n");
fprintf(fp_hpp, " return *this;\n");
fprintf(fp_hpp, " }\n\n");
fprintf(fp_hpp, " void Or(const Pipeline_Use_Cycle_Mask &);\n\n");
fprintf(fp_hpp, " friend Pipeline_Use_Cycle_Mask operator&(const Pipeline_Use_Cycle_Mask &, const Pipeline_Use_Cycle_Mask &);\n");
fprintf(fp_hpp, " friend Pipeline_Use_Cycle_Mask operator|(const Pipeline_Use_Cycle_Mask &, const Pipeline_Use_Cycle_Mask &);\n\n");
}
fprintf(fp_hpp, " friend class Pipeline_Use;\n\n");
fprintf(fp_hpp, " friend class Pipeline_Use_Element;\n\n");
fprintf(fp_hpp, "};\n\n");
uint rescount = 0;
const char *resource;
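  // Count only elementary resources: a power-of-two mask denotes a single
  // functional unit, while a compound mask (the OR of several units) is a
  // multiple-choice resource and is excluded by the (mask & (mask-1)) test.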
for ( _pipeline->_reslist.reset(); (resource = _pipeline->_reslist.iter()) != NULL; ) {
int mask = _pipeline->_resdict[resource]->is_resource()->mask();
if ((mask & (mask-1)) == 0)
rescount++;
}
fprintf(fp_hpp, "// Pipeline_Use_Element Class\n");
fprintf(fp_hpp, "class Pipeline_Use_Element {\n");
fprintf(fp_hpp, "protected:\n");
fprintf(fp_hpp, " // Mask of used functional units\n");
fprintf(fp_hpp, " uint _used;\n\n");
fprintf(fp_hpp, " // Lower and upper bound of functional unit number range\n");
fprintf(fp_hpp, " uint _lb, _ub;\n\n");
fprintf(fp_hpp, " // Indicates multiple functionals units available\n");
fprintf(fp_hpp, " bool _multiple;\n\n");
fprintf(fp_hpp, " // Mask of specific used cycles\n");
fprintf(fp_hpp, " Pipeline_Use_Cycle_Mask _mask;\n\n");
fprintf(fp_hpp, "public:\n");
fprintf(fp_hpp, " Pipeline_Use_Element() {}\n\n");
fprintf(fp_hpp, " Pipeline_Use_Element(uint used, uint lb, uint ub, bool multiple, Pipeline_Use_Cycle_Mask mask)\n");
fprintf(fp_hpp, " : _used(used), _lb(lb), _ub(ub), _multiple(multiple), _mask(mask) {}\n\n");
fprintf(fp_hpp, " uint used() const { return _used; }\n\n");
fprintf(fp_hpp, " uint lowerBound() const { return _lb; }\n\n");
fprintf(fp_hpp, " uint upperBound() const { return _ub; }\n\n");
fprintf(fp_hpp, " bool multiple() const { return _multiple; }\n\n");
fprintf(fp_hpp, " Pipeline_Use_Cycle_Mask mask() const { return _mask; }\n\n");
fprintf(fp_hpp, " bool overlaps(const Pipeline_Use_Element &in2) const {\n");
fprintf(fp_hpp, " return ((_used & in2._used) != 0 && _mask.overlaps(in2._mask));\n");
fprintf(fp_hpp, " }\n\n");
fprintf(fp_hpp, " void step(uint cycles) {\n");
fprintf(fp_hpp, " _used = 0;\n");
fprintf(fp_hpp, " _mask <<= cycles;\n");
fprintf(fp_hpp, " }\n\n");
fprintf(fp_hpp, " friend class Pipeline_Use;\n");
fprintf(fp_hpp, "};\n\n");
fprintf(fp_hpp, "// Pipeline_Use Class\n");
fprintf(fp_hpp, "class Pipeline_Use {\n");
fprintf(fp_hpp, "protected:\n");
fprintf(fp_hpp, " // These resources can be used\n");
fprintf(fp_hpp, " uint _resources_used;\n\n");
fprintf(fp_hpp, " // These resources are used; excludes multiple choice functional units\n");
fprintf(fp_hpp, " uint _resources_used_exclusively;\n\n");
fprintf(fp_hpp, " // Number of elements\n");
fprintf(fp_hpp, " uint _count;\n\n");
fprintf(fp_hpp, " // This is the array of Pipeline_Use_Elements\n");
fprintf(fp_hpp, " Pipeline_Use_Element * _elements;\n\n");
fprintf(fp_hpp, "public:\n");
fprintf(fp_hpp, " Pipeline_Use(uint resources_used, uint resources_used_exclusively, uint count, Pipeline_Use_Element *elements)\n");
fprintf(fp_hpp, " : _resources_used(resources_used)\n");
fprintf(fp_hpp, " , _resources_used_exclusively(resources_used_exclusively)\n");
fprintf(fp_hpp, " , _count(count)\n");
fprintf(fp_hpp, " , _elements(elements)\n");
fprintf(fp_hpp, " {}\n\n");
fprintf(fp_hpp, " uint resourcesUsed() const { return _resources_used; }\n\n");
fprintf(fp_hpp, " uint resourcesUsedExclusively() const { return _resources_used_exclusively; }\n\n");
fprintf(fp_hpp, " uint count() const { return _count; }\n\n");
fprintf(fp_hpp, " Pipeline_Use_Element * element(uint i) const { return &_elements[i]; }\n\n");
fprintf(fp_hpp, " uint full_latency(uint delay, const Pipeline_Use &pred) const;\n\n");
fprintf(fp_hpp, " void add_usage(const Pipeline_Use &pred);\n\n");
fprintf(fp_hpp, " void reset() {\n");
fprintf(fp_hpp, " _resources_used = _resources_used_exclusively = 0;\n");
fprintf(fp_hpp, " };\n\n");
fprintf(fp_hpp, " void step(uint cycles) {\n");
fprintf(fp_hpp, " reset();\n");
fprintf(fp_hpp, " for (uint i = 0; i < %d; i++)\n",
rescount);
fprintf(fp_hpp, " (&_elements[i])->step(cycles);\n");
fprintf(fp_hpp, " };\n\n");
fprintf(fp_hpp, " static const Pipeline_Use elaborated_use;\n");
fprintf(fp_hpp, " static const Pipeline_Use_Element elaborated_elements[%d];\n\n",
rescount);
fprintf(fp_hpp, " friend class Pipeline;\n");
fprintf(fp_hpp, "};\n\n");
fprintf(fp_hpp, "// Pipeline Class\n");
fprintf(fp_hpp, "class Pipeline {\n");
fprintf(fp_hpp, "public:\n");
fprintf(fp_hpp, " static bool enabled() { return %s; }\n\n",
_pipeline ? "true" : "false" );
assert( _pipeline->_maxInstrsPerBundle &&
( _pipeline->_instrUnitSize || _pipeline->_bundleUnitSize) &&
_pipeline->_instrFetchUnitSize &&
_pipeline->_instrFetchUnits,
"unspecified pipeline architecture units");
uint unitSize = _pipeline->_instrUnitSize ? _pipeline->_instrUnitSize : _pipeline->_bundleUnitSize;
fprintf(fp_hpp, " enum {\n");
fprintf(fp_hpp, " _variable_size_instructions = %d,\n",
_pipeline->_variableSizeInstrs ? 1 : 0);
fprintf(fp_hpp, " _fixed_size_instructions = %d,\n",
_pipeline->_variableSizeInstrs ? 0 : 1);
fprintf(fp_hpp, " _branch_has_delay_slot = %d,\n",
_pipeline->_branchHasDelaySlot ? 1 : 0);
fprintf(fp_hpp, " _max_instrs_per_bundle = %d,\n",
_pipeline->_maxInstrsPerBundle);
fprintf(fp_hpp, " _max_bundles_per_cycle = %d,\n",
_pipeline->_maxBundlesPerCycle);
fprintf(fp_hpp, " _max_instrs_per_cycle = %d\n",
_pipeline->_maxBundlesPerCycle * _pipeline->_maxInstrsPerBundle);
fprintf(fp_hpp, " };\n\n");
fprintf(fp_hpp, " static bool instr_has_unit_size() { return %s; }\n\n",
_pipeline->_instrUnitSize != 0 ? "true" : "false" );
if( _pipeline->_bundleUnitSize != 0 )
if( _pipeline->_instrUnitSize != 0 )
fprintf(fp_hpp, "// Individual Instructions may be bundled together by the hardware\n\n");
else
fprintf(fp_hpp, "// Instructions exist only in bundles\n\n");
else
fprintf(fp_hpp, "// Bundling is not supported\n\n");
if( _pipeline->_instrUnitSize != 0 )
fprintf(fp_hpp, " // Size of an instruction\n");
else
fprintf(fp_hpp, " // Size of an individual instruction does not exist - unsupported\n");
fprintf(fp_hpp, " static uint instr_unit_size() {");
if( _pipeline->_instrUnitSize == 0 )
fprintf(fp_hpp, " assert( false, \"Instructions are only in bundles\" );");
fprintf(fp_hpp, " return %d; };\n\n", _pipeline->_instrUnitSize);
if( _pipeline->_bundleUnitSize != 0 )
fprintf(fp_hpp, " // Size of a bundle\n");
else
fprintf(fp_hpp, " // Bundles do not exist - unsupported\n");
fprintf(fp_hpp, " static uint bundle_unit_size() {");
if( _pipeline->_bundleUnitSize == 0 )
fprintf(fp_hpp, " assert( false, \"Bundles are not supported\" );");
fprintf(fp_hpp, " return %d; };\n\n", _pipeline->_bundleUnitSize);
fprintf(fp_hpp, " static bool requires_bundling() { return %s; }\n\n",
_pipeline->_bundleUnitSize != 0 && _pipeline->_instrUnitSize == 0 ? "true" : "false" );
fprintf(fp_hpp, "private:\n");
fprintf(fp_hpp, " Pipeline(); // Not a legal constructor\n");
fprintf(fp_hpp, "\n");
fprintf(fp_hpp, " const unsigned char _read_stage_count;\n");
fprintf(fp_hpp, " const unsigned char _write_stage;\n");
fprintf(fp_hpp, " const unsigned char _fixed_latency;\n");
fprintf(fp_hpp, " const unsigned char _instruction_count;\n");
fprintf(fp_hpp, " const bool _has_fixed_latency;\n");
fprintf(fp_hpp, " const bool _has_branch_delay;\n");
fprintf(fp_hpp, " const bool _has_multiple_bundles;\n");
fprintf(fp_hpp, " const bool _force_serialization;\n");
fprintf(fp_hpp, " const bool _may_have_no_code;\n");
fprintf(fp_hpp, " const enum machPipelineStages * const _read_stages;\n");
fprintf(fp_hpp, " const enum machPipelineStages * const _resource_stage;\n");
fprintf(fp_hpp, " const uint * const _resource_cycles;\n");
fprintf(fp_hpp, " const Pipeline_Use _resource_use;\n");
fprintf(fp_hpp, "\n");
fprintf(fp_hpp, "public:\n");
fprintf(fp_hpp, " Pipeline(uint write_stage,\n");
fprintf(fp_hpp, " uint count,\n");
fprintf(fp_hpp, " bool has_fixed_latency,\n");
fprintf(fp_hpp, " uint fixed_latency,\n");
fprintf(fp_hpp, " uint instruction_count,\n");
fprintf(fp_hpp, " bool has_branch_delay,\n");
fprintf(fp_hpp, " bool has_multiple_bundles,\n");
fprintf(fp_hpp, " bool force_serialization,\n");
fprintf(fp_hpp, " bool may_have_no_code,\n");
fprintf(fp_hpp, " enum machPipelineStages * const dst,\n");
fprintf(fp_hpp, " enum machPipelineStages * const stage,\n");
fprintf(fp_hpp, " uint * const cycles,\n");
fprintf(fp_hpp, " Pipeline_Use resource_use)\n");
fprintf(fp_hpp, " : _write_stage(write_stage)\n");
fprintf(fp_hpp, " , _read_stage_count(count)\n");
fprintf(fp_hpp, " , _has_fixed_latency(has_fixed_latency)\n");
fprintf(fp_hpp, " , _fixed_latency(fixed_latency)\n");
fprintf(fp_hpp, " , _read_stages(dst)\n");
fprintf(fp_hpp, " , _resource_stage(stage)\n");
fprintf(fp_hpp, " , _resource_cycles(cycles)\n");
fprintf(fp_hpp, " , _resource_use(resource_use)\n");
fprintf(fp_hpp, " , _instruction_count(instruction_count)\n");
fprintf(fp_hpp, " , _has_branch_delay(has_branch_delay)\n");
fprintf(fp_hpp, " , _has_multiple_bundles(has_multiple_bundles)\n");
fprintf(fp_hpp, " , _force_serialization(force_serialization)\n");
fprintf(fp_hpp, " , _may_have_no_code(may_have_no_code)\n");
fprintf(fp_hpp, " {};\n");
fprintf(fp_hpp, "\n");
fprintf(fp_hpp, " uint writeStage() const {\n");
fprintf(fp_hpp, " return (_write_stage);\n");
fprintf(fp_hpp, " }\n");
fprintf(fp_hpp, "\n");
fprintf(fp_hpp, " enum machPipelineStages readStage(int ndx) const {\n");
fprintf(fp_hpp, " return (ndx < _read_stage_count ? _read_stages[ndx] : stage_undefined);");
fprintf(fp_hpp, " }\n\n");
fprintf(fp_hpp, " uint resourcesUsed() const {\n");
fprintf(fp_hpp, " return _resource_use.resourcesUsed();\n }\n\n");
fprintf(fp_hpp, " uint resourcesUsedExclusively() const {\n");
fprintf(fp_hpp, " return _resource_use.resourcesUsedExclusively();\n }\n\n");
fprintf(fp_hpp, " bool hasFixedLatency() const {\n");
fprintf(fp_hpp, " return (_has_fixed_latency);\n }\n\n");
fprintf(fp_hpp, " uint fixedLatency() const {\n");
fprintf(fp_hpp, " return (_fixed_latency);\n }\n\n");
fprintf(fp_hpp, " uint functional_unit_latency(uint start, const Pipeline *pred) const;\n\n");
fprintf(fp_hpp, " uint operand_latency(uint opnd, const Pipeline *pred) const;\n\n");
fprintf(fp_hpp, " const Pipeline_Use& resourceUse() const {\n");
fprintf(fp_hpp, " return (_resource_use); }\n\n");
fprintf(fp_hpp, " const Pipeline_Use_Element * resourceUseElement(uint i) const {\n");
fprintf(fp_hpp, " return (&_resource_use._elements[i]); }\n\n");
fprintf(fp_hpp, " uint resourceUseCount() const {\n");
fprintf(fp_hpp, " return (_resource_use._count); }\n\n");
fprintf(fp_hpp, " uint instructionCount() const {\n");
fprintf(fp_hpp, " return (_instruction_count); }\n\n");
fprintf(fp_hpp, " bool hasBranchDelay() const {\n");
fprintf(fp_hpp, " return (_has_branch_delay); }\n\n");
fprintf(fp_hpp, " bool hasMultipleBundles() const {\n");
fprintf(fp_hpp, " return (_has_multiple_bundles); }\n\n");
fprintf(fp_hpp, " bool forceSerialization() const {\n");
fprintf(fp_hpp, " return (_force_serialization); }\n\n");
fprintf(fp_hpp, " bool mayHaveNoCode() const {\n");
fprintf(fp_hpp, " return (_may_have_no_code); }\n\n");
fprintf(fp_hpp, "//const Pipeline_Use_Cycle_Mask& resourceUseMask(int resource) const {\n");
fprintf(fp_hpp, "// return (_resource_use_masks[resource]); }\n\n");
fprintf(fp_hpp, "\n#ifndef PRODUCT\n");
fprintf(fp_hpp, " static const char * stageName(uint i);\n");
fprintf(fp_hpp, "#endif\n");
fprintf(fp_hpp, "};\n\n");
fprintf(fp_hpp, "// Bundle class\n");
fprintf(fp_hpp, "class Bundle {\n");
uint mshift = 0;
for (uint msize = _pipeline->_maxInstrsPerBundle * _pipeline->_maxBundlesPerCycle; msize != 0; msize >>= 1)
mshift++;
uint rshift = rescount;
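  // mshift is the number of bits needed to hold the per-cycle instruction
  // count; rshift reserves one bit per elementary resource.  Both feed the
  // bit-field widths of _instr_count and _resources_used emitted below.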
fprintf(fp_hpp, "protected:\n");
fprintf(fp_hpp, " enum {\n");
fprintf(fp_hpp, " _unused_delay = 0x%x,\n", 0);
fprintf(fp_hpp, " _use_nop_delay = 0x%x,\n", 1);
fprintf(fp_hpp, " _use_unconditional_delay = 0x%x,\n", 2);
fprintf(fp_hpp, " _use_conditional_delay = 0x%x,\n", 3);
fprintf(fp_hpp, " _used_in_conditional_delay = 0x%x,\n", 4);
fprintf(fp_hpp, " _used_in_unconditional_delay = 0x%x,\n", 5);
fprintf(fp_hpp, " _used_in_all_conditional_delays = 0x%x,\n", 6);
fprintf(fp_hpp, "\n");
fprintf(fp_hpp, " _use_delay = 0x%x,\n", 3);
fprintf(fp_hpp, " _used_in_delay = 0x%x\n", 4);
fprintf(fp_hpp, " };\n\n");
fprintf(fp_hpp, " uint _flags : 3,\n");
fprintf(fp_hpp, " _starts_bundle : 1,\n");
fprintf(fp_hpp, " _instr_count : %d,\n", mshift);
fprintf(fp_hpp, " _resources_used : %d;\n", rshift);
fprintf(fp_hpp, "public:\n");
fprintf(fp_hpp, " Bundle() : _flags(_unused_delay), _starts_bundle(0), _instr_count(0), _resources_used(0) {}\n\n");
fprintf(fp_hpp, " void set_instr_count(uint i) { _instr_count = i; }\n");
fprintf(fp_hpp, " void set_resources_used(uint i) { _resources_used = i; }\n");
fprintf(fp_hpp, " void clear_usage() { _flags = _unused_delay; }\n");
fprintf(fp_hpp, " void set_starts_bundle() { _starts_bundle = true; }\n");
fprintf(fp_hpp, " uint flags() const { return (_flags); }\n");
fprintf(fp_hpp, " uint instr_count() const { return (_instr_count); }\n");
fprintf(fp_hpp, " uint resources_used() const { return (_resources_used); }\n");
fprintf(fp_hpp, " bool starts_bundle() const { return (_starts_bundle != 0); }\n");
fprintf(fp_hpp, " void set_use_nop_delay() { _flags = _use_nop_delay; }\n");
fprintf(fp_hpp, " void set_use_unconditional_delay() { _flags = _use_unconditional_delay; }\n");
fprintf(fp_hpp, " void set_use_conditional_delay() { _flags = _use_conditional_delay; }\n");
fprintf(fp_hpp, " void set_used_in_unconditional_delay() { _flags = _used_in_unconditional_delay; }\n");
fprintf(fp_hpp, " void set_used_in_conditional_delay() { _flags = _used_in_conditional_delay; }\n");
fprintf(fp_hpp, " void set_used_in_all_conditional_delays() { _flags = _used_in_all_conditional_delays; }\n");
fprintf(fp_hpp, " bool use_nop_delay() { return (_flags == _use_nop_delay); }\n");
fprintf(fp_hpp, " bool use_unconditional_delay() { return (_flags == _use_unconditional_delay); }\n");
fprintf(fp_hpp, " bool use_conditional_delay() { return (_flags == _use_conditional_delay); }\n");
fprintf(fp_hpp, " bool used_in_unconditional_delay() { return (_flags == _used_in_unconditional_delay); }\n");
fprintf(fp_hpp, " bool used_in_conditional_delay() { return (_flags == _used_in_conditional_delay); }\n");
fprintf(fp_hpp, " bool used_in_all_conditional_delays() { return (_flags == _used_in_all_conditional_delays); }\n");
fprintf(fp_hpp, " bool use_delay() { return ((_flags & _use_delay) != 0); }\n");
fprintf(fp_hpp, " bool used_in_delay() { return ((_flags & _used_in_delay) != 0); }\n\n");
fprintf(fp_hpp, " enum {\n");
fprintf(fp_hpp, " _nop_count = %d\n",
_pipeline->_nopcnt);
fprintf(fp_hpp, " };\n\n");
fprintf(fp_hpp, " static void initialize_nops(MachNode *nop_list[%d], Compile* C);\n\n",
_pipeline->_nopcnt);
fprintf(fp_hpp, "#ifndef PRODUCT\n");
fprintf(fp_hpp, " void dump(outputStream *st = tty) const;\n");
fprintf(fp_hpp, "#endif\n");
fprintf(fp_hpp, "};\n\n");
// const char *classname;
// for (_pipeline->_classlist.reset(); (classname = _pipeline->_classlist.iter()) != NULL; ) {
// PipeClassForm *pipeclass = _pipeline->_classdict[classname]->is_pipeclass();
// fprintf(fp_hpp, "// Pipeline Class Instance for \"%s\"\n", classname);
// }
}
//------------------------------declareClasses---------------------------------
// Construct the class hierarchy of MachNode classes from the instruction &
// operand lists
void ArchDesc::declareClasses(FILE *fp) {
// Declare an array containing the machine register names, strings.
declareRegNames(fp, _register);
// Declare an array containing the machine register encoding values
declareRegEncodes(fp, _register);
// Generate declarations for the total number of operands
fprintf(fp,"\n");
fprintf(fp,"// Total number of operands defined in architecture definition\n");
int num_operands = 0;
OperandForm *op;
for (_operands.reset(); (op = (OperandForm*)_operands.iter()) != NULL; ) {
// Ensure this is a machine-world instruction
if (op->ideal_only()) continue;
++num_operands;
}
int first_operand_class = num_operands;
OpClassForm *opc;
for (_opclass.reset(); (opc = (OpClassForm*)_opclass.iter()) != NULL; ) {
// Ensure this is a machine-world instruction
if (opc->ideal_only()) continue;
++num_operands;
}
fprintf(fp,"#define FIRST_OPERAND_CLASS %d\n", first_operand_class);
fprintf(fp,"#define NUM_OPERANDS %d\n", num_operands);
fprintf(fp,"\n");
// Generate declarations for the total number of instructions
fprintf(fp,"// Total number of instructions defined in architecture definition\n");
fprintf(fp,"#define NUM_INSTRUCTIONS %d\n",instructFormCount());
// Generate Machine Classes for each operand defined in AD file
fprintf(fp,"\n");
fprintf(fp,"//----------------------------Declare classes derived from MachOper----------\n");
// Iterate through all operands
_operands.reset();
OperandForm *oper;
for( ; (oper = (OperandForm*)_operands.iter()) != NULL;) {
// Ensure this is a machine-world instruction
if (oper->ideal_only() ) continue;
// The declaration of labelOper is in machine-independent file: machnode
if ( strcmp(oper->_ident,"label") == 0 ) continue;
// The declaration of methodOper is in machine-independent file: machnode
if ( strcmp(oper->_ident,"method") == 0 ) continue;
// Build class definition for this operand
fprintf(fp,"\n");
fprintf(fp,"class %sOper : public MachOper { \n",oper->_ident);
fprintf(fp,"private:\n");
// Operand definitions that depend upon number of input edges
{
uint num_edges = oper->num_edges(_globalNames);
if( num_edges != 1 ) { // Use MachOper::num_edges() {return 1;}
fprintf(fp," virtual uint num_edges() const { return %d; }\n",
num_edges );
}
if( num_edges > 0 ) {
in_RegMask(fp);
}
}
// Support storing constants inside the MachOper
declareConstStorage(fp,_globalNames,oper);
// Support storage of the condition codes
if( oper->is_ideal_bool() ) {
fprintf(fp," virtual int ccode() const { \n");
fprintf(fp," switch (_c0) {\n");
fprintf(fp," case BoolTest::eq : return equal();\n");
fprintf(fp," case BoolTest::gt : return greater();\n");
fprintf(fp," case BoolTest::lt : return less();\n");
fprintf(fp," case BoolTest::ne : return not_equal();\n");
fprintf(fp," case BoolTest::le : return less_equal();\n");
fprintf(fp," case BoolTest::ge : return greater_equal();\n");
fprintf(fp," default : ShouldNotReachHere(); return 0;\n");
fprintf(fp," }\n");
fprintf(fp," };\n");
}
// Support storage of the condition codes
if( oper->is_ideal_bool() ) {
fprintf(fp," virtual void negate() { \n");
fprintf(fp," _c0 = (BoolTest::mask)((int)_c0^0x4); \n");
fprintf(fp," };\n");
}
// Declare constructor.
// Parameters start with condition code, then all other constants
//
// (1) MachXOper(int32 ccode, int32 c0, int32 c1, ..., int32 cn)
// (2) : _ccode(ccode), _c0(c0), _c1(c1), ..., _cn(cn) { }
//
Form::DataType constant_type = oper->simple_type(_globalNames);
defineConstructor(fp, oper->_ident, oper->num_consts(_globalNames),
oper->_components, oper->is_ideal_bool(),
constant_type, _globalNames);
// Clone function
fprintf(fp," virtual MachOper *clone(Compile* C) const;\n");
// Support setting a spill offset into a constant operand.
// We only support setting an 'int' offset, while in the
// LP64 build spill offsets are added with an AddP which
// requires a long constant. Thus we don't support spilling
// in frames larger than 4Gig.
if( oper->has_conI(_globalNames) ||
oper->has_conL(_globalNames) )
fprintf(fp, " virtual void set_con( jint c0 ) { _c0 = c0; }\n");
// virtual functions for encoding and format
// fprintf(fp," virtual void encode() const {\n %s }\n",
// (oper->_encrule)?(oper->_encrule->_encrule):"");
// Check the interface type, and generate the correct query functions
// encoding queries based upon MEMORY_INTER, REG_INTER, CONST_INTER.
fprintf(fp," virtual uint opcode() const { return %s; }\n",
machOperEnum(oper->_ident));
// virtual function to look up ideal return type of machine instruction
//
// (1) virtual const Type *type() const { return .....; }
//
if ((oper->_matrule) && (oper->_matrule->_lChild == NULL) &&
(oper->_matrule->_rChild == NULL)) {
unsigned int position = 0;
const char *opret, *opname, *optype;
oper->_matrule->base_operand(position,_globalNames,opret,opname,optype);
fprintf(fp," virtual const Type *type() const {");
const char *type = getIdealType(optype);
if( type != NULL ) {
Form::DataType data_type = oper->is_base_constant(_globalNames);
// Check if we are an ideal pointer type
if( data_type == Form::idealP || data_type == Form::idealN ) {
// Return the ideal type we already have: <TypePtr *>
fprintf(fp," return _c0;");
} else {
// Return the appropriate bottom type
fprintf(fp," return %s;", getIdealType(optype));
}
} else {
fprintf(fp," ShouldNotCallThis(); return Type::BOTTOM;");
}
fprintf(fp," }\n");
} else {
// Check for user-defined stack slots, based upon sRegX
Form::DataType data_type = oper->is_user_name_for_sReg();
if( data_type != Form::none ){
const char *type = NULL;
switch( data_type ) {
case Form::idealI: type = "TypeInt::INT"; break;
case Form::idealP: type = "TypePtr::BOTTOM";break;
case Form::idealF: type = "Type::FLOAT"; break;
case Form::idealD: type = "Type::DOUBLE"; break;
case Form::idealL: type = "TypeLong::LONG"; break;
case Form::none: // fall through
default:
assert( false, "No support for this type of stackSlot");
}
fprintf(fp," virtual const Type *type() const { return %s; } // stackSlotX\n", type);
}
}
//
// virtual functions for defining the encoding interface.
//
// Access the linearized ideal register mask,
// map to physical register encoding
if ( oper->_matrule && oper->_matrule->is_base_register(_globalNames) ) {
// Just use the default virtual 'reg' call
} else if ( oper->ideal_to_sReg_type(oper->_ident) != Form::none ) {
// Special handling for operand 'sReg', a Stack Slot Register.
// Map linearized ideal register mask to stack slot number
fprintf(fp," virtual int reg(PhaseRegAlloc *ra_, const Node *node) const {\n");
fprintf(fp," return (int)OptoReg::reg2stack(ra_->get_reg_first(node));/* sReg */\n");
fprintf(fp," }\n");
fprintf(fp," virtual int reg(PhaseRegAlloc *ra_, const Node *node, int idx) const {\n");
fprintf(fp," return (int)OptoReg::reg2stack(ra_->get_reg_first(node->in(idx)));/* sReg */\n");
fprintf(fp," }\n");
}
// Output the operand specific access functions used by an enc_class
// These are only defined when we want to override the default virtual func
if (oper->_interface != NULL) {
fprintf(fp,"\n");
// Check if it is a Memory Interface
if ( oper->_interface->is_MemInterface() != NULL ) {
MemInterface *mem_interface = oper->_interface->is_MemInterface();
const char *base = mem_interface->_base;
if( base != NULL ) {
define_oper_interface(fp, *oper, _globalNames, "base", base);
}
char *index = mem_interface->_index;
if( index != NULL ) {
define_oper_interface(fp, *oper, _globalNames, "index", index);
}
const char *scale = mem_interface->_scale;
if( scale != NULL ) {
define_oper_interface(fp, *oper, _globalNames, "scale", scale);
}
const char *disp = mem_interface->_disp;
if( disp != NULL ) {
define_oper_interface(fp, *oper, _globalNames, "disp", disp);
oper->disp_is_oop(fp, _globalNames);
}
if( oper->stack_slots_only(_globalNames) ) {
// should not call this:
fprintf(fp," virtual int constant_disp() const { return Type::OffsetBot; }");
} else if ( disp != NULL ) {
define_oper_interface(fp, *oper, _globalNames, "constant_disp", disp);
}
} // end Memory Interface
// Check if it is a Conditional Interface
else if (oper->_interface->is_CondInterface() != NULL) {
CondInterface *cInterface = oper->_interface->is_CondInterface();
const char *equal = cInterface->_equal;
if( equal != NULL ) {
define_oper_interface(fp, *oper, _globalNames, "equal", equal);
}
const char *not_equal = cInterface->_not_equal;
if( not_equal != NULL ) {
define_oper_interface(fp, *oper, _globalNames, "not_equal", not_equal);
}
const char *less = cInterface->_less;
if( less != NULL ) {
define_oper_interface(fp, *oper, _globalNames, "less", less);
}
const char *greater_equal = cInterface->_greater_equal;
if( greater_equal != NULL ) {
define_oper_interface(fp, *oper, _globalNames, "greater_equal", greater_equal);
}
const char *less_equal = cInterface->_less_equal;
if( less_equal != NULL ) {
define_oper_interface(fp, *oper, _globalNames, "less_equal", less_equal);
}
const char *greater = cInterface->_greater;
if( greater != NULL ) {
define_oper_interface(fp, *oper, _globalNames, "greater", greater);
}
} // end Conditional Interface
// Check if it is a Constant Interface
else if (oper->_interface->is_ConstInterface() != NULL ) {
assert( oper->num_consts(_globalNames) == 1,
"Must have one constant when using CONST_INTER encoding");
if (!strcmp(oper->ideal_type(_globalNames), "ConI")) {
// Access the locally stored constant
fprintf(fp," virtual intptr_t constant() const {");
fprintf(fp, " return (intptr_t)_c0;");
fprintf(fp," }\n");
}
else if (!strcmp(oper->ideal_type(_globalNames), "ConP")) {
// Access the locally stored constant
fprintf(fp," virtual intptr_t constant() const {");
fprintf(fp, " return _c0->get_con();");
fprintf(fp, " }\n");
// Generate query to determine if this pointer is an oop
fprintf(fp," virtual bool constant_is_oop() const {");
fprintf(fp, " return _c0->isa_oop_ptr();");
fprintf(fp, " }\n");
}
else if (!strcmp(oper->ideal_type(_globalNames), "ConN")) {
// Access the locally stored constant
fprintf(fp," virtual intptr_t constant() const {");
fprintf(fp, " return _c0->get_ptrtype()->get_con();");
fprintf(fp, " }\n");
// Generate query to determine if this pointer is an oop
fprintf(fp," virtual bool constant_is_oop() const {");
fprintf(fp, " return _c0->get_ptrtype()->isa_oop_ptr();");
fprintf(fp, " }\n");
}
else if (!strcmp(oper->ideal_type(_globalNames), "ConL")) {
fprintf(fp," virtual intptr_t constant() const {");
// We don't support addressing modes with > 4Gig offsets.
// Truncate to int.
fprintf(fp, " return (intptr_t)_c0;");
fprintf(fp, " }\n");
fprintf(fp," virtual jlong constantL() const {");
fprintf(fp, " return _c0;");
fprintf(fp, " }\n");
}
else if (!strcmp(oper->ideal_type(_globalNames), "ConF")) {
fprintf(fp," virtual intptr_t constant() const {");
fprintf(fp, " ShouldNotReachHere(); return 0; ");
fprintf(fp, " }\n");
fprintf(fp," virtual jfloat constantF() const {");
fprintf(fp, " return (jfloat)_c0;");
fprintf(fp, " }\n");
}
else if (!strcmp(oper->ideal_type(_globalNames), "ConD")) {
fprintf(fp," virtual intptr_t constant() const {");
fprintf(fp, " ShouldNotReachHere(); return 0; ");
fprintf(fp, " }\n");
fprintf(fp," virtual jdouble constantD() const {");
fprintf(fp, " return _c0;");
fprintf(fp, " }\n");
}
}
else if (oper->_interface->is_RegInterface() != NULL) {
// make sure that a fixed format string isn't used for an
    // operand which might be assigned to multiple registers.
// Otherwise the opto assembly output could be misleading.
if (oper->_format->_strings.count() != 0 && !oper->is_bound_register()) {
syntax_err(oper->_linenum,
"Only bound registers can have fixed formats: %s\n",
oper->_ident);
}
}
else {
assert( false, "ShouldNotReachHere();");
}
}
fprintf(fp,"\n");
// // Currently all XXXOper::hash() methods are identical (990820)
// declare_hash(fp);
// // Currently all XXXOper::Cmp() methods are identical (990820)
// declare_cmp(fp);
// Do not place dump_spec() and Name() into PRODUCT code
// int_format and ext_format are not needed in PRODUCT code either
fprintf(fp, "#ifndef PRODUCT\n");
// Declare int_format() and ext_format()
gen_oper_format(fp, _globalNames, *oper);
// Machine independent print functionality for debugging
// IF we have constants, create a dump_spec function for the derived class
//
// (1) virtual void dump_spec() const {
// (2) st->print("#%d", _c#); // Constant != ConP
// OR _c#->dump_on(st); // Type ConP
// ...
// (3) }
uint num_consts = oper->num_consts(_globalNames);
if( num_consts > 0 ) {
// line (1)
fprintf(fp, " virtual void dump_spec(outputStream *st) const {\n");
// generate format string for st->print
// Iterate over the component list & spit out the right thing
uint i = 0;
const char *type = oper->ideal_type(_globalNames);
Component *comp;
oper->_components.reset();
if ((comp = oper->_components.iter()) == NULL) {
assert(num_consts == 1, "Bad component list detected.\n");
i = dump_spec_constant( fp, type, i, oper );
// Check that type actually matched
assert( i != 0, "Non-constant operand lacks component list.");
} // end if NULL
else {
// line (2)
// dump all components
oper->_components.reset();
while((comp = oper->_components.iter()) != NULL) {
type = comp->base_type(_globalNames);
i = dump_spec_constant( fp, type, i, NULL );
}
}
// finish line (3)
fprintf(fp," }\n");
}
fprintf(fp," virtual const char *Name() const { return \"%s\";}\n",
oper->_ident);
fprintf(fp,"#endif\n");
// Close definition of this XxxMachOper
fprintf(fp,"};\n");
}
// Generate Machine Classes for each instruction defined in AD file
fprintf(fp,"\n");
fprintf(fp,"//----------------------------Declare classes for Pipelines-----------------\n");
declare_pipe_classes(fp);
// Generate Machine Classes for each instruction defined in AD file
fprintf(fp,"\n");
fprintf(fp,"//----------------------------Declare classes derived from MachNode----------\n");
_instructions.reset();
InstructForm *instr;
for( ; (instr = (InstructForm*)_instructions.iter()) != NULL; ) {
// Ensure this is a machine-world instruction
if ( instr->ideal_only() ) continue;
// Build class definition for this instruction
fprintf(fp,"\n");
fprintf(fp,"class %sNode : public %s { \n",
instr->_ident, instr->mach_base_class(_globalNames) );
fprintf(fp,"private:\n");
fprintf(fp," MachOper *_opnd_array[%d];\n", instr->num_opnds() );
if ( instr->is_ideal_jump() ) {
fprintf(fp, " GrowableArray<Label*> _index2label;\n");
}
fprintf(fp,"public:\n");
fprintf(fp," MachOper *opnd_array(uint operand_index) const {\n");
fprintf(fp," assert(operand_index < _num_opnds, \"invalid _opnd_array index\");\n");
fprintf(fp," return _opnd_array[operand_index];\n");
fprintf(fp," }\n");
fprintf(fp," void set_opnd_array(uint operand_index, MachOper *operand) {\n");
fprintf(fp," assert(operand_index < _num_opnds, \"invalid _opnd_array index\");\n");
fprintf(fp," _opnd_array[operand_index] = operand;\n");
fprintf(fp," }\n");
fprintf(fp,"private:\n");
if ( instr->is_ideal_jump() ) {
fprintf(fp," virtual void add_case_label(int index_num, Label* blockLabel) {\n");
fprintf(fp," _index2label.at_put_grow(index_num, blockLabel);\n");
fprintf(fp," }\n");
}
if( can_cisc_spill() && (instr->cisc_spill_alternate() != NULL) ) {
fprintf(fp," const RegMask *_cisc_RegMask;\n");
}
out_RegMask(fp); // output register mask
fprintf(fp," virtual uint rule() const { return %s_rule; }\n",
instr->_ident);
// If this instruction contains a labelOper
// Declare Node::methods that set operand Label's contents
int label_position = instr->label_position();
if( label_position != -1 ) {
// Set/Save the label, stored in labelOper::_branch_label
fprintf(fp," virtual void label_set( Label* label, uint block_num );\n");
fprintf(fp," virtual void save_label( Label** label, uint* block_num );\n");
}
// If this instruction contains a methodOper
// Declare Node::methods that set operand method's contents
int method_position = instr->method_position();
if( method_position != -1 ) {
// Set the address method, stored in methodOper::_method
fprintf(fp," virtual void method_set( intptr_t method );\n");
}
// virtual functions for attributes
//
    // Each instruction attribute results in a virtual call of the same name.
// The ins_cost is not handled here.
Attribute *attr = instr->_attribs;
bool avoid_back_to_back = false;
while (attr != NULL) {
if (strcmp(attr->_ident,"ins_cost") &&
strcmp(attr->_ident,"ins_short_branch")) {
fprintf(fp," int %s() const { return %s; }\n",
attr->_ident, attr->_val);
}
// Check value for ins_avoid_back_to_back, and if it is true (1), set the flag
if (!strcmp(attr->_ident,"ins_avoid_back_to_back") && attr->int_val(*this) != 0)
avoid_back_to_back = true;
attr = (Attribute *)attr->_next;
}
// virtual functions for encode and format
// Virtual function for evaluating the constant.
if (instr->is_mach_constant()) {
fprintf(fp," virtual void eval_constant(Compile* C);\n");
}
// Output the opcode function and the encode function here using the
// encoding class information in the _insencode slot.
if ( instr->_insencode ) {
fprintf(fp," virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;\n");
}
// virtual function for getting the size of an instruction
if ( instr->_size ) {
fprintf(fp," virtual uint size(PhaseRegAlloc *ra_) const;\n");
}
// Return the top-level ideal opcode.
// Use MachNode::ideal_Opcode() for nodes based on MachNode class
// if the ideal_Opcode == Op_Node.
if ( strcmp("Node", instr->ideal_Opcode(_globalNames)) != 0 ||
strcmp("MachNode", instr->mach_base_class(_globalNames)) != 0 ) {
fprintf(fp," virtual int ideal_Opcode() const { return Op_%s; }\n",
instr->ideal_Opcode(_globalNames) );
}
// Allow machine-independent optimization, invert the sense of the IF test
if( instr->is_ideal_if() ) {
fprintf(fp," virtual void negate() { \n");
// Identify which operand contains the negate(able) ideal condition code
int idx = 0;
instr->_components.reset();
for( Component *comp; (comp = instr->_components.iter()) != NULL; ) {
// Check that component is an operand
Form *form = (Form*)_globalNames[comp->_type];
OperandForm *opForm = form ? form->is_operand() : NULL;
if( opForm == NULL ) continue;
// Lookup the position of the operand in the instruction.
if( opForm->is_ideal_bool() ) {
idx = instr->operand_position(comp->_name, comp->_usedef);
assert( idx != NameList::Not_in_list, "Did not find component in list that contained it.");
break;
}
}
fprintf(fp," opnd_array(%d)->negate();\n", idx);
fprintf(fp," _prob = 1.0f - _prob;\n");
fprintf(fp," };\n");
}
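    // For illustration only: for an if-instruction whose condition-code
    // operand sits at index 1 (hypothetical), the method emitted above is
    //
    //   virtual void negate() {
    //     opnd_array(1)->negate();
    //     _prob = 1.0f - _prob;
    //   };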
    // Identify which input register must match the output register (two-address form).
uint matching_input = instr->two_address(_globalNames);
// Generate the method if it returns != 0 otherwise use MachNode::two_adr()
if( matching_input != 0 ) {
fprintf(fp," virtual uint two_adr() const ");
fprintf(fp,"{ return oper_input_base()");
for( uint i = 2; i <= matching_input; i++ )
fprintf(fp," + opnd_array(%d)->num_edges()",i-1);
fprintf(fp,"; }\n");
}
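    // For illustration only: with matching_input == 2 (hypothetical), the
    // method emitted above is
    //
    //   virtual uint two_adr() const { return oper_input_base() + opnd_array(1)->num_edges(); }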
// Declare cisc_version, if applicable
// MachNode *cisc_version( int offset /* ,... */ );
instr->declare_cisc_version(*this, fp);
// If there is an explicit peephole rule, build it
if ( instr->peepholes() != NULL ) {
fprintf(fp," virtual MachNode *peephole(Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted, Compile *C);\n");
}
// Output the declaration for number of relocation entries
if ( instr->reloc(_globalNames) != 0 ) {
fprintf(fp," virtual int reloc() const;\n");
}
if (instr->alignment() != 1) {
fprintf(fp," virtual int alignment_required() const { return %d; }\n", instr->alignment());
fprintf(fp," virtual int compute_padding(int current_offset) const;\n");
}
    // Starting point for the inputs the matcher wants.
// Use MachNode::oper_input_base() for nodes based on MachNode class
// if the base == 1.
if ( instr->oper_input_base(_globalNames) != 1 ||
strcmp("MachNode", instr->mach_base_class(_globalNames)) != 0 ) {
fprintf(fp," virtual uint oper_input_base() const { return %d; }\n",
instr->oper_input_base(_globalNames));
}
// Make the constructor and following methods 'public:'
fprintf(fp,"public:\n");
// Constructor
if ( instr->is_ideal_jump() ) {
fprintf(fp," %sNode() : _index2label(MinJumpTableSize*2) { ", instr->_ident);
} else {
fprintf(fp," %sNode() { ", instr->_ident);
if( can_cisc_spill() && (instr->cisc_spill_alternate() != NULL) ) {
fprintf(fp,"_cisc_RegMask = NULL; ");
}
}
fprintf(fp," _num_opnds = %d; _opnds = _opnd_array; ", instr->num_opnds());
bool node_flags_set = false;
// flag: if this instruction matches an ideal 'Copy*' node
if ( instr->is_ideal_copy() != 0 ) {
fprintf(fp,"init_flags(Flag_is_Copy");
node_flags_set = true;
}
    // Is this instruction a constant?  If so, get its type
Form::DataType data_type;
const char *opType = NULL;
const char *result = NULL;
data_type = instr->is_chain_of_constant(_globalNames, opType, result);
// Check if this instruction is a constant
if ( data_type != Form::none ) {
if ( node_flags_set ) {
fprintf(fp," | Flag_is_Con");
} else {
fprintf(fp,"init_flags(Flag_is_Con");
node_flags_set = true;
}
}
// flag: if this instruction is cisc alternate
if ( can_cisc_spill() && instr->is_cisc_alternate() ) {
if ( node_flags_set ) {
fprintf(fp," | Flag_is_cisc_alternate");
} else {
fprintf(fp,"init_flags(Flag_is_cisc_alternate");
node_flags_set = true;
}
}
// flag: if this instruction has short branch form
if ( instr->has_short_branch_form() ) {
if ( node_flags_set ) {
fprintf(fp," | Flag_may_be_short_branch");
} else {
fprintf(fp,"init_flags(Flag_may_be_short_branch");
node_flags_set = true;
}
}
// flag: if this instruction should not be generated back to back.
if ( avoid_back_to_back ) {
if ( node_flags_set ) {
fprintf(fp," | Flag_avoid_back_to_back");
} else {
fprintf(fp,"init_flags(Flag_avoid_back_to_back");
node_flags_set = true;
}
}
// Check if machine instructions that USE memory, but do not DEF memory,
// depend upon a node that defines memory in machine-independent graph.
if ( instr->needs_anti_dependence_check(_globalNames) ) {
if ( node_flags_set ) {
fprintf(fp," | Flag_needs_anti_dependence_check");
} else {
fprintf(fp,"init_flags(Flag_needs_anti_dependence_check");
node_flags_set = true;
}
}
// flag: if this instruction is implemented with a call
if ( instr->_has_call ) {
if ( node_flags_set ) {
fprintf(fp," | Flag_has_call");
} else {
fprintf(fp,"init_flags(Flag_has_call");
node_flags_set = true;
}
}
if ( node_flags_set ) {
fprintf(fp,"); ");
}
fprintf(fp,"}\n");
// size_of, used by base class's clone to obtain the correct size.
fprintf(fp," virtual uint size_of() const {");
fprintf(fp, " return sizeof(%sNode);", instr->_ident);
fprintf(fp, " }\n");
// Virtual methods which are only generated to override base class
if( instr->expands() || instr->needs_projections() ||
instr->has_temps() ||
instr->is_mach_constant() ||
        (instr->_matrule != NULL &&
         instr->num_opnds() != instr->num_unique_opnds()) ) {
fprintf(fp," virtual MachNode *Expand(State *state, Node_List &proj_list, Node* mem);\n");
}
if (instr->is_pinned(_globalNames)) {
fprintf(fp," virtual bool pinned() const { return ");
if (instr->is_parm(_globalNames)) {
fprintf(fp,"_in[0]->pinned();");
} else {
fprintf(fp,"true;");
}
fprintf(fp," }\n");
}
if (instr->is_projection(_globalNames)) {
fprintf(fp," virtual const Node *is_block_proj() const { return this; }\n");
}
if ( instr->num_post_match_opnds() != 0
|| instr->is_chain_of_constant(_globalNames) ) {
fprintf(fp," friend MachNode *State::MachNodeGenerator(int opcode, Compile* C);\n");
}
if ( instr->rematerialize(_globalNames, get_registers()) ) {
fprintf(fp," // Rematerialize %s\n", instr->_ident);
}
// Declare short branch methods, if applicable
instr->declare_short_branch_methods(fp);
// See if there is an "ins_pipe" declaration for this instruction
if (instr->_ins_pipe) {
fprintf(fp," static const Pipeline *pipeline_class();\n");
fprintf(fp," virtual const Pipeline *pipeline() const;\n");
}
// Generate virtual function for MachNodeX::bottom_type when necessary
//
// Note on accuracy: Pointer-types of machine nodes need to be accurate,
// or else alias analysis on the matched graph may produce bad code.
// Moreover, the aliasing decisions made on machine-node graph must be
// no less accurate than those made on the ideal graph, or else the graph
// may fail to schedule. (Reason: Memory ops which are reordered in
// the ideal graph might look interdependent in the machine graph,
// thereby removing degrees of scheduling freedom that the optimizer
// assumed would be available.)
//
// %%% We should handle many of these cases with an explicit ADL clause:
// instruct foo() %{ ... bottom_type(TypeRawPtr::BOTTOM); ... %}
if( data_type != Form::none ) {
// A constant's bottom_type returns a Type containing its constant value
// !!!!!
// Convert all ints, floats, ... to machine-independent TypeXs
// as is done for pointers
//
// Construct appropriate constant type containing the constant value.
fprintf(fp," virtual const class Type *bottom_type() const {\n");
switch( data_type ) {
case Form::idealI:
fprintf(fp," return TypeInt::make(opnd_array(1)->constant());\n");
break;
case Form::idealP:
case Form::idealN:
fprintf(fp," return opnd_array(1)->type();\n");
break;
case Form::idealD:
fprintf(fp," return TypeD::make(opnd_array(1)->constantD());\n");
break;
case Form::idealF:
fprintf(fp," return TypeF::make(opnd_array(1)->constantF());\n");
break;
case Form::idealL:
fprintf(fp," return TypeLong::make(opnd_array(1)->constantL());\n");
break;
default:
assert( false, "Unimplemented()" );
break;
}
fprintf(fp," };\n");
}
/* else if ( instr->_matrule && instr->_matrule->_rChild &&
( strcmp("ConvF2I",instr->_matrule->_rChild->_opType)==0
|| strcmp("ConvD2I",instr->_matrule->_rChild->_opType)==0 ) ) {
// !!!!! !!!!!
// Provide explicit bottom type for conversions to int
// On Intel the result operand is a stackSlot, untyped.
fprintf(fp," virtual const class Type *bottom_type() const {");
fprintf(fp, " return TypeInt::INT;");
fprintf(fp, " };\n");
}*/
else if( instr->is_ideal_copy() &&
!strcmp(instr->_matrule->_lChild->_opType,"stackSlotP") ) {
// !!!!!
// Special hack for ideal Copy of pointer. Bottom type is oop or not depending on input.
fprintf(fp," const Type *bottom_type() const { return in(1)->bottom_type(); } // Copy?\n");
}
else if( instr->is_ideal_loadPC() ) {
// LoadPCNode provides the return address of a call to native code.
// Define its bottom type to be TypeRawPtr::BOTTOM instead of TypePtr::BOTTOM
// since it is a pointer to an internal VM location and must have a zero offset.
// Allocation detects derived pointers, in part, by their non-zero offsets.
fprintf(fp," const Type *bottom_type() const { return TypeRawPtr::BOTTOM; } // LoadPC?\n");
}
else if( instr->is_ideal_box() ) {
// BoxNode provides the address of a stack slot.
// Define its bottom type to be TypeRawPtr::BOTTOM instead of TypePtr::BOTTOM
    // This prevents insert_anti_dependencies from complaining. It will
// complain if it sees that the pointer base is TypePtr::BOTTOM since
// it doesn't understand what that might alias.
fprintf(fp," const Type *bottom_type() const { return TypeRawPtr::BOTTOM; } // Box?\n");
}
else if( instr->_matrule && instr->_matrule->_rChild && !strcmp(instr->_matrule->_rChild->_opType,"CMoveP") ) {
int offset = 1;
// Special special hack to see if the Cmp? has been incorporated in the conditional move
MatchNode *rl = instr->_matrule->_rChild->_lChild;
if( rl && !strcmp(rl->_opType, "Binary") ) {
MatchNode *rlr = rl->_rChild;
if (rlr && strncmp(rlr->_opType, "Cmp", 3) == 0)
offset = 2;
}
// Special hack for ideal CMoveP; ideal type depends on inputs
fprintf(fp," const Type *bottom_type() const { const Type *t = in(oper_input_base()+%d)->bottom_type(); return (req() <= oper_input_base()+%d) ? t : t->meet(in(oper_input_base()+%d)->bottom_type()); } // CMoveP\n",
offset, offset+1, offset+1);
}
else if( instr->_matrule && instr->_matrule->_rChild && !strcmp(instr->_matrule->_rChild->_opType,"CMoveN") ) {
int offset = 1;
// Special special hack to see if the Cmp? has been incorporated in the conditional move
MatchNode *rl = instr->_matrule->_rChild->_lChild;
if( rl && !strcmp(rl->_opType, "Binary") ) {
MatchNode *rlr = rl->_rChild;
if (rlr && strncmp(rlr->_opType, "Cmp", 3) == 0)
offset = 2;
}
// Special hack for ideal CMoveN; ideal type depends on inputs
fprintf(fp," const Type *bottom_type() const { const Type *t = in(oper_input_base()+%d)->bottom_type(); return (req() <= oper_input_base()+%d) ? t : t->meet(in(oper_input_base()+%d)->bottom_type()); } // CMoveN\n",
offset, offset+1, offset+1);
}
else if (instr->is_tls_instruction()) {
// Special hack for tlsLoadP
fprintf(fp," const Type *bottom_type() const { return TypeRawPtr::BOTTOM; } // tlsLoadP\n");
}
else if ( instr->is_ideal_if() ) {
fprintf(fp," const Type *bottom_type() const { return TypeTuple::IFBOTH; } // matched IfNode\n");
}
else if ( instr->is_ideal_membar() ) {
fprintf(fp," const Type *bottom_type() const { return TypeTuple::MEMBAR; } // matched MemBar\n");
}
// Check where 'ideal_type' must be customized
/*
if ( instr->_matrule && instr->_matrule->_rChild &&
( strcmp("ConvF2I",instr->_matrule->_rChild->_opType)==0
|| strcmp("ConvD2I",instr->_matrule->_rChild->_opType)==0 ) ) {
fprintf(fp," virtual uint ideal_reg() const { return Compile::current()->matcher()->base2reg[Type::Int]; }\n");
}*/
// Analyze machine instructions that either USE or DEF memory.
int memory_operand = instr->memory_operand(_globalNames);
    // Some instructions kill all of memory
if ( instr->is_wide_memory_kill(_globalNames) ) {
memory_operand = InstructForm::MANY_MEMORY_OPERANDS;
}
if ( memory_operand != InstructForm::NO_MEMORY_OPERAND ) {
if( memory_operand == InstructForm::MANY_MEMORY_OPERANDS ) {
fprintf(fp," virtual const TypePtr *adr_type() const;\n");
}
fprintf(fp," virtual const MachOper *memory_operand() const;\n");
}
fprintf(fp, "#ifndef PRODUCT\n");
// virtual function for generating the user's assembler output
gen_inst_format(fp, _globalNames,*instr);
// Machine independent print functionality for debugging
fprintf(fp," virtual const char *Name() const { return \"%s\";}\n",
instr->_ident);
fprintf(fp, "#endif\n");
// Close definition of this XxxMachNode
fprintf(fp,"};\n");
};
}
void ArchDesc::defineStateClass(FILE *fp) {
static const char *state__valid = "_valid[((uint)index) >> 5] & (0x1 << (((uint)index) & 0x0001F))";
static const char *state__set_valid= "_valid[((uint)index) >> 5] |= (0x1 << (((uint)index) & 0x0001F))";
fprintf(fp,"\n");
fprintf(fp,"// MACROS to inline and constant fold State::valid(index)...\n");
fprintf(fp,"// when given a constant 'index' in dfa_<arch>.cpp\n");
fprintf(fp,"// uint word = index >> 5; // Shift out bit position\n");
fprintf(fp,"// uint bitpos = index & 0x0001F; // Mask off word bits\n");
fprintf(fp,"#define STATE__VALID(index) ");
fprintf(fp," (%s)\n", state__valid);
fprintf(fp,"\n");
fprintf(fp,"#define STATE__NOT_YET_VALID(index) ");
fprintf(fp," ( (%s) == 0 )\n", state__valid);
fprintf(fp,"\n");
fprintf(fp,"#define STATE__VALID_CHILD(state,index) ");
fprintf(fp," ( state && (state->%s) )\n", state__valid);
fprintf(fp,"\n");
fprintf(fp,"#define STATE__SET_VALID(index) ");
fprintf(fp," (%s)\n", state__set_valid);
fprintf(fp,"\n");
fprintf(fp,
"//---------------------------State-------------------------------------------\n");
fprintf(fp,"// State contains an integral cost vector, indexed by machine operand opcodes,\n");
fprintf(fp,"// a rule vector consisting of machine operand/instruction opcodes, and also\n");
fprintf(fp,"// indexed by machine operand opcodes, pointers to the children in the label\n");
fprintf(fp,"// tree generated by the Label routines in ideal nodes (currently limited to\n");
fprintf(fp,"// two for convenience, but this could change).\n");
fprintf(fp,"class State : public ResourceObj {\n");
fprintf(fp,"public:\n");
fprintf(fp," int _id; // State identifier\n");
fprintf(fp," Node *_leaf; // Ideal (non-machine-node) leaf of match tree\n");
fprintf(fp," State *_kids[2]; // Children of state node in label tree\n");
fprintf(fp," unsigned int _cost[_LAST_MACH_OPER]; // Cost vector, indexed by operand opcodes\n");
fprintf(fp," unsigned int _rule[_LAST_MACH_OPER]; // Rule vector, indexed by operand opcodes\n");
fprintf(fp," unsigned int _valid[(_LAST_MACH_OPER/32)+1]; // Bit Map of valid Cost/Rule entries\n");
fprintf(fp,"\n");
fprintf(fp," State(void); // Constructor\n");
fprintf(fp," DEBUG_ONLY( ~State(void); ) // Destructor\n");
fprintf(fp,"\n");
fprintf(fp," // Methods created by ADLC and invoked by Reduce\n");
fprintf(fp," MachOper *MachOperGenerator( int opcode, Compile* C );\n");
fprintf(fp," MachNode *MachNodeGenerator( int opcode, Compile* C );\n");
fprintf(fp,"\n");
fprintf(fp," // Assign a state to a node, definition of method produced by ADLC\n");
fprintf(fp," bool DFA( int opcode, const Node *ideal );\n");
fprintf(fp,"\n");
fprintf(fp," // Access function for _valid bit vector\n");
fprintf(fp," bool valid(uint index) {\n");
fprintf(fp," return( STATE__VALID(index) != 0 );\n");
fprintf(fp," }\n");
fprintf(fp,"\n");
fprintf(fp," // Set function for _valid bit vector\n");
fprintf(fp," void set_valid(uint index) {\n");
fprintf(fp," STATE__SET_VALID(index);\n");
fprintf(fp," }\n");
fprintf(fp,"\n");
fprintf(fp,"#ifndef PRODUCT\n");
fprintf(fp," void dump(); // Debugging prints\n");
fprintf(fp," void dump(int depth);\n");
fprintf(fp,"#endif\n");
if (_dfa_small) {
// Generate the routine name we'll need
for (int i = 1; i < _last_opcode; i++) {
if (_mlistab[i] == NULL) continue;
fprintf(fp, " void _sub_Op_%s(const Node *n);\n", NodeClassNames[i]);
}
}
fprintf(fp,"};\n");
fprintf(fp,"\n");
fprintf(fp,"\n");
}
//---------------------------buildMachOperEnum---------------------------------
// Build enumeration for densely packed operands.
// This enumeration is used to index into the arrays in the State objects
// that indicate cost and a successful rule match.
// Information needed to generate the ReduceOp mapping for the DFA
class OutputMachOperands : public OutputMap {
public:
OutputMachOperands(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD)
: OutputMap(hpp, cpp, globals, AD, "MachOperands") {};
void declaration() { }
void definition() { fprintf(_cpp, "enum MachOperands {\n"); }
void closing() { fprintf(_cpp, " _LAST_MACH_OPER\n");
OutputMap::closing();
}
void map(OpClassForm &opc) {
const char* opc_ident_to_upper = _AD.machOperEnum(opc._ident);
fprintf(_cpp, " %s", opc_ident_to_upper);
delete[] opc_ident_to_upper;
}
void map(OperandForm &oper) {
const char* oper_ident_to_upper = _AD.machOperEnum(oper._ident);
fprintf(_cpp, " %s", oper_ident_to_upper);
delete[] oper_ident_to_upper;
}
void map(char *name) {
const char* name_to_upper = _AD.machOperEnum(name);
fprintf(_cpp, " %s", name_to_upper);
delete[] name_to_upper;
}
bool do_instructions() { return false; }
void map(InstructForm &inst){ assert( false, "ShouldNotCallThis()"); }
};
void ArchDesc::buildMachOperEnum(FILE *fp_hpp) {
// Construct the table for MachOpcodes
OutputMachOperands output_mach_operands(fp_hpp, fp_hpp, _globalNames, *this);
build_map(output_mach_operands);
}
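// Illustrative shape of the enum this emits (entry names are the upper-cased,
// architecture-specific operand idents from the AD file, so only the frame is
// shown here):
//   enum MachOperands {
//     ...one entry per operand and operand class...,
//     _LAST_MACH_OPER
//   };
// _LAST_MACH_OPER is the value that sizes the _cost/_rule/_valid arrays in
// class State above.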
//---------------------------buildMachEnum----------------------------------
// Build enumeration for all MachOpers and all MachNodes
// Information needed to generate the ReduceOp mapping for the DFA
class OutputMachOpcodes : public OutputMap {
int begin_inst_chain_rule;
int end_inst_chain_rule;
int begin_rematerialize;
int end_rematerialize;
int end_instructions;
public:
OutputMachOpcodes(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD)
: OutputMap(hpp, cpp, globals, AD, "MachOpcodes"),
begin_inst_chain_rule(-1), end_inst_chain_rule(-1), end_instructions(-1)
{};
void declaration() { }
void definition() { fprintf(_cpp, "enum MachOpcodes {\n"); }
void closing() {
if( begin_inst_chain_rule != -1 )
fprintf(_cpp, " _BEGIN_INST_CHAIN_RULE = %d,\n", begin_inst_chain_rule);
if( end_inst_chain_rule != -1 )
fprintf(_cpp, " _END_INST_CHAIN_RULE = %d,\n", end_inst_chain_rule);
if( begin_rematerialize != -1 )
fprintf(_cpp, " _BEGIN_REMATERIALIZE = %d,\n", begin_rematerialize);
if( end_rematerialize != -1 )
fprintf(_cpp, " _END_REMATERIALIZE = %d,\n", end_rematerialize);
    // always executed, since do_instructions() is true; this also avoids a
    // trailing comma
fprintf(_cpp, " _last_Mach_Node = %d \n", end_instructions);
OutputMap::closing();
}
void map(OpClassForm &opc) { fprintf(_cpp, " %s_rule", opc._ident ); }
void map(OperandForm &oper) { fprintf(_cpp, " %s_rule", oper._ident ); }
void map(char *name) { if (name) fprintf(_cpp, " %s_rule", name);
else fprintf(_cpp, " 0"); }
void map(InstructForm &inst) {fprintf(_cpp, " %s_rule", inst._ident ); }
void record_position(OutputMap::position place, int idx ) {
switch(place) {
case OutputMap::BEGIN_INST_CHAIN_RULES :
begin_inst_chain_rule = idx;
break;
case OutputMap::END_INST_CHAIN_RULES :
end_inst_chain_rule = idx;
break;
case OutputMap::BEGIN_REMATERIALIZE :
begin_rematerialize = idx;
break;
case OutputMap::END_REMATERIALIZE :
end_rematerialize = idx;
break;
case OutputMap::END_INSTRUCTIONS :
end_instructions = idx;
break;
default:
break;
}
}
};
void ArchDesc::buildMachOpcodesEnum(FILE *fp_hpp) {
// Construct the table for MachOpcodes
OutputMachOpcodes output_mach_opcodes(fp_hpp, fp_hpp, _globalNames, *this);
build_map(output_mach_opcodes);
}
// Generate an enumeration of the pipeline states, and both
// the functional units (resources) and the masks for
// specifying resources
void ArchDesc::build_pipeline_enums(FILE *fp_hpp) {
int stagelen = (int)strlen("undefined");
int stagenum = 0;
if (_pipeline) { // Find max enum string length
const char *stage;
for ( _pipeline->_stages.reset(); (stage = _pipeline->_stages.iter()) != NULL; ) {
int len = (int)strlen(stage);
if (stagelen < len) stagelen = len;
}
}
// Generate a list of stages
fprintf(fp_hpp, "\n");
fprintf(fp_hpp, "// Pipeline Stages\n");
fprintf(fp_hpp, "enum machPipelineStages {\n");
fprintf(fp_hpp, " stage_%-*s = 0,\n", stagelen, "undefined");
if( _pipeline ) {
const char *stage;
for ( _pipeline->_stages.reset(); (stage = _pipeline->_stages.iter()) != NULL; )
fprintf(fp_hpp, " stage_%-*s = %d,\n", stagelen, stage, ++stagenum);
}
fprintf(fp_hpp, " stage_%-*s = %d\n", stagelen, "count", stagenum);
fprintf(fp_hpp, "};\n");
fprintf(fp_hpp, "\n");
fprintf(fp_hpp, "// Pipeline Resources\n");
fprintf(fp_hpp, "enum machPipelineResources {\n");
int rescount = 0;
if( _pipeline ) {
const char *resource;
int reslen = 0;
// Generate a list of resources, and masks
for ( _pipeline->_reslist.reset(); (resource = _pipeline->_reslist.iter()) != NULL; ) {
int len = (int)strlen(resource);
if (reslen < len)
reslen = len;
}
for ( _pipeline->_reslist.reset(); (resource = _pipeline->_reslist.iter()) != NULL; ) {
const ResourceForm *resform = _pipeline->_resdict[resource]->is_resource();
int mask = resform->mask();
if ((mask & (mask-1)) == 0)
fprintf(fp_hpp, " resource_%-*s = %d,\n", reslen, resource, rescount++);
}
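    // (mask & (mask-1)) == 0 is the usual power-of-two test: only resources
    // whose mask has a single bit set get a resource_* enum entry here;
    // combined (multi-bit) resources appear only in the res_mask_* list
    // emitted below.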
fprintf(fp_hpp, "\n");
for ( _pipeline->_reslist.reset(); (resource = _pipeline->_reslist.iter()) != NULL; ) {
const ResourceForm *resform = _pipeline->_resdict[resource]->is_resource();
fprintf(fp_hpp, " res_mask_%-*s = 0x%08x,\n", reslen, resource, resform->mask());
}
fprintf(fp_hpp, "\n");
}
fprintf(fp_hpp, " resource_count = %d\n", rescount);
fprintf(fp_hpp, "};\n");
}
| 43.258973 | 232 | 0.602449 | codefollower |
3397507940455c76012aaa8f7131477356eca3a0 | 11,711 | hh | C++ | third_party/masstree-beta/masstree_remove.hh | nilyibo/eRPC | c9ec2c81672c45cca04a8bb2e0d7887c3e8cff32 | [
"Apache-2.0"
] | 1 | 2022-03-08T00:36:10.000Z | 2022-03-08T00:36:10.000Z | third_party/masstree-beta/masstree_remove.hh | nilyibo/eRPC | c9ec2c81672c45cca04a8bb2e0d7887c3e8cff32 | [
"Apache-2.0"
] | null | null | null | third_party/masstree-beta/masstree_remove.hh | nilyibo/eRPC | c9ec2c81672c45cca04a8bb2e0d7887c3e8cff32 | [
"Apache-2.0"
] | null | null | null | /* Masstree
* Eddie Kohler, Yandong Mao, Robert Morris
* Copyright (c) 2012-2016 President and Fellows of Harvard College
* Copyright (c) 2012-2016 Massachusetts Institute of Technology
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, subject to the conditions
* listed in the Masstree LICENSE file. These conditions include: you must
* preserve this copyright notice, and you cannot mention the copyright
* holders in advertising related to the Software without their permission.
* The Software is provided WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED. This
* notice is a summary of the Masstree LICENSE file; the license in that file
* is legally binding.
*/
#ifndef MASSTREE_REMOVE_HH
#define MASSTREE_REMOVE_HH
#include "masstree_get.hh"
#include "btree_leaflink.hh"
#include "circular_int.hh"
namespace Masstree {
template <typename P>
bool tcursor<P>::gc_layer(threadinfo& ti)
{
find_locked(ti);
masstree_precondition(!n_->deleted() && !n_->deleted_layer());
// find_locked might return early if another gc_layer attempt has
// succeeded at removing multiple tree layers. So check that the whole
// key has been consumed
if (ka_.has_suffix())
return false;
// find the slot for the child tree
// ka_ is a multiple of ikey_size bytes long. We are looking for the entry
// for the next tree layer, which has keylenx_ corresponding to ikey_size+1.
// So if has_value(), then we found an entry for the same ikey, but with
    // length ikey_size; we need to adjust kx_.i.
kx_.i += has_value();
if (kx_.i >= n_->size())
return false;
permuter_type perm(n_->permutation_);
kx_.p = perm[kx_.i];
if (n_->ikey0_[kx_.p] != ka_.ikey() || !n_->is_layer(kx_.p))
return false;
// remove redundant internode layers
node_type *layer;
while (1) {
layer = n_->lv_[kx_.p].layer();
if (!layer->is_root()) {
n_->lv_[kx_.p] = layer->maybe_parent();
continue;
}
if (layer->isleaf())
break;
internode_type *in = static_cast<internode_type *>(layer);
if (in->size() > 0)
return false;
in->lock(*layer, ti.lock_fence(tc_internode_lock));
if (!in->is_root() || in->size() > 0)
goto unlock_layer;
node_type *child = in->child_[0];
child->make_layer_root();
n_->lv_[kx_.p] = child;
in->mark_split();
in->set_parent(child); // ensure concurrent reader finds true root
// NB: now in->parent() might weirdly be a LEAF!
in->unlock();
in->deallocate_rcu(ti);
}
{
leaf_type* lf = static_cast<leaf_type*>(layer);
if (lf->size() > 0)
return false;
lf->lock(*lf, ti.lock_fence(tc_leaf_lock));
if (!lf->is_root() || lf->size() > 0)
goto unlock_layer;
// child is an empty leaf: kill it
masstree_invariant(!lf->prev_ && !lf->next_.ptr);
masstree_invariant(!lf->deleted());
masstree_invariant(!lf->deleted_layer());
if (P::need_phantom_epoch
&& circular_int<typename P::phantom_epoch_type>::less(n_->phantom_epoch_[0], lf->phantom_epoch_[0]))
n_->phantom_epoch_[0] = lf->phantom_epoch_[0];
lf->mark_deleted_layer(); // NB DO NOT mark as deleted (see above)
lf->unlock();
lf->deallocate_rcu(ti);
return true;
}
unlock_layer:
layer->unlock();
return false;
}
template <typename P>
struct gc_layer_rcu_callback : public P::threadinfo_type::mrcu_callback {
typedef typename P::threadinfo_type threadinfo;
node_base<P>* root_;
int len_;
char s_[0];
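    // s_ is a zero-length (flexible) array member: the key prefix is stored
    // inline, directly after this object in the same allocation; make()
    // allocates prefix.len extra bytes for it and size() accounts for them.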
gc_layer_rcu_callback(node_base<P>* root, Str prefix)
: root_(root), len_(prefix.length()) {
memcpy(s_, prefix.data(), len_);
}
void operator()(threadinfo& ti);
size_t size() const {
return len_ + sizeof(*this);
}
static void make(node_base<P>* root, Str prefix, threadinfo& ti);
};
template <typename P>
void gc_layer_rcu_callback<P>::operator()(threadinfo& ti)
{
while (!root_->is_root())
root_ = root_->maybe_parent();
if (!root_->deleted()) { // if not destroying tree...
tcursor<P> lp(root_, s_, len_);
bool do_remove = lp.gc_layer(ti);
if (!do_remove || !lp.finish_remove(ti))
lp.n_->unlock();
ti.deallocate(this, size(), memtag_masstree_gc);
}
}
template <typename P>
void gc_layer_rcu_callback<P>::make(node_base<P>* root, Str prefix,
threadinfo& ti)
{
size_t sz = prefix.len + sizeof(gc_layer_rcu_callback<P>);
void *data = ti.allocate(sz, memtag_masstree_gc);
gc_layer_rcu_callback<P> *cb =
new(data) gc_layer_rcu_callback<P>(root, prefix);
ti.rcu_register(cb);
}
template <typename P>
bool tcursor<P>::finish_remove(threadinfo& ti)
{
if (n_->modstate_ == leaf<P>::modstate_insert) {
n_->mark_insert();
n_->modstate_ = leaf<P>::modstate_remove;
}
permuter_type perm(n_->permutation_);
perm.remove(kx_.i);
n_->permutation_ = perm.value();
if (perm.size())
return false;
else
return remove_leaf(n_, root_, ka_.prefix_string(), ti);
}
template <typename P>
bool tcursor<P>::remove_leaf(leaf_type* leaf, node_type* root,
Str prefix, threadinfo& ti)
{
if (!leaf->prev_) {
if (!leaf->next_.ptr && !prefix.empty())
gc_layer_rcu_callback<P>::make(root, prefix, ti);
return false;
}
// mark leaf deleted, RCU-free
leaf->mark_deleted();
leaf->deallocate_rcu(ti);
    // Ensure that the node that becomes responsible for our keys has its
    // phantom epoch kept up to date.
while (P::need_phantom_epoch) {
leaf_type *prev = leaf->prev_;
typename P::phantom_epoch_type prev_ts = prev->phantom_epoch();
while (circular_int<typename P::phantom_epoch_type>::less(prev_ts, leaf->phantom_epoch())
&& !bool_cmpxchg(&prev->phantom_epoch_[0], prev_ts, leaf->phantom_epoch()))
prev_ts = prev->phantom_epoch();
fence();
if (prev == leaf->prev_)
break;
}
// Unlink leaf from doubly-linked leaf list
btree_leaflink<leaf_type>::unlink(leaf);
// Remove leaf from tree. This is simple unless the leaf is the first
// child of its parent, in which case we need to traverse up until we find
// its key.
node_type *n = leaf;
ikey_type ikey = leaf->ikey_bound();
while (1) {
internode_type *p = n->locked_parent(ti);
masstree_invariant(p);
n->unlock();
int kp = internode_type::bound_type::upper(ikey, *p);
masstree_invariant(kp == 0 || p->compare_key(ikey, kp - 1) == 0);
if (kp > 0) {
p->mark_insert();
p->shift_down(kp - 1, kp, p->nkeys_ - kp);
--p->nkeys_;
if (kp > 1 || p->child_[0])
return collapse(p, ikey, root, prefix, ti);
}
if (p->size() == 0) {
p->mark_deleted();
p->deallocate_rcu(ti);
} else
return reshape(p, ikey, root, prefix, ti);
n = p;
}
}
template <typename P>
bool tcursor<P>::reshape(internode_type* n, ikey_type ikey,
node_type* root, Str prefix, threadinfo& ti)
{
masstree_precondition(n && n->locked());
n->child_[0] = 0;
ikey_type patchkey = n->ikey0_[0];
while (1) {
internode_type *p = n->locked_parent(ti);
masstree_invariant(p);
n->unlock();
int kp = internode_type::bound_type::upper(ikey, *p);
masstree_invariant(kp == 0 || p->compare_key(ikey, kp - 1) == 0);
if (kp > 0) {
p->mark_insert();
p->ikey0_[kp - 1] = patchkey;
if (kp > 1 || p->child_[0])
return collapse(p, ikey, root, prefix, ti);
}
n = p;
}
}
template <typename P>
bool tcursor<P>::collapse(internode_type* n, ikey_type ikey,
node_type* root, Str prefix, threadinfo& ti)
{
masstree_precondition(n && n->locked());
while (n->size() == 0) {
internode_type *p = n->locked_parent(ti);
if (!n->parent_exists(p)) {
if (!prefix.empty())
gc_layer_rcu_callback<P>::make(root, prefix, ti);
break;
}
int kp = key_upper_bound(ikey, *p);
masstree_invariant(p->child_[kp] == n);
p->child_[kp] = n->child_[0];
n->child_[0]->set_parent(p);
n->mark_deleted();
n->unlock();
n->deallocate_rcu(ti);
n = p;
}
n->unlock();
return true;
}
template <typename P>
struct destroy_rcu_callback : public P::threadinfo_type::mrcu_callback {
typedef typename P::threadinfo_type threadinfo;
typedef typename node_base<P>::leaf_type leaf_type;
typedef typename node_base<P>::internode_type internode_type;
node_base<P>* root_;
int count_;
destroy_rcu_callback(node_base<P>* root)
: root_(root), count_(0) {
}
void operator()(threadinfo& ti);
static void make(node_base<P>* root, Str prefix, threadinfo& ti);
private:
static inline node_base<P>** link_ptr(node_base<P>* n);
static inline void enqueue(node_base<P>* n, node_base<P>**& tailp);
};
template <typename P>
inline node_base<P>** destroy_rcu_callback<P>::link_ptr(node_base<P>* n) {
if (n->isleaf())
return &static_cast<leaf_type*>(n)->parent_;
else
return &static_cast<internode_type*>(n)->parent_;
}
template <typename P>
inline void destroy_rcu_callback<P>::enqueue(node_base<P>* n,
node_base<P>**& tailp) {
*tailp = n;
tailp = link_ptr(n);
}
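// Note on the traversal below: the work list is threaded through the nodes'
// own parent_ pointers (via link_ptr). This is safe because every enqueued
// node is about to be destroyed anyway, and it avoids allocating an
// auxiliary queue while tearing the tree down.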
template <typename P>
void destroy_rcu_callback<P>::operator()(threadinfo& ti) {
if (++count_ == 1) {
while (!root_->is_root())
root_ = root_->maybe_parent();
root_->lock();
root_->mark_deleted_tree(); // i.e., deleted but not splitting
root_->unlock();
ti.rcu_register(this);
return;
}
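    // Second invocation: a grace period has passed since the tree was marked
    // deleted above, so concurrent readers should no longer be able to reach
    // these nodes and they can be walked and freed eagerly below.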
node_base<P>* workq;
node_base<P>** tailp = &workq;
enqueue(root_, tailp);
while (node_base<P>* n = workq) {
node_base<P>** linkp = link_ptr(n);
if (linkp != tailp)
workq = *linkp;
else {
workq = 0;
tailp = &workq;
}
if (n->isleaf()) {
leaf_type* l = static_cast<leaf_type*>(n);
typename leaf_type::permuter_type perm = l->permutation();
for (int i = 0; i != l->size(); ++i) {
int p = perm[i];
if (l->is_layer(p))
enqueue(l->lv_[p].layer(), tailp);
}
l->deallocate(ti);
} else {
internode_type* in = static_cast<internode_type*>(n);
for (int i = 0; i != in->size() + 1; ++i)
if (in->child_[i])
enqueue(in->child_[i], tailp);
in->deallocate(ti);
}
}
ti.deallocate(this, sizeof(this), memtag_masstree_gc);
}
template <typename P>
void basic_table<P>::destroy(threadinfo& ti) {
if (root_) {
void* data = ti.allocate(sizeof(destroy_rcu_callback<P>), memtag_masstree_gc);
destroy_rcu_callback<P>* cb = new(data) destroy_rcu_callback<P>(root_);
ti.rcu_register(cb);
root_ = 0;
}
}
} // namespace Masstree
#endif
| 31.566038 | 112 | 0.590129 | nilyibo |
339cfee207ba0db4e066b4a04a55a4a710578796 | 8,710 | cpp | C++ | RemoteControl/LinuxDevice.cpp | dwrobel/WPEFrameworkPlugins | 0cf410aebe5b9d7a516eb0995e32383cc944f838 | [
"Apache-2.0"
] | null | null | null | RemoteControl/LinuxDevice.cpp | dwrobel/WPEFrameworkPlugins | 0cf410aebe5b9d7a516eb0995e32383cc944f838 | [
"Apache-2.0"
] | null | null | null | RemoteControl/LinuxDevice.cpp | dwrobel/WPEFrameworkPlugins | 0cf410aebe5b9d7a516eb0995e32383cc944f838 | [
"Apache-2.0"
] | null | null | null | #include "RemoteAdministrator.h"
#include <interfaces/IKeyHandler.h>
#include <libudev.h>
#include <linux/uinput.h>
namespace WPEFramework {
namespace Plugin {
static char Locator[] = _T("/dev/input");
class LinuxDevice : public Exchange::IKeyProducer, Core::Thread {
private:
LinuxDevice(const LinuxDevice&) = delete;
LinuxDevice& operator=(const LinuxDevice&) = delete;
public:
LinuxDevice()
: Core::Thread(Core::Thread::DefaultStackSize(), _T("LinuxInputSystem"))
, _devices()
, _monitor(nullptr)
, _update(-1)
, _callback(nullptr)
{
_pipe[0] = -1;
_pipe[1] = -1;
if (::pipe(_pipe) < 0) {
                // Pipe not successfully opened. Close any end that was opened.
if (_pipe[0] != -1) {
close(_pipe[0]);
}
if (_pipe[1] != -1) {
close(_pipe[1]);
}
_pipe[0] = -1;
_pipe[1] = -1;
} else {
struct udev* udev = udev_new();
// Set up a monitor to monitor event devices
_monitor = udev_monitor_new_from_netlink(udev, "udev");
udev_monitor_filter_add_match_subsystem_devtype(_monitor, "input", nullptr);
udev_monitor_enable_receiving(_monitor);
// Get the file descriptor (fd) for the monitor
_update = udev_monitor_get_fd(_monitor);
udev_unref(udev);
Remotes::RemoteAdministrator::Instance().Announce(*this);
}
}
virtual ~LinuxDevice()
{
Block();
Clear();
if (_pipe[0] != -1) {
Remotes::RemoteAdministrator::Instance().Revoke(*this);
close(_pipe[0]);
close(_pipe[1]);
}
if (_update != -1) {
::close(_update);
}
if (_monitor != nullptr) {
udev_monitor_unref(_monitor);
}
}
public:
virtual const TCHAR* Name() const
{
return (_T("DevInput"));
}
virtual void Configure(const string&)
{
Pair();
}
virtual bool Pair()
{
// Make sure we are not processing anything.
Block();
Refresh();
// We are done, start observing again.
Run();
return (true);
}
virtual bool Unpair(string bindingId)
{
// Make sure we are not processing anything.
Block();
Refresh();
// We are done, start observing again.
Run();
return (true);
}
virtual uint32_t Callback(Exchange::IKeyHandler* callback)
{
ASSERT((callback == nullptr) ^ (_callback == nullptr));
if (callback == nullptr) {
// We are unlinked. Deinitialize the stuff.
_callback = nullptr;
} else {
TRACE_L1("%s: callback=%p _callback=%p", __FUNCTION__, callback, _callback);
_callback = callback;
}
return (Core::ERROR_NONE);
}
virtual uint32_t Error() const
{
return (Core::ERROR_NONE);
}
virtual string MetaData() const
{
return (Name());
}
BEGIN_INTERFACE_MAP(LinuxDevice)
INTERFACE_ENTRY(Exchange::IKeyProducer)
END_INTERFACE_MAP
private:
void Refresh()
{
// Remove all current open devices.
Clear();
// find devices in /dev/input/
Core::Directory dir(Locator);
while (dir.Next() == true) {
Core::File entry(dir.Current(), false);
if ((entry.IsDirectory() == false) && (entry.FileName().substr(0, 5) == _T("event"))) {
TRACE(Trace::Information, (_T("Opening input device: %s"), entry.Name().c_str()));
if (entry.Open(true) == true) {
_devices.push_back(entry.DuplicateHandle());
}
}
}
}
void Clear()
{
for (std::vector<int>::const_iterator it = _devices.begin(), end = _devices.end();
it != end; ++it) {
close(*it);
}
_devices.clear();
}
void Block()
{
Core::Thread::Block();
write(_pipe[1], " ", 1);
Wait(Core::Thread::INITIALIZED | Core::Thread::BLOCKED | Core::Thread::STOPPED, Core::infinite);
}
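        // NOTE: the single byte written to _pipe[1] above is what wakes the
        // select() call in Worker(), so the worker thread re-evaluates its
        // running state promptly instead of staying parked in select() until
        // the next input event arrives.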
virtual uint32_t Worker()
{
while (IsRunning() == true) {
fd_set readset;
FD_ZERO(&readset);
FD_SET(_pipe[0], &readset);
FD_SET(_update, &readset);
int result = std::max(_pipe[0], _update);
// set up all the input devices
for (std::vector<int>::const_iterator index = _devices.begin(), end = _devices.end(); index != end; ++index) {
FD_SET(*index, &readset);
result = std::max(result, *index);
}
result = select(result + 1, &readset, 0, 0, nullptr);
if (result > 0) {
if (FD_ISSET(_pipe[0], &readset)) {
char buff;
(void)read(_pipe[0], &buff, 1);
}
if (FD_ISSET(_update, &readset)) {
// Make the call to receive the device. select() ensured that this will not block.
udev_device* dev = udev_monitor_receive_device(_monitor);
if (dev) {
const char* nodeId = udev_device_get_devnode(dev);
bool reload = ((nodeId != nullptr) && (strncmp(Locator, nodeId, sizeof(Locator) - 1) == 0));
udev_device_unref(dev);
TRACE_L1("Changes from udev perspective. Reload (%s)", reload ? _T("true") : _T("false"));
if (reload == true) {
Refresh();
}
}
}
// find the devices to read from
std::vector<int>::iterator index = _devices.begin();
while (index != _devices.end()) {
if (FD_ISSET(*index, &readset)) {
if (HandleInput(*index) == false) {
// fd closed?
close(*index);
index = _devices.erase(index);
} else {
++index;
}
} else {
++index;
}
}
}
}
return (Core::infinite);
}
bool HandleInput(const int fd)
{
input_event entry[16];
int index = 0;
int result = ::read(fd, entry, sizeof(entry));
if (result > 0) {
while (result >= static_cast<int>(sizeof(input_event))) {
ASSERT(index < static_cast<int>((sizeof(entry) / sizeof(input_event))));
// If it is a KEY and it is *NOT* a repeat, send it..
// Repeat gets constructed by the framework anyway.
if ((entry[index].type == EV_KEY) && (entry[index].value != 2)) {
const uint16_t code = entry[index].code;
const bool pressed = entry[index].value != 0;
TRACE(Trace::Information, (_T("Sending pressed: %s, code: 0x%04X"), (pressed ? _T("true") : _T("false")), code));
_callback->KeyEvent(pressed, code, Name());
}
index++;
result -= sizeof(input_event);
}
}
return (result >= 0);
}
private:
std::vector<int> _devices;
int _pipe[2];
udev_monitor* _monitor;
int _update;
Exchange::IKeyHandler* _callback;
static LinuxDevice* _singleton;
};
/* static */ LinuxDevice* LinuxDevice::_singleton = Core::Service<LinuxDevice>::Create<LinuxDevice>();
}
}
| 32.259259 | 137 | 0.443169 | dwrobel |
339dce0367f9e7ea9b31beb134823f9b12d7d9d2 | 1,900 | cpp | C++ | src/+cv/colorChange.cpp | 1123852253/mexopencv | 17db690133299f561924a45e9092673a4df66c5b | [
"BSD-3-Clause"
] | 571 | 2015-01-04T06:23:19.000Z | 2022-03-31T07:37:19.000Z | src/+cv/colorChange.cpp | 1123852253/mexopencv | 17db690133299f561924a45e9092673a4df66c5b | [
"BSD-3-Clause"
] | 362 | 2015-01-06T14:20:46.000Z | 2022-01-20T08:10:46.000Z | src/+cv/colorChange.cpp | 1123852253/mexopencv | 17db690133299f561924a45e9092673a4df66c5b | [
"BSD-3-Clause"
] | 300 | 2015-01-20T03:21:27.000Z | 2022-03-31T07:36:37.000Z | /**
* @file colorChange.cpp
* @brief mex interface for cv::colorChange
* @ingroup photo
* @author Amro
* @date 2015
*/
#include "mexopencv.hpp"
#include "opencv2/photo.hpp"
using namespace std;
using namespace cv;
/**
* Main entry called from Matlab
* @param nlhs number of left-hand-side arguments
* @param plhs pointers to mxArrays in the left-hand-side
* @param nrhs number of right-hand-side arguments
* @param prhs pointers to mxArrays in the right-hand-side
*/
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
// Check the number of arguments
nargchk(nrhs>=2 && (nrhs%2)==0 && nlhs<=1);
// Argument vector
vector<MxArray> rhs(prhs, prhs+nrhs);
// Option processing
float red_mul = 1.0f;
float green_mul = 1.0f;
float blue_mul = 1.0f;
bool flip = false;
for (int i=2; i<nrhs; i+=2) {
string key(rhs[i].toString());
if (key == "R")
red_mul = rhs[i+1].toFloat();
else if (key == "G")
green_mul = rhs[i+1].toFloat();
else if (key == "B")
blue_mul = rhs[i+1].toFloat();
else if (key == "FlipChannels")
flip = rhs[i+1].toBool();
else
mexErrMsgIdAndTxt("mexopencv:error",
"Unrecognized option %s", key.c_str());
}
// Process
Mat src(rhs[0].toMat(CV_8U)),
mask(rhs[1].toMat(CV_8U)),
dst;
// MATLAB's default is RGB while OpenCV's is BGR
if (flip) {
if (src.channels() == 3)
cvtColor(src, src, cv::COLOR_RGB2BGR);
if (mask.channels() == 3)
cvtColor(mask, mask, cv::COLOR_RGB2BGR);
}
colorChange(src, mask, dst, red_mul, green_mul, blue_mul);
// OpenCV's default is BGR while MATLAB's is RGB
if (flip && dst.channels() == 3)
cvtColor(dst, dst, cv::COLOR_BGR2RGB);
plhs[0] = MxArray(dst);
}
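// Illustrative MATLAB-side call, assuming mexopencv's usual +cv packaging of
// this MEX source (option names match the keys parsed above; each multiplier
// defaults to 1.0):
//   dst = cv.colorChange(src, mask, 'R',1.5, 'G',0.5, 'B',0.5);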
| 29.230769 | 76 | 0.584737 | 1123852253 |
339e0d3a8aba3297551cefdabb6e799b63201e1a | 438,399 | cc | C++ | deps/v8/src/hydrogen.cc | Myrannas/runtime | d81f9ec641bb84e6713ae2ad8de29e1e54c65f4d | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2018-03-06T20:08:50.000Z | 2018-03-06T20:08:50.000Z | deps/v8/src/hydrogen.cc | Myrannas/runtime | d81f9ec641bb84e6713ae2ad8de29e1e54c65f4d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | deps/v8/src/hydrogen.cc | Myrannas/runtime | d81f9ec641bb84e6713ae2ad8de29e1e54c65f4d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | // Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/hydrogen.h"
#include <algorithm>
#include "src/v8.h"
#include "src/allocation-site-scopes.h"
#include "src/codegen.h"
#include "src/full-codegen.h"
#include "src/hashmap.h"
#include "src/hydrogen-bce.h"
#include "src/hydrogen-bch.h"
#include "src/hydrogen-canonicalize.h"
#include "src/hydrogen-check-elimination.h"
#include "src/hydrogen-dce.h"
#include "src/hydrogen-dehoist.h"
#include "src/hydrogen-environment-liveness.h"
#include "src/hydrogen-escape-analysis.h"
#include "src/hydrogen-gvn.h"
#include "src/hydrogen-infer-representation.h"
#include "src/hydrogen-infer-types.h"
#include "src/hydrogen-load-elimination.h"
#include "src/hydrogen-mark-deoptimize.h"
#include "src/hydrogen-mark-unreachable.h"
#include "src/hydrogen-osr.h"
#include "src/hydrogen-range-analysis.h"
#include "src/hydrogen-redundant-phi.h"
#include "src/hydrogen-removable-simulates.h"
#include "src/hydrogen-representation-changes.h"
#include "src/hydrogen-sce.h"
#include "src/hydrogen-store-elimination.h"
#include "src/hydrogen-uint32-analysis.h"
#include "src/lithium-allocator.h"
#include "src/parser.h"
#include "src/runtime.h"
#include "src/scopeinfo.h"
#include "src/scopes.h"
#include "src/stub-cache.h"
#include "src/typing.h"
#if V8_TARGET_ARCH_IA32
#include "src/ia32/lithium-codegen-ia32.h" // NOLINT
#elif V8_TARGET_ARCH_X64
#include "src/x64/lithium-codegen-x64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM64
#include "src/arm64/lithium-codegen-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/lithium-codegen-arm.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/lithium-codegen-mips.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/lithium-codegen-x87.h" // NOLINT
#else
#error Unsupported target architecture.
#endif
namespace v8 {
namespace internal {
HBasicBlock::HBasicBlock(HGraph* graph)
: block_id_(graph->GetNextBlockID()),
graph_(graph),
phis_(4, graph->zone()),
first_(NULL),
last_(NULL),
end_(NULL),
loop_information_(NULL),
predecessors_(2, graph->zone()),
dominator_(NULL),
dominated_blocks_(4, graph->zone()),
last_environment_(NULL),
argument_count_(-1),
first_instruction_index_(-1),
last_instruction_index_(-1),
deleted_phis_(4, graph->zone()),
parent_loop_header_(NULL),
inlined_entry_block_(NULL),
is_inline_return_target_(false),
is_reachable_(true),
dominates_loop_successors_(false),
is_osr_entry_(false),
is_ordered_(false) { }
Isolate* HBasicBlock::isolate() const {
return graph_->isolate();
}
void HBasicBlock::MarkUnreachable() {
is_reachable_ = false;
}
void HBasicBlock::AttachLoopInformation() {
ASSERT(!IsLoopHeader());
loop_information_ = new(zone()) HLoopInformation(this, zone());
}
void HBasicBlock::DetachLoopInformation() {
ASSERT(IsLoopHeader());
loop_information_ = NULL;
}
void HBasicBlock::AddPhi(HPhi* phi) {
ASSERT(!IsStartBlock());
phis_.Add(phi, zone());
phi->SetBlock(this);
}
void HBasicBlock::RemovePhi(HPhi* phi) {
ASSERT(phi->block() == this);
ASSERT(phis_.Contains(phi));
phi->Kill();
phis_.RemoveElement(phi);
phi->SetBlock(NULL);
}
void HBasicBlock::AddInstruction(HInstruction* instr,
HSourcePosition position) {
ASSERT(!IsStartBlock() || !IsFinished());
ASSERT(!instr->IsLinked());
ASSERT(!IsFinished());
if (!position.IsUnknown()) {
instr->set_position(position);
}
if (first_ == NULL) {
ASSERT(last_environment() != NULL);
ASSERT(!last_environment()->ast_id().IsNone());
HBlockEntry* entry = new(zone()) HBlockEntry();
entry->InitializeAsFirst(this);
if (!position.IsUnknown()) {
entry->set_position(position);
} else {
ASSERT(!FLAG_hydrogen_track_positions ||
!graph()->info()->IsOptimizing());
}
first_ = last_ = entry;
}
instr->InsertAfter(last_);
}
HPhi* HBasicBlock::AddNewPhi(int merged_index) {
if (graph()->IsInsideNoSideEffectsScope()) {
merged_index = HPhi::kInvalidMergedIndex;
}
HPhi* phi = new(zone()) HPhi(merged_index, zone());
AddPhi(phi);
return phi;
}
HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
RemovableSimulate removable) {
ASSERT(HasEnvironment());
HEnvironment* environment = last_environment();
ASSERT(ast_id.IsNone() ||
ast_id == BailoutId::StubEntry() ||
environment->closure()->shared()->VerifyBailoutId(ast_id));
int push_count = environment->push_count();
int pop_count = environment->pop_count();
HSimulate* instr =
new(zone()) HSimulate(ast_id, pop_count, zone(), removable);
#ifdef DEBUG
instr->set_closure(environment->closure());
#endif
// Order of pushed values: newest (top of stack) first. This allows
// HSimulate::MergeWith() to easily append additional pushed values
// that are older (from further down the stack).
for (int i = 0; i < push_count; ++i) {
instr->AddPushedValue(environment->ExpressionStackAt(i));
}
for (GrowableBitVector::Iterator it(environment->assigned_variables(),
zone());
!it.Done();
it.Advance()) {
int index = it.Current();
instr->AddAssignedValue(index, environment->Lookup(index));
}
environment->ClearHistory();
return instr;
}
void HBasicBlock::Finish(HControlInstruction* end, HSourcePosition position) {
ASSERT(!IsFinished());
AddInstruction(end, position);
end_ = end;
for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
it.Current()->RegisterPredecessor(this);
}
}
void HBasicBlock::Goto(HBasicBlock* block,
HSourcePosition position,
FunctionState* state,
bool add_simulate) {
bool drop_extra = state != NULL &&
state->inlining_kind() == NORMAL_RETURN;
if (block->IsInlineReturnTarget()) {
HEnvironment* env = last_environment();
int argument_count = env->arguments_environment()->parameter_count();
AddInstruction(new(zone())
HLeaveInlined(state->entry(), argument_count),
position);
UpdateEnvironment(last_environment()->DiscardInlined(drop_extra));
}
if (add_simulate) AddNewSimulate(BailoutId::None(), position);
HGoto* instr = new(zone()) HGoto(block);
Finish(instr, position);
}
void HBasicBlock::AddLeaveInlined(HValue* return_value,
FunctionState* state,
HSourcePosition position) {
HBasicBlock* target = state->function_return();
bool drop_extra = state->inlining_kind() == NORMAL_RETURN;
ASSERT(target->IsInlineReturnTarget());
ASSERT(return_value != NULL);
HEnvironment* env = last_environment();
int argument_count = env->arguments_environment()->parameter_count();
AddInstruction(new(zone()) HLeaveInlined(state->entry(), argument_count),
position);
UpdateEnvironment(last_environment()->DiscardInlined(drop_extra));
last_environment()->Push(return_value);
AddNewSimulate(BailoutId::None(), position);
HGoto* instr = new(zone()) HGoto(target);
Finish(instr, position);
}
void HBasicBlock::SetInitialEnvironment(HEnvironment* env) {
ASSERT(!HasEnvironment());
ASSERT(first() == NULL);
UpdateEnvironment(env);
}
void HBasicBlock::UpdateEnvironment(HEnvironment* env) {
last_environment_ = env;
graph()->update_maximum_environment_size(env->first_expression_index());
}
void HBasicBlock::SetJoinId(BailoutId ast_id) {
int length = predecessors_.length();
ASSERT(length > 0);
for (int i = 0; i < length; i++) {
HBasicBlock* predecessor = predecessors_[i];
ASSERT(predecessor->end()->IsGoto());
HSimulate* simulate = HSimulate::cast(predecessor->end()->previous());
ASSERT(i != 0 ||
(predecessor->last_environment()->closure().is_null() ||
predecessor->last_environment()->closure()->shared()
->VerifyBailoutId(ast_id)));
simulate->set_ast_id(ast_id);
predecessor->last_environment()->set_ast_id(ast_id);
}
}
bool HBasicBlock::Dominates(HBasicBlock* other) const {
HBasicBlock* current = other->dominator();
while (current != NULL) {
if (current == this) return true;
current = current->dominator();
}
return false;
}
bool HBasicBlock::EqualToOrDominates(HBasicBlock* other) const {
if (this == other) return true;
return Dominates(other);
}
int HBasicBlock::LoopNestingDepth() const {
const HBasicBlock* current = this;
int result = (current->IsLoopHeader()) ? 1 : 0;
while (current->parent_loop_header() != NULL) {
current = current->parent_loop_header();
result++;
}
return result;
}
void HBasicBlock::PostProcessLoopHeader(IterationStatement* stmt) {
ASSERT(IsLoopHeader());
SetJoinId(stmt->EntryId());
if (predecessors()->length() == 1) {
    // This is a degenerate loop.
DetachLoopInformation();
return;
}
// Only the first entry into the loop is from outside the loop. All other
// entries must be back edges.
for (int i = 1; i < predecessors()->length(); ++i) {
loop_information()->RegisterBackEdge(predecessors()->at(i));
}
}
void HBasicBlock::MarkSuccEdgeUnreachable(int succ) {
ASSERT(IsFinished());
HBasicBlock* succ_block = end()->SuccessorAt(succ);
ASSERT(succ_block->predecessors()->length() == 1);
succ_block->MarkUnreachable();
}
void HBasicBlock::RegisterPredecessor(HBasicBlock* pred) {
if (HasPredecessor()) {
// Only loop header blocks can have a predecessor added after
// instructions have been added to the block (they have phis for all
// values in the environment, these phis may be eliminated later).
ASSERT(IsLoopHeader() || first_ == NULL);
HEnvironment* incoming_env = pred->last_environment();
if (IsLoopHeader()) {
ASSERT(phis()->length() == incoming_env->length());
for (int i = 0; i < phis_.length(); ++i) {
phis_[i]->AddInput(incoming_env->values()->at(i));
}
} else {
last_environment()->AddIncomingEdge(this, pred->last_environment());
}
} else if (!HasEnvironment() && !IsFinished()) {
ASSERT(!IsLoopHeader());
SetInitialEnvironment(pred->last_environment()->Copy());
}
predecessors_.Add(pred, zone());
}
void HBasicBlock::AddDominatedBlock(HBasicBlock* block) {
ASSERT(!dominated_blocks_.Contains(block));
  // Keep the list of dominated blocks sorted such that if there are two
  // succeeding blocks in this list, the predecessor comes before the successor.
int index = 0;
while (index < dominated_blocks_.length() &&
dominated_blocks_[index]->block_id() < block->block_id()) {
++index;
}
dominated_blocks_.InsertAt(index, block, zone());
}
void HBasicBlock::AssignCommonDominator(HBasicBlock* other) {
if (dominator_ == NULL) {
dominator_ = other;
other->AddDominatedBlock(this);
} else if (other->dominator() != NULL) {
HBasicBlock* first = dominator_;
HBasicBlock* second = other;
while (first != second) {
if (first->block_id() > second->block_id()) {
first = first->dominator();
} else {
second = second->dominator();
}
ASSERT(first != NULL && second != NULL);
}
if (dominator_ != first) {
ASSERT(dominator_->dominated_blocks_.Contains(this));
dominator_->dominated_blocks_.RemoveElement(this);
dominator_ = first;
first->AddDominatedBlock(this);
}
}
}
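// The while loop above is the classic two-pointer dominator intersection:
// whichever candidate currently has the larger block id is stepped up its
// dominator chain until the two meet. This relies on a dominator always
// carrying a smaller block id than the blocks it dominates, which holds once
// blocks are sorted in reverse post order (see AssignLoopSuccessorDominators
// below).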
void HBasicBlock::AssignLoopSuccessorDominators() {
// Mark blocks that dominate all subsequent reachable blocks inside their
// loop. Exploit the fact that blocks are sorted in reverse post order. When
// the loop is visited in increasing block id order, if the number of
// non-loop-exiting successor edges at the dominator_candidate block doesn't
// exceed the number of previously encountered predecessor edges, there is no
// path from the loop header to any block with higher id that doesn't go
// through the dominator_candidate block. In this case, the
// dominator_candidate block is guaranteed to dominate all blocks reachable
// from it with higher ids.
HBasicBlock* last = loop_information()->GetLastBackEdge();
int outstanding_successors = 1; // one edge from the pre-header
// Header always dominates everything.
MarkAsLoopSuccessorDominator();
for (int j = block_id(); j <= last->block_id(); ++j) {
HBasicBlock* dominator_candidate = graph_->blocks()->at(j);
for (HPredecessorIterator it(dominator_candidate); !it.Done();
it.Advance()) {
HBasicBlock* predecessor = it.Current();
// Don't count back edges.
if (predecessor->block_id() < dominator_candidate->block_id()) {
outstanding_successors--;
}
}
// If more successors than predecessors have been seen in the loop up to
// now, it's not possible to guarantee that the current block dominates
// all of the blocks with higher IDs. In this case, assume conservatively
    // that those paths through the loop that don't go through the current block
// contain all of the loop's dependencies. Also be careful to record
// dominator information about the current loop that's being processed,
// and not nested loops, which will be processed when
// AssignLoopSuccessorDominators gets called on their header.
ASSERT(outstanding_successors >= 0);
HBasicBlock* parent_loop_header = dominator_candidate->parent_loop_header();
if (outstanding_successors == 0 &&
(parent_loop_header == this && !dominator_candidate->IsLoopHeader())) {
dominator_candidate->MarkAsLoopSuccessorDominator();
}
HControlInstruction* end = dominator_candidate->end();
for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
HBasicBlock* successor = it.Current();
// Only count successors that remain inside the loop and don't loop back
// to a loop header.
if (successor->block_id() > dominator_candidate->block_id() &&
successor->block_id() <= last->block_id()) {
// Backwards edges must land on loop headers.
ASSERT(successor->block_id() > dominator_candidate->block_id() ||
successor->IsLoopHeader());
outstanding_successors++;
}
}
}
}
int HBasicBlock::PredecessorIndexOf(HBasicBlock* predecessor) const {
for (int i = 0; i < predecessors_.length(); ++i) {
if (predecessors_[i] == predecessor) return i;
}
UNREACHABLE();
return -1;
}
#ifdef DEBUG
void HBasicBlock::Verify() {
// Check that every block is finished.
ASSERT(IsFinished());
ASSERT(block_id() >= 0);
// Check that the incoming edges are in edge split form.
if (predecessors_.length() > 1) {
for (int i = 0; i < predecessors_.length(); ++i) {
ASSERT(predecessors_[i]->end()->SecondSuccessor() == NULL);
}
}
}
#endif
void HLoopInformation::RegisterBackEdge(HBasicBlock* block) {
this->back_edges_.Add(block, block->zone());
AddBlock(block);
}
HBasicBlock* HLoopInformation::GetLastBackEdge() const {
int max_id = -1;
HBasicBlock* result = NULL;
for (int i = 0; i < back_edges_.length(); ++i) {
HBasicBlock* cur = back_edges_[i];
if (cur->block_id() > max_id) {
max_id = cur->block_id();
result = cur;
}
}
return result;
}
void HLoopInformation::AddBlock(HBasicBlock* block) {
if (block == loop_header()) return;
if (block->parent_loop_header() == loop_header()) return;
if (block->parent_loop_header() != NULL) {
AddBlock(block->parent_loop_header());
} else {
block->set_parent_loop_header(loop_header());
blocks_.Add(block, block->zone());
for (int i = 0; i < block->predecessors()->length(); ++i) {
AddBlock(block->predecessors()->at(i));
}
}
}
#ifdef DEBUG
// Checks reachability of the blocks in this graph and stores a bit in
// the BitVector "reachable()" for every block that can be reached
// from the start block of the graph. If "dont_visit" is non-null, the given
// block is treated as if it were not part of the graph. "visited_count()"
// returns the number of reachable blocks.
class ReachabilityAnalyzer BASE_EMBEDDED {
public:
ReachabilityAnalyzer(HBasicBlock* entry_block,
int block_count,
HBasicBlock* dont_visit)
: visited_count_(0),
stack_(16, entry_block->zone()),
reachable_(block_count, entry_block->zone()),
dont_visit_(dont_visit) {
PushBlock(entry_block);
Analyze();
}
int visited_count() const { return visited_count_; }
const BitVector* reachable() const { return &reachable_; }
private:
void PushBlock(HBasicBlock* block) {
if (block != NULL && block != dont_visit_ &&
!reachable_.Contains(block->block_id())) {
reachable_.Add(block->block_id());
stack_.Add(block, block->zone());
visited_count_++;
}
}
void Analyze() {
while (!stack_.is_empty()) {
HControlInstruction* end = stack_.RemoveLast()->end();
for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
PushBlock(it.Current());
}
}
}
int visited_count_;
ZoneList<HBasicBlock*> stack_;
BitVector reachable_;
HBasicBlock* dont_visit_;
};
void HGraph::Verify(bool do_full_verify) const {
Heap::RelocationLock relocation_lock(isolate()->heap());
AllowHandleDereference allow_deref;
AllowDeferredHandleDereference allow_deferred_deref;
for (int i = 0; i < blocks_.length(); i++) {
HBasicBlock* block = blocks_.at(i);
block->Verify();
// Check that every block contains at least one node and that only the last
// node is a control instruction.
HInstruction* current = block->first();
ASSERT(current != NULL && current->IsBlockEntry());
while (current != NULL) {
ASSERT((current->next() == NULL) == current->IsControlInstruction());
ASSERT(current->block() == block);
current->Verify();
current = current->next();
}
// Check that successors are correctly set.
HBasicBlock* first = block->end()->FirstSuccessor();
HBasicBlock* second = block->end()->SecondSuccessor();
ASSERT(second == NULL || first != NULL);
// Check that the predecessor array is correct.
if (first != NULL) {
ASSERT(first->predecessors()->Contains(block));
if (second != NULL) {
ASSERT(second->predecessors()->Contains(block));
}
}
// Check that phis have correct arguments.
for (int j = 0; j < block->phis()->length(); j++) {
HPhi* phi = block->phis()->at(j);
phi->Verify();
}
// Check that all join blocks have predecessors that end with an
// unconditional goto and agree on their environment node id.
if (block->predecessors()->length() >= 2) {
BailoutId id =
block->predecessors()->first()->last_environment()->ast_id();
for (int k = 0; k < block->predecessors()->length(); k++) {
HBasicBlock* predecessor = block->predecessors()->at(k);
ASSERT(predecessor->end()->IsGoto() ||
predecessor->end()->IsDeoptimize());
ASSERT(predecessor->last_environment()->ast_id() == id);
}
}
}
// Check special property of first block to have no predecessors.
ASSERT(blocks_.at(0)->predecessors()->is_empty());
if (do_full_verify) {
// Check that the graph is fully connected.
ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
ASSERT(analyzer.visited_count() == blocks_.length());
// Check that entry block dominator is NULL.
ASSERT(entry_block_->dominator() == NULL);
// Check dominators.
for (int i = 0; i < blocks_.length(); ++i) {
HBasicBlock* block = blocks_.at(i);
if (block->dominator() == NULL) {
// Only start block may have no dominator assigned to.
ASSERT(i == 0);
} else {
// Assert that block is unreachable if dominator must not be visited.
ReachabilityAnalyzer dominator_analyzer(entry_block_,
blocks_.length(),
block->dominator());
ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id()));
}
}
}
}
#endif
HConstant* HGraph::GetConstant(SetOncePointer<HConstant>* pointer,
int32_t value) {
if (!pointer->is_set()) {
// Can't pass GetInvalidContext() to HConstant::New, because that will
// recursively call GetConstant
HConstant* constant = HConstant::New(zone(), NULL, value);
constant->InsertAfter(entry_block()->first());
pointer->set(constant);
return constant;
}
return ReinsertConstantIfNecessary(pointer->get());
}
HConstant* HGraph::ReinsertConstantIfNecessary(HConstant* constant) {
if (!constant->IsLinked()) {
// The constant was removed from the graph. Reinsert.
constant->ClearFlag(HValue::kIsDead);
constant->InsertAfter(entry_block()->first());
}
return constant;
}
HConstant* HGraph::GetConstant0() {
return GetConstant(&constant_0_, 0);
}
HConstant* HGraph::GetConstant1() {
return GetConstant(&constant_1_, 1);
}
HConstant* HGraph::GetConstantMinus1() {
return GetConstant(&constant_minus1_, -1);
}
#define DEFINE_GET_CONSTANT(Name, name, type, htype, boolean_value) \
HConstant* HGraph::GetConstant##Name() { \
if (!constant_##name##_.is_set()) { \
HConstant* constant = new(zone()) HConstant( \
Unique<Object>::CreateImmovable(isolate()->factory()->name##_value()), \
Unique<Map>::CreateImmovable(isolate()->factory()->type##_map()), \
false, \
Representation::Tagged(), \
htype, \
true, \
boolean_value, \
false, \
ODDBALL_TYPE); \
constant->InsertAfter(entry_block()->first()); \
constant_##name##_.set(constant); \
} \
return ReinsertConstantIfNecessary(constant_##name##_.get()); \
}
DEFINE_GET_CONSTANT(Undefined, undefined, undefined, HType::Undefined(), false)
DEFINE_GET_CONSTANT(True, true, boolean, HType::Boolean(), true)
DEFINE_GET_CONSTANT(False, false, boolean, HType::Boolean(), false)
DEFINE_GET_CONSTANT(Hole, the_hole, the_hole, HType::None(), false)
DEFINE_GET_CONSTANT(Null, null, null, HType::Null(), false)
#undef DEFINE_GET_CONSTANT
#define DEFINE_IS_CONSTANT(Name, name) \
bool HGraph::IsConstant##Name(HConstant* constant) { \
return constant_##name##_.is_set() && constant == constant_##name##_.get(); \
}
DEFINE_IS_CONSTANT(Undefined, undefined)
DEFINE_IS_CONSTANT(0, 0)
DEFINE_IS_CONSTANT(1, 1)
DEFINE_IS_CONSTANT(Minus1, minus1)
DEFINE_IS_CONSTANT(True, true)
DEFINE_IS_CONSTANT(False, false)
DEFINE_IS_CONSTANT(Hole, the_hole)
DEFINE_IS_CONSTANT(Null, null)
#undef DEFINE_IS_CONSTANT
HConstant* HGraph::GetInvalidContext() {
return GetConstant(&constant_invalid_context_, 0xFFFFC0C7);
}
bool HGraph::IsStandardConstant(HConstant* constant) {
if (IsConstantUndefined(constant)) return true;
if (IsConstant0(constant)) return true;
if (IsConstant1(constant)) return true;
if (IsConstantMinus1(constant)) return true;
if (IsConstantTrue(constant)) return true;
if (IsConstantFalse(constant)) return true;
if (IsConstantHole(constant)) return true;
if (IsConstantNull(constant)) return true;
return false;
}
HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder)
: builder_(builder),
finished_(false),
did_then_(false),
did_else_(false),
did_else_if_(false),
did_and_(false),
did_or_(false),
captured_(false),
needs_compare_(true),
pending_merge_block_(false),
split_edge_merge_block_(NULL),
merge_at_join_blocks_(NULL),
normal_merge_at_join_block_count_(0),
deopt_merge_at_join_block_count_(0) {
HEnvironment* env = builder->environment();
first_true_block_ = builder->CreateBasicBlock(env->Copy());
first_false_block_ = builder->CreateBasicBlock(env->Copy());
}
HGraphBuilder::IfBuilder::IfBuilder(
HGraphBuilder* builder,
HIfContinuation* continuation)
: builder_(builder),
finished_(false),
did_then_(false),
did_else_(false),
did_else_if_(false),
did_and_(false),
did_or_(false),
captured_(false),
needs_compare_(false),
pending_merge_block_(false),
first_true_block_(NULL),
first_false_block_(NULL),
split_edge_merge_block_(NULL),
merge_at_join_blocks_(NULL),
normal_merge_at_join_block_count_(0),
deopt_merge_at_join_block_count_(0) {
continuation->Continue(&first_true_block_,
&first_false_block_);
}
HControlInstruction* HGraphBuilder::IfBuilder::AddCompare(
HControlInstruction* compare) {
ASSERT(did_then_ == did_else_);
if (did_else_) {
// Handle if-then-elseif
did_else_if_ = true;
did_else_ = false;
did_then_ = false;
did_and_ = false;
did_or_ = false;
pending_merge_block_ = false;
split_edge_merge_block_ = NULL;
HEnvironment* env = builder_->environment();
first_true_block_ = builder_->CreateBasicBlock(env->Copy());
first_false_block_ = builder_->CreateBasicBlock(env->Copy());
}
if (split_edge_merge_block_ != NULL) {
HEnvironment* env = first_false_block_->last_environment();
HBasicBlock* split_edge =
builder_->CreateBasicBlock(env->Copy());
if (did_or_) {
compare->SetSuccessorAt(0, split_edge);
compare->SetSuccessorAt(1, first_false_block_);
} else {
compare->SetSuccessorAt(0, first_true_block_);
compare->SetSuccessorAt(1, split_edge);
}
builder_->GotoNoSimulate(split_edge, split_edge_merge_block_);
} else {
compare->SetSuccessorAt(0, first_true_block_);
compare->SetSuccessorAt(1, first_false_block_);
}
builder_->FinishCurrentBlock(compare);
needs_compare_ = false;
return compare;
}
void HGraphBuilder::IfBuilder::Or() {
ASSERT(!needs_compare_);
ASSERT(!did_and_);
did_or_ = true;
HEnvironment* env = first_false_block_->last_environment();
if (split_edge_merge_block_ == NULL) {
split_edge_merge_block_ =
builder_->CreateBasicBlock(env->Copy());
builder_->GotoNoSimulate(first_true_block_, split_edge_merge_block_);
first_true_block_ = split_edge_merge_block_;
}
builder_->set_current_block(first_false_block_);
first_false_block_ = builder_->CreateBasicBlock(env->Copy());
}
void HGraphBuilder::IfBuilder::And() {
ASSERT(!needs_compare_);
ASSERT(!did_or_);
did_and_ = true;
HEnvironment* env = first_false_block_->last_environment();
if (split_edge_merge_block_ == NULL) {
split_edge_merge_block_ = builder_->CreateBasicBlock(env->Copy());
builder_->GotoNoSimulate(first_false_block_, split_edge_merge_block_);
first_false_block_ = split_edge_merge_block_;
}
builder_->set_current_block(first_true_block_);
first_true_block_ = builder_->CreateBasicBlock(env->Copy());
}
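// Or() and And() implement short-circuit evaluation: successive conditions
// share split_edge_merge_block_, which collects the early-exit edges (the
// true edges for ||, the false edges for &&), so each additional condition
// adds only one more compare and branch to the graph.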
void HGraphBuilder::IfBuilder::CaptureContinuation(
HIfContinuation* continuation) {
ASSERT(!did_else_if_);
ASSERT(!finished_);
ASSERT(!captured_);
HBasicBlock* true_block = NULL;
HBasicBlock* false_block = NULL;
Finish(&true_block, &false_block);
ASSERT(true_block != NULL);
ASSERT(false_block != NULL);
continuation->Capture(true_block, false_block);
captured_ = true;
builder_->set_current_block(NULL);
End();
}
void HGraphBuilder::IfBuilder::JoinContinuation(HIfContinuation* continuation) {
ASSERT(!did_else_if_);
ASSERT(!finished_);
ASSERT(!captured_);
HBasicBlock* true_block = NULL;
HBasicBlock* false_block = NULL;
Finish(&true_block, &false_block);
merge_at_join_blocks_ = NULL;
if (true_block != NULL && !true_block->IsFinished()) {
ASSERT(continuation->IsTrueReachable());
builder_->GotoNoSimulate(true_block, continuation->true_branch());
}
if (false_block != NULL && !false_block->IsFinished()) {
ASSERT(continuation->IsFalseReachable());
builder_->GotoNoSimulate(false_block, continuation->false_branch());
}
captured_ = true;
End();
}
void HGraphBuilder::IfBuilder::Then() {
ASSERT(!captured_);
ASSERT(!finished_);
did_then_ = true;
if (needs_compare_) {
    // Handle ifs without any expressions; they jump directly to the "else"
// branch. However, we must pretend that the "then" branch is reachable,
// so that the graph builder visits it and sees any live range extending
// constructs within it.
HConstant* constant_false = builder_->graph()->GetConstantFalse();
ToBooleanStub::Types boolean_type = ToBooleanStub::Types();
boolean_type.Add(ToBooleanStub::BOOLEAN);
HBranch* branch = builder()->New<HBranch>(
constant_false, boolean_type, first_true_block_, first_false_block_);
builder_->FinishCurrentBlock(branch);
}
builder_->set_current_block(first_true_block_);
pending_merge_block_ = true;
}
void HGraphBuilder::IfBuilder::Else() {
ASSERT(did_then_);
ASSERT(!captured_);
ASSERT(!finished_);
AddMergeAtJoinBlock(false);
builder_->set_current_block(first_false_block_);
pending_merge_block_ = true;
did_else_ = true;
}
void HGraphBuilder::IfBuilder::Deopt(const char* reason) {
ASSERT(did_then_);
builder_->Add<HDeoptimize>(reason, Deoptimizer::EAGER);
AddMergeAtJoinBlock(true);
}
void HGraphBuilder::IfBuilder::Return(HValue* value) {
HValue* parameter_count = builder_->graph()->GetConstantMinus1();
builder_->FinishExitCurrentBlock(
builder_->New<HReturn>(value, parameter_count));
AddMergeAtJoinBlock(false);
}
void HGraphBuilder::IfBuilder::AddMergeAtJoinBlock(bool deopt) {
if (!pending_merge_block_) return;
HBasicBlock* block = builder_->current_block();
ASSERT(block == NULL || !block->IsFinished());
MergeAtJoinBlock* record =
new(builder_->zone()) MergeAtJoinBlock(block, deopt,
merge_at_join_blocks_);
merge_at_join_blocks_ = record;
if (block != NULL) {
ASSERT(block->end() == NULL);
    if (deopt) {
      deopt_merge_at_join_block_count_++;
    } else {
      normal_merge_at_join_block_count_++;
    }
}
builder_->set_current_block(NULL);
pending_merge_block_ = false;
}
void HGraphBuilder::IfBuilder::Finish() {
ASSERT(!finished_);
if (!did_then_) {
Then();
}
AddMergeAtJoinBlock(false);
if (!did_else_) {
Else();
AddMergeAtJoinBlock(false);
}
finished_ = true;
}
void HGraphBuilder::IfBuilder::Finish(HBasicBlock** then_continuation,
HBasicBlock** else_continuation) {
Finish();
MergeAtJoinBlock* else_record = merge_at_join_blocks_;
if (else_continuation != NULL) {
*else_continuation = else_record->block_;
}
MergeAtJoinBlock* then_record = else_record->next_;
if (then_continuation != NULL) {
*then_continuation = then_record->block_;
}
ASSERT(then_record->next_ == NULL);
}
void HGraphBuilder::IfBuilder::End() {
if (captured_) return;
Finish();
int total_merged_blocks = normal_merge_at_join_block_count_ +
deopt_merge_at_join_block_count_;
ASSERT(total_merged_blocks >= 1);
HBasicBlock* merge_block = total_merged_blocks == 1
? NULL : builder_->graph()->CreateBasicBlock();
// Merge non-deopt blocks first to ensure environment has right size for
// padding.
MergeAtJoinBlock* current = merge_at_join_blocks_;
while (current != NULL) {
if (!current->deopt_ && current->block_ != NULL) {
// If there is only one block that makes it through to the end of the
      // if, then just set it as the current block and continue rather than
// creating an unnecessary merge block.
if (total_merged_blocks == 1) {
builder_->set_current_block(current->block_);
return;
}
builder_->GotoNoSimulate(current->block_, merge_block);
}
current = current->next_;
}
// Merge deopt blocks, padding when necessary.
current = merge_at_join_blocks_;
while (current != NULL) {
if (current->deopt_ && current->block_ != NULL) {
current->block_->FinishExit(
HAbnormalExit::New(builder_->zone(), NULL),
HSourcePosition::Unknown());
}
current = current->next_;
}
builder_->set_current_block(merge_block);
}
HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder,
HValue* context,
LoopBuilder::Direction direction)
: builder_(builder),
context_(context),
direction_(direction),
finished_(false) {
header_block_ = builder->CreateLoopHeaderBlock();
body_block_ = NULL;
exit_block_ = NULL;
exit_trampoline_block_ = NULL;
increment_amount_ = builder_->graph()->GetConstant1();
}
HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder,
HValue* context,
LoopBuilder::Direction direction,
HValue* increment_amount)
: builder_(builder),
context_(context),
direction_(direction),
finished_(false) {
header_block_ = builder->CreateLoopHeaderBlock();
body_block_ = NULL;
exit_block_ = NULL;
exit_trampoline_block_ = NULL;
increment_amount_ = increment_amount;
}
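// Illustrative LoopBuilder usage, mirroring call sites such as
// BuildCopySeqStringChars below; "length" stands for any loop bound:
//
//   LoopBuilder loop(this, context(), LoopBuilder::kPostIncrement);
//   HValue* index = loop.BeginBody(graph()->GetConstant0(), length,
//                                  Token::LT);
//   ... emit the loop body; loop.Break() exits early ...
//   loop.EndBody();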
HValue* HGraphBuilder::LoopBuilder::BeginBody(
HValue* initial,
HValue* terminating,
Token::Value token) {
HEnvironment* env = builder_->environment();
phi_ = header_block_->AddNewPhi(env->values()->length());
phi_->AddInput(initial);
env->Push(initial);
builder_->GotoNoSimulate(header_block_);
HEnvironment* body_env = env->Copy();
HEnvironment* exit_env = env->Copy();
// Remove the phi from the expression stack
body_env->Pop();
exit_env->Pop();
body_block_ = builder_->CreateBasicBlock(body_env);
exit_block_ = builder_->CreateBasicBlock(exit_env);
builder_->set_current_block(header_block_);
env->Pop();
builder_->FinishCurrentBlock(builder_->New<HCompareNumericAndBranch>(
phi_, terminating, token, body_block_, exit_block_));
builder_->set_current_block(body_block_);
if (direction_ == kPreIncrement || direction_ == kPreDecrement) {
HValue* one = builder_->graph()->GetConstant1();
if (direction_ == kPreIncrement) {
increment_ = HAdd::New(zone(), context_, phi_, one);
} else {
increment_ = HSub::New(zone(), context_, phi_, one);
}
increment_->ClearFlag(HValue::kCanOverflow);
builder_->AddInstruction(increment_);
return increment_;
} else {
return phi_;
}
}
void HGraphBuilder::LoopBuilder::Break() {
if (exit_trampoline_block_ == NULL) {
    // It's the first time we've seen a break.
HEnvironment* env = exit_block_->last_environment()->Copy();
exit_trampoline_block_ = builder_->CreateBasicBlock(env);
builder_->GotoNoSimulate(exit_block_, exit_trampoline_block_);
}
builder_->GotoNoSimulate(exit_trampoline_block_);
builder_->set_current_block(NULL);
}
void HGraphBuilder::LoopBuilder::EndBody() {
ASSERT(!finished_);
if (direction_ == kPostIncrement || direction_ == kPostDecrement) {
if (direction_ == kPostIncrement) {
increment_ = HAdd::New(zone(), context_, phi_, increment_amount_);
} else {
increment_ = HSub::New(zone(), context_, phi_, increment_amount_);
}
increment_->ClearFlag(HValue::kCanOverflow);
builder_->AddInstruction(increment_);
}
// Push the new increment value on the expression stack to merge into the phi.
builder_->environment()->Push(increment_);
HBasicBlock* last_block = builder_->current_block();
builder_->GotoNoSimulate(last_block, header_block_);
header_block_->loop_information()->RegisterBackEdge(last_block);
if (exit_trampoline_block_ != NULL) {
builder_->set_current_block(exit_trampoline_block_);
} else {
builder_->set_current_block(exit_block_);
}
finished_ = true;
}
HGraph* HGraphBuilder::CreateGraph() {
graph_ = new(zone()) HGraph(info_);
if (FLAG_hydrogen_stats) isolate()->GetHStatistics()->Initialize(info_);
CompilationPhase phase("H_Block building", info_);
set_current_block(graph()->entry_block());
if (!BuildGraph()) return NULL;
graph()->FinalizeUniqueness();
return graph_;
}
HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
ASSERT(current_block() != NULL);
ASSERT(!FLAG_hydrogen_track_positions ||
!position_.IsUnknown() ||
!info_->IsOptimizing());
current_block()->AddInstruction(instr, source_position());
if (graph()->IsInsideNoSideEffectsScope()) {
instr->SetFlag(HValue::kHasNoObservableSideEffects);
}
return instr;
}
void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) {
ASSERT(!FLAG_hydrogen_track_positions ||
!info_->IsOptimizing() ||
!position_.IsUnknown());
current_block()->Finish(last, source_position());
if (last->IsReturn() || last->IsAbnormalExit()) {
set_current_block(NULL);
}
}
void HGraphBuilder::FinishExitCurrentBlock(HControlInstruction* instruction) {
ASSERT(!FLAG_hydrogen_track_positions || !info_->IsOptimizing() ||
!position_.IsUnknown());
current_block()->FinishExit(instruction, source_position());
if (instruction->IsReturn() || instruction->IsAbnormalExit()) {
set_current_block(NULL);
}
}
void HGraphBuilder::AddIncrementCounter(StatsCounter* counter) {
if (FLAG_native_code_counters && counter->Enabled()) {
HValue* reference = Add<HConstant>(ExternalReference(counter));
HValue* old_value = Add<HLoadNamedField>(
reference, static_cast<HValue*>(NULL), HObjectAccess::ForCounter());
HValue* new_value = AddUncasted<HAdd>(old_value, graph()->GetConstant1());
new_value->ClearFlag(HValue::kCanOverflow); // Ignore counter overflow
Add<HStoreNamedField>(reference, HObjectAccess::ForCounter(),
new_value, STORE_TO_INITIALIZED_ENTRY);
}
}
void HGraphBuilder::AddSimulate(BailoutId id,
RemovableSimulate removable) {
ASSERT(current_block() != NULL);
ASSERT(!graph()->IsInsideNoSideEffectsScope());
current_block()->AddNewSimulate(id, source_position(), removable);
}
HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) {
HBasicBlock* b = graph()->CreateBasicBlock();
b->SetInitialEnvironment(env);
return b;
}
HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() {
HBasicBlock* header = graph()->CreateBasicBlock();
HEnvironment* entry_env = environment()->CopyAsLoopHeader(header);
header->SetInitialEnvironment(entry_env);
header->AttachLoopInformation();
return header;
}
HValue* HGraphBuilder::BuildGetElementsKind(HValue* object) {
HValue* map = Add<HLoadNamedField>(object, static_cast<HValue*>(NULL),
HObjectAccess::ForMap());
HValue* bit_field2 = Add<HLoadNamedField>(map, static_cast<HValue*>(NULL),
HObjectAccess::ForMapBitField2());
return BuildDecodeField<Map::ElementsKindBits>(bit_field2);
}
HValue* HGraphBuilder::BuildCheckHeapObject(HValue* obj) {
if (obj->type().IsHeapObject()) return obj;
return Add<HCheckHeapObject>(obj);
}
void HGraphBuilder::FinishExitWithHardDeoptimization(const char* reason) {
Add<HDeoptimize>(reason, Deoptimizer::EAGER);
FinishExitCurrentBlock(New<HAbnormalExit>());
}
HValue* HGraphBuilder::BuildCheckString(HValue* string) {
if (!string->type().IsString()) {
ASSERT(!string->IsConstant() ||
!HConstant::cast(string)->HasStringValue());
BuildCheckHeapObject(string);
return Add<HCheckInstanceType>(string, HCheckInstanceType::IS_STRING);
}
return string;
}
HValue* HGraphBuilder::BuildWrapReceiver(HValue* object, HValue* function) {
if (object->type().IsJSObject()) return object;
if (function->IsConstant() &&
HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
Handle<JSFunction> f = Handle<JSFunction>::cast(
HConstant::cast(function)->handle(isolate()));
SharedFunctionInfo* shared = f->shared();
if (shared->strict_mode() == STRICT || shared->native()) return object;
}
return Add<HWrapReceiver>(object, function);
}
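// Grow-on-store support: a keyed store may target key >= length for holey
// kinds, or exactly key == length (an append) for packed kinds. In either
// case, check whether the key also exceeds the current backing store
// capacity; if so, verify that it stays within JSObject::kMaxGap of that
// capacity, grow the elements, and for JSArrays bump the length field to
// key + 1.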
HValue* HGraphBuilder::BuildCheckForCapacityGrow(
HValue* object,
HValue* elements,
ElementsKind kind,
HValue* length,
HValue* key,
bool is_js_array,
PropertyAccessType access_type) {
IfBuilder length_checker(this);
Token::Value token = IsHoleyElementsKind(kind) ? Token::GTE : Token::EQ;
length_checker.If<HCompareNumericAndBranch>(key, length, token);
length_checker.Then();
HValue* current_capacity = AddLoadFixedArrayLength(elements);
IfBuilder capacity_checker(this);
capacity_checker.If<HCompareNumericAndBranch>(key, current_capacity,
Token::GTE);
capacity_checker.Then();
HValue* max_gap = Add<HConstant>(static_cast<int32_t>(JSObject::kMaxGap));
HValue* max_capacity = AddUncasted<HAdd>(current_capacity, max_gap);
Add<HBoundsCheck>(key, max_capacity);
HValue* new_capacity = BuildNewElementsCapacity(key);
HValue* new_elements = BuildGrowElementsCapacity(object, elements,
kind, kind, length,
new_capacity);
environment()->Push(new_elements);
capacity_checker.Else();
environment()->Push(elements);
capacity_checker.End();
if (is_js_array) {
HValue* new_length = AddUncasted<HAdd>(key, graph_->GetConstant1());
new_length->ClearFlag(HValue::kCanOverflow);
Add<HStoreNamedField>(object, HObjectAccess::ForArrayLength(kind),
new_length);
}
if (access_type == STORE && kind == FAST_SMI_ELEMENTS) {
HValue* checked_elements = environment()->Top();
// Write zero to ensure that the new element is initialized with some smi.
Add<HStoreKeyed>(checked_elements, key, graph()->GetConstant0(), kind);
}
length_checker.Else();
Add<HBoundsCheck>(key, length);
environment()->Push(elements);
length_checker.End();
return environment()->Pop();
}
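// Copy-on-write handling: if the backing store still has the COW fixed
// array map it may be shared with other arrays, so it must be copied (at
// its current capacity) before any element can be overwritten.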
HValue* HGraphBuilder::BuildCopyElementsOnWrite(HValue* object,
HValue* elements,
ElementsKind kind,
HValue* length) {
Factory* factory = isolate()->factory();
IfBuilder cow_checker(this);
cow_checker.If<HCompareMap>(elements, factory->fixed_cow_array_map());
cow_checker.Then();
HValue* capacity = AddLoadFixedArrayLength(elements);
HValue* new_elements = BuildGrowElementsCapacity(object, elements, kind,
kind, length, capacity);
environment()->Push(new_elements);
cow_checker.Else();
environment()->Push(elements);
cow_checker.End();
return environment()->Pop();
}
void HGraphBuilder::BuildTransitionElementsKind(HValue* object,
HValue* map,
ElementsKind from_kind,
ElementsKind to_kind,
bool is_jsarray) {
ASSERT(!IsFastHoleyElementsKind(from_kind) ||
IsFastHoleyElementsKind(to_kind));
if (AllocationSite::GetMode(from_kind, to_kind) == TRACK_ALLOCATION_SITE) {
Add<HTrapAllocationMemento>(object);
}
if (!IsSimpleMapChangeTransition(from_kind, to_kind)) {
HInstruction* elements = AddLoadElements(object);
HInstruction* empty_fixed_array = Add<HConstant>(
isolate()->factory()->empty_fixed_array());
IfBuilder if_builder(this);
if_builder.IfNot<HCompareObjectEqAndBranch>(elements, empty_fixed_array);
if_builder.Then();
HInstruction* elements_length = AddLoadFixedArrayLength(elements);
HInstruction* array_length = is_jsarray
? Add<HLoadNamedField>(object, static_cast<HValue*>(NULL),
HObjectAccess::ForArrayLength(from_kind))
: elements_length;
BuildGrowElementsCapacity(object, elements, from_kind, to_kind,
array_length, elements_length);
if_builder.End();
}
Add<HStoreNamedField>(object, HObjectAccess::ForMap(), map);
}
void HGraphBuilder::BuildJSObjectCheck(HValue* receiver,
int bit_field_mask) {
// Check that the object isn't a smi.
Add<HCheckHeapObject>(receiver);
// Get the map of the receiver.
HValue* map = Add<HLoadNamedField>(receiver, static_cast<HValue*>(NULL),
HObjectAccess::ForMap());
  // Check the instance type and whether an access check is needed; both
  // checks can be done with a single load, since the two bytes are adjacent
  // in the map.
HObjectAccess access(HObjectAccess::ForMapInstanceTypeAndBitField());
HValue* instance_type_and_bit_field =
Add<HLoadNamedField>(map, static_cast<HValue*>(NULL), access);
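  // The following folds both checks into one unsigned range check: after
  // masking, the 16-bit value holds the instance type in the low byte and
  // the selected bit field bits in the high byte. Subtracting JS_OBJECT_TYPE
  // and bounds-checking against 0x100 - JS_OBJECT_TYPE succeeds only if the
  // instance type is at least JS_OBJECT_TYPE and all masked bit field bits
  // are zero.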
HValue* mask = Add<HConstant>(0x00FF | (bit_field_mask << 8));
HValue* and_result = AddUncasted<HBitwise>(Token::BIT_AND,
instance_type_and_bit_field,
mask);
HValue* sub_result = AddUncasted<HSub>(and_result,
Add<HConstant>(JS_OBJECT_TYPE));
Add<HBoundsCheck>(sub_result, Add<HConstant>(0x100 - JS_OBJECT_TYPE));
}
void HGraphBuilder::BuildKeyedIndexCheck(HValue* key,
HIfContinuation* join_continuation) {
  // The ordering of the ifs below may look backwards, but it is necessary:
  // every path must guarantee that the if-true branch of the continuation
  // yields a smi element index and the if-false branch yields either a
  // symbol or a unique string key. All other object types cause a deopt
  // that falls back to the runtime.
IfBuilder key_smi_if(this);
key_smi_if.If<HIsSmiAndBranch>(key);
key_smi_if.Then();
{
Push(key); // Nothing to do, just continue to true of continuation.
}
key_smi_if.Else();
{
HValue* map = Add<HLoadNamedField>(key, static_cast<HValue*>(NULL),
HObjectAccess::ForMap());
HValue* instance_type =
Add<HLoadNamedField>(map, static_cast<HValue*>(NULL),
HObjectAccess::ForMapInstanceType());
// Non-unique string, check for a string with a hash code that is actually
// an index.
STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
IfBuilder not_string_or_name_if(this);
not_string_or_name_if.If<HCompareNumericAndBranch>(
instance_type,
Add<HConstant>(LAST_UNIQUE_NAME_TYPE),
Token::GT);
not_string_or_name_if.Then();
{
// Non-smi, non-Name, non-String: Try to convert to smi in case of
// HeapNumber.
// TODO(danno): This could call some variant of ToString
Push(AddUncasted<HForceRepresentation>(key, Representation::Smi()));
}
not_string_or_name_if.Else();
{
// String or Name: check explicitly for Name, they can short-circuit
// directly to unique non-index key path.
IfBuilder not_symbol_if(this);
not_symbol_if.If<HCompareNumericAndBranch>(
instance_type,
Add<HConstant>(SYMBOL_TYPE),
Token::NE);
not_symbol_if.Then();
{
// String: check whether the String is a String of an index. If it is,
// extract the index value from the hash.
HValue* hash =
Add<HLoadNamedField>(key, static_cast<HValue*>(NULL),
HObjectAccess::ForNameHashField());
HValue* not_index_mask = Add<HConstant>(static_cast<int>(
String::kContainsCachedArrayIndexMask));
HValue* not_index_test = AddUncasted<HBitwise>(
Token::BIT_AND, hash, not_index_mask);
IfBuilder string_index_if(this);
string_index_if.If<HCompareNumericAndBranch>(not_index_test,
graph()->GetConstant0(),
Token::EQ);
string_index_if.Then();
{
// String with index in hash: extract string and merge to index path.
Push(BuildDecodeField<String::ArrayIndexValueBits>(hash));
}
string_index_if.Else();
{
// Key is a non-index String, check for uniqueness/internalization. If
// it's not, deopt.
HValue* not_internalized_bit = AddUncasted<HBitwise>(
Token::BIT_AND,
instance_type,
Add<HConstant>(static_cast<int>(kIsNotInternalizedMask)));
DeoptimizeIf<HCompareNumericAndBranch>(
not_internalized_bit,
graph()->GetConstant0(),
Token::NE,
"BuildKeyedIndexCheck: string isn't internalized");
        // Key is guaranteed to be a unique string.
Push(key);
}
string_index_if.JoinContinuation(join_continuation);
}
not_symbol_if.Else();
{
Push(key); // Key is symbol
}
not_symbol_if.JoinContinuation(join_continuation);
}
not_string_or_name_if.JoinContinuation(join_continuation);
}
key_smi_if.JoinContinuation(join_continuation);
}
void HGraphBuilder::BuildNonGlobalObjectCheck(HValue* receiver) {
  // Get the instance type of the receiver, and make sure that it is
// not one of the global object types.
HValue* map = Add<HLoadNamedField>(receiver, static_cast<HValue*>(NULL),
HObjectAccess::ForMap());
HValue* instance_type =
Add<HLoadNamedField>(map, static_cast<HValue*>(NULL),
HObjectAccess::ForMapInstanceType());
STATIC_ASSERT(JS_BUILTINS_OBJECT_TYPE == JS_GLOBAL_OBJECT_TYPE + 1);
HValue* min_global_type = Add<HConstant>(JS_GLOBAL_OBJECT_TYPE);
HValue* max_global_type = Add<HConstant>(JS_BUILTINS_OBJECT_TYPE);
IfBuilder if_global_object(this);
if_global_object.If<HCompareNumericAndBranch>(instance_type,
max_global_type,
Token::LTE);
if_global_object.And();
if_global_object.If<HCompareNumericAndBranch>(instance_type,
min_global_type,
Token::GTE);
if_global_object.ThenDeopt("receiver was a global object");
if_global_object.End();
}
void HGraphBuilder::BuildTestForDictionaryProperties(
HValue* object,
HIfContinuation* continuation) {
HValue* properties = Add<HLoadNamedField>(
object, static_cast<HValue*>(NULL),
HObjectAccess::ForPropertiesPointer());
HValue* properties_map =
Add<HLoadNamedField>(properties, static_cast<HValue*>(NULL),
HObjectAccess::ForMap());
HValue* hash_map = Add<HLoadRoot>(Heap::kHashTableMapRootIndex);
IfBuilder builder(this);
builder.If<HCompareObjectEqAndBranch>(properties_map, hash_map);
builder.CaptureContinuation(continuation);
}
HValue* HGraphBuilder::BuildKeyedLookupCacheHash(HValue* object,
HValue* key) {
// Load the map of the receiver, compute the keyed lookup cache hash
// based on 32 bits of the map pointer and the string hash.
HValue* object_map =
Add<HLoadNamedField>(object, static_cast<HValue*>(NULL),
HObjectAccess::ForMapAsInteger32());
HValue* shifted_map = AddUncasted<HShr>(
object_map, Add<HConstant>(KeyedLookupCache::kMapHashShift));
HValue* string_hash =
Add<HLoadNamedField>(key, static_cast<HValue*>(NULL),
HObjectAccess::ForStringHashField());
HValue* shifted_hash = AddUncasted<HShr>(
string_hash, Add<HConstant>(String::kHashShift));
HValue* xor_result = AddUncasted<HBitwise>(Token::BIT_XOR, shifted_map,
shifted_hash);
int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
return AddUncasted<HBitwise>(Token::BIT_AND, xor_result,
Add<HConstant>(mask));
}
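// Inlined lookup in a SeededNumberDictionary. The recursion below unrolls
// up to kNumberDictionaryProbes probe attempts; each probe offset comes
// from GetProbeOffset and is wrapped with the capacity mask. Within an
// entry of kEntrySize words, the key lives at offset 0, the value at
// offset 1 and the property details at offset 2 (see the base_offset
// arithmetic below). If every probe misses, the lookup deopts.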
HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoadHelper(
HValue* elements,
HValue* key,
HValue* hash,
HValue* mask,
int current_probe) {
if (current_probe == kNumberDictionaryProbes) {
return NULL;
}
int32_t offset = SeededNumberDictionary::GetProbeOffset(current_probe);
HValue* raw_index = (current_probe == 0)
? hash
: AddUncasted<HAdd>(hash, Add<HConstant>(offset));
raw_index = AddUncasted<HBitwise>(Token::BIT_AND, raw_index, mask);
int32_t entry_size = SeededNumberDictionary::kEntrySize;
raw_index = AddUncasted<HMul>(raw_index, Add<HConstant>(entry_size));
raw_index->ClearFlag(HValue::kCanOverflow);
int32_t base_offset = SeededNumberDictionary::kElementsStartIndex;
HValue* key_index = AddUncasted<HAdd>(raw_index, Add<HConstant>(base_offset));
key_index->ClearFlag(HValue::kCanOverflow);
HValue* candidate_key = Add<HLoadKeyed>(elements, key_index,
static_cast<HValue*>(NULL),
FAST_ELEMENTS);
IfBuilder key_compare(this);
key_compare.IfNot<HCompareObjectEqAndBranch>(key, candidate_key);
key_compare.Then();
{
// Key at the current probe doesn't match, try at the next probe.
HValue* result = BuildUncheckedDictionaryElementLoadHelper(
elements, key, hash, mask, current_probe + 1);
if (result == NULL) {
key_compare.Deopt("probes exhausted in keyed load dictionary lookup");
result = graph()->GetConstantUndefined();
} else {
Push(result);
}
}
key_compare.Else();
{
// Key at current probe matches. Details must be zero, otherwise the
// dictionary element requires special handling.
HValue* details_index = AddUncasted<HAdd>(
raw_index, Add<HConstant>(base_offset + 2));
details_index->ClearFlag(HValue::kCanOverflow);
HValue* details = Add<HLoadKeyed>(elements, details_index,
static_cast<HValue*>(NULL),
FAST_ELEMENTS);
IfBuilder details_compare(this);
details_compare.If<HCompareNumericAndBranch>(details,
graph()->GetConstant0(),
Token::NE);
details_compare.ThenDeopt("keyed load dictionary element not fast case");
details_compare.Else();
{
// Key matches and details are zero --> fast case. Load and return the
// value.
HValue* result_index = AddUncasted<HAdd>(
raw_index, Add<HConstant>(base_offset + 1));
result_index->ClearFlag(HValue::kCanOverflow);
Push(Add<HLoadKeyed>(elements, result_index,
static_cast<HValue*>(NULL),
FAST_ELEMENTS));
}
details_compare.End();
}
key_compare.End();
return Pop();
}
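// Full-avalanche 32-bit integer hash of the given index, seeded with the
// heap's hash seed. The bit-mixing steps (annotated inline below) are meant
// to match the runtime's seeded integer hash, so that probe positions
// computed here agree with the entries the runtime stored.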
HValue* HGraphBuilder::BuildElementIndexHash(HValue* index) {
int32_t seed_value = static_cast<uint32_t>(isolate()->heap()->HashSeed());
HValue* seed = Add<HConstant>(seed_value);
HValue* hash = AddUncasted<HBitwise>(Token::BIT_XOR, index, seed);
// hash = ~hash + (hash << 15);
HValue* shifted_hash = AddUncasted<HShl>(hash, Add<HConstant>(15));
HValue* not_hash = AddUncasted<HBitwise>(Token::BIT_XOR, hash,
graph()->GetConstantMinus1());
hash = AddUncasted<HAdd>(shifted_hash, not_hash);
// hash = hash ^ (hash >> 12);
shifted_hash = AddUncasted<HShr>(hash, Add<HConstant>(12));
hash = AddUncasted<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
// hash = hash + (hash << 2);
shifted_hash = AddUncasted<HShl>(hash, Add<HConstant>(2));
hash = AddUncasted<HAdd>(hash, shifted_hash);
// hash = hash ^ (hash >> 4);
shifted_hash = AddUncasted<HShr>(hash, Add<HConstant>(4));
hash = AddUncasted<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
// hash = hash * 2057;
hash = AddUncasted<HMul>(hash, Add<HConstant>(2057));
hash->ClearFlag(HValue::kCanOverflow);
// hash = hash ^ (hash >> 16);
shifted_hash = AddUncasted<HShr>(hash, Add<HConstant>(16));
return AddUncasted<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
}
HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
HValue* elements,
HValue* key,
HValue* hash) {
HValue* capacity = Add<HLoadKeyed>(
elements,
Add<HConstant>(NameDictionary::kCapacityIndex),
static_cast<HValue*>(NULL),
FAST_ELEMENTS);
HValue* mask = AddUncasted<HSub>(capacity, graph()->GetConstant1());
mask->ChangeRepresentation(Representation::Integer32());
mask->ClearFlag(HValue::kCanOverflow);
return BuildUncheckedDictionaryElementLoadHelper(elements, key,
hash, mask, 0);
}
HValue* HGraphBuilder::BuildRegExpConstructResult(HValue* length,
HValue* index,
HValue* input) {
NoObservableSideEffectsScope scope(this);
HConstant* max_length = Add<HConstant>(JSObject::kInitialMaxFastElementArray);
Add<HBoundsCheck>(length, max_length);
// Generate size calculation code here in order to make it dominate
// the JSRegExpResult allocation.
ElementsKind elements_kind = FAST_ELEMENTS;
HValue* size = BuildCalculateElementsSize(elements_kind, length);
// Allocate the JSRegExpResult and the FixedArray in one step.
HValue* result = Add<HAllocate>(
Add<HConstant>(JSRegExpResult::kSize), HType::JSArray(),
NOT_TENURED, JS_ARRAY_TYPE);
// Initialize the JSRegExpResult header.
HValue* global_object = Add<HLoadNamedField>(
context(), static_cast<HValue*>(NULL),
HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
HValue* native_context = Add<HLoadNamedField>(
global_object, static_cast<HValue*>(NULL),
HObjectAccess::ForGlobalObjectNativeContext());
Add<HStoreNamedField>(
result, HObjectAccess::ForMap(),
Add<HLoadNamedField>(
native_context, static_cast<HValue*>(NULL),
HObjectAccess::ForContextSlot(Context::REGEXP_RESULT_MAP_INDEX)));
HConstant* empty_fixed_array =
Add<HConstant>(isolate()->factory()->empty_fixed_array());
Add<HStoreNamedField>(
result, HObjectAccess::ForJSArrayOffset(JSArray::kPropertiesOffset),
empty_fixed_array);
Add<HStoreNamedField>(
result, HObjectAccess::ForJSArrayOffset(JSArray::kElementsOffset),
empty_fixed_array);
Add<HStoreNamedField>(
result, HObjectAccess::ForJSArrayOffset(JSArray::kLengthOffset), length);
// Initialize the additional fields.
Add<HStoreNamedField>(
result, HObjectAccess::ForJSArrayOffset(JSRegExpResult::kIndexOffset),
index);
Add<HStoreNamedField>(
result, HObjectAccess::ForJSArrayOffset(JSRegExpResult::kInputOffset),
input);
// Allocate and initialize the elements header.
HAllocate* elements = BuildAllocateElements(elements_kind, size);
BuildInitializeElementsHeader(elements, elements_kind, length);
HConstant* size_in_bytes_upper_bound = EstablishElementsAllocationSize(
elements_kind, max_length->Integer32Value());
elements->set_size_upper_bound(size_in_bytes_upper_bound);
Add<HStoreNamedField>(
result, HObjectAccess::ForJSArrayOffset(JSArray::kElementsOffset),
elements);
// Initialize the elements contents with undefined.
BuildFillElementsWithValue(
elements, elements_kind, graph()->GetConstant0(), length,
graph()->GetConstantUndefined());
return result;
}
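// The number string cache is a FixedArray of (number, string) pairs: entry
// i occupies indices 2 * i (key) and 2 * i + 1 (value). That layout is why
// the code below derives the hash mask from half the array length and turns
// a hash into a key index with a left shift by one.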
HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
NoObservableSideEffectsScope scope(this);
// Convert constant numbers at compile time.
if (object->IsConstant() && HConstant::cast(object)->HasNumberValue()) {
Handle<Object> number = HConstant::cast(object)->handle(isolate());
Handle<String> result = isolate()->factory()->NumberToString(number);
return Add<HConstant>(result);
}
// Create a joinable continuation.
HIfContinuation found(graph()->CreateBasicBlock(),
graph()->CreateBasicBlock());
// Load the number string cache.
HValue* number_string_cache =
Add<HLoadRoot>(Heap::kNumberStringCacheRootIndex);
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
HValue* mask = AddLoadFixedArrayLength(number_string_cache);
mask->set_type(HType::Smi());
mask = AddUncasted<HSar>(mask, graph()->GetConstant1());
mask = AddUncasted<HSub>(mask, graph()->GetConstant1());
// Check whether object is a smi.
IfBuilder if_objectissmi(this);
if_objectissmi.If<HIsSmiAndBranch>(object);
if_objectissmi.Then();
{
// Compute hash for smi similar to smi_get_hash().
HValue* hash = AddUncasted<HBitwise>(Token::BIT_AND, object, mask);
// Load the key.
HValue* key_index = AddUncasted<HShl>(hash, graph()->GetConstant1());
HValue* key = Add<HLoadKeyed>(number_string_cache, key_index,
static_cast<HValue*>(NULL),
FAST_ELEMENTS, ALLOW_RETURN_HOLE);
// Check if object == key.
IfBuilder if_objectiskey(this);
if_objectiskey.If<HCompareObjectEqAndBranch>(object, key);
if_objectiskey.Then();
{
// Make the key_index available.
Push(key_index);
}
if_objectiskey.JoinContinuation(&found);
}
if_objectissmi.Else();
{
if (type->Is(Type::SignedSmall())) {
if_objectissmi.Deopt("Expected smi");
} else {
// Check if the object is a heap number.
IfBuilder if_objectisnumber(this);
HValue* objectisnumber = if_objectisnumber.If<HCompareMap>(
object, isolate()->factory()->heap_number_map());
if_objectisnumber.Then();
{
// Compute hash for heap number similar to double_get_hash().
HValue* low = Add<HLoadNamedField>(
object, objectisnumber,
HObjectAccess::ForHeapNumberValueLowestBits());
HValue* high = Add<HLoadNamedField>(
object, objectisnumber,
HObjectAccess::ForHeapNumberValueHighestBits());
HValue* hash = AddUncasted<HBitwise>(Token::BIT_XOR, low, high);
hash = AddUncasted<HBitwise>(Token::BIT_AND, hash, mask);
// Load the key.
HValue* key_index = AddUncasted<HShl>(hash, graph()->GetConstant1());
HValue* key = Add<HLoadKeyed>(number_string_cache, key_index,
static_cast<HValue*>(NULL),
FAST_ELEMENTS, ALLOW_RETURN_HOLE);
// Check if the key is a heap number and compare it with the object.
IfBuilder if_keyisnotsmi(this);
HValue* keyisnotsmi = if_keyisnotsmi.IfNot<HIsSmiAndBranch>(key);
if_keyisnotsmi.Then();
{
IfBuilder if_keyisheapnumber(this);
if_keyisheapnumber.If<HCompareMap>(
key, isolate()->factory()->heap_number_map());
if_keyisheapnumber.Then();
{
// Check if values of key and object match.
IfBuilder if_keyeqobject(this);
if_keyeqobject.If<HCompareNumericAndBranch>(
Add<HLoadNamedField>(key, keyisnotsmi,
HObjectAccess::ForHeapNumberValue()),
Add<HLoadNamedField>(object, objectisnumber,
HObjectAccess::ForHeapNumberValue()),
Token::EQ);
if_keyeqobject.Then();
{
// Make the key_index available.
Push(key_index);
}
if_keyeqobject.JoinContinuation(&found);
}
if_keyisheapnumber.JoinContinuation(&found);
}
if_keyisnotsmi.JoinContinuation(&found);
}
if_objectisnumber.Else();
{
if (type->Is(Type::Number())) {
if_objectisnumber.Deopt("Expected heap number");
}
}
if_objectisnumber.JoinContinuation(&found);
}
}
if_objectissmi.JoinContinuation(&found);
// Check for cache hit.
IfBuilder if_found(this, &found);
if_found.Then();
{
// Count number to string operation in native code.
AddIncrementCounter(isolate()->counters()->number_to_string_native());
// Load the value in case of cache hit.
HValue* key_index = Pop();
HValue* value_index = AddUncasted<HAdd>(key_index, graph()->GetConstant1());
Push(Add<HLoadKeyed>(number_string_cache, value_index,
static_cast<HValue*>(NULL),
FAST_ELEMENTS, ALLOW_RETURN_HOLE));
}
if_found.Else();
{
// Cache miss, fallback to runtime.
Add<HPushArguments>(object);
Push(Add<HCallRuntime>(
isolate()->factory()->empty_string(),
Runtime::FunctionForId(Runtime::kHiddenNumberToStringSkipCache),
1));
}
if_found.End();
return Pop();
}
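// Shared allocation helper: when the allocation mode requests allocation
// mementos, the memento is laid out directly behind the object, so the
// requested size is bumped by AllocationMemento::kSize and the memento is
// initialized at offset object_size afterwards.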
HAllocate* HGraphBuilder::BuildAllocate(
HValue* object_size,
HType type,
InstanceType instance_type,
HAllocationMode allocation_mode) {
// Compute the effective allocation size.
HValue* size = object_size;
if (allocation_mode.CreateAllocationMementos()) {
size = AddUncasted<HAdd>(size, Add<HConstant>(AllocationMemento::kSize));
size->ClearFlag(HValue::kCanOverflow);
}
// Perform the actual allocation.
HAllocate* object = Add<HAllocate>(
size, type, allocation_mode.GetPretenureMode(),
instance_type, allocation_mode.feedback_site());
// Setup the allocation memento.
if (allocation_mode.CreateAllocationMementos()) {
BuildCreateAllocationMemento(
object, object_size, allocation_mode.current_site());
}
return object;
}
HValue* HGraphBuilder::BuildAddStringLengths(HValue* left_length,
HValue* right_length) {
// Compute the combined string length and check against max string length.
HValue* length = AddUncasted<HAdd>(left_length, right_length);
  // Check that length <= kMaxLength, i.e. length < kMaxLength + 1.
HValue* max_length = Add<HConstant>(String::kMaxLength + 1);
Add<HBoundsCheck>(length, max_length);
return length;
}
HValue* HGraphBuilder::BuildCreateConsString(
HValue* length,
HValue* left,
HValue* right,
HAllocationMode allocation_mode) {
// Determine the string instance types.
HInstruction* left_instance_type = AddLoadStringInstanceType(left);
HInstruction* right_instance_type = AddLoadStringInstanceType(right);
// Allocate the cons string object. HAllocate does not care whether we
// pass CONS_STRING_TYPE or CONS_ASCII_STRING_TYPE here, so we just use
// CONS_STRING_TYPE here. Below we decide whether the cons string is
// one-byte or two-byte and set the appropriate map.
ASSERT(HAllocate::CompatibleInstanceTypes(CONS_STRING_TYPE,
CONS_ASCII_STRING_TYPE));
HAllocate* result = BuildAllocate(Add<HConstant>(ConsString::kSize),
HType::String(), CONS_STRING_TYPE,
allocation_mode);
// Compute intersection and difference of instance types.
HValue* anded_instance_types = AddUncasted<HBitwise>(
Token::BIT_AND, left_instance_type, right_instance_type);
HValue* xored_instance_types = AddUncasted<HBitwise>(
Token::BIT_XOR, left_instance_type, right_instance_type);
// We create a one-byte cons string if
// 1. both strings are one-byte, or
// 2. at least one of the strings is two-byte, but happens to contain only
// one-byte characters.
// To do this, we check
// 1. if both strings are one-byte, or if the one-byte data hint is set in
// both strings, or
// 2. if one of the strings has the one-byte data hint set and the other
// string is one-byte.
IfBuilder if_onebyte(this);
STATIC_ASSERT(kOneByteStringTag != 0);
STATIC_ASSERT(kOneByteDataHintMask != 0);
if_onebyte.If<HCompareNumericAndBranch>(
AddUncasted<HBitwise>(
Token::BIT_AND, anded_instance_types,
Add<HConstant>(static_cast<int32_t>(
kStringEncodingMask | kOneByteDataHintMask))),
graph()->GetConstant0(), Token::NE);
if_onebyte.Or();
STATIC_ASSERT(kOneByteStringTag != 0 &&
kOneByteDataHintTag != 0 &&
kOneByteDataHintTag != kOneByteStringTag);
if_onebyte.If<HCompareNumericAndBranch>(
AddUncasted<HBitwise>(
Token::BIT_AND, xored_instance_types,
Add<HConstant>(static_cast<int32_t>(
kOneByteStringTag | kOneByteDataHintTag))),
Add<HConstant>(static_cast<int32_t>(
kOneByteStringTag | kOneByteDataHintTag)), Token::EQ);
if_onebyte.Then();
{
// We can safely skip the write barrier for storing the map here.
Add<HStoreNamedField>(
result, HObjectAccess::ForMap(),
Add<HConstant>(isolate()->factory()->cons_ascii_string_map()));
}
if_onebyte.Else();
{
// We can safely skip the write barrier for storing the map here.
Add<HStoreNamedField>(
result, HObjectAccess::ForMap(),
Add<HConstant>(isolate()->factory()->cons_string_map()));
}
if_onebyte.End();
// Initialize the cons string fields.
Add<HStoreNamedField>(result, HObjectAccess::ForStringHashField(),
Add<HConstant>(String::kEmptyHashField));
Add<HStoreNamedField>(result, HObjectAccess::ForStringLength(), length);
Add<HStoreNamedField>(result, HObjectAccess::ForConsStringFirst(), left);
Add<HStoreNamedField>(result, HObjectAccess::ForConsStringSecond(), right);
// Count the native string addition.
AddIncrementCounter(isolate()->counters()->string_add_native());
return result;
}
void HGraphBuilder::BuildCopySeqStringChars(HValue* src,
HValue* src_offset,
String::Encoding src_encoding,
HValue* dst,
HValue* dst_offset,
String::Encoding dst_encoding,
HValue* length) {
ASSERT(dst_encoding != String::ONE_BYTE_ENCODING ||
src_encoding == String::ONE_BYTE_ENCODING);
LoopBuilder loop(this, context(), LoopBuilder::kPostIncrement);
HValue* index = loop.BeginBody(graph()->GetConstant0(), length, Token::LT);
{
HValue* src_index = AddUncasted<HAdd>(src_offset, index);
HValue* value =
AddUncasted<HSeqStringGetChar>(src_encoding, src, src_index);
HValue* dst_index = AddUncasted<HAdd>(dst_offset, index);
Add<HSeqStringSetChar>(dst_encoding, dst, dst_index, value);
}
loop.EndBody();
}
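// Rounds unaligned_size + header_size up to the next object-aligned size,
// i.e. computes (unaligned_size + header_size + kObjectAlignmentMask) &
// ~kObjectAlignmentMask. Folding the mask addition into the header constant
// saves an add at runtime.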
HValue* HGraphBuilder::BuildObjectSizeAlignment(
HValue* unaligned_size, int header_size) {
ASSERT((header_size & kObjectAlignmentMask) == 0);
HValue* size = AddUncasted<HAdd>(
unaligned_size, Add<HConstant>(static_cast<int32_t>(
header_size + kObjectAlignmentMask)));
size->ClearFlag(HValue::kCanOverflow);
return AddUncasted<HBitwise>(
Token::BIT_AND, size, Add<HConstant>(static_cast<int32_t>(
~kObjectAlignmentMask)));
}
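// Strategy for adding two non-empty strings: if the combined length reaches
// ConsString::kMinLength, build a cons string that merely points at both
// halves; otherwise flatten into a fresh sequential string, copying the
// characters when both inputs are sequential with the same encoding, and
// fall back to the runtime for everything else.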
HValue* HGraphBuilder::BuildUncheckedStringAdd(
HValue* left,
HValue* right,
HAllocationMode allocation_mode) {
// Determine the string lengths.
HValue* left_length = AddLoadStringLength(left);
HValue* right_length = AddLoadStringLength(right);
// Compute the combined string length.
HValue* length = BuildAddStringLengths(left_length, right_length);
// Do some manual constant folding here.
if (left_length->IsConstant()) {
HConstant* c_left_length = HConstant::cast(left_length);
ASSERT_NE(0, c_left_length->Integer32Value());
if (c_left_length->Integer32Value() + 1 >= ConsString::kMinLength) {
// The right string contains at least one character.
return BuildCreateConsString(length, left, right, allocation_mode);
}
} else if (right_length->IsConstant()) {
HConstant* c_right_length = HConstant::cast(right_length);
ASSERT_NE(0, c_right_length->Integer32Value());
if (c_right_length->Integer32Value() + 1 >= ConsString::kMinLength) {
// The left string contains at least one character.
return BuildCreateConsString(length, left, right, allocation_mode);
}
}
// Check if we should create a cons string.
IfBuilder if_createcons(this);
if_createcons.If<HCompareNumericAndBranch>(
length, Add<HConstant>(ConsString::kMinLength), Token::GTE);
if_createcons.Then();
{
// Create a cons string.
Push(BuildCreateConsString(length, left, right, allocation_mode));
}
if_createcons.Else();
{
// Determine the string instance types.
HValue* left_instance_type = AddLoadStringInstanceType(left);
HValue* right_instance_type = AddLoadStringInstanceType(right);
// Compute union and difference of instance types.
HValue* ored_instance_types = AddUncasted<HBitwise>(
Token::BIT_OR, left_instance_type, right_instance_type);
HValue* xored_instance_types = AddUncasted<HBitwise>(
Token::BIT_XOR, left_instance_type, right_instance_type);
// Check if both strings have the same encoding and both are
// sequential.
IfBuilder if_sameencodingandsequential(this);
if_sameencodingandsequential.If<HCompareNumericAndBranch>(
AddUncasted<HBitwise>(
Token::BIT_AND, xored_instance_types,
Add<HConstant>(static_cast<int32_t>(kStringEncodingMask))),
graph()->GetConstant0(), Token::EQ);
if_sameencodingandsequential.And();
STATIC_ASSERT(kSeqStringTag == 0);
if_sameencodingandsequential.If<HCompareNumericAndBranch>(
AddUncasted<HBitwise>(
Token::BIT_AND, ored_instance_types,
Add<HConstant>(static_cast<int32_t>(kStringRepresentationMask))),
graph()->GetConstant0(), Token::EQ);
if_sameencodingandsequential.Then();
{
HConstant* string_map =
Add<HConstant>(isolate()->factory()->string_map());
HConstant* ascii_string_map =
Add<HConstant>(isolate()->factory()->ascii_string_map());
// Determine map and size depending on whether result is one-byte string.
IfBuilder if_onebyte(this);
STATIC_ASSERT(kOneByteStringTag != 0);
if_onebyte.If<HCompareNumericAndBranch>(
AddUncasted<HBitwise>(
Token::BIT_AND, ored_instance_types,
Add<HConstant>(static_cast<int32_t>(kStringEncodingMask))),
graph()->GetConstant0(), Token::NE);
if_onebyte.Then();
{
// Allocate sequential one-byte string object.
Push(length);
Push(ascii_string_map);
}
if_onebyte.Else();
{
// Allocate sequential two-byte string object.
HValue* size = AddUncasted<HShl>(length, graph()->GetConstant1());
size->ClearFlag(HValue::kCanOverflow);
size->SetFlag(HValue::kUint32);
Push(size);
Push(string_map);
}
if_onebyte.End();
HValue* map = Pop();
// Calculate the number of bytes needed for the characters in the
// string while observing object alignment.
STATIC_ASSERT((SeqString::kHeaderSize & kObjectAlignmentMask) == 0);
HValue* size = BuildObjectSizeAlignment(Pop(), SeqString::kHeaderSize);
// Allocate the string object. HAllocate does not care whether we pass
// STRING_TYPE or ASCII_STRING_TYPE here, so we just use STRING_TYPE here.
HAllocate* result = BuildAllocate(
size, HType::String(), STRING_TYPE, allocation_mode);
Add<HStoreNamedField>(result, HObjectAccess::ForMap(), map);
// Initialize the string fields.
Add<HStoreNamedField>(result, HObjectAccess::ForStringHashField(),
Add<HConstant>(String::kEmptyHashField));
Add<HStoreNamedField>(result, HObjectAccess::ForStringLength(), length);
// Copy characters to the result string.
IfBuilder if_twobyte(this);
if_twobyte.If<HCompareObjectEqAndBranch>(map, string_map);
if_twobyte.Then();
{
// Copy characters from the left string.
BuildCopySeqStringChars(
left, graph()->GetConstant0(), String::TWO_BYTE_ENCODING,
result, graph()->GetConstant0(), String::TWO_BYTE_ENCODING,
left_length);
// Copy characters from the right string.
BuildCopySeqStringChars(
right, graph()->GetConstant0(), String::TWO_BYTE_ENCODING,
result, left_length, String::TWO_BYTE_ENCODING,
right_length);
}
if_twobyte.Else();
{
// Copy characters from the left string.
BuildCopySeqStringChars(
left, graph()->GetConstant0(), String::ONE_BYTE_ENCODING,
result, graph()->GetConstant0(), String::ONE_BYTE_ENCODING,
left_length);
// Copy characters from the right string.
BuildCopySeqStringChars(
right, graph()->GetConstant0(), String::ONE_BYTE_ENCODING,
result, left_length, String::ONE_BYTE_ENCODING,
right_length);
}
if_twobyte.End();
// Count the native string addition.
AddIncrementCounter(isolate()->counters()->string_add_native());
// Return the sequential string.
Push(result);
}
if_sameencodingandsequential.Else();
{
// Fallback to the runtime to add the two strings.
Add<HPushArguments>(left, right);
Push(Add<HCallRuntime>(
isolate()->factory()->empty_string(),
Runtime::FunctionForId(Runtime::kHiddenStringAdd),
2));
}
if_sameencodingandsequential.End();
}
if_createcons.End();
return Pop();
}
HValue* HGraphBuilder::BuildStringAdd(
HValue* left,
HValue* right,
HAllocationMode allocation_mode) {
NoObservableSideEffectsScope no_effects(this);
// Determine string lengths.
HValue* left_length = AddLoadStringLength(left);
HValue* right_length = AddLoadStringLength(right);
// Check if left string is empty.
IfBuilder if_leftempty(this);
if_leftempty.If<HCompareNumericAndBranch>(
left_length, graph()->GetConstant0(), Token::EQ);
if_leftempty.Then();
{
// Count the native string addition.
AddIncrementCounter(isolate()->counters()->string_add_native());
// Just return the right string.
Push(right);
}
if_leftempty.Else();
{
// Check if right string is empty.
IfBuilder if_rightempty(this);
if_rightempty.If<HCompareNumericAndBranch>(
right_length, graph()->GetConstant0(), Token::EQ);
if_rightempty.Then();
{
// Count the native string addition.
AddIncrementCounter(isolate()->counters()->string_add_native());
// Just return the left string.
Push(left);
}
if_rightempty.Else();
{
// Add the two non-empty strings.
Push(BuildUncheckedStringAdd(left, right, allocation_mode));
}
if_rightempty.End();
}
if_leftempty.End();
return Pop();
}
HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
HValue* checked_object,
HValue* key,
HValue* val,
bool is_js_array,
ElementsKind elements_kind,
PropertyAccessType access_type,
LoadKeyedHoleMode load_mode,
KeyedAccessStoreMode store_mode) {
ASSERT((!IsExternalArrayElementsKind(elements_kind) &&
!IsFixedTypedArrayElementsKind(elements_kind)) ||
!is_js_array);
// No GVNFlag is necessary for ElementsKind if there is an explicit dependency
// on a HElementsTransition instruction. The flag can also be removed if the
// map to check has FAST_HOLEY_ELEMENTS, since there can be no further
// ElementsKind transitions. Finally, the dependency can be removed for stores
// for FAST_ELEMENTS, since a transition to HOLEY elements won't change the
// generated store code.
if ((elements_kind == FAST_HOLEY_ELEMENTS) ||
(elements_kind == FAST_ELEMENTS && access_type == STORE)) {
checked_object->ClearDependsOnFlag(kElementsKind);
}
bool fast_smi_only_elements = IsFastSmiElementsKind(elements_kind);
bool fast_elements = IsFastObjectElementsKind(elements_kind);
HValue* elements = AddLoadElements(checked_object);
if (access_type == STORE && (fast_elements || fast_smi_only_elements) &&
store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
HCheckMaps* check_cow_map = Add<HCheckMaps>(
elements, isolate()->factory()->fixed_array_map());
check_cow_map->ClearDependsOnFlag(kElementsKind);
}
HInstruction* length = NULL;
if (is_js_array) {
length = Add<HLoadNamedField>(
checked_object->ActualValue(), checked_object,
HObjectAccess::ForArrayLength(elements_kind));
} else {
length = AddLoadFixedArrayLength(elements);
}
length->set_type(HType::Smi());
HValue* checked_key = NULL;
if (IsExternalArrayElementsKind(elements_kind) ||
IsFixedTypedArrayElementsKind(elements_kind)) {
HValue* backing_store;
if (IsExternalArrayElementsKind(elements_kind)) {
backing_store = Add<HLoadNamedField>(
elements, static_cast<HValue*>(NULL),
HObjectAccess::ForExternalArrayExternalPointer());
} else {
backing_store = elements;
}
if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
NoObservableSideEffectsScope no_effects(this);
IfBuilder length_checker(this);
length_checker.If<HCompareNumericAndBranch>(key, length, Token::LT);
length_checker.Then();
IfBuilder negative_checker(this);
HValue* bounds_check = negative_checker.If<HCompareNumericAndBranch>(
key, graph()->GetConstant0(), Token::GTE);
negative_checker.Then();
HInstruction* result = AddElementAccess(
backing_store, key, val, bounds_check, elements_kind, access_type);
negative_checker.ElseDeopt("Negative key encountered");
negative_checker.End();
length_checker.End();
return result;
} else {
ASSERT(store_mode == STANDARD_STORE);
checked_key = Add<HBoundsCheck>(key, length);
return AddElementAccess(
backing_store, checked_key, val,
checked_object, elements_kind, access_type);
}
}
ASSERT(fast_smi_only_elements ||
fast_elements ||
IsFastDoubleElementsKind(elements_kind));
  // In case val is stored into a fast smi array, ensure that the value is a
  // smi before manipulating the backing store. Otherwise the actual store
  // may deopt, leaving the backing store in an invalid state.
if (access_type == STORE && IsFastSmiElementsKind(elements_kind) &&
!val->type().IsSmi()) {
val = AddUncasted<HForceRepresentation>(val, Representation::Smi());
}
if (IsGrowStoreMode(store_mode)) {
NoObservableSideEffectsScope no_effects(this);
elements = BuildCheckForCapacityGrow(checked_object, elements,
elements_kind, length, key,
is_js_array, access_type);
checked_key = key;
} else {
checked_key = Add<HBoundsCheck>(key, length);
if (access_type == STORE && (fast_elements || fast_smi_only_elements)) {
if (store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
NoObservableSideEffectsScope no_effects(this);
elements = BuildCopyElementsOnWrite(checked_object, elements,
elements_kind, length);
} else {
HCheckMaps* check_cow_map = Add<HCheckMaps>(
elements, isolate()->factory()->fixed_array_map());
check_cow_map->ClearDependsOnFlag(kElementsKind);
}
}
}
return AddElementAccess(elements, checked_key, val, checked_object,
elements_kind, access_type, load_mode);
}
HValue* HGraphBuilder::BuildAllocateArrayFromLength(
JSArrayBuilder* array_builder,
HValue* length_argument) {
if (length_argument->IsConstant() &&
HConstant::cast(length_argument)->HasSmiValue()) {
int array_length = HConstant::cast(length_argument)->Integer32Value();
if (array_length == 0) {
return array_builder->AllocateEmptyArray();
} else {
return array_builder->AllocateArray(length_argument,
array_length,
length_argument);
}
}
HValue* constant_zero = graph()->GetConstant0();
HConstant* max_alloc_length =
Add<HConstant>(JSObject::kInitialMaxFastElementArray);
HInstruction* checked_length = Add<HBoundsCheck>(length_argument,
max_alloc_length);
IfBuilder if_builder(this);
if_builder.If<HCompareNumericAndBranch>(checked_length, constant_zero,
Token::EQ);
if_builder.Then();
const int initial_capacity = JSArray::kPreallocatedArrayElements;
HConstant* initial_capacity_node = Add<HConstant>(initial_capacity);
Push(initial_capacity_node); // capacity
Push(constant_zero); // length
if_builder.Else();
if (!(top_info()->IsStub()) &&
IsFastPackedElementsKind(array_builder->kind())) {
// We'll come back later with better (holey) feedback.
if_builder.Deopt("Holey array despite packed elements_kind feedback");
} else {
Push(checked_length); // capacity
Push(checked_length); // length
}
if_builder.End();
// Figure out total size
HValue* length = Pop();
HValue* capacity = Pop();
return array_builder->AllocateArray(capacity, max_alloc_length, length);
}
HValue* HGraphBuilder::BuildCalculateElementsSize(ElementsKind kind,
HValue* capacity) {
int elements_size = IsFastDoubleElementsKind(kind)
? kDoubleSize
: kPointerSize;
HConstant* elements_size_value = Add<HConstant>(elements_size);
HInstruction* mul = HMul::NewImul(zone(), context(),
capacity->ActualValue(),
elements_size_value);
AddInstruction(mul);
mul->ClearFlag(HValue::kCanOverflow);
STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
HConstant* header_size = Add<HConstant>(FixedArray::kHeaderSize);
HValue* total_size = AddUncasted<HAdd>(mul, header_size);
total_size->ClearFlag(HValue::kCanOverflow);
return total_size;
}
HAllocate* HGraphBuilder::AllocateJSArrayObject(AllocationSiteMode mode) {
int base_size = JSArray::kSize;
if (mode == TRACK_ALLOCATION_SITE) {
base_size += AllocationMemento::kSize;
}
HConstant* size_in_bytes = Add<HConstant>(base_size);
return Add<HAllocate>(
size_in_bytes, HType::JSArray(), NOT_TENURED, JS_OBJECT_TYPE);
}
HConstant* HGraphBuilder::EstablishElementsAllocationSize(
ElementsKind kind,
int capacity) {
int base_size = IsFastDoubleElementsKind(kind)
? FixedDoubleArray::SizeFor(capacity)
: FixedArray::SizeFor(capacity);
return Add<HConstant>(base_size);
}
HAllocate* HGraphBuilder::BuildAllocateElements(ElementsKind kind,
HValue* size_in_bytes) {
InstanceType instance_type = IsFastDoubleElementsKind(kind)
? FIXED_DOUBLE_ARRAY_TYPE
: FIXED_ARRAY_TYPE;
return Add<HAllocate>(size_in_bytes, HType::HeapObject(), NOT_TENURED,
instance_type);
}
void HGraphBuilder::BuildInitializeElementsHeader(HValue* elements,
ElementsKind kind,
HValue* capacity) {
Factory* factory = isolate()->factory();
Handle<Map> map = IsFastDoubleElementsKind(kind)
? factory->fixed_double_array_map()
: factory->fixed_array_map();
Add<HStoreNamedField>(elements, HObjectAccess::ForMap(), Add<HConstant>(map));
Add<HStoreNamedField>(elements, HObjectAccess::ForFixedArrayLength(),
capacity);
}
HValue* HGraphBuilder::BuildAllocateElementsAndInitializeElementsHeader(
ElementsKind kind,
HValue* capacity) {
// The HForceRepresentation is to prevent possible deopt on int-smi
// conversion after allocation but before the new object fields are set.
capacity = AddUncasted<HForceRepresentation>(capacity, Representation::Smi());
HValue* size_in_bytes = BuildCalculateElementsSize(kind, capacity);
HValue* new_elements = BuildAllocateElements(kind, size_in_bytes);
BuildInitializeElementsHeader(new_elements, kind, capacity);
return new_elements;
}
void HGraphBuilder::BuildJSArrayHeader(HValue* array,
HValue* array_map,
HValue* elements,
AllocationSiteMode mode,
ElementsKind elements_kind,
HValue* allocation_site_payload,
HValue* length_field) {
Add<HStoreNamedField>(array, HObjectAccess::ForMap(), array_map);
HConstant* empty_fixed_array =
Add<HConstant>(isolate()->factory()->empty_fixed_array());
Add<HStoreNamedField>(
array, HObjectAccess::ForPropertiesPointer(), empty_fixed_array);
Add<HStoreNamedField>(
array, HObjectAccess::ForElementsPointer(),
elements != NULL ? elements : empty_fixed_array);
Add<HStoreNamedField>(
array, HObjectAccess::ForArrayLength(elements_kind), length_field);
if (mode == TRACK_ALLOCATION_SITE) {
BuildCreateAllocationMemento(
array, Add<HConstant>(JSArray::kSize), allocation_site_payload);
}
}
HInstruction* HGraphBuilder::AddElementAccess(
HValue* elements,
HValue* checked_key,
HValue* val,
HValue* dependency,
ElementsKind elements_kind,
PropertyAccessType access_type,
LoadKeyedHoleMode load_mode) {
if (access_type == STORE) {
ASSERT(val != NULL);
if (elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
elements_kind == UINT8_CLAMPED_ELEMENTS) {
val = Add<HClampToUint8>(val);
}
return Add<HStoreKeyed>(elements, checked_key, val, elements_kind,
elements_kind == FAST_SMI_ELEMENTS
? STORE_TO_INITIALIZED_ENTRY
: INITIALIZING_STORE);
}
ASSERT(access_type == LOAD);
ASSERT(val == NULL);
HLoadKeyed* load = Add<HLoadKeyed>(
elements, checked_key, dependency, elements_kind, load_mode);
if (FLAG_opt_safe_uint32_operations &&
(elements_kind == EXTERNAL_UINT32_ELEMENTS ||
elements_kind == UINT32_ELEMENTS)) {
graph()->RecordUint32Instruction(load);
}
return load;
}
HLoadNamedField* HGraphBuilder::AddLoadMap(HValue* object,
HValue* dependency) {
return Add<HLoadNamedField>(object, dependency, HObjectAccess::ForMap());
}
HLoadNamedField* HGraphBuilder::AddLoadElements(HValue* object,
HValue* dependency) {
return Add<HLoadNamedField>(
object, dependency, HObjectAccess::ForElementsPointer());
}
HLoadNamedField* HGraphBuilder::AddLoadFixedArrayLength(
HValue* array,
HValue* dependency) {
return Add<HLoadNamedField>(
array, dependency, HObjectAccess::ForFixedArrayLength());
}
HLoadNamedField* HGraphBuilder::AddLoadArrayLength(HValue* array,
ElementsKind kind,
HValue* dependency) {
return Add<HLoadNamedField>(
array, dependency, HObjectAccess::ForArrayLength(kind));
}
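// Growth policy for element backing stores: new_capacity = old_capacity +
// old_capacity / 2 + 16, i.e. grow by roughly 1.5x plus a fixed minimum
// slack of 16 elements so that small arrays do not regrow on every append.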
HValue* HGraphBuilder::BuildNewElementsCapacity(HValue* old_capacity) {
HValue* half_old_capacity = AddUncasted<HShr>(old_capacity,
graph_->GetConstant1());
HValue* new_capacity = AddUncasted<HAdd>(half_old_capacity, old_capacity);
new_capacity->ClearFlag(HValue::kCanOverflow);
HValue* min_growth = Add<HConstant>(16);
new_capacity = AddUncasted<HAdd>(new_capacity, min_growth);
new_capacity->ClearFlag(HValue::kCanOverflow);
return new_capacity;
}
HValue* HGraphBuilder::BuildGrowElementsCapacity(HValue* object,
HValue* elements,
ElementsKind kind,
ElementsKind new_kind,
HValue* length,
HValue* new_capacity) {
Add<HBoundsCheck>(new_capacity, Add<HConstant>(
(Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) >>
ElementsKindToShiftSize(kind)));
HValue* new_elements = BuildAllocateElementsAndInitializeElementsHeader(
new_kind, new_capacity);
BuildCopyElements(elements, kind, new_elements,
new_kind, length, new_capacity);
Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
new_elements);
return new_elements;
}
void HGraphBuilder::BuildFillElementsWithValue(HValue* elements,
ElementsKind elements_kind,
HValue* from,
HValue* to,
HValue* value) {
if (to == NULL) {
to = AddLoadFixedArrayLength(elements);
}
  // Special loop unrolling case
STATIC_ASSERT(JSArray::kPreallocatedArrayElements <=
kElementLoopUnrollThreshold);
int initial_capacity = -1;
if (from->IsInteger32Constant() && to->IsInteger32Constant()) {
int constant_from = from->GetInteger32Constant();
int constant_to = to->GetInteger32Constant();
if (constant_from == 0 && constant_to <= kElementLoopUnrollThreshold) {
initial_capacity = constant_to;
}
}
// Since we're about to store a hole value, the store instruction below must
// assume an elements kind that supports heap object values.
if (IsFastSmiOrObjectElementsKind(elements_kind)) {
elements_kind = FAST_HOLEY_ELEMENTS;
}
if (initial_capacity >= 0) {
for (int i = 0; i < initial_capacity; i++) {
HInstruction* key = Add<HConstant>(i);
Add<HStoreKeyed>(elements, key, value, elements_kind);
}
} else {
    // Carefully loop backwards so that "from" remains live through the loop
    // rather than "to". This often corresponds to keeping "length" live
    // rather than "capacity", which helps register allocation, since length
    // is used more often than capacity after filling with holes.
LoopBuilder builder(this, context(), LoopBuilder::kPostDecrement);
HValue* key = builder.BeginBody(to, from, Token::GT);
HValue* adjusted_key = AddUncasted<HSub>(key, graph()->GetConstant1());
adjusted_key->ClearFlag(HValue::kCanOverflow);
Add<HStoreKeyed>(elements, adjusted_key, value, elements_kind);
builder.EndBody();
}
}
void HGraphBuilder::BuildFillElementsWithHole(HValue* elements,
ElementsKind elements_kind,
HValue* from,
HValue* to) {
  // Fast elements kinds need to be initialized in case the statements below
  // trigger a garbage collection.
Factory* factory = isolate()->factory();
double nan_double = FixedDoubleArray::hole_nan_as_double();
HValue* hole = IsFastSmiOrObjectElementsKind(elements_kind)
? Add<HConstant>(factory->the_hole_value())
: Add<HConstant>(nan_double);
BuildFillElementsWithValue(elements, elements_kind, from, to, hole);
}
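// Copies elements from one backing store to another. Small constant
// capacities are copied with an unrolled loop. When copying from a fast
// double array into a fast object array, the target is pre-filled with holes
// first so that it stays in a consistent state if the copy triggers a GC.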
void HGraphBuilder::BuildCopyElements(HValue* from_elements,
ElementsKind from_elements_kind,
HValue* to_elements,
ElementsKind to_elements_kind,
HValue* length,
HValue* capacity) {
int constant_capacity = -1;
if (capacity != NULL &&
capacity->IsConstant() &&
HConstant::cast(capacity)->HasInteger32Value()) {
int constant_candidate = HConstant::cast(capacity)->Integer32Value();
if (constant_candidate <= kElementLoopUnrollThreshold) {
constant_capacity = constant_candidate;
}
}
bool pre_fill_with_holes =
IsFastDoubleElementsKind(from_elements_kind) &&
IsFastObjectElementsKind(to_elements_kind);
if (pre_fill_with_holes) {
// If the copy might trigger a GC, make sure that the FixedArray is
// pre-initialized with holes to make sure that it's always in a
// consistent state.
BuildFillElementsWithHole(to_elements, to_elements_kind,
graph()->GetConstant0(), NULL);
}
if (constant_capacity != -1) {
// Unroll the loop for small elements kinds.
for (int i = 0; i < constant_capacity; i++) {
HValue* key_constant = Add<HConstant>(i);
HInstruction* value = Add<HLoadKeyed>(from_elements, key_constant,
static_cast<HValue*>(NULL),
from_elements_kind);
Add<HStoreKeyed>(to_elements, key_constant, value, to_elements_kind);
}
} else {
if (!pre_fill_with_holes &&
(capacity == NULL || !length->Equals(capacity))) {
BuildFillElementsWithHole(to_elements, to_elements_kind,
length, NULL);
}
if (capacity == NULL) {
capacity = AddLoadFixedArrayLength(to_elements);
}
LoopBuilder builder(this, context(), LoopBuilder::kPostDecrement);
HValue* key = builder.BeginBody(length, graph()->GetConstant0(),
Token::GT);
key = AddUncasted<HSub>(key, graph()->GetConstant1());
key->ClearFlag(HValue::kCanOverflow);
HValue* element = Add<HLoadKeyed>(from_elements, key,
static_cast<HValue*>(NULL),
from_elements_kind,
ALLOW_RETURN_HOLE);
ElementsKind kind = (IsHoleyElementsKind(from_elements_kind) &&
IsFastSmiElementsKind(to_elements_kind))
? FAST_HOLEY_ELEMENTS : to_elements_kind;
if (IsHoleyElementsKind(from_elements_kind) &&
from_elements_kind != to_elements_kind) {
IfBuilder if_hole(this);
if_hole.If<HCompareHoleAndBranch>(element);
if_hole.Then();
HConstant* hole_constant = IsFastDoubleElementsKind(to_elements_kind)
? Add<HConstant>(FixedDoubleArray::hole_nan_as_double())
: graph()->GetConstantHole();
Add<HStoreKeyed>(to_elements, key, hole_constant, kind);
if_hole.Else();
HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind);
store->SetFlag(HValue::kAllowUndefinedAsNaN);
if_hole.End();
} else {
HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind);
store->SetFlag(HValue::kAllowUndefinedAsNaN);
}
builder.EndBody();
}
Counters* counters = isolate()->counters();
AddIncrementCounter(counters->inlined_copied_elements());
}
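// Clones a boilerplate array as copy-on-write: the new JSArray gets its own
// header, but shares the boilerplate's (COW) elements backing store.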
HValue* HGraphBuilder::BuildCloneShallowArrayCow(HValue* boilerplate,
HValue* allocation_site,
AllocationSiteMode mode,
ElementsKind kind) {
HAllocate* array = AllocateJSArrayObject(mode);
HValue* map = AddLoadMap(boilerplate);
HValue* elements = AddLoadElements(boilerplate);
HValue* length = AddLoadArrayLength(boilerplate, kind);
BuildJSArrayHeader(array,
map,
elements,
mode,
FAST_ELEMENTS,
allocation_site,
length);
return array;
}
HValue* HGraphBuilder::BuildCloneShallowArrayEmpty(HValue* boilerplate,
HValue* allocation_site,
AllocationSiteMode mode) {
HAllocate* array = AllocateJSArrayObject(mode);
HValue* map = AddLoadMap(boilerplate);
BuildJSArrayHeader(array,
map,
NULL, // set elements to empty fixed array
mode,
FAST_ELEMENTS,
allocation_site,
graph()->GetConstant0());
return array;
}
HValue* HGraphBuilder::BuildCloneShallowArrayNonEmpty(HValue* boilerplate,
HValue* allocation_site,
AllocationSiteMode mode,
ElementsKind kind) {
HValue* boilerplate_elements = AddLoadElements(boilerplate);
HValue* capacity = AddLoadFixedArrayLength(boilerplate_elements);
// Generate size calculation code here in order to make it dominate
// the JSArray allocation.
HValue* elements_size = BuildCalculateElementsSize(kind, capacity);
// Create empty JSArray object for now, store elimination should remove
// redundant initialization of elements and length fields and at the same
// time the object will be fully prepared for GC if it happens during
// elements allocation.
HValue* result = BuildCloneShallowArrayEmpty(
boilerplate, allocation_site, mode);
HAllocate* elements = BuildAllocateElements(kind, elements_size);
// This function implicitly relies on the fact that the
// FastCloneShallowArrayStub is called only for literals shorter than
// JSObject::kInitialMaxFastElementArray.
  // Can't add an HBoundsCheck here because otherwise the stub would eagerly
  // create a frame.
HConstant* size_upper_bound = EstablishElementsAllocationSize(
kind, JSObject::kInitialMaxFastElementArray);
elements->set_size_upper_bound(size_upper_bound);
Add<HStoreNamedField>(result, HObjectAccess::ForElementsPointer(), elements);
// The allocation for the cloned array above causes register pressure on
// machines with low register counts. Force a reload of the boilerplate
// elements here to free up a register for the allocation to avoid unnecessary
// spillage.
boilerplate_elements = AddLoadElements(boilerplate);
boilerplate_elements->SetFlag(HValue::kCantBeReplaced);
// Copy the elements array header.
for (int i = 0; i < FixedArrayBase::kHeaderSize; i += kPointerSize) {
HObjectAccess access = HObjectAccess::ForFixedArrayHeader(i);
Add<HStoreNamedField>(elements, access,
Add<HLoadNamedField>(boilerplate_elements,
static_cast<HValue*>(NULL), access));
}
  // And copy the array length into the result.
HValue* length = AddLoadArrayLength(boilerplate, kind);
Add<HStoreNamedField>(result, HObjectAccess::ForArrayLength(kind), length);
BuildCopyElements(boilerplate_elements, kind, elements,
kind, length, NULL);
return result;
}
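// Builds an if-continuation that branches on whether "value" is nil.
// Depending on the inferred type this ORs together comparisons against null,
// undefined and undetectable objects; if not all cases apply, it falls back
// to a map check (for a single known class) or deoptimizes.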
void HGraphBuilder::BuildCompareNil(
HValue* value,
Type* type,
HIfContinuation* continuation) {
IfBuilder if_nil(this);
bool some_case_handled = false;
bool some_case_missing = false;
if (type->Maybe(Type::Null())) {
if (some_case_handled) if_nil.Or();
if_nil.If<HCompareObjectEqAndBranch>(value, graph()->GetConstantNull());
some_case_handled = true;
} else {
some_case_missing = true;
}
if (type->Maybe(Type::Undefined())) {
if (some_case_handled) if_nil.Or();
if_nil.If<HCompareObjectEqAndBranch>(value,
graph()->GetConstantUndefined());
some_case_handled = true;
} else {
some_case_missing = true;
}
if (type->Maybe(Type::Undetectable())) {
if (some_case_handled) if_nil.Or();
if_nil.If<HIsUndetectableAndBranch>(value);
some_case_handled = true;
} else {
some_case_missing = true;
}
if (some_case_missing) {
if_nil.Then();
if_nil.Else();
if (type->NumClasses() == 1) {
BuildCheckHeapObject(value);
// For ICs, the map checked below is a sentinel map that gets replaced by
// the monomorphic map when the code is used as a template to generate a
// new IC. For optimized functions, there is no sentinel map, the map
// emitted below is the actual monomorphic map.
Add<HCheckMaps>(value, type->Classes().Current());
} else {
if_nil.Deopt("Too many undetectable types");
}
}
if_nil.CaptureContinuation(continuation);
}
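// Appends an AllocationMemento directly behind the just-allocated object and
// links it to the given allocation site. With allocation-site pretenuring
// enabled, the site's pretenure create-count is also incremented.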
void HGraphBuilder::BuildCreateAllocationMemento(
HValue* previous_object,
HValue* previous_object_size,
HValue* allocation_site) {
ASSERT(allocation_site != NULL);
HInnerAllocatedObject* allocation_memento = Add<HInnerAllocatedObject>(
previous_object, previous_object_size, HType::HeapObject());
AddStoreMapConstant(
allocation_memento, isolate()->factory()->allocation_memento_map());
Add<HStoreNamedField>(
allocation_memento,
HObjectAccess::ForAllocationMementoSite(),
allocation_site);
if (FLAG_allocation_site_pretenuring) {
HValue* memento_create_count = Add<HLoadNamedField>(
allocation_site, static_cast<HValue*>(NULL),
HObjectAccess::ForAllocationSiteOffset(
AllocationSite::kPretenureCreateCountOffset));
memento_create_count = AddUncasted<HAdd>(
memento_create_count, graph()->GetConstant1());
// This smi value is reset to zero after every gc, overflow isn't a problem
// since the counter is bounded by the new space size.
memento_create_count->ClearFlag(HValue::kCanOverflow);
Add<HStoreNamedField>(
allocation_site, HObjectAccess::ForAllocationSiteOffset(
AllocationSite::kPretenureCreateCountOffset), memento_create_count);
}
}
HInstruction* HGraphBuilder::BuildGetNativeContext(HValue* closure) {
  // Walk from the closure's context to the global object, then load the
  // native context from it.
HInstruction* context =
Add<HLoadNamedField>(closure, static_cast<HValue*>(NULL),
HObjectAccess::ForFunctionContextPointer());
HInstruction* global_object = Add<HLoadNamedField>(
context, static_cast<HValue*>(NULL),
HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(
GlobalObject::kNativeContextOffset);
return Add<HLoadNamedField>(
global_object, static_cast<HValue*>(NULL), access);
}
HInstruction* HGraphBuilder::BuildGetNativeContext() {
  // Get the global object from the current context, then the native context.
HValue* global_object = Add<HLoadNamedField>(
context(), static_cast<HValue*>(NULL),
HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
return Add<HLoadNamedField>(
global_object, static_cast<HValue*>(NULL),
HObjectAccess::ForObservableJSObjectOffset(
GlobalObject::kNativeContextOffset));
}
HInstruction* HGraphBuilder::BuildGetArrayFunction() {
HInstruction* native_context = BuildGetNativeContext();
HInstruction* index =
Add<HConstant>(static_cast<int32_t>(Context::ARRAY_FUNCTION_INDEX));
return Add<HLoadKeyed>(
native_context, index, static_cast<HValue*>(NULL), FAST_ELEMENTS);
}
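// JSArrayBuilder encapsulates the allocation of a JSArray together with its
// elements backing store. Allocation-site tracking is derived from the
// elements kind unless explicitly disabled via DISABLE_ALLOCATION_SITES.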
HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
ElementsKind kind,
HValue* allocation_site_payload,
HValue* constructor_function,
AllocationSiteOverrideMode override_mode) :
builder_(builder),
kind_(kind),
allocation_site_payload_(allocation_site_payload),
constructor_function_(constructor_function) {
ASSERT(!allocation_site_payload->IsConstant() ||
HConstant::cast(allocation_site_payload)->handle(
builder_->isolate())->IsAllocationSite());
mode_ = override_mode == DISABLE_ALLOCATION_SITES
? DONT_TRACK_ALLOCATION_SITE
: AllocationSite::GetMode(kind);
}
HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
ElementsKind kind,
HValue* constructor_function) :
builder_(builder),
kind_(kind),
mode_(DONT_TRACK_ALLOCATION_SITE),
allocation_site_payload_(NULL),
constructor_function_(constructor_function) {
}
HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
if (!builder()->top_info()->IsStub()) {
// A constant map is fine.
Handle<Map> map(builder()->isolate()->get_initial_js_array_map(kind_),
builder()->isolate());
return builder()->Add<HConstant>(map);
}
if (constructor_function_ != NULL && kind_ == GetInitialFastElementsKind()) {
    // No need for a context lookup if kind_ matches the elements kind of the
    // constructor's initial map; we can just load that map directly.
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
return builder()->Add<HLoadNamedField>(
constructor_function_, static_cast<HValue*>(NULL), access);
}
// TODO(mvstanton): we should always have a constructor function if we
// are creating a stub.
HInstruction* native_context = constructor_function_ != NULL
? builder()->BuildGetNativeContext(constructor_function_)
: builder()->BuildGetNativeContext();
HInstruction* index = builder()->Add<HConstant>(
static_cast<int32_t>(Context::JS_ARRAY_MAPS_INDEX));
HInstruction* map_array = builder()->Add<HLoadKeyed>(
native_context, index, static_cast<HValue*>(NULL), FAST_ELEMENTS);
HInstruction* kind_index = builder()->Add<HConstant>(kind_);
return builder()->Add<HLoadKeyed>(
map_array, kind_index, static_cast<HValue*>(NULL), FAST_ELEMENTS);
}
HValue* HGraphBuilder::JSArrayBuilder::EmitInternalMapCode() {
  // Load the initial map stored on the constructor function.
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
return builder()->Add<HLoadNamedField>(
constructor_function_, static_cast<HValue*>(NULL), access);
}
HAllocate* HGraphBuilder::JSArrayBuilder::AllocateEmptyArray() {
HConstant* capacity = builder()->Add<HConstant>(initial_capacity());
return AllocateArray(capacity,
capacity,
builder()->graph()->GetConstant0());
}
HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
HValue* capacity,
HConstant* capacity_upper_bound,
HValue* length_field,
FillMode fill_mode) {
return AllocateArray(capacity,
capacity_upper_bound->GetInteger32Constant(),
length_field,
fill_mode);
}
HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
HValue* capacity,
int capacity_upper_bound,
HValue* length_field,
FillMode fill_mode) {
  HConstant* elements_size_upper_bound = capacity->IsInteger32Constant()
? HConstant::cast(capacity)
: builder()->EstablishElementsAllocationSize(kind_, capacity_upper_bound);
HAllocate* array = AllocateArray(capacity, length_field, fill_mode);
if (!elements_location_->has_size_upper_bound()) {
    elements_location_->set_size_upper_bound(elements_size_upper_bound);
}
return array;
}
HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
HValue* capacity,
HValue* length_field,
FillMode fill_mode) {
  // These HForceRepresentations are needed because we store these values as
  // fields in the objects we construct, and an int32-to-smi HChange could
  // deopt. Accept the deopt possibility now, before allocation occurs.
capacity =
builder()->AddUncasted<HForceRepresentation>(capacity,
Representation::Smi());
length_field =
builder()->AddUncasted<HForceRepresentation>(length_field,
Representation::Smi());
// Generate size calculation code here in order to make it dominate
// the JSArray allocation.
HValue* elements_size =
builder()->BuildCalculateElementsSize(kind_, capacity);
// Allocate (dealing with failure appropriately)
HAllocate* array_object = builder()->AllocateJSArrayObject(mode_);
// Fill in the fields: map, properties, length
HValue* map;
if (allocation_site_payload_ == NULL) {
map = EmitInternalMapCode();
} else {
map = EmitMapCode();
}
builder()->BuildJSArrayHeader(array_object,
map,
NULL, // set elements to empty fixed array
mode_,
kind_,
allocation_site_payload_,
length_field);
// Allocate and initialize the elements
elements_location_ = builder()->BuildAllocateElements(kind_, elements_size);
builder()->BuildInitializeElementsHeader(elements_location_, kind_, capacity);
// Set the elements
builder()->Add<HStoreNamedField>(
array_object, HObjectAccess::ForElementsPointer(), elements_location_);
if (fill_mode == FILL_WITH_HOLE) {
builder()->BuildFillElementsWithHole(elements_location_, kind_,
graph()->GetConstant0(), capacity);
}
return array_object;
}
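// Loads a JS builtin function by walking the chain
// context -> global object -> builtins object -> function slot.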
HValue* HGraphBuilder::AddLoadJSBuiltin(Builtins::JavaScript builtin) {
HValue* global_object = Add<HLoadNamedField>(
context(), static_cast<HValue*>(NULL),
HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(
GlobalObject::kBuiltinsOffset);
HValue* builtins = Add<HLoadNamedField>(
global_object, static_cast<HValue*>(NULL), access);
HObjectAccess function_access = HObjectAccess::ForObservableJSObjectOffset(
JSBuiltinsObject::OffsetOfFunctionWithId(builtin));
return Add<HLoadNamedField>(
builtins, static_cast<HValue*>(NULL), function_access);
}
HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
: HGraphBuilder(info),
function_state_(NULL),
initial_function_state_(this, info, NORMAL_RETURN, 0),
ast_context_(NULL),
break_scope_(NULL),
inlined_count_(0),
globals_(10, info->zone()),
inline_bailout_(false),
osr_(new(info->zone()) HOsrBuilder(this)) {
// This is not initialized in the initializer list because the
// constructor for the initial state relies on function_state_ == NULL
// to know it's the initial state.
  function_state_ = &initial_function_state_;
InitializeAstVisitor(info->zone());
if (FLAG_hydrogen_track_positions) {
SetSourcePosition(info->shared_info()->start_position());
}
}
HBasicBlock* HOptimizedGraphBuilder::CreateJoin(HBasicBlock* first,
HBasicBlock* second,
BailoutId join_id) {
if (first == NULL) {
return second;
} else if (second == NULL) {
return first;
} else {
HBasicBlock* join_block = graph()->CreateBasicBlock();
Goto(first, join_block);
Goto(second, join_block);
join_block->SetJoinId(join_id);
return join_block;
}
}
HBasicBlock* HOptimizedGraphBuilder::JoinContinue(IterationStatement* statement,
HBasicBlock* exit_block,
HBasicBlock* continue_block) {
if (continue_block != NULL) {
if (exit_block != NULL) Goto(exit_block, continue_block);
continue_block->SetJoinId(statement->ContinueId());
return continue_block;
}
return exit_block;
}
HBasicBlock* HOptimizedGraphBuilder::CreateLoop(IterationStatement* statement,
HBasicBlock* loop_entry,
HBasicBlock* body_exit,
HBasicBlock* loop_successor,
HBasicBlock* break_block) {
if (body_exit != NULL) Goto(body_exit, loop_entry);
loop_entry->PostProcessLoopHeader(statement);
if (break_block != NULL) {
if (loop_successor != NULL) Goto(loop_successor, break_block);
break_block->SetJoinId(statement->ExitId());
return break_block;
}
return loop_successor;
}
// Build a new loop header block and set it as the current block.
HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry() {
HBasicBlock* loop_entry = CreateLoopHeaderBlock();
Goto(loop_entry);
set_current_block(loop_entry);
return loop_entry;
}
HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry(
IterationStatement* statement) {
HBasicBlock* loop_entry = osr()->HasOsrEntryAt(statement)
? osr()->BuildOsrLoopEntry(statement)
: BuildLoopEntry();
return loop_entry;
}
void HBasicBlock::FinishExit(HControlInstruction* instruction,
HSourcePosition position) {
Finish(instruction, position);
ClearEnvironment();
}
HGraph::HGraph(CompilationInfo* info)
: isolate_(info->isolate()),
next_block_id_(0),
entry_block_(NULL),
blocks_(8, info->zone()),
values_(16, info->zone()),
phi_list_(NULL),
uint32_instructions_(NULL),
osr_(NULL),
info_(info),
zone_(info->zone()),
is_recursive_(false),
use_optimistic_licm_(false),
depends_on_empty_array_proto_elements_(false),
type_change_checksum_(0),
maximum_environment_size_(0),
no_side_effects_scope_count_(0),
disallow_adding_new_values_(false),
next_inline_id_(0),
inlined_functions_(5, info->zone()) {
if (info->IsStub()) {
HydrogenCodeStub* stub = info->code_stub();
CodeStubInterfaceDescriptor* descriptor = stub->GetInterfaceDescriptor();
start_environment_ =
new(zone_) HEnvironment(zone_, descriptor->environment_length());
} else {
TraceInlinedFunction(info->shared_info(), HSourcePosition::Unknown());
start_environment_ =
new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
}
start_environment_->set_ast_id(BailoutId::FunctionEntry());
entry_block_ = CreateBasicBlock();
entry_block_->SetInitialEnvironment(start_environment_);
}
HBasicBlock* HGraph::CreateBasicBlock() {
HBasicBlock* result = new(zone()) HBasicBlock(this);
blocks_.Add(result, zone());
return result;
}
void HGraph::FinalizeUniqueness() {
DisallowHeapAllocation no_gc;
ASSERT(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
for (int i = 0; i < blocks()->length(); ++i) {
for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) {
it.Current()->FinalizeUniqueness();
}
}
}
int HGraph::TraceInlinedFunction(
Handle<SharedFunctionInfo> shared,
HSourcePosition position) {
if (!FLAG_hydrogen_track_positions) {
return 0;
}
int id = 0;
for (; id < inlined_functions_.length(); id++) {
if (inlined_functions_[id].shared().is_identical_to(shared)) {
break;
}
}
if (id == inlined_functions_.length()) {
inlined_functions_.Add(InlinedFunctionInfo(shared), zone());
if (!shared->script()->IsUndefined()) {
Handle<Script> script(Script::cast(shared->script()));
if (!script->source()->IsUndefined()) {
CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
PrintF(tracing_scope.file(),
"--- FUNCTION SOURCE (%s) id{%d,%d} ---\n",
shared->DebugName()->ToCString().get(),
info()->optimization_id(),
id);
{
ConsStringIteratorOp op;
StringCharacterStream stream(String::cast(script->source()),
&op,
shared->start_position());
          // shared->end_position() points to the last character in the
          // stream. We need to compensate by adding one to calculate the
          // length.
int source_len =
shared->end_position() - shared->start_position() + 1;
for (int i = 0; i < source_len; i++) {
if (stream.HasMore()) {
PrintF(tracing_scope.file(), "%c", stream.GetNext());
}
}
}
PrintF(tracing_scope.file(), "\n--- END ---\n");
}
}
}
int inline_id = next_inline_id_++;
if (inline_id != 0) {
CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
PrintF(tracing_scope.file(), "INLINE (%s) id{%d,%d} AS %d AT ",
shared->DebugName()->ToCString().get(),
info()->optimization_id(),
id,
inline_id);
position.PrintTo(tracing_scope.file());
PrintF(tracing_scope.file(), "\n");
}
return inline_id;
}
int HGraph::SourcePositionToScriptPosition(HSourcePosition pos) {
if (!FLAG_hydrogen_track_positions || pos.IsUnknown()) {
return pos.raw();
}
return inlined_functions_[pos.inlining_id()].start_position() +
pos.position();
}
// Block ordering was implemented with two mutually recursive methods,
// HGraph::Postorder and HGraph::PostorderLoopBlocks.
// The recursion could lead to stack overflow so the algorithm has been
// implemented iteratively.
// At a high level the algorithm looks like this:
//
// Postorder(block, loop_header) : {
// if (block has already been visited or is of another loop) return;
// mark block as visited;
// if (block is a loop header) {
// VisitLoopMembers(block, loop_header);
// VisitSuccessorsOfLoopHeader(block);
// } else {
//     VisitSuccessors(block, loop_header)
// }
// put block in result list;
// }
//
// VisitLoopMembers(block, outer_loop_header) {
// foreach (block b in block loop members) {
// VisitSuccessorsOfLoopMember(b, outer_loop_header);
// if (b is loop header) VisitLoopMembers(b);
// }
// }
//
// VisitSuccessorsOfLoopMember(block, outer_loop_header) {
// foreach (block b in block successors) Postorder(b, outer_loop_header)
// }
//
// VisitSuccessorsOfLoopHeader(block) {
// foreach (block b in block successors) Postorder(b, block)
// }
//
// VisitSuccessors(block, loop_header) {
// foreach (block b in block successors) Postorder(b, loop_header)
// }
//
// The ordering is started by calling Postorder(entry, NULL).
//
// Each instance of PostorderProcessor represents the "stack frame" of the
// recursion, and in particular keeps the state of the loop (iteration) of the
// "Visit..." function it represents.
// To recycle memory we keep all the frames in a doubly linked list, but
// this means that we cannot use constructors to initialize the frames.
//
class PostorderProcessor : public ZoneObject {
public:
// Back link (towards the stack bottom).
  PostorderProcessor* parent() { return father_; }
// Forward link (towards the stack top).
  PostorderProcessor* child() { return child_; }
HBasicBlock* block() { return block_; }
HLoopInformation* loop() { return loop_; }
HBasicBlock* loop_header() { return loop_header_; }
static PostorderProcessor* CreateEntryProcessor(Zone* zone,
HBasicBlock* block) {
PostorderProcessor* result = new(zone) PostorderProcessor(NULL);
return result->SetupSuccessors(zone, block, NULL);
}
PostorderProcessor* PerformStep(Zone* zone,
ZoneList<HBasicBlock*>* order) {
PostorderProcessor* next =
PerformNonBacktrackingStep(zone, order);
if (next != NULL) {
return next;
} else {
return Backtrack(zone, order);
}
}
private:
explicit PostorderProcessor(PostorderProcessor* father)
: father_(father), child_(NULL), successor_iterator(NULL) { }
// Each enum value states the cycle whose state is kept by this instance.
enum LoopKind {
NONE,
SUCCESSORS,
SUCCESSORS_OF_LOOP_HEADER,
LOOP_MEMBERS,
SUCCESSORS_OF_LOOP_MEMBER
};
// Each "Setup..." method is like a constructor for a cycle state.
PostorderProcessor* SetupSuccessors(Zone* zone,
HBasicBlock* block,
HBasicBlock* loop_header) {
if (block == NULL || block->IsOrdered() ||
block->parent_loop_header() != loop_header) {
kind_ = NONE;
block_ = NULL;
loop_ = NULL;
loop_header_ = NULL;
return this;
} else {
block_ = block;
loop_ = NULL;
block->MarkAsOrdered();
if (block->IsLoopHeader()) {
kind_ = SUCCESSORS_OF_LOOP_HEADER;
loop_header_ = block;
InitializeSuccessors();
PostorderProcessor* result = Push(zone);
return result->SetupLoopMembers(zone, block, block->loop_information(),
loop_header);
} else {
ASSERT(block->IsFinished());
kind_ = SUCCESSORS;
loop_header_ = loop_header;
InitializeSuccessors();
return this;
}
}
}
PostorderProcessor* SetupLoopMembers(Zone* zone,
HBasicBlock* block,
HLoopInformation* loop,
HBasicBlock* loop_header) {
kind_ = LOOP_MEMBERS;
block_ = block;
loop_ = loop;
loop_header_ = loop_header;
InitializeLoopMembers();
return this;
}
PostorderProcessor* SetupSuccessorsOfLoopMember(
HBasicBlock* block,
HLoopInformation* loop,
HBasicBlock* loop_header) {
kind_ = SUCCESSORS_OF_LOOP_MEMBER;
block_ = block;
loop_ = loop;
loop_header_ = loop_header;
InitializeSuccessors();
return this;
}
// This method "allocates" a new stack frame.
PostorderProcessor* Push(Zone* zone) {
if (child_ == NULL) {
child_ = new(zone) PostorderProcessor(this);
}
return child_;
}
void ClosePostorder(ZoneList<HBasicBlock*>* order, Zone* zone) {
ASSERT(block_->end()->FirstSuccessor() == NULL ||
order->Contains(block_->end()->FirstSuccessor()) ||
block_->end()->FirstSuccessor()->IsLoopHeader());
ASSERT(block_->end()->SecondSuccessor() == NULL ||
order->Contains(block_->end()->SecondSuccessor()) ||
block_->end()->SecondSuccessor()->IsLoopHeader());
order->Add(block_, zone);
}
  // This method is the basic building block used to walk up the stack.
PostorderProcessor* Pop(Zone* zone,
ZoneList<HBasicBlock*>* order) {
switch (kind_) {
case SUCCESSORS:
case SUCCESSORS_OF_LOOP_HEADER:
ClosePostorder(order, zone);
return father_;
case LOOP_MEMBERS:
return father_;
case SUCCESSORS_OF_LOOP_MEMBER:
if (block()->IsLoopHeader() && block() != loop_->loop_header()) {
// In this case we need to perform a LOOP_MEMBERS cycle so we
// initialize it and return this instead of father.
return SetupLoopMembers(zone, block(),
block()->loop_information(), loop_header_);
} else {
return father_;
}
case NONE:
return father_;
}
UNREACHABLE();
return NULL;
}
// Walks up the stack.
PostorderProcessor* Backtrack(Zone* zone,
ZoneList<HBasicBlock*>* order) {
PostorderProcessor* parent = Pop(zone, order);
while (parent != NULL) {
PostorderProcessor* next =
parent->PerformNonBacktrackingStep(zone, order);
if (next != NULL) {
return next;
} else {
parent = parent->Pop(zone, order);
}
}
return NULL;
}
PostorderProcessor* PerformNonBacktrackingStep(
Zone* zone,
ZoneList<HBasicBlock*>* order) {
HBasicBlock* next_block;
switch (kind_) {
case SUCCESSORS:
next_block = AdvanceSuccessors();
if (next_block != NULL) {
PostorderProcessor* result = Push(zone);
return result->SetupSuccessors(zone, next_block, loop_header_);
}
break;
case SUCCESSORS_OF_LOOP_HEADER:
next_block = AdvanceSuccessors();
if (next_block != NULL) {
PostorderProcessor* result = Push(zone);
return result->SetupSuccessors(zone, next_block, block());
}
break;
case LOOP_MEMBERS:
next_block = AdvanceLoopMembers();
if (next_block != NULL) {
PostorderProcessor* result = Push(zone);
return result->SetupSuccessorsOfLoopMember(next_block,
loop_, loop_header_);
}
break;
case SUCCESSORS_OF_LOOP_MEMBER:
next_block = AdvanceSuccessors();
if (next_block != NULL) {
PostorderProcessor* result = Push(zone);
return result->SetupSuccessors(zone, next_block, loop_header_);
}
break;
case NONE:
return NULL;
}
return NULL;
}
// The following two methods implement a "foreach b in successors" cycle.
void InitializeSuccessors() {
loop_index = 0;
loop_length = 0;
successor_iterator = HSuccessorIterator(block_->end());
}
HBasicBlock* AdvanceSuccessors() {
if (!successor_iterator.Done()) {
HBasicBlock* result = successor_iterator.Current();
successor_iterator.Advance();
return result;
}
return NULL;
}
// The following two methods implement a "foreach b in loop members" cycle.
void InitializeLoopMembers() {
loop_index = 0;
loop_length = loop_->blocks()->length();
}
HBasicBlock* AdvanceLoopMembers() {
if (loop_index < loop_length) {
HBasicBlock* result = loop_->blocks()->at(loop_index);
loop_index++;
return result;
} else {
return NULL;
}
}
LoopKind kind_;
PostorderProcessor* father_;
PostorderProcessor* child_;
HLoopInformation* loop_;
HBasicBlock* block_;
HBasicBlock* loop_header_;
int loop_index;
int loop_length;
HSuccessorIterator successor_iterator;
};
void HGraph::OrderBlocks() {
CompilationPhase phase("H_Block ordering", info());
#ifdef DEBUG
// Initially the blocks must not be ordered.
for (int i = 0; i < blocks_.length(); ++i) {
ASSERT(!blocks_[i]->IsOrdered());
}
#endif
PostorderProcessor* postorder =
PostorderProcessor::CreateEntryProcessor(zone(), blocks_[0]);
blocks_.Rewind(0);
while (postorder) {
postorder = postorder->PerformStep(zone(), &blocks_);
}
#ifdef DEBUG
// Now all blocks must be marked as ordered.
for (int i = 0; i < blocks_.length(); ++i) {
ASSERT(blocks_[i]->IsOrdered());
}
#endif
// Reverse block list and assign block IDs.
for (int i = 0, j = blocks_.length(); --j >= i; ++i) {
HBasicBlock* bi = blocks_[i];
HBasicBlock* bj = blocks_[j];
bi->set_block_id(j);
bj->set_block_id(i);
blocks_[i] = bj;
blocks_[j] = bi;
}
}
void HGraph::AssignDominators() {
HPhase phase("H_Assign dominators", this);
for (int i = 0; i < blocks_.length(); ++i) {
HBasicBlock* block = blocks_[i];
if (block->IsLoopHeader()) {
// Only the first predecessor of a loop header is from outside the loop.
// All others are back edges, and thus cannot dominate the loop header.
block->AssignCommonDominator(block->predecessors()->first());
block->AssignLoopSuccessorDominators();
} else {
for (int j = blocks_[i]->predecessors()->length() - 1; j >= 0; --j) {
blocks_[i]->AssignCommonDominator(blocks_[i]->predecessors()->at(j));
}
}
}
}
bool HGraph::CheckArgumentsPhiUses() {
int block_count = blocks_.length();
for (int i = 0; i < block_count; ++i) {
for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
HPhi* phi = blocks_[i]->phis()->at(j);
// We don't support phi uses of arguments for now.
if (phi->CheckFlag(HValue::kIsArguments)) return false;
}
}
return true;
}
bool HGraph::CheckConstPhiUses() {
int block_count = blocks_.length();
for (int i = 0; i < block_count; ++i) {
for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
HPhi* phi = blocks_[i]->phis()->at(j);
// Check for the hole value (from an uninitialized const).
for (int k = 0; k < phi->OperandCount(); k++) {
if (phi->OperandAt(k) == GetConstantHole()) return false;
}
}
}
return true;
}
void HGraph::CollectPhis() {
int block_count = blocks_.length();
phi_list_ = new(zone()) ZoneList<HPhi*>(block_count, zone());
for (int i = 0; i < block_count; ++i) {
for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
HPhi* phi = blocks_[i]->phis()->at(j);
phi_list_->Add(phi, zone());
}
}
}
// Implementation of utility class to encapsulate the translation state for
// a (possibly inlined) function.
FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
InliningKind inlining_kind,
int inlining_id)
: owner_(owner),
compilation_info_(info),
call_context_(NULL),
inlining_kind_(inlining_kind),
function_return_(NULL),
test_context_(NULL),
entry_(NULL),
arguments_object_(NULL),
arguments_elements_(NULL),
inlining_id_(inlining_id),
outer_source_position_(HSourcePosition::Unknown()),
outer_(owner->function_state()) {
if (outer_ != NULL) {
// State for an inline function.
if (owner->ast_context()->IsTest()) {
HBasicBlock* if_true = owner->graph()->CreateBasicBlock();
HBasicBlock* if_false = owner->graph()->CreateBasicBlock();
if_true->MarkAsInlineReturnTarget(owner->current_block());
if_false->MarkAsInlineReturnTarget(owner->current_block());
TestContext* outer_test_context = TestContext::cast(owner->ast_context());
Expression* cond = outer_test_context->condition();
      // The AstContext constructor pushed this context onto the context
      // stack. This newed instance is the reason that AstContext can't be
      // BASE_EMBEDDED.
test_context_ = new TestContext(owner, cond, if_true, if_false);
} else {
function_return_ = owner->graph()->CreateBasicBlock();
function_return()->MarkAsInlineReturnTarget(owner->current_block());
}
// Set this after possibly allocating a new TestContext above.
call_context_ = owner->ast_context();
}
// Push on the state stack.
owner->set_function_state(this);
if (FLAG_hydrogen_track_positions) {
outer_source_position_ = owner->source_position();
owner->EnterInlinedSource(
info->shared_info()->start_position(),
inlining_id);
owner->SetSourcePosition(info->shared_info()->start_position());
}
}
FunctionState::~FunctionState() {
delete test_context_;
owner_->set_function_state(outer_);
if (FLAG_hydrogen_track_positions) {
owner_->set_source_position(outer_source_position_);
owner_->EnterInlinedSource(
outer_->compilation_info()->shared_info()->start_position(),
outer_->inlining_id());
}
}
// Implementation of utility classes to represent an expression's context in
// the AST.
AstContext::AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind)
: owner_(owner),
kind_(kind),
outer_(owner->ast_context()),
for_typeof_(false) {
owner->set_ast_context(this); // Push.
#ifdef DEBUG
ASSERT(owner->environment()->frame_type() == JS_FUNCTION);
original_length_ = owner->environment()->length();
#endif
}
AstContext::~AstContext() {
owner_->set_ast_context(outer_); // Pop.
}
EffectContext::~EffectContext() {
ASSERT(owner()->HasStackOverflow() ||
owner()->current_block() == NULL ||
(owner()->environment()->length() == original_length_ &&
owner()->environment()->frame_type() == JS_FUNCTION));
}
ValueContext::~ValueContext() {
ASSERT(owner()->HasStackOverflow() ||
owner()->current_block() == NULL ||
(owner()->environment()->length() == original_length_ + 1 &&
owner()->environment()->frame_type() == JS_FUNCTION));
}
void EffectContext::ReturnValue(HValue* value) {
// The value is simply ignored.
}
void ValueContext::ReturnValue(HValue* value) {
// The value is tracked in the bailout environment, and communicated
// through the environment as the result of the expression.
if (!arguments_allowed() && value->CheckFlag(HValue::kIsArguments)) {
owner()->Bailout(kBadValueContextForArgumentsValue);
}
owner()->Push(value);
}
void TestContext::ReturnValue(HValue* value) {
BuildBranch(value);
}
void EffectContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->IsControlInstruction());
owner()->AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
owner()->Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
}
}
void EffectContext::ReturnControl(HControlInstruction* instr,
BailoutId ast_id) {
ASSERT(!instr->HasObservableSideEffects());
HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
instr->SetSuccessorAt(0, empty_true);
instr->SetSuccessorAt(1, empty_false);
owner()->FinishCurrentBlock(instr);
HBasicBlock* join = owner()->CreateJoin(empty_true, empty_false, ast_id);
owner()->set_current_block(join);
}
void EffectContext::ReturnContinuation(HIfContinuation* continuation,
BailoutId ast_id) {
HBasicBlock* true_branch = NULL;
HBasicBlock* false_branch = NULL;
continuation->Continue(&true_branch, &false_branch);
if (!continuation->IsTrueReachable()) {
owner()->set_current_block(false_branch);
} else if (!continuation->IsFalseReachable()) {
owner()->set_current_block(true_branch);
} else {
HBasicBlock* join = owner()->CreateJoin(true_branch, false_branch, ast_id);
owner()->set_current_block(join);
}
}
void ValueContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->IsControlInstruction());
if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
return owner()->Bailout(kBadValueContextForArgumentsObjectValue);
}
owner()->AddInstruction(instr);
owner()->Push(instr);
if (instr->HasObservableSideEffects()) {
owner()->Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
}
}
void ValueContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->HasObservableSideEffects());
if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
return owner()->Bailout(kBadValueContextForArgumentsObjectValue);
}
HBasicBlock* materialize_false = owner()->graph()->CreateBasicBlock();
HBasicBlock* materialize_true = owner()->graph()->CreateBasicBlock();
instr->SetSuccessorAt(0, materialize_true);
instr->SetSuccessorAt(1, materialize_false);
owner()->FinishCurrentBlock(instr);
owner()->set_current_block(materialize_true);
owner()->Push(owner()->graph()->GetConstantTrue());
owner()->set_current_block(materialize_false);
owner()->Push(owner()->graph()->GetConstantFalse());
HBasicBlock* join =
owner()->CreateJoin(materialize_true, materialize_false, ast_id);
owner()->set_current_block(join);
}
void ValueContext::ReturnContinuation(HIfContinuation* continuation,
BailoutId ast_id) {
HBasicBlock* materialize_true = NULL;
HBasicBlock* materialize_false = NULL;
continuation->Continue(&materialize_true, &materialize_false);
if (continuation->IsTrueReachable()) {
owner()->set_current_block(materialize_true);
owner()->Push(owner()->graph()->GetConstantTrue());
owner()->set_current_block(materialize_true);
}
if (continuation->IsFalseReachable()) {
owner()->set_current_block(materialize_false);
owner()->Push(owner()->graph()->GetConstantFalse());
owner()->set_current_block(materialize_false);
}
if (continuation->TrueAndFalseReachable()) {
HBasicBlock* join =
owner()->CreateJoin(materialize_true, materialize_false, ast_id);
owner()->set_current_block(join);
}
}
void TestContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->IsControlInstruction());
HOptimizedGraphBuilder* builder = owner();
builder->AddInstruction(instr);
// We expect a simulate after every expression with side effects, though
// this one isn't actually needed (and wouldn't work if it were targeted).
if (instr->HasObservableSideEffects()) {
builder->Push(instr);
builder->Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
builder->Pop();
}
BuildBranch(instr);
}
void TestContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->HasObservableSideEffects());
HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
instr->SetSuccessorAt(0, empty_true);
instr->SetSuccessorAt(1, empty_false);
owner()->FinishCurrentBlock(instr);
owner()->Goto(empty_true, if_true(), owner()->function_state());
owner()->Goto(empty_false, if_false(), owner()->function_state());
owner()->set_current_block(NULL);
}
void TestContext::ReturnContinuation(HIfContinuation* continuation,
BailoutId ast_id) {
HBasicBlock* true_branch = NULL;
HBasicBlock* false_branch = NULL;
continuation->Continue(&true_branch, &false_branch);
if (continuation->IsTrueReachable()) {
owner()->Goto(true_branch, if_true(), owner()->function_state());
}
if (continuation->IsFalseReachable()) {
owner()->Goto(false_branch, if_false(), owner()->function_state());
}
owner()->set_current_block(NULL);
}
void TestContext::BuildBranch(HValue* value) {
// We expect the graph to be in edge-split form: there is no edge that
// connects a branch node to a join node. We conservatively ensure that
// property by always adding an empty block on the outgoing edges of this
// branch.
HOptimizedGraphBuilder* builder = owner();
if (value != NULL && value->CheckFlag(HValue::kIsArguments)) {
builder->Bailout(kArgumentsObjectValueInATestContext);
}
ToBooleanStub::Types expected(condition()->to_boolean_types());
ReturnControl(owner()->New<HBranch>(value, expected), BailoutId::None());
}
// HOptimizedGraphBuilder infrastructure for bailing out and checking bailouts.
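// CHECK_BAILOUT returns early if visiting set the stack-overflow flag;
// CHECK_ALIVE additionally returns if the current block has become
// unreachable (NULL); CHECK_ALIVE_OR_RETURN does the same but returns the
// given value instead of void.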
#define CHECK_BAILOUT(call) \
do { \
call; \
if (HasStackOverflow()) return; \
} while (false)
#define CHECK_ALIVE(call) \
do { \
call; \
if (HasStackOverflow() || current_block() == NULL) return; \
} while (false)
#define CHECK_ALIVE_OR_RETURN(call, value) \
do { \
call; \
if (HasStackOverflow() || current_block() == NULL) return value; \
} while (false)
void HOptimizedGraphBuilder::Bailout(BailoutReason reason) {
current_info()->set_bailout_reason(reason);
SetStackOverflow();
}
void HOptimizedGraphBuilder::VisitForEffect(Expression* expr) {
EffectContext for_effect(this);
Visit(expr);
}
void HOptimizedGraphBuilder::VisitForValue(Expression* expr,
ArgumentsAllowedFlag flag) {
ValueContext for_value(this, flag);
Visit(expr);
}
void HOptimizedGraphBuilder::VisitForTypeOf(Expression* expr) {
ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
for_value.set_for_typeof(true);
Visit(expr);
}
void HOptimizedGraphBuilder::VisitForControl(Expression* expr,
HBasicBlock* true_block,
HBasicBlock* false_block) {
TestContext for_test(this, expr, true_block, false_block);
Visit(expr);
}
void HOptimizedGraphBuilder::VisitExpressions(
ZoneList<Expression*>* exprs) {
for (int i = 0; i < exprs->length(); ++i) {
CHECK_ALIVE(VisitForValue(exprs->at(i)));
}
}
bool HOptimizedGraphBuilder::BuildGraph() {
if (current_info()->function()->is_generator()) {
Bailout(kFunctionIsAGenerator);
return false;
}
Scope* scope = current_info()->scope();
if (scope->HasIllegalRedeclaration()) {
Bailout(kFunctionWithIllegalRedeclaration);
return false;
}
if (scope->calls_eval()) {
Bailout(kFunctionCallsEval);
return false;
}
SetUpScope(scope);
// Add an edge to the body entry. This is warty: the graph's start
// environment will be used by the Lithium translation as the initial
// environment on graph entry, but it has now been mutated by the
// Hydrogen translation of the instructions in the start block. This
// environment uses values which have not been defined yet. These
// Hydrogen instructions will then be replayed by the Lithium
// translation, so they cannot have an environment effect. The edge to
// the body's entry block (along with some special logic for the start
// block in HInstruction::InsertAfter) seals the start block from
// getting unwanted instructions inserted.
//
// TODO(kmillikin): Fix this. Stop mutating the initial environment.
// Make the Hydrogen instructions in the initial block into Hydrogen
// values (but not instructions), present in the initial environment and
// not replayed by the Lithium translation.
HEnvironment* initial_env = environment()->CopyWithoutHistory();
HBasicBlock* body_entry = CreateBasicBlock(initial_env);
Goto(body_entry);
body_entry->SetJoinId(BailoutId::FunctionEntry());
set_current_block(body_entry);
// Handle implicit declaration of the function name in named function
// expressions before other declarations.
if (scope->is_function_scope() && scope->function() != NULL) {
VisitVariableDeclaration(scope->function());
}
VisitDeclarations(scope->declarations());
Add<HSimulate>(BailoutId::Declarations());
Add<HStackCheck>(HStackCheck::kFunctionEntry);
VisitStatements(current_info()->function()->body());
if (HasStackOverflow()) return false;
if (current_block() != NULL) {
Add<HReturn>(graph()->GetConstantUndefined());
set_current_block(NULL);
}
// If the checksum of the number of type info changes is the same as the
// last time this function was compiled, then this recompile is likely not
// due to missing/inadequate type feedback, but rather too aggressive
// optimization. Disable optimistic LICM in that case.
Handle<Code> unoptimized_code(current_info()->shared_info()->code());
ASSERT(unoptimized_code->kind() == Code::FUNCTION);
Handle<TypeFeedbackInfo> type_info(
TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
int checksum = type_info->own_type_change_checksum();
int composite_checksum = graph()->update_type_change_checksum(checksum);
graph()->set_use_optimistic_licm(
!type_info->matches_inlined_type_change_checksum(composite_checksum));
type_info->set_inlined_type_change_checksum(composite_checksum);
// Perform any necessary OSR-specific cleanups or changes to the graph.
osr()->FinishGraph();
return true;
}
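// Runs the Hydrogen optimization pipeline over the completed graph. Returns
// false (with *bailout_reason set) when unsupported phi uses of const
// variables or of the arguments object are found; most individual phases are
// gated by their corresponding flags.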
bool HGraph::Optimize(BailoutReason* bailout_reason) {
OrderBlocks();
AssignDominators();
  // We need to create an HConstant "zero" now so that GVN will fold every
  // zero-valued constant in the graph together.
  // The constant is needed to make idef-based bounds checking work: the pass
  // evaluates relations with "zero" and that zero cannot be created after
  // GVN.
GetConstant0();
#ifdef DEBUG
// Do a full verify after building the graph and computing dominators.
Verify(true);
#endif
if (FLAG_analyze_environment_liveness && maximum_environment_size() != 0) {
Run<HEnvironmentLivenessAnalysisPhase>();
}
if (!CheckConstPhiUses()) {
*bailout_reason = kUnsupportedPhiUseOfConstVariable;
return false;
}
Run<HRedundantPhiEliminationPhase>();
if (!CheckArgumentsPhiUses()) {
*bailout_reason = kUnsupportedPhiUseOfArguments;
return false;
}
// Find and mark unreachable code to simplify optimizations, especially gvn,
// where unreachable code could unnecessarily defeat LICM.
Run<HMarkUnreachableBlocksPhase>();
if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
if (FLAG_use_escape_analysis) Run<HEscapeAnalysisPhase>();
if (FLAG_load_elimination) Run<HLoadEliminationPhase>();
CollectPhis();
if (has_osr()) osr()->FinishOsrValues();
Run<HInferRepresentationPhase>();
// Remove HSimulate instructions that have turned out not to be needed
// after all by folding them into the following HSimulate.
// This must happen after inferring representations.
Run<HMergeRemovableSimulatesPhase>();
Run<HMarkDeoptimizeOnUndefinedPhase>();
Run<HRepresentationChangesPhase>();
Run<HInferTypesPhase>();
// Must be performed before canonicalization to ensure that Canonicalize
// will not remove semantically meaningful ToInt32 operations e.g. BIT_OR with
// zero.
if (FLAG_opt_safe_uint32_operations) Run<HUint32AnalysisPhase>();
if (FLAG_use_canonicalizing) Run<HCanonicalizePhase>();
if (FLAG_use_gvn) Run<HGlobalValueNumberingPhase>();
if (FLAG_check_elimination) Run<HCheckEliminationPhase>();
if (FLAG_store_elimination) Run<HStoreEliminationPhase>();
Run<HRangeAnalysisPhase>();
Run<HComputeChangeUndefinedToNaN>();
// Eliminate redundant stack checks on backwards branches.
Run<HStackCheckEliminationPhase>();
if (FLAG_array_bounds_checks_elimination) Run<HBoundsCheckEliminationPhase>();
if (FLAG_array_bounds_checks_hoisting) Run<HBoundsCheckHoistingPhase>();
if (FLAG_array_index_dehoisting) Run<HDehoistIndexComputationsPhase>();
if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
RestoreActualValues();
// Find unreachable code a second time, GVN and other optimizations may have
// made blocks unreachable that were previously reachable.
Run<HMarkUnreachableBlocksPhase>();
return true;
}
void HGraph::RestoreActualValues() {
HPhase phase("H_Restore actual values", this);
for (int block_index = 0; block_index < blocks()->length(); block_index++) {
HBasicBlock* block = blocks()->at(block_index);
#ifdef DEBUG
for (int i = 0; i < block->phis()->length(); i++) {
HPhi* phi = block->phis()->at(i);
ASSERT(phi->ActualValue() == phi);
}
#endif
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instruction = it.Current();
if (instruction->ActualValue() == instruction) continue;
if (instruction->CheckFlag(HValue::kIsDead)) {
// The instruction was marked as deleted but left in the graph
// as a control flow dependency point for subsequent
// instructions.
instruction->DeleteAndReplaceWith(instruction->ActualValue());
} else {
ASSERT(instruction->IsInformativeDefinition());
if (instruction->IsPurelyInformativeDefinition()) {
instruction->DeleteAndReplaceWith(instruction->RedefinedOperand());
} else {
instruction->ReplaceAllUsesWith(instruction->ActualValue());
}
}
}
}
}
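// Pops "count" values off the environment and re-pushes them as the inputs
// of a single HPushArguments instruction. Popping into a list and then
// removing from its end restores the original left-to-right argument order.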
void HOptimizedGraphBuilder::PushArgumentsFromEnvironment(int count) {
ZoneList<HValue*> arguments(count, zone());
for (int i = 0; i < count; ++i) {
arguments.Add(Pop(), zone());
}
HPushArguments* push_args = New<HPushArguments>();
while (!arguments.is_empty()) {
push_args->AddInput(arguments.RemoveLast());
}
AddInstruction(push_args);
}
template <class Instruction>
HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
PushArgumentsFromEnvironment(call->argument_count());
return call;
}
void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
// First special is HContext.
HInstruction* context = Add<HContext>();
environment()->BindContext(context);
  // Create an arguments object containing the initial parameters. Set the
  // initial values of parameters including "this", which has parameter
  // index 0.
ASSERT_EQ(scope->num_parameters() + 1, environment()->parameter_count());
HArgumentsObject* arguments_object =
New<HArgumentsObject>(environment()->parameter_count());
for (int i = 0; i < environment()->parameter_count(); ++i) {
HInstruction* parameter = Add<HParameter>(i);
arguments_object->AddArgument(parameter, zone());
environment()->Bind(i, parameter);
}
AddInstruction(arguments_object);
graph()->SetArgumentsObject(arguments_object);
HConstant* undefined_constant = graph()->GetConstantUndefined();
// Initialize specials and locals to undefined.
for (int i = environment()->parameter_count() + 1;
i < environment()->length();
++i) {
environment()->Bind(i, undefined_constant);
}
// Handle the arguments and arguments shadow variables specially (they do
// not have declarations).
if (scope->arguments() != NULL) {
if (!scope->arguments()->IsStackAllocated()) {
return Bailout(kContextAllocatedArguments);
}
environment()->Bind(scope->arguments(),
graph()->GetArgumentsObject());
}
}
void HOptimizedGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
for (int i = 0; i < statements->length(); i++) {
Statement* stmt = statements->at(i);
CHECK_ALIVE(Visit(stmt));
if (stmt->IsJump()) break;
}
}
void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
Scope* outer_scope = scope();
Scope* scope = stmt->scope();
BreakAndContinueInfo break_info(stmt, outer_scope);
{ BreakAndContinueScope push(&break_info, this);
if (scope != NULL) {
// Load the function object.
Scope* declaration_scope = scope->DeclarationScope();
HInstruction* function;
HValue* outer_context = environment()->context();
if (declaration_scope->is_global_scope() ||
declaration_scope->is_eval_scope()) {
function = new(zone()) HLoadContextSlot(
outer_context, Context::CLOSURE_INDEX, HLoadContextSlot::kNoCheck);
} else {
function = New<HThisFunction>();
}
AddInstruction(function);
// Allocate a block context and store it to the stack frame.
HInstruction* inner_context = Add<HAllocateBlockContext>(
outer_context, function, scope->GetScopeInfo());
HInstruction* instr = Add<HStoreFrameContext>(inner_context);
if (instr->HasObservableSideEffects()) {
AddSimulate(stmt->EntryId(), REMOVABLE_SIMULATE);
}
set_scope(scope);
environment()->BindContext(inner_context);
VisitDeclarations(scope->declarations());
AddSimulate(stmt->DeclsId(), REMOVABLE_SIMULATE);
}
CHECK_BAILOUT(VisitStatements(stmt->statements()));
}
set_scope(outer_scope);
if (scope != NULL && current_block() != NULL) {
HValue* inner_context = environment()->context();
HValue* outer_context = Add<HLoadNamedField>(
inner_context, static_cast<HValue*>(NULL),
HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
HInstruction* instr = Add<HStoreFrameContext>(outer_context);
if (instr->HasObservableSideEffects()) {
AddSimulate(stmt->ExitId(), REMOVABLE_SIMULATE);
}
environment()->BindContext(outer_context);
}
HBasicBlock* break_block = break_info.break_block();
if (break_block != NULL) {
if (current_block() != NULL) Goto(break_block);
break_block->SetJoinId(stmt->ExitId());
set_current_block(break_block);
}
}
void HOptimizedGraphBuilder::VisitExpressionStatement(
ExpressionStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
VisitForEffect(stmt->expression());
}
void HOptimizedGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
void HOptimizedGraphBuilder::VisitIfStatement(IfStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
if (stmt->condition()->ToBooleanIsTrue()) {
Add<HSimulate>(stmt->ThenId());
Visit(stmt->then_statement());
} else if (stmt->condition()->ToBooleanIsFalse()) {
Add<HSimulate>(stmt->ElseId());
Visit(stmt->else_statement());
} else {
HBasicBlock* cond_true = graph()->CreateBasicBlock();
HBasicBlock* cond_false = graph()->CreateBasicBlock();
CHECK_BAILOUT(VisitForControl(stmt->condition(), cond_true, cond_false));
if (cond_true->HasPredecessor()) {
cond_true->SetJoinId(stmt->ThenId());
set_current_block(cond_true);
CHECK_BAILOUT(Visit(stmt->then_statement()));
cond_true = current_block();
} else {
cond_true = NULL;
}
if (cond_false->HasPredecessor()) {
cond_false->SetJoinId(stmt->ElseId());
set_current_block(cond_false);
CHECK_BAILOUT(Visit(stmt->else_statement()));
cond_false = current_block();
} else {
cond_false = NULL;
}
HBasicBlock* join = CreateJoin(cond_true, cond_false, stmt->IfId());
set_current_block(join);
}
}
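// Walks the chain of enclosing break/continue scopes to find the target
// statement, accumulating how many values must be dropped from the
// environment on the way out, and lazily creates the break or continue
// join block.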
HBasicBlock* HOptimizedGraphBuilder::BreakAndContinueScope::Get(
BreakableStatement* stmt,
BreakType type,
Scope** scope,
int* drop_extra) {
*drop_extra = 0;
BreakAndContinueScope* current = this;
while (current != NULL && current->info()->target() != stmt) {
*drop_extra += current->info()->drop_extra();
current = current->next();
}
ASSERT(current != NULL); // Always found (unless stack is malformed).
*scope = current->info()->scope();
if (type == BREAK) {
*drop_extra += current->info()->drop_extra();
}
HBasicBlock* block = NULL;
switch (type) {
case BREAK:
block = current->info()->break_block();
if (block == NULL) {
block = current->owner()->graph()->CreateBasicBlock();
current->info()->set_break_block(block);
}
break;
case CONTINUE:
block = current->info()->continue_block();
if (block == NULL) {
block = current->owner()->graph()->CreateBasicBlock();
current->info()->set_continue_block(block);
}
break;
}
return block;
}
void HOptimizedGraphBuilder::VisitContinueStatement(
ContinueStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
Scope* outer_scope = NULL;
Scope* inner_scope = scope();
int drop_extra = 0;
HBasicBlock* continue_block = break_scope()->Get(
stmt->target(), BreakAndContinueScope::CONTINUE,
&outer_scope, &drop_extra);
HValue* context = environment()->context();
Drop(drop_extra);
int context_pop_count = inner_scope->ContextChainLength(outer_scope);
if (context_pop_count > 0) {
while (context_pop_count-- > 0) {
HInstruction* context_instruction = Add<HLoadNamedField>(
context, static_cast<HValue*>(NULL),
HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
context = context_instruction;
}
HInstruction* instr = Add<HStoreFrameContext>(context);
if (instr->HasObservableSideEffects()) {
AddSimulate(stmt->target()->EntryId(), REMOVABLE_SIMULATE);
}
environment()->BindContext(context);
}
Goto(continue_block);
set_current_block(NULL);
}
void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
Scope* outer_scope = NULL;
Scope* inner_scope = scope();
int drop_extra = 0;
HBasicBlock* break_block = break_scope()->Get(
stmt->target(), BreakAndContinueScope::BREAK,
&outer_scope, &drop_extra);
HValue* context = environment()->context();
Drop(drop_extra);
int context_pop_count = inner_scope->ContextChainLength(outer_scope);
if (context_pop_count > 0) {
while (context_pop_count-- > 0) {
HInstruction* context_instruction = Add<HLoadNamedField>(
context, static_cast<HValue*>(NULL),
HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
context = context_instruction;
}
HInstruction* instr = Add<HStoreFrameContext>(context);
if (instr->HasObservableSideEffects()) {
AddSimulate(stmt->target()->ExitId(), REMOVABLE_SIMULATE);
}
environment()->BindContext(context);
}
Goto(break_block);
set_current_block(NULL);
}
void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
FunctionState* state = function_state();
AstContext* context = call_context();
if (context == NULL) {
// Not an inlined return, so an actual one.
CHECK_ALIVE(VisitForValue(stmt->expression()));
HValue* result = environment()->Pop();
Add<HReturn>(result);
} else if (state->inlining_kind() == CONSTRUCT_CALL_RETURN) {
    // Return from an inlined construct call. In a test context the return
    // value will always evaluate to true; in a value context the return
    // value needs to be a JSObject.
if (context->IsTest()) {
TestContext* test = TestContext::cast(context);
CHECK_ALIVE(VisitForEffect(stmt->expression()));
Goto(test->if_true(), state);
} else if (context->IsEffect()) {
CHECK_ALIVE(VisitForEffect(stmt->expression()));
Goto(function_return(), state);
} else {
ASSERT(context->IsValue());
CHECK_ALIVE(VisitForValue(stmt->expression()));
HValue* return_value = Pop();
HValue* receiver = environment()->arguments_environment()->Lookup(0);
HHasInstanceTypeAndBranch* typecheck =
New<HHasInstanceTypeAndBranch>(return_value,
FIRST_SPEC_OBJECT_TYPE,
LAST_SPEC_OBJECT_TYPE);
HBasicBlock* if_spec_object = graph()->CreateBasicBlock();
HBasicBlock* not_spec_object = graph()->CreateBasicBlock();
typecheck->SetSuccessorAt(0, if_spec_object);
typecheck->SetSuccessorAt(1, not_spec_object);
FinishCurrentBlock(typecheck);
AddLeaveInlined(if_spec_object, return_value, state);
AddLeaveInlined(not_spec_object, receiver, state);
}
} else if (state->inlining_kind() == SETTER_CALL_RETURN) {
    // Return from an inlined setter call. The returned value is never used;
    // the value of an assignment is always the value of the RHS of the
    // assignment.
CHECK_ALIVE(VisitForEffect(stmt->expression()));
if (context->IsTest()) {
HValue* rhs = environment()->arguments_environment()->Lookup(1);
context->ReturnValue(rhs);
} else if (context->IsEffect()) {
Goto(function_return(), state);
} else {
ASSERT(context->IsValue());
HValue* rhs = environment()->arguments_environment()->Lookup(1);
AddLeaveInlined(rhs, state);
}
} else {
// Return from a normal inlined function. Visit the subexpression in the
// expression context of the call.
if (context->IsTest()) {
TestContext* test = TestContext::cast(context);
VisitForControl(stmt->expression(), test->if_true(), test->if_false());
} else if (context->IsEffect()) {
// Visit in value context and ignore the result. This is needed to keep
// environment in sync with full-codegen since some visitors (e.g.
// VisitCountOperation) use the operand stack differently depending on
// context.
CHECK_ALIVE(VisitForValue(stmt->expression()));
Pop();
Goto(function_return(), state);
} else {
ASSERT(context->IsValue());
CHECK_ALIVE(VisitForValue(stmt->expression()));
AddLeaveInlined(Pop(), state);
}
}
set_current_block(NULL);
}
void HOptimizedGraphBuilder::VisitWithStatement(WithStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
return Bailout(kWithStatement);
}
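// Switch statements are built in two passes. Pass one emits a chain of
// strict-equality compare-and-branch tests against the tag, one per
// non-default clause, each false edge feeding the next test. Pass two
// translates the clause bodies in order, joining each body's fall-through
// edge with the next clause's entry, so that e.g.
//   switch (x) { case 1: a(); case 2: b(); break; default: c(); }
// falls through from a() into b() as required.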
void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
// We only optimize switch statements with a bounded number of clauses.
const int kCaseClauseLimit = 128;
ZoneList<CaseClause*>* clauses = stmt->cases();
int clause_count = clauses->length();
ZoneList<HBasicBlock*> body_blocks(clause_count, zone());
if (clause_count > kCaseClauseLimit) {
return Bailout(kSwitchStatementTooManyClauses);
}
CHECK_ALIVE(VisitForValue(stmt->tag()));
Add<HSimulate>(stmt->EntryId());
HValue* tag_value = Top();
Type* tag_type = stmt->tag()->bounds().lower;
// 1. Build all the tests, with dangling true branches
BailoutId default_id = BailoutId::None();
for (int i = 0; i < clause_count; ++i) {
CaseClause* clause = clauses->at(i);
if (clause->is_default()) {
body_blocks.Add(NULL, zone());
if (default_id.IsNone()) default_id = clause->EntryId();
continue;
}
// Generate a compare and branch.
CHECK_ALIVE(VisitForValue(clause->label()));
HValue* label_value = Pop();
Type* label_type = clause->label()->bounds().lower;
Type* combined_type = clause->compare_type();
HControlInstruction* compare = BuildCompareInstruction(
Token::EQ_STRICT, tag_value, label_value, tag_type, label_type,
combined_type,
ScriptPositionToSourcePosition(stmt->tag()->position()),
ScriptPositionToSourcePosition(clause->label()->position()),
PUSH_BEFORE_SIMULATE, clause->id());
HBasicBlock* next_test_block = graph()->CreateBasicBlock();
HBasicBlock* body_block = graph()->CreateBasicBlock();
body_blocks.Add(body_block, zone());
compare->SetSuccessorAt(0, body_block);
compare->SetSuccessorAt(1, next_test_block);
FinishCurrentBlock(compare);
set_current_block(body_block);
Drop(1); // tag_value
set_current_block(next_test_block);
}
// Save the current block to use for the default or to join with the
// exit.
HBasicBlock* last_block = current_block();
Drop(1); // tag_value
// 2. Loop over the clauses and the linked list of tests in lockstep,
// translating the clause bodies.
HBasicBlock* fall_through_block = NULL;
BreakAndContinueInfo break_info(stmt, scope());
{ BreakAndContinueScope push(&break_info, this);
for (int i = 0; i < clause_count; ++i) {
CaseClause* clause = clauses->at(i);
// Identify the block where normal (non-fall-through) control flow
// goes to.
HBasicBlock* normal_block = NULL;
if (clause->is_default()) {
if (last_block == NULL) continue;
normal_block = last_block;
last_block = NULL; // Cleared to indicate we've handled it.
} else {
normal_block = body_blocks[i];
}
if (fall_through_block == NULL) {
set_current_block(normal_block);
} else {
HBasicBlock* join = CreateJoin(fall_through_block,
normal_block,
clause->EntryId());
set_current_block(join);
}
CHECK_BAILOUT(VisitStatements(clause->statements()));
fall_through_block = current_block();
}
}
// Create an up-to-3-way join. Use the break block if it exists since
// it's already a join block.
HBasicBlock* break_block = break_info.break_block();
if (break_block == NULL) {
set_current_block(CreateJoin(fall_through_block,
last_block,
stmt->ExitId()));
} else {
if (fall_through_block != NULL) Goto(fall_through_block, break_block);
if (last_block != NULL) Goto(last_block, break_block);
break_block->SetJoinId(stmt->ExitId());
set_current_block(break_block);
}
}
void HOptimizedGraphBuilder::VisitLoopBody(IterationStatement* stmt,
HBasicBlock* loop_entry) {
Add<HSimulate>(stmt->StackCheckId());
HStackCheck* stack_check =
HStackCheck::cast(Add<HStackCheck>(HStackCheck::kBackwardsBranch));
ASSERT(loop_entry->IsLoopHeader());
loop_entry->loop_information()->set_stack_check(stack_check);
CHECK_BAILOUT(Visit(stmt->body()));
}
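// The loop visitors below share one shape: BuildLoopEntry creates the loop
// header, VisitLoopBody (above) adds the backwards-branch stack check and
// compiles the body, JoinContinue merges the normal body exit with any
// 'continue' edges, and CreateLoop closes the back edge and produces the
// loop exit, merging in any 'break' edges.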
void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
HBasicBlock* loop_entry = BuildLoopEntry(stmt);
BreakAndContinueInfo break_info(stmt, scope());
{
BreakAndContinueScope push(&break_info, this);
CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry));
}
HBasicBlock* body_exit =
JoinContinue(stmt, current_block(), break_info.continue_block());
HBasicBlock* loop_successor = NULL;
if (body_exit != NULL && !stmt->cond()->ToBooleanIsTrue()) {
set_current_block(body_exit);
loop_successor = graph()->CreateBasicBlock();
if (stmt->cond()->ToBooleanIsFalse()) {
loop_entry->loop_information()->stack_check()->Eliminate();
Goto(loop_successor);
body_exit = NULL;
} else {
// The block for a true condition, the actual predecessor block of the
// back edge.
body_exit = graph()->CreateBasicBlock();
CHECK_BAILOUT(VisitForControl(stmt->cond(), body_exit, loop_successor));
}
if (body_exit != NULL && body_exit->HasPredecessor()) {
body_exit->SetJoinId(stmt->BackEdgeId());
} else {
body_exit = NULL;
}
if (loop_successor->HasPredecessor()) {
loop_successor->SetJoinId(stmt->ExitId());
} else {
loop_successor = NULL;
}
}
HBasicBlock* loop_exit = CreateLoop(stmt,
loop_entry,
body_exit,
loop_successor,
break_info.break_block());
set_current_block(loop_exit);
}
void HOptimizedGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
HBasicBlock* loop_entry = BuildLoopEntry(stmt);
// If the condition is constant true, do not generate a branch.
HBasicBlock* loop_successor = NULL;
if (!stmt->cond()->ToBooleanIsTrue()) {
HBasicBlock* body_entry = graph()->CreateBasicBlock();
loop_successor = graph()->CreateBasicBlock();
CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
if (body_entry->HasPredecessor()) {
body_entry->SetJoinId(stmt->BodyId());
set_current_block(body_entry);
}
if (loop_successor->HasPredecessor()) {
loop_successor->SetJoinId(stmt->ExitId());
} else {
loop_successor = NULL;
}
}
BreakAndContinueInfo break_info(stmt, scope());
if (current_block() != NULL) {
BreakAndContinueScope push(&break_info, this);
CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry));
}
HBasicBlock* body_exit =
JoinContinue(stmt, current_block(), break_info.continue_block());
HBasicBlock* loop_exit = CreateLoop(stmt,
loop_entry,
body_exit,
loop_successor,
break_info.break_block());
set_current_block(loop_exit);
}
void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
if (stmt->init() != NULL) {
CHECK_ALIVE(Visit(stmt->init()));
}
ASSERT(current_block() != NULL);
HBasicBlock* loop_entry = BuildLoopEntry(stmt);
HBasicBlock* loop_successor = NULL;
if (stmt->cond() != NULL) {
HBasicBlock* body_entry = graph()->CreateBasicBlock();
loop_successor = graph()->CreateBasicBlock();
CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
if (body_entry->HasPredecessor()) {
body_entry->SetJoinId(stmt->BodyId());
set_current_block(body_entry);
}
if (loop_successor->HasPredecessor()) {
loop_successor->SetJoinId(stmt->ExitId());
} else {
loop_successor = NULL;
}
}
BreakAndContinueInfo break_info(stmt, scope());
if (current_block() != NULL) {
BreakAndContinueScope push(&break_info, this);
CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry));
}
HBasicBlock* body_exit =
JoinContinue(stmt, current_block(), break_info.continue_block());
if (stmt->next() != NULL && body_exit != NULL) {
set_current_block(body_exit);
CHECK_BAILOUT(Visit(stmt->next()));
body_exit = current_block();
}
HBasicBlock* loop_exit = CreateLoop(stmt,
loop_entry,
body_exit,
loop_successor,
break_info.break_block());
set_current_block(loop_exit);
}
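// Fast for-in lowering, e.g. for (var k in o) { ... } with k a stack-local.
// Only the FAST_FOR_IN feedback case is handled; everything else bails out.
// The expression stack holds, from bottom to top: the enumerable, its map,
// the enum cache array, the enum length (the loop limit), and the iteration
// index, so ExpressionStackAt(0) is the index and ExpressionStackAt(4) the
// enumerable. Breaking out of the loop must drop all five values, hence the
// drop_extra of 5 passed to BreakAndContinueInfo below. Every iteration
// reloads the next key from the enum cache and re-checks the enumerable's
// map, deoptimizing if it changed.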
void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
if (!FLAG_optimize_for_in) {
return Bailout(kForInStatementOptimizationIsDisabled);
}
if (stmt->for_in_type() != ForInStatement::FAST_FOR_IN) {
return Bailout(kForInStatementIsNotFastCase);
}
if (!stmt->each()->IsVariableProxy() ||
!stmt->each()->AsVariableProxy()->var()->IsStackLocal()) {
return Bailout(kForInStatementWithNonLocalEachVariable);
}
Variable* each_var = stmt->each()->AsVariableProxy()->var();
CHECK_ALIVE(VisitForValue(stmt->enumerable()));
HValue* enumerable = Top(); // Leave enumerable at the top.
HInstruction* map = Add<HForInPrepareMap>(enumerable);
Add<HSimulate>(stmt->PrepareId());
HInstruction* array = Add<HForInCacheArray>(
enumerable, map, DescriptorArray::kEnumCacheBridgeCacheIndex);
HInstruction* enum_length = Add<HMapEnumLength>(map);
HInstruction* start_index = Add<HConstant>(0);
Push(map);
Push(array);
Push(enum_length);
Push(start_index);
HInstruction* index_cache = Add<HForInCacheArray>(
enumerable, map, DescriptorArray::kEnumCacheBridgeIndicesCacheIndex);
HForInCacheArray::cast(array)->set_index_cache(
HForInCacheArray::cast(index_cache));
HBasicBlock* loop_entry = BuildLoopEntry(stmt);
HValue* index = environment()->ExpressionStackAt(0);
HValue* limit = environment()->ExpressionStackAt(1);
// Check that we still have more keys.
HCompareNumericAndBranch* compare_index =
New<HCompareNumericAndBranch>(index, limit, Token::LT);
compare_index->set_observed_input_representation(
Representation::Smi(), Representation::Smi());
HBasicBlock* loop_body = graph()->CreateBasicBlock();
HBasicBlock* loop_successor = graph()->CreateBasicBlock();
compare_index->SetSuccessorAt(0, loop_body);
compare_index->SetSuccessorAt(1, loop_successor);
FinishCurrentBlock(compare_index);
set_current_block(loop_successor);
Drop(5);
set_current_block(loop_body);
  HValue* key = Add<HLoadKeyed>(
      environment()->ExpressionStackAt(2),  // Enum cache.
      environment()->ExpressionStackAt(0),  // Iteration index.
      environment()->ExpressionStackAt(0),  // Load dependency.
      FAST_ELEMENTS);
  // Check if the expected map still matches that of the enumerable.
  // If not, just deoptimize.
Add<HCheckMapValue>(environment()->ExpressionStackAt(4),
environment()->ExpressionStackAt(3));
Bind(each_var, key);
BreakAndContinueInfo break_info(stmt, scope(), 5);
{
BreakAndContinueScope push(&break_info, this);
CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry));
}
HBasicBlock* body_exit =
JoinContinue(stmt, current_block(), break_info.continue_block());
if (body_exit != NULL) {
set_current_block(body_exit);
HValue* current_index = Pop();
Push(AddUncasted<HAdd>(current_index, graph()->GetConstant1()));
body_exit = current_block();
}
HBasicBlock* loop_exit = CreateLoop(stmt,
loop_entry,
body_exit,
loop_successor,
break_info.break_block());
set_current_block(loop_exit);
}
void HOptimizedGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
return Bailout(kForOfStatement);
}
void HOptimizedGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
return Bailout(kTryCatchStatement);
}
void HOptimizedGraphBuilder::VisitTryFinallyStatement(
TryFinallyStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
return Bailout(kTryFinallyStatement);
}
void HOptimizedGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
return Bailout(kDebuggerStatement);
}
void HOptimizedGraphBuilder::VisitCaseClause(CaseClause* clause) {
UNREACHABLE();
}
void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
Handle<SharedFunctionInfo> shared_info = expr->shared_info();
if (shared_info.is_null()) {
shared_info = Compiler::BuildFunctionInfo(expr, current_info()->script());
}
  // The recursive compilation may itself have overflowed the stack; if so,
  // bail out here as well.
if (HasStackOverflow()) return;
HFunctionLiteral* instr =
New<HFunctionLiteral>(shared_info, expr->pretenure());
return ast_context()->ReturnInstruction(instr, expr->id());
}
void HOptimizedGraphBuilder::VisitNativeFunctionLiteral(
NativeFunctionLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
return Bailout(kNativeFunctionLiteral);
}
void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
HBasicBlock* cond_true = graph()->CreateBasicBlock();
HBasicBlock* cond_false = graph()->CreateBasicBlock();
CHECK_BAILOUT(VisitForControl(expr->condition(), cond_true, cond_false));
// Visit the true and false subexpressions in the same AST context as the
// whole expression.
if (cond_true->HasPredecessor()) {
cond_true->SetJoinId(expr->ThenId());
set_current_block(cond_true);
CHECK_BAILOUT(Visit(expr->then_expression()));
cond_true = current_block();
} else {
cond_true = NULL;
}
if (cond_false->HasPredecessor()) {
cond_false->SetJoinId(expr->ElseId());
set_current_block(cond_false);
CHECK_BAILOUT(Visit(expr->else_expression()));
cond_false = current_block();
} else {
cond_false = NULL;
}
if (!ast_context()->IsTest()) {
HBasicBlock* join = CreateJoin(cond_true, cond_false, expr->id());
set_current_block(join);
if (join != NULL && !ast_context()->IsEffect()) {
return ast_context()->ReturnValue(Pop());
}
}
}
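// Decides how a global variable access is compiled. kUseCell means the
// variable was found as a normal property on the global object itself (and,
// for stores, is not read-only), so it can go straight through its
// PropertyCell; everything else, including 'this' and compilations without
// a known global object, uses a generic IC (kUseGeneric).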
HOptimizedGraphBuilder::GlobalPropertyAccess
HOptimizedGraphBuilder::LookupGlobalProperty(
Variable* var, LookupResult* lookup, PropertyAccessType access_type) {
if (var->is_this() || !current_info()->has_global_object()) {
return kUseGeneric;
}
Handle<GlobalObject> global(current_info()->global_object());
global->Lookup(var->name(), lookup);
if (!lookup->IsNormal() ||
(access_type == STORE && lookup->IsReadOnly()) ||
lookup->holder() != *global) {
return kUseGeneric;
}
return kUseCell;
}
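// Emits the loads that walk from the current context up to the context
// holding 'var', following Context::PREVIOUS_INDEX once per scope level,
// e.g. two hops for a variable captured two function scopes out.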
HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
ASSERT(var->IsContextSlot());
HValue* context = environment()->context();
int length = scope()->ContextChainLength(var->scope());
while (length-- > 0) {
context = Add<HLoadNamedField>(
context, static_cast<HValue*>(NULL),
HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
}
return context;
}
void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
if (expr->is_this()) {
current_info()->set_this_has_uses(true);
}
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
Variable* variable = expr->var();
switch (variable->location()) {
case Variable::UNALLOCATED: {
if (IsLexicalVariableMode(variable->mode())) {
// TODO(rossberg): should this be an ASSERT?
return Bailout(kReferenceToGlobalLexicalVariable);
}
// Handle known global constants like 'undefined' specially to avoid a
// load from a global cell for them.
Handle<Object> constant_value =
isolate()->factory()->GlobalConstantFor(variable->name());
if (!constant_value.is_null()) {
HConstant* instr = New<HConstant>(constant_value);
return ast_context()->ReturnInstruction(instr, expr->id());
}
LookupResult lookup(isolate());
GlobalPropertyAccess type = LookupGlobalProperty(variable, &lookup, LOAD);
if (type == kUseCell &&
current_info()->global_object()->IsAccessCheckNeeded()) {
type = kUseGeneric;
}
if (type == kUseCell) {
Handle<GlobalObject> global(current_info()->global_object());
Handle<PropertyCell> cell(global->GetPropertyCell(&lookup));
if (cell->type()->IsConstant()) {
PropertyCell::AddDependentCompilationInfo(cell, top_info());
Handle<Object> constant_object = cell->type()->AsConstant()->Value();
if (constant_object->IsConsString()) {
constant_object =
String::Flatten(Handle<String>::cast(constant_object));
}
HConstant* constant = New<HConstant>(constant_object);
return ast_context()->ReturnInstruction(constant, expr->id());
} else {
HLoadGlobalCell* instr =
New<HLoadGlobalCell>(cell, lookup.GetPropertyDetails());
return ast_context()->ReturnInstruction(instr, expr->id());
}
} else {
HValue* global_object = Add<HLoadNamedField>(
context(), static_cast<HValue*>(NULL),
HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
HLoadGlobalGeneric* instr =
New<HLoadGlobalGeneric>(global_object,
variable->name(),
ast_context()->is_for_typeof());
return ast_context()->ReturnInstruction(instr, expr->id());
}
}
case Variable::PARAMETER:
case Variable::LOCAL: {
HValue* value = LookupAndMakeLive(variable);
if (value == graph()->GetConstantHole()) {
ASSERT(IsDeclaredVariableMode(variable->mode()) &&
variable->mode() != VAR);
return Bailout(kReferenceToUninitializedVariable);
}
return ast_context()->ReturnValue(value);
}
case Variable::CONTEXT: {
HValue* context = BuildContextChainWalk(variable);
HLoadContextSlot::Mode mode;
switch (variable->mode()) {
case LET:
case CONST:
mode = HLoadContextSlot::kCheckDeoptimize;
break;
case CONST_LEGACY:
mode = HLoadContextSlot::kCheckReturnUndefined;
break;
default:
mode = HLoadContextSlot::kNoCheck;
break;
}
HLoadContextSlot* instr =
new(zone()) HLoadContextSlot(context, variable->index(), mode);
return ast_context()->ReturnInstruction(instr, expr->id());
}
case Variable::LOOKUP:
return Bailout(kReferenceToAVariableWhichRequiresDynamicLookup);
}
}
void HOptimizedGraphBuilder::VisitLiteral(Literal* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
HConstant* instr = New<HConstant>(expr->value());
return ast_context()->ReturnInstruction(instr, expr->id());
}
void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
Handle<JSFunction> closure = function_state()->compilation_info()->closure();
Handle<FixedArray> literals(closure->literals());
HRegExpLiteral* instr = New<HRegExpLiteral>(literals,
expr->pattern(),
expr->flags(),
expr->literal_index());
return ast_context()->ReturnInstruction(instr, expr->id());
}
static bool CanInlinePropertyAccess(Type* type) {
if (type->Is(Type::NumberOrString())) return true;
if (!type->IsClass()) return false;
Handle<Map> map = type->AsClass()->Map();
return map->IsJSObjectMap() &&
!map->is_dictionary_map() &&
!map->has_named_interceptor();
}
// Determines whether the given array or object literal boilerplate satisfies
// all limits to be considered for fast deep-copying, i.e. it stays within the
// given depth limit and within the *max_properties budget (which is
// decremented as properties and elements are visited).
static bool IsFastLiteral(Handle<JSObject> boilerplate,
int max_depth,
int* max_properties) {
if (boilerplate->map()->is_deprecated() &&
!JSObject::TryMigrateInstance(boilerplate)) {
return false;
}
ASSERT(max_depth >= 0 && *max_properties >= 0);
if (max_depth == 0) return false;
Isolate* isolate = boilerplate->GetIsolate();
Handle<FixedArrayBase> elements(boilerplate->elements());
if (elements->length() > 0 &&
elements->map() != isolate->heap()->fixed_cow_array_map()) {
if (boilerplate->HasFastObjectElements()) {
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
int length = elements->length();
for (int i = 0; i < length; i++) {
if ((*max_properties)-- == 0) return false;
Handle<Object> value(fast_elements->get(i), isolate);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
if (!IsFastLiteral(value_object,
max_depth - 1,
max_properties)) {
return false;
}
}
}
} else if (!boilerplate->HasFastDoubleElements()) {
return false;
}
}
Handle<FixedArray> properties(boilerplate->properties());
if (properties->length() > 0) {
return false;
} else {
Handle<DescriptorArray> descriptors(
boilerplate->map()->instance_descriptors());
int limit = boilerplate->map()->NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.type() != FIELD) continue;
int index = descriptors->GetFieldIndex(i);
if ((*max_properties)-- == 0) return false;
Handle<Object> value(boilerplate->InObjectPropertyAt(index), isolate);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
if (!IsFastLiteral(value_object,
max_depth - 1,
max_properties)) {
return false;
}
}
}
}
return true;
}
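// Object literals take one of two paths: if the allocation site already has
// a boilerplate and IsFastLiteral approves its depth and property count, the
// boilerplate is deep-copied inline by BuildFastLiteral; otherwise the
// literal is built by a runtime call. Either way, properties that are not
// compile-time values are then stored one at a time, monomorphically where
// the receiver map is known.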
void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
expr->BuildConstantProperties(isolate());
Handle<JSFunction> closure = function_state()->compilation_info()->closure();
HInstruction* literal;
// Check whether to use fast or slow deep-copying for boilerplate.
int max_properties = kMaxFastLiteralProperties;
Handle<Object> literals_cell(closure->literals()->get(expr->literal_index()),
isolate());
Handle<AllocationSite> site;
Handle<JSObject> boilerplate;
if (!literals_cell->IsUndefined()) {
    // Retrieve the boilerplate from the allocation site.
site = Handle<AllocationSite>::cast(literals_cell);
boilerplate = Handle<JSObject>(JSObject::cast(site->transition_info()),
isolate());
}
if (!boilerplate.is_null() &&
IsFastLiteral(boilerplate, kMaxFastLiteralDepth, &max_properties)) {
AllocationSiteUsageContext usage_context(isolate(), site, false);
usage_context.EnterNewScope();
literal = BuildFastLiteral(boilerplate, &usage_context);
usage_context.ExitScope(site, boilerplate);
} else {
NoObservableSideEffectsScope no_effects(this);
Handle<FixedArray> closure_literals(closure->literals(), isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
int literal_index = expr->literal_index();
int flags = expr->fast_elements()
? ObjectLiteral::kFastElements : ObjectLiteral::kNoFlags;
flags |= expr->has_function()
? ObjectLiteral::kHasFunction : ObjectLiteral::kNoFlags;
Add<HPushArguments>(Add<HConstant>(closure_literals),
Add<HConstant>(literal_index),
Add<HConstant>(constant_properties),
Add<HConstant>(flags));
// TODO(mvstanton): Add a flag to turn off creation of any
// AllocationMementos for this call: we are in crankshaft and should have
// learned enough about transition behavior to stop emitting mementos.
Runtime::FunctionId function_id = Runtime::kHiddenCreateObjectLiteral;
literal = Add<HCallRuntime>(isolate()->factory()->empty_string(),
Runtime::FunctionForId(function_id),
4);
}
// The object is expected in the bailout environment during computation
// of the property values and is the value of the entire expression.
Push(literal);
expr->CalculateEmitStore(zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
Literal* key = property->key();
Expression* value = property->value();
switch (property->kind()) {
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
CHECK_ALIVE(VisitForValue(value));
HValue* value = Pop();
Handle<Map> map = property->GetReceiverType();
Handle<String> name = property->key()->AsPropertyName();
HInstruction* store;
if (map.is_null()) {
// If we don't know the monomorphic type, do a generic store.
CHECK_ALIVE(store = BuildNamedGeneric(
STORE, literal, name, value));
} else {
PropertyAccessInfo info(this, STORE, ToType(map), name);
if (info.CanAccessMonomorphic()) {
HValue* checked_literal = Add<HCheckMaps>(literal, map);
ASSERT(!info.lookup()->IsPropertyCallbacks());
store = BuildMonomorphicAccess(
&info, literal, checked_literal, value,
BailoutId::None(), BailoutId::None());
} else {
CHECK_ALIVE(store = BuildNamedGeneric(
STORE, literal, name, value));
}
}
AddInstruction(store);
if (store->HasObservableSideEffects()) {
Add<HSimulate>(key->id(), REMOVABLE_SIMULATE);
}
} else {
CHECK_ALIVE(VisitForEffect(value));
}
break;
}
// Fall through.
case ObjectLiteral::Property::PROTOTYPE:
case ObjectLiteral::Property::SETTER:
case ObjectLiteral::Property::GETTER:
return Bailout(kObjectLiteralWithComplexProperty);
default: UNREACHABLE();
}
}
if (expr->has_function()) {
// Return the result of the transformation to fast properties
// instead of the original since this operation changes the map
// of the object. This makes sure that the original object won't
// be used by other optimized code before it is transformed
// (e.g. because of code motion).
HToFastProperties* result = Add<HToFastProperties>(Pop());
return ast_context()->ReturnValue(result);
} else {
return ast_context()->ReturnValue(Pop());
}
}
void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
expr->BuildConstantElements(isolate());
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
HInstruction* literal;
Handle<AllocationSite> site;
Handle<FixedArray> literals(environment()->closure()->literals(), isolate());
bool uninitialized = false;
Handle<Object> literals_cell(literals->get(expr->literal_index()),
isolate());
Handle<JSObject> boilerplate_object;
if (literals_cell->IsUndefined()) {
uninitialized = true;
Handle<Object> raw_boilerplate;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate(), raw_boilerplate,
Runtime::CreateArrayLiteralBoilerplate(
isolate(), literals, expr->constant_elements()),
Bailout(kArrayBoilerplateCreationFailed));
boilerplate_object = Handle<JSObject>::cast(raw_boilerplate);
AllocationSiteCreationContext creation_context(isolate());
site = creation_context.EnterNewScope();
if (JSObject::DeepWalk(boilerplate_object, &creation_context).is_null()) {
return Bailout(kArrayBoilerplateCreationFailed);
}
creation_context.ExitScope(site, boilerplate_object);
literals->set(expr->literal_index(), *site);
if (boilerplate_object->elements()->map() ==
isolate()->heap()->fixed_cow_array_map()) {
isolate()->counters()->cow_arrays_created_runtime()->Increment();
}
} else {
ASSERT(literals_cell->IsAllocationSite());
site = Handle<AllocationSite>::cast(literals_cell);
boilerplate_object = Handle<JSObject>(
JSObject::cast(site->transition_info()), isolate());
}
ASSERT(!boilerplate_object.is_null());
ASSERT(site->SitePointsToLiteral());
ElementsKind boilerplate_elements_kind =
boilerplate_object->GetElementsKind();
// Check whether to use fast or slow deep-copying for boilerplate.
int max_properties = kMaxFastLiteralProperties;
if (IsFastLiteral(boilerplate_object,
kMaxFastLiteralDepth,
&max_properties)) {
AllocationSiteUsageContext usage_context(isolate(), site, false);
usage_context.EnterNewScope();
literal = BuildFastLiteral(boilerplate_object, &usage_context);
usage_context.ExitScope(site, boilerplate_object);
} else {
NoObservableSideEffectsScope no_effects(this);
    // The boilerplate already exists and the constant elements are never
    // accessed, so pass an empty fixed array to the runtime function instead.
Handle<FixedArray> constants = isolate()->factory()->empty_fixed_array();
int literal_index = expr->literal_index();
int flags = expr->depth() == 1
? ArrayLiteral::kShallowElements
: ArrayLiteral::kNoFlags;
flags |= ArrayLiteral::kDisableMementos;
Add<HPushArguments>(Add<HConstant>(literals),
Add<HConstant>(literal_index),
Add<HConstant>(constants),
Add<HConstant>(flags));
// TODO(mvstanton): Consider a flag to turn off creation of any
// AllocationMementos for this call: we are in crankshaft and should have
// learned enough about transition behavior to stop emitting mementos.
Runtime::FunctionId function_id = Runtime::kHiddenCreateArrayLiteral;
literal = Add<HCallRuntime>(isolate()->factory()->empty_string(),
Runtime::FunctionForId(function_id),
4);
// De-opt if elements kind changed from boilerplate_elements_kind.
Handle<Map> map = Handle<Map>(boilerplate_object->map(), isolate());
literal = Add<HCheckMaps>(literal, map);
}
// The array is expected in the bailout environment during computation
// of the property values and is the value of the entire expression.
Push(literal);
// The literal index is on the stack, too.
Push(Add<HConstant>(expr->literal_index()));
HInstruction* elements = NULL;
for (int i = 0; i < length; i++) {
Expression* subexpr = subexprs->at(i);
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
CHECK_ALIVE(VisitForValue(subexpr));
HValue* value = Pop();
if (!Smi::IsValid(i)) return Bailout(kNonSmiKeyInArrayLiteral);
elements = AddLoadElements(literal);
HValue* key = Add<HConstant>(i);
switch (boilerplate_elements_kind) {
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS: {
HStoreKeyed* instr = Add<HStoreKeyed>(elements, key, value,
boilerplate_elements_kind);
instr->SetUninitialized(uninitialized);
break;
}
default:
UNREACHABLE();
break;
}
Add<HSimulate>(expr->GetIdForElement(i));
}
Drop(1); // array literal index
return ast_context()->ReturnValue(Pop());
}
HCheckMaps* HOptimizedGraphBuilder::AddCheckMap(HValue* object,
Handle<Map> map) {
BuildCheckHeapObject(object);
return Add<HCheckMaps>(object, map);
}
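// Loads a named field, with three refinements over a plain HLoadNamedField:
// a constant receiver with a read-only, non-deletable property folds to a
// constant; a double field loads the boxed HeapNumber first and then its
// value slot; and any stable field maps collected for the field are attached
// to the load so later map checks on the result can be elided.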
HInstruction* HOptimizedGraphBuilder::BuildLoadNamedField(
PropertyAccessInfo* info,
HValue* checked_object) {
  // See if this is a load for an immutable property.
if (checked_object->ActualValue()->IsConstant() &&
info->lookup()->IsCacheable() &&
info->lookup()->IsReadOnly() && info->lookup()->IsDontDelete()) {
Handle<Object> object(
HConstant::cast(checked_object->ActualValue())->handle(isolate()));
if (object->IsJSObject()) {
LookupResult lookup(isolate());
Handle<JSObject>::cast(object)->Lookup(info->name(), &lookup);
Handle<Object> value(lookup.GetLazyValue(), isolate());
if (!value->IsTheHole()) {
return New<HConstant>(value);
}
}
}
HObjectAccess access = info->access();
if (access.representation().IsDouble()) {
// Load the heap number.
checked_object = Add<HLoadNamedField>(
checked_object, static_cast<HValue*>(NULL),
access.WithRepresentation(Representation::Tagged()));
// Load the double value from it.
access = HObjectAccess::ForHeapNumberValue();
}
SmallMapList* map_list = info->field_maps();
if (map_list->length() == 0) {
return New<HLoadNamedField>(checked_object, checked_object, access);
}
UniqueSet<Map>* maps = new(zone()) UniqueSet<Map>(map_list->length(), zone());
for (int i = 0; i < map_list->length(); ++i) {
maps->Add(Unique<Map>::CreateImmovable(map_list->at(i)), zone());
}
return New<HLoadNamedField>(
checked_object, checked_object, access, maps, info->field_type());
}
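// Stores to double fields go through a HeapNumber box: a transition to a
// double field allocates a fresh mutable HeapNumber, writes the value into
// it and stores the box, while a store to an existing double field loads the
// box and overwrites its value slot in place. Other representations store
// directly, with heap-object values map-checked against the collected field
// maps first.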
HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
PropertyAccessInfo* info,
HValue* checked_object,
HValue* value) {
bool transition_to_field = info->lookup()->IsTransition();
// TODO(verwaest): Move this logic into PropertyAccessInfo.
HObjectAccess field_access = info->access();
  HStoreNamedField* instr;
if (field_access.representation().IsDouble()) {
HObjectAccess heap_number_access =
field_access.WithRepresentation(Representation::Tagged());
if (transition_to_field) {
// The store requires a mutable HeapNumber to be allocated.
NoObservableSideEffectsScope no_side_effects(this);
HInstruction* heap_number_size = Add<HConstant>(HeapNumber::kSize);
// TODO(hpayer): Allocation site pretenuring support.
HInstruction* heap_number = Add<HAllocate>(heap_number_size,
HType::HeapObject(),
NOT_TENURED,
HEAP_NUMBER_TYPE);
AddStoreMapConstant(heap_number, isolate()->factory()->heap_number_map());
Add<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(),
value);
instr = New<HStoreNamedField>(checked_object->ActualValue(),
heap_number_access,
heap_number);
} else {
// Already holds a HeapNumber; load the box and write its value field.
HInstruction* heap_number = Add<HLoadNamedField>(
checked_object, static_cast<HValue*>(NULL), heap_number_access);
instr = New<HStoreNamedField>(heap_number,
HObjectAccess::ForHeapNumberValue(),
value, STORE_TO_INITIALIZED_ENTRY);
}
} else {
if (field_access.representation().IsHeapObject()) {
BuildCheckHeapObject(value);
}
if (!info->field_maps()->is_empty()) {
ASSERT(field_access.representation().IsHeapObject());
value = Add<HCheckMaps>(value, info->field_maps());
}
// This is a normal store.
instr = New<HStoreNamedField>(
checked_object->ActualValue(), field_access, value,
transition_to_field ? INITIALIZING_STORE : STORE_TO_INITIALIZED_ENTRY);
}
if (transition_to_field) {
Handle<Map> transition(info->transition());
ASSERT(!transition->is_deprecated());
instr->SetTransition(Add<HConstant>(transition));
}
return instr;
}
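// Decides whether another access info can be folded into this one as a
// single polymorphic case: both must resolve to the same kind of property,
// i.e. the same accessor pair, the same constant, or a field at the same
// offset with a representation compatible for the access direction. Field
// maps are unioned for loads but must match exactly for stores.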
bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatible(
PropertyAccessInfo* info) {
if (!CanInlinePropertyAccess(type_)) return false;
// Currently only handle Type::Number as a polymorphic case.
// TODO(verwaest): Support monomorphic handling of numbers with a HCheckNumber
// instruction.
if (type_->Is(Type::Number())) return false;
// Values are only compatible for monomorphic load if they all behave the same
// regarding value wrappers.
if (type_->Is(Type::NumberOrString())) {
if (!info->type_->Is(Type::NumberOrString())) return false;
} else {
if (info->type_->Is(Type::NumberOrString())) return false;
}
if (!LookupDescriptor()) return false;
if (!lookup_.IsFound()) {
return (!info->lookup_.IsFound() || info->has_holder()) &&
map()->prototype() == info->map()->prototype();
}
// Mismatch if the other access info found the property in the prototype
// chain.
if (info->has_holder()) return false;
if (lookup_.IsPropertyCallbacks()) {
return accessor_.is_identical_to(info->accessor_) &&
api_holder_.is_identical_to(info->api_holder_);
}
if (lookup_.IsConstant()) {
return constant_.is_identical_to(info->constant_);
}
ASSERT(lookup_.IsField());
if (!info->lookup_.IsField()) return false;
Representation r = access_.representation();
if (IsLoad()) {
if (!info->access_.representation().IsCompatibleForLoad(r)) return false;
} else {
if (!info->access_.representation().IsCompatibleForStore(r)) return false;
}
if (info->access_.offset() != access_.offset()) return false;
if (info->access_.IsInobject() != access_.IsInobject()) return false;
if (IsLoad()) {
if (field_maps_.is_empty()) {
info->field_maps_.Clear();
} else if (!info->field_maps_.is_empty()) {
for (int i = 0; i < field_maps_.length(); ++i) {
info->field_maps_.AddMapIfMissing(field_maps_.at(i), info->zone());
}
info->field_maps_.Sort();
}
} else {
// We can only merge stores that agree on their field maps. The comparison
// below is safe, since we keep the field maps sorted.
if (field_maps_.length() != info->field_maps_.length()) return false;
for (int i = 0; i < field_maps_.length(); ++i) {
if (!field_maps_.at(i).is_identical_to(info->field_maps_.at(i))) {
return false;
}
}
}
info->GeneralizeRepresentation(r);
info->field_type_ = info->field_type_.Combine(field_type_);
return true;
}
bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupDescriptor() {
if (!type_->IsClass()) return true;
map()->LookupDescriptor(NULL, *name_, &lookup_);
return LoadResult(map());
}
bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadResult(Handle<Map> map) {
if (!IsLoad() && lookup_.IsProperty() &&
(lookup_.IsReadOnly() || !lookup_.IsCacheable())) {
return false;
}
if (lookup_.IsField()) {
// Construct the object field access.
access_ = HObjectAccess::ForField(map, &lookup_, name_);
// Load field map for heap objects.
LoadFieldMaps(map);
} else if (lookup_.IsPropertyCallbacks()) {
Handle<Object> callback(lookup_.GetValueFromMap(*map), isolate());
if (!callback->IsAccessorPair()) return false;
Object* raw_accessor = IsLoad()
? Handle<AccessorPair>::cast(callback)->getter()
: Handle<AccessorPair>::cast(callback)->setter();
if (!raw_accessor->IsJSFunction()) return false;
Handle<JSFunction> accessor = handle(JSFunction::cast(raw_accessor));
if (accessor->shared()->IsApiFunction()) {
CallOptimization call_optimization(accessor);
if (call_optimization.is_simple_api_call()) {
CallOptimization::HolderLookup holder_lookup;
Handle<Map> receiver_map = this->map();
api_holder_ = call_optimization.LookupHolderOfExpectedType(
receiver_map, &holder_lookup);
}
}
accessor_ = accessor;
} else if (lookup_.IsConstant()) {
constant_ = handle(lookup_.GetConstantFromMap(*map), isolate());
}
return true;
}
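// Collects the stable maps, if any, from the field's HeapType so loads of
// the field can carry map information. If any class in the field type is
// not stable the whole set is dropped; otherwise a code dependency is
// registered on the map that introduced the field, so the compiled code is
// deoptimized if the field type later generalizes.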
void HOptimizedGraphBuilder::PropertyAccessInfo::LoadFieldMaps(
Handle<Map> map) {
// Clear any previously collected field maps/type.
field_maps_.Clear();
field_type_ = HType::Tagged();
// Figure out the field type from the accessor map.
Handle<HeapType> field_type(lookup_.GetFieldTypeFromMap(*map), isolate());
// Collect the (stable) maps from the field type.
int num_field_maps = field_type->NumClasses();
if (num_field_maps == 0) return;
ASSERT(access_.representation().IsHeapObject());
field_maps_.Reserve(num_field_maps, zone());
HeapType::Iterator<Map> it = field_type->Classes();
while (!it.Done()) {
Handle<Map> field_map = it.Current();
if (!field_map->is_stable()) {
field_maps_.Clear();
return;
}
field_maps_.Add(field_map, zone());
it.Advance();
}
field_maps_.Sort();
ASSERT_EQ(num_field_maps, field_maps_.length());
// Determine field HType from field HeapType.
field_type_ = HType::FromType<HeapType>(field_type);
ASSERT(field_type_.IsHeapObject());
// Add dependency on the map that introduced the field.
Map::AddDependentCompilationInfo(
handle(lookup_.GetFieldOwnerFromMap(*map), isolate()),
DependentCode::kFieldTypeGroup, top_info());
}
bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupInPrototypes() {
Handle<Map> map = this->map();
while (map->prototype()->IsJSObject()) {
holder_ = handle(JSObject::cast(map->prototype()));
if (holder_->map()->is_deprecated()) {
JSObject::TryMigrateInstance(holder_);
}
map = Handle<Map>(holder_->map());
if (!CanInlinePropertyAccess(ToType(map))) {
lookup_.NotFound();
return false;
}
map->LookupDescriptor(*holder_, *name_, &lookup_);
if (lookup_.IsFound()) return LoadResult(map);
}
lookup_.NotFound();
return true;
}
bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessMonomorphic() {
if (!CanInlinePropertyAccess(type_)) return false;
if (IsJSObjectFieldAccessor()) return IsLoad();
if (!LookupDescriptor()) return false;
if (lookup_.IsFound()) {
if (IsLoad()) return true;
return !lookup_.IsReadOnly() && lookup_.IsCacheable();
}
if (!LookupInPrototypes()) return false;
if (IsLoad()) return true;
if (lookup_.IsPropertyCallbacks()) return true;
Handle<Map> map = this->map();
map->LookupTransition(NULL, *name_, &lookup_);
if (lookup_.IsTransitionToField() && map->unused_property_fields() > 0) {
// Construct the object field access.
access_ = HObjectAccess::ForField(map, &lookup_, name_);
// Load field map for heap objects.
LoadFieldMaps(transition());
return true;
}
return false;
}
bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessAsMonomorphic(
SmallMapList* types) {
ASSERT(type_->Is(ToType(types->first())));
if (!CanAccessMonomorphic()) return false;
STATIC_ASSERT(kMaxLoadPolymorphism == kMaxStorePolymorphism);
if (types->length() > kMaxLoadPolymorphism) return false;
HObjectAccess access = HObjectAccess::ForMap(); // bogus default
if (GetJSObjectFieldAccess(&access)) {
for (int i = 1; i < types->length(); ++i) {
PropertyAccessInfo test_info(
builder_, access_type_, ToType(types->at(i)), name_);
HObjectAccess test_access = HObjectAccess::ForMap(); // bogus default
if (!test_info.GetJSObjectFieldAccess(&test_access)) return false;
if (!access.Equals(test_access)) return false;
}
return true;
}
// Currently only handle Type::Number as a polymorphic case.
// TODO(verwaest): Support monomorphic handling of numbers with a HCheckNumber
// instruction.
if (type_->Is(Type::Number())) return false;
// Multiple maps cannot transition to the same target map.
ASSERT(!IsLoad() || !lookup_.IsTransition());
if (lookup_.IsTransition() && types->length() > 1) return false;
for (int i = 1; i < types->length(); ++i) {
PropertyAccessInfo test_info(
builder_, access_type_, ToType(types->at(i)), name_);
if (!test_info.IsCompatible(this)) return false;
}
return true;
}
static bool NeedsWrappingFor(Type* type, Handle<JSFunction> target) {
return type->Is(Type::NumberOrString()) &&
target->shared()->strict_mode() == SLOPPY &&
!target->shared()->native();
}
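// Emits a single-map property access, dispatching on what the lookup found:
// a built-in JSObject field, a property that is known to be absent (loads
// yield undefined), a plain field load or store, a store that transitions
// the map, an accessor pair (inlined when possible, wrapped for primitive
// receivers, or called as a constant function), or a constant value.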
HInstruction* HOptimizedGraphBuilder::BuildMonomorphicAccess(
PropertyAccessInfo* info,
HValue* object,
HValue* checked_object,
HValue* value,
BailoutId ast_id,
BailoutId return_id,
bool can_inline_accessor) {
HObjectAccess access = HObjectAccess::ForMap(); // bogus default
if (info->GetJSObjectFieldAccess(&access)) {
ASSERT(info->IsLoad());
return New<HLoadNamedField>(object, checked_object, access);
}
HValue* checked_holder = checked_object;
if (info->has_holder()) {
Handle<JSObject> prototype(JSObject::cast(info->map()->prototype()));
checked_holder = BuildCheckPrototypeMaps(prototype, info->holder());
}
if (!info->lookup()->IsFound()) {
ASSERT(info->IsLoad());
return graph()->GetConstantUndefined();
}
if (info->lookup()->IsField()) {
if (info->IsLoad()) {
return BuildLoadNamedField(info, checked_holder);
} else {
return BuildStoreNamedField(info, checked_object, value);
}
}
if (info->lookup()->IsTransition()) {
ASSERT(!info->IsLoad());
return BuildStoreNamedField(info, checked_object, value);
}
if (info->lookup()->IsPropertyCallbacks()) {
Push(checked_object);
int argument_count = 1;
if (!info->IsLoad()) {
argument_count = 2;
Push(value);
}
if (NeedsWrappingFor(info->type(), info->accessor())) {
HValue* function = Add<HConstant>(info->accessor());
PushArgumentsFromEnvironment(argument_count);
return New<HCallFunction>(function, argument_count, WRAP_AND_CALL);
} else if (FLAG_inline_accessors && can_inline_accessor) {
bool success = info->IsLoad()
? TryInlineGetter(info->accessor(), info->map(), ast_id, return_id)
: TryInlineSetter(
info->accessor(), info->map(), ast_id, return_id, value);
if (success || HasStackOverflow()) return NULL;
}
PushArgumentsFromEnvironment(argument_count);
return BuildCallConstantFunction(info->accessor(), argument_count);
}
ASSERT(info->lookup()->IsConstant());
if (info->IsLoad()) {
return New<HConstant>(info->constant());
} else {
return New<HCheckValue>(value, Handle<JSFunction>::cast(info->constant()));
}
}
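// Builds an n-way map dispatch for a polymorphic named access. A first pass
// over the type feedback checks whether a number case is present, since that
// needs a shared block fed both by the Smi check and by the heap-number map
// compare. The second pass emits one compare-and-branch per handled map with
// the monomorphic access in its true branch, ending in either a hard
// deoptimization (all known maps handled) or a generic access.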
void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
PropertyAccessType access_type,
BailoutId ast_id,
BailoutId return_id,
HValue* object,
HValue* value,
SmallMapList* types,
Handle<String> name) {
// Something did not match; must use a polymorphic load.
int count = 0;
HBasicBlock* join = NULL;
HBasicBlock* number_block = NULL;
bool handled_string = false;
bool handle_smi = false;
STATIC_ASSERT(kMaxLoadPolymorphism == kMaxStorePolymorphism);
for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
PropertyAccessInfo info(this, access_type, ToType(types->at(i)), name);
if (info.type()->Is(Type::String())) {
if (handled_string) continue;
handled_string = true;
}
if (info.CanAccessMonomorphic()) {
count++;
if (info.type()->Is(Type::Number())) {
handle_smi = true;
break;
}
}
}
count = 0;
HControlInstruction* smi_check = NULL;
handled_string = false;
for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
PropertyAccessInfo info(this, access_type, ToType(types->at(i)), name);
if (info.type()->Is(Type::String())) {
if (handled_string) continue;
handled_string = true;
}
if (!info.CanAccessMonomorphic()) continue;
if (count == 0) {
join = graph()->CreateBasicBlock();
if (handle_smi) {
HBasicBlock* empty_smi_block = graph()->CreateBasicBlock();
HBasicBlock* not_smi_block = graph()->CreateBasicBlock();
number_block = graph()->CreateBasicBlock();
smi_check = New<HIsSmiAndBranch>(
object, empty_smi_block, not_smi_block);
FinishCurrentBlock(smi_check);
GotoNoSimulate(empty_smi_block, number_block);
set_current_block(not_smi_block);
} else {
BuildCheckHeapObject(object);
}
}
++count;
HBasicBlock* if_true = graph()->CreateBasicBlock();
HBasicBlock* if_false = graph()->CreateBasicBlock();
HUnaryControlInstruction* compare;
HValue* dependency;
if (info.type()->Is(Type::Number())) {
Handle<Map> heap_number_map = isolate()->factory()->heap_number_map();
compare = New<HCompareMap>(object, heap_number_map, if_true, if_false);
dependency = smi_check;
} else if (info.type()->Is(Type::String())) {
compare = New<HIsStringAndBranch>(object, if_true, if_false);
dependency = compare;
} else {
compare = New<HCompareMap>(object, info.map(), if_true, if_false);
dependency = compare;
}
FinishCurrentBlock(compare);
if (info.type()->Is(Type::Number())) {
GotoNoSimulate(if_true, number_block);
if_true = number_block;
}
set_current_block(if_true);
HInstruction* access = BuildMonomorphicAccess(
&info, object, dependency, value, ast_id,
return_id, FLAG_polymorphic_inlining);
HValue* result = NULL;
switch (access_type) {
case LOAD:
result = access;
break;
case STORE:
result = value;
break;
}
if (access == NULL) {
if (HasStackOverflow()) return;
} else {
if (!access->IsLinked()) AddInstruction(access);
if (!ast_context()->IsEffect()) Push(result);
}
if (current_block() != NULL) Goto(join);
set_current_block(if_false);
}
// Finish up. Unconditionally deoptimize if we've handled all the maps we
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
    FinishExitWithHardDeoptimization("Unknown map in polymorphic access");
} else {
HInstruction* instr = BuildNamedGeneric(access_type, object, name, value);
AddInstruction(instr);
if (!ast_context()->IsEffect()) Push(access_type == LOAD ? instr : value);
if (join != NULL) {
Goto(join);
} else {
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
return;
}
}
ASSERT(join != NULL);
if (join->HasPredecessor()) {
join->SetJoinId(ast_id);
set_current_block(join);
if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
} else {
set_current_block(NULL);
}
}
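// Normalizes receiver type feedback: if the receiver has a known monomorphic
// JSObject map, feedback maps that are unreachable from its root map via
// transitions are filtered out, which can turn polymorphic feedback back
// into a monomorphic access.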
static bool ComputeReceiverTypes(Expression* expr,
HValue* receiver,
SmallMapList** t,
Zone* zone) {
SmallMapList* types = expr->GetReceiverTypes();
*t = types;
bool monomorphic = expr->IsMonomorphic();
if (types != NULL && receiver->HasMonomorphicJSObjectType()) {
Map* root_map = receiver->GetMonomorphicJSObjectMap()->FindRootMap();
types->FilterForPossibleTransitions(root_map);
monomorphic = types->length() == 1;
}
return monomorphic && CanInlinePropertyAccess(
IC::MapToType<Type>(types->first(), zone));
}
static bool AreStringTypes(SmallMapList* types) {
for (int i = 0; i < types->length(); i++) {
if (types->at(i)->instance_type() >= FIRST_NONSTRING_TYPE) return false;
}
return true;
}
void HOptimizedGraphBuilder::BuildStore(Expression* expr,
Property* prop,
BailoutId ast_id,
BailoutId return_id,
bool is_uninitialized) {
if (!prop->key()->IsPropertyName()) {
// Keyed store.
HValue* value = environment()->ExpressionStackAt(0);
HValue* key = environment()->ExpressionStackAt(1);
HValue* object = environment()->ExpressionStackAt(2);
bool has_side_effects = false;
HandleKeyedElementAccess(object, key, value, expr,
STORE, &has_side_effects);
Drop(3);
Push(value);
Add<HSimulate>(return_id, REMOVABLE_SIMULATE);
return ast_context()->ReturnValue(Pop());
}
// Named store.
HValue* value = Pop();
HValue* object = Pop();
Literal* key = prop->key()->AsLiteral();
Handle<String> name = Handle<String>::cast(key->value());
ASSERT(!name.is_null());
HInstruction* instr = BuildNamedAccess(STORE, ast_id, return_id, expr,
object, name, value, is_uninitialized);
if (instr == NULL) return;
if (!ast_context()->IsEffect()) Push(value);
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
}
if (!ast_context()->IsEffect()) Drop(1);
return ast_context()->ReturnValue(value);
}
void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
CHECK_ALIVE(VisitForValue(prop->obj()));
if (!prop->key()->IsPropertyName()) {
CHECK_ALIVE(VisitForValue(prop->key()));
}
CHECK_ALIVE(VisitForValue(expr->value()));
BuildStore(expr, prop, expr->id(),
expr->AssignmentId(), expr->IsUninitialized());
}
// Because not every expression has a position and there is no common
// superclass of Assignment and CountOperation, we cannot just pass the
// owning expression instead of position and ast_id separately.
void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
Variable* var,
HValue* value,
BailoutId ast_id) {
LookupResult lookup(isolate());
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, STORE);
if (type == kUseCell) {
Handle<GlobalObject> global(current_info()->global_object());
Handle<PropertyCell> cell(global->GetPropertyCell(&lookup));
if (cell->type()->IsConstant()) {
Handle<Object> constant = cell->type()->AsConstant()->Value();
if (value->IsConstant()) {
HConstant* c_value = HConstant::cast(value);
if (!constant.is_identical_to(c_value->handle(isolate()))) {
Add<HDeoptimize>("Constant global variable assignment",
Deoptimizer::EAGER);
}
} else {
HValue* c_constant = Add<HConstant>(constant);
IfBuilder builder(this);
if (constant->IsNumber()) {
builder.If<HCompareNumericAndBranch>(value, c_constant, Token::EQ);
} else {
builder.If<HCompareObjectEqAndBranch>(value, c_constant);
}
builder.Then();
builder.Else();
Add<HDeoptimize>("Constant global variable assignment",
Deoptimizer::EAGER);
builder.End();
}
}
HInstruction* instr =
Add<HStoreGlobalCell>(value, cell, lookup.GetPropertyDetails());
if (instr->HasObservableSideEffects()) {
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
}
} else {
HValue* global_object = Add<HLoadNamedField>(
context(), static_cast<HValue*>(NULL),
HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
HStoreNamedGeneric* instr =
Add<HStoreNamedGeneric>(global_object, var->name(),
value, function_strict_mode());
USE(instr);
ASSERT(instr->HasObservableSideEffects());
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
}
}
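// Compound assignments, e.g. x += y or o.p *= 2. For variables the recorded
// binary operation (which re-reads the variable) is evaluated directly; for
// properties the current value is loaded with PushLoad, combined with the
// RHS via BuildBinaryOperation, and written back through the same BuildStore
// path as a plain assignment.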
void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
Expression* target = expr->target();
VariableProxy* proxy = target->AsVariableProxy();
Property* prop = target->AsProperty();
ASSERT(proxy == NULL || prop == NULL);
// We have a second position recorded in the FullCodeGenerator to have
// type feedback for the binary operation.
BinaryOperation* operation = expr->binary_operation();
if (proxy != NULL) {
Variable* var = proxy->var();
if (var->mode() == LET) {
return Bailout(kUnsupportedLetCompoundAssignment);
}
CHECK_ALIVE(VisitForValue(operation));
switch (var->location()) {
case Variable::UNALLOCATED:
HandleGlobalVariableAssignment(var,
Top(),
expr->AssignmentId());
break;
case Variable::PARAMETER:
case Variable::LOCAL:
if (var->mode() == CONST_LEGACY) {
return Bailout(kUnsupportedConstCompoundAssignment);
}
BindIfLive(var, Top());
break;
case Variable::CONTEXT: {
// Bail out if we try to mutate a parameter value in a function
// using the arguments object. We do not (yet) correctly handle the
// arguments property of the function.
if (current_info()->scope()->arguments() != NULL) {
// Parameters will be allocated to context slots. We have no
// direct way to detect that the variable is a parameter so we do
// a linear search of the parameter variables.
int count = current_info()->scope()->num_parameters();
for (int i = 0; i < count; ++i) {
if (var == current_info()->scope()->parameter(i)) {
Bailout(kAssignmentToParameterFunctionUsesArgumentsObject);
}
}
}
HStoreContextSlot::Mode mode;
switch (var->mode()) {
case LET:
mode = HStoreContextSlot::kCheckDeoptimize;
break;
case CONST:
          // This case is checked statically so no need to
          // perform checks here.
UNREACHABLE();
case CONST_LEGACY:
return ast_context()->ReturnValue(Pop());
default:
mode = HStoreContextSlot::kNoCheck;
}
HValue* context = BuildContextChainWalk(var);
HStoreContextSlot* instr = Add<HStoreContextSlot>(
context, var->index(), mode, Top());
if (instr->HasObservableSideEffects()) {
Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
break;
}
case Variable::LOOKUP:
return Bailout(kCompoundAssignmentToLookupSlot);
}
return ast_context()->ReturnValue(Pop());
} else if (prop != NULL) {
CHECK_ALIVE(VisitForValue(prop->obj()));
HValue* object = Top();
HValue* key = NULL;
if ((!prop->IsFunctionPrototype() && !prop->key()->IsPropertyName()) ||
prop->IsStringAccess()) {
CHECK_ALIVE(VisitForValue(prop->key()));
key = Top();
}
CHECK_ALIVE(PushLoad(prop, object, key));
CHECK_ALIVE(VisitForValue(expr->value()));
HValue* right = Pop();
HValue* left = Pop();
Push(BuildBinaryOperation(operation, left, right, PUSH_BEFORE_SIMULATE));
BuildStore(expr, prop, expr->id(),
expr->AssignmentId(), expr->IsUninitialized());
} else {
return Bailout(kInvalidLhsInCompoundAssignment);
}
}
void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
VariableProxy* proxy = expr->target()->AsVariableProxy();
Property* prop = expr->target()->AsProperty();
ASSERT(proxy == NULL || prop == NULL);
if (expr->is_compound()) {
HandleCompoundAssignment(expr);
return;
}
if (prop != NULL) {
HandlePropertyAssignment(expr);
} else if (proxy != NULL) {
Variable* var = proxy->var();
if (var->mode() == CONST) {
if (expr->op() != Token::INIT_CONST) {
return Bailout(kNonInitializerAssignmentToConst);
}
} else if (var->mode() == CONST_LEGACY) {
if (expr->op() != Token::INIT_CONST_LEGACY) {
CHECK_ALIVE(VisitForValue(expr->value()));
return ast_context()->ReturnValue(Pop());
}
if (var->IsStackAllocated()) {
// We insert a use of the old value to detect unsupported uses of const
// variables (e.g. initialization inside a loop).
HValue* old_value = environment()->Lookup(var);
Add<HUseConst>(old_value);
}
}
if (proxy->IsArguments()) return Bailout(kAssignmentToArguments);
// Handle the assignment.
switch (var->location()) {
case Variable::UNALLOCATED:
CHECK_ALIVE(VisitForValue(expr->value()));
HandleGlobalVariableAssignment(var,
Top(),
expr->AssignmentId());
return ast_context()->ReturnValue(Pop());
case Variable::PARAMETER:
case Variable::LOCAL: {
// Perform an initialization check for let declared variables
// or parameters.
if (var->mode() == LET && expr->op() == Token::ASSIGN) {
HValue* env_value = environment()->Lookup(var);
if (env_value == graph()->GetConstantHole()) {
return Bailout(kAssignmentToLetVariableBeforeInitialization);
}
}
// We do not allow the arguments object to occur in a context where it
// may escape, but assignments to stack-allocated locals are
// permitted.
CHECK_ALIVE(VisitForValue(expr->value(), ARGUMENTS_ALLOWED));
HValue* value = Pop();
BindIfLive(var, value);
return ast_context()->ReturnValue(value);
}
case Variable::CONTEXT: {
// Bail out if we try to mutate a parameter value in a function using
// the arguments object. We do not (yet) correctly handle the
// arguments property of the function.
if (current_info()->scope()->arguments() != NULL) {
// Parameters will be rewritten to context slots. We have no direct
// way to detect that the variable is a parameter.
int count = current_info()->scope()->num_parameters();
for (int i = 0; i < count; ++i) {
if (var == current_info()->scope()->parameter(i)) {
return Bailout(kAssignmentToParameterInArgumentsObject);
}
}
}
CHECK_ALIVE(VisitForValue(expr->value()));
HStoreContextSlot::Mode mode;
if (expr->op() == Token::ASSIGN) {
switch (var->mode()) {
case LET:
mode = HStoreContextSlot::kCheckDeoptimize;
break;
case CONST:
// This case is checked statically, so there is no need to
// perform checks here.
UNREACHABLE();
case CONST_LEGACY:
return ast_context()->ReturnValue(Pop());
default:
mode = HStoreContextSlot::kNoCheck;
}
} else if (expr->op() == Token::INIT_VAR ||
expr->op() == Token::INIT_LET ||
expr->op() == Token::INIT_CONST) {
mode = HStoreContextSlot::kNoCheck;
} else {
ASSERT(expr->op() == Token::INIT_CONST_LEGACY);
mode = HStoreContextSlot::kCheckIgnoreAssignment;
}
HValue* context = BuildContextChainWalk(var);
HStoreContextSlot* instr = Add<HStoreContextSlot>(
context, var->index(), mode, Top());
if (instr->HasObservableSideEffects()) {
Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
return ast_context()->ReturnValue(Pop());
}
case Variable::LOOKUP:
return Bailout(kAssignmentToLOOKUPVariable);
}
} else {
return Bailout(kInvalidLeftHandSideInAssignment);
}
}
void HOptimizedGraphBuilder::VisitYield(Yield* expr) {
// Generators are not optimized, so we should never get here.
UNREACHABLE();
}
void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
// We don't optimize functions with invalid left-hand sides in
// assignments, count operations, or for-in. Consequently throw can
// currently only occur in an effect context.
ASSERT(ast_context()->IsEffect());
CHECK_ALIVE(VisitForValue(expr->exception()));
HValue* value = environment()->Pop();
if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
Add<HPushArguments>(value);
Add<HCallRuntime>(isolate()->factory()->empty_string(),
Runtime::FunctionForId(Runtime::kHiddenThrow), 1);
Add<HSimulate>(expr->id());
// If the throw definitely exits the function, we can finish with a dummy
// control flow at this point. This is not the case if the throw is inside
// an inlined function which may be replaced.
if (call_context() == NULL) {
FinishExitCurrentBlock(New<HAbnormalExit>());
}
}
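// Loads the instance type of a string map, constant-folding the load
// when the string is a constant with a known value.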
HInstruction* HGraphBuilder::AddLoadStringInstanceType(HValue* string) {
if (string->IsConstant()) {
HConstant* c_string = HConstant::cast(string);
if (c_string->HasStringValue()) {
return Add<HConstant>(c_string->StringValue()->map()->instance_type());
}
}
return Add<HLoadNamedField>(
Add<HLoadNamedField>(string, static_cast<HValue*>(NULL),
HObjectAccess::ForMap()),
static_cast<HValue*>(NULL), HObjectAccess::ForMapInstanceType());
}
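// Loads the length of a string, constant-folding when the string is a
// constant with a known value.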
HInstruction* HGraphBuilder::AddLoadStringLength(HValue* string) {
if (string->IsConstant()) {
HConstant* c_string = HConstant::cast(string);
if (c_string->HasStringValue()) {
return Add<HConstant>(c_string->StringValue()->length());
}
}
return Add<HLoadNamedField>(string, static_cast<HValue*>(NULL),
HObjectAccess::ForStringLength());
}
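// Builds a generic named load or store, preceded by a soft deoptimize
// when there is not yet any type feedback for the access.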
HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
PropertyAccessType access_type,
HValue* object,
Handle<String> name,
HValue* value,
bool is_uninitialized) {
if (is_uninitialized) {
Add<HDeoptimize>("Insufficient type feedback for generic named access",
Deoptimizer::SOFT);
}
if (access_type == LOAD) {
return New<HLoadNamedGeneric>(object, name);
} else {
return New<HStoreNamedGeneric>(object, name, value, function_strict_mode());
}
}
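// Builds a generic keyed load or store.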
HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
PropertyAccessType access_type,
HValue* object,
HValue* key,
HValue* value) {
if (access_type == LOAD) {
return New<HLoadKeyedGeneric>(object, key);
} else {
return New<HStoreKeyedGeneric>(object, key, value, function_strict_mode());
}
}
LoadKeyedHoleMode HOptimizedGraphBuilder::BuildKeyedHoleMode(Handle<Map> map) {
// Loads from a "stock" fast holey double arrays can elide the hole check.
LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE;
if (*map == isolate()->get_initial_js_array_map(FAST_HOLEY_DOUBLE_ELEMENTS) &&
isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
Handle<JSObject> prototype(JSObject::cast(map->prototype()), isolate());
Handle<JSObject> object_prototype = isolate()->initial_object_prototype();
BuildCheckPrototypeMaps(prototype, object_prototype);
load_mode = ALLOW_RETURN_HOLE;
graph()->MarkDependsOnEmptyArrayProtoElements();
}
return load_mode;
}
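// Builds an element access for a receiver that is known to have a single
// map. Stores additionally check the prototype chain (see below).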
HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
HValue* object,
HValue* key,
HValue* val,
HValue* dependency,
Handle<Map> map,
PropertyAccessType access_type,
KeyedAccessStoreMode store_mode) {
HCheckMaps* checked_object = Add<HCheckMaps>(object, map, dependency);
if (dependency) {
checked_object->ClearDependsOnFlag(kElementsKind);
}
if (access_type == STORE && map->prototype()->IsJSObject()) {
// Monomorphic stores need a prototype chain check because shape
// changes could allow callbacks on elements in the chain that
// aren't compatible with monomorphic keyed stores.
Handle<JSObject> prototype(JSObject::cast(map->prototype()));
JSObject* holder = JSObject::cast(map->prototype());
while (!holder->GetPrototype()->IsNull()) {
holder = JSObject::cast(holder->GetPrototype());
}
BuildCheckPrototypeMaps(prototype, Handle<JSObject>(holder));
}
LoadKeyedHoleMode load_mode = BuildKeyedHoleMode(map);
return BuildUncheckedMonomorphicElementAccess(
checked_object, key, val,
map->instance_type() == JS_ARRAY_TYPE,
map->elements_kind(), access_type,
load_mode, store_mode);
}
HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
HValue* object,
HValue* key,
HValue* val,
SmallMapList* maps) {
// For polymorphic loads of similar elements kinds (i.e. all tagged or all
// double), always use the "worst case" code without a transition. This is
// much faster than transitioning the elements to the worst case, trading a
// HTransitionElements for a HCheckMaps, and avoiding mutation of the array.
bool has_double_maps = false;
bool has_smi_or_object_maps = false;
bool has_js_array_access = false;
bool has_non_js_array_access = false;
bool has_seen_holey_elements = false;
Handle<Map> most_general_consolidated_map;
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
if (!map->IsJSObjectMap()) return NULL;
// Don't allow mixing of JSArrays with JSObjects.
if (map->instance_type() == JS_ARRAY_TYPE) {
if (has_non_js_array_access) return NULL;
has_js_array_access = true;
} else if (has_js_array_access) {
return NULL;
} else {
has_non_js_array_access = true;
}
// Don't allow mixed, incompatible elements kinds.
if (map->has_fast_double_elements()) {
if (has_smi_or_object_maps) return NULL;
has_double_maps = true;
} else if (map->has_fast_smi_or_object_elements()) {
if (has_double_maps) return NULL;
has_smi_or_object_maps = true;
} else {
return NULL;
}
// Remember if we've ever seen holey elements.
if (IsHoleyElementsKind(map->elements_kind())) {
has_seen_holey_elements = true;
}
// Remember the most general elements kind; the code for its load will
// properly handle all of the more specific cases.
if ((i == 0) || IsMoreGeneralElementsKindTransition(
most_general_consolidated_map->elements_kind(),
map->elements_kind())) {
most_general_consolidated_map = map;
}
}
if (!has_double_maps && !has_smi_or_object_maps) return NULL;
HCheckMaps* checked_object = Add<HCheckMaps>(object, maps);
// FAST_ELEMENTS is considered more general than FAST_HOLEY_SMI_ELEMENTS.
// If we've seen both, the consolidated load must use FAST_HOLEY_ELEMENTS.
ElementsKind consolidated_elements_kind = has_seen_holey_elements
? GetHoleyElementsKind(most_general_consolidated_map->elements_kind())
: most_general_consolidated_map->elements_kind();
HInstruction* instr = BuildUncheckedMonomorphicElementAccess(
checked_object, key, val,
most_general_consolidated_map->instance_type() == JS_ARRAY_TYPE,
consolidated_elements_kind,
LOAD, NEVER_RETURN_HOLE, STANDARD_STORE);
return instr;
}
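// Builds a polymorphic element access. A consolidated load is tried
// first; otherwise elements kind transitions are applied where possible
// and the remaining maps are dispatched with a chain of map compares,
// ending in a hard deoptimize for unknown maps.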
HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HValue* object,
HValue* key,
HValue* val,
SmallMapList* maps,
PropertyAccessType access_type,
KeyedAccessStoreMode store_mode,
bool* has_side_effects) {
*has_side_effects = false;
BuildCheckHeapObject(object);
if (access_type == LOAD) {
HInstruction* consolidated_load =
TryBuildConsolidatedElementLoad(object, key, val, maps);
if (consolidated_load != NULL) {
*has_side_effects |= consolidated_load->HasObservableSideEffects();
return consolidated_load;
}
}
// Elements kind transition support.
MapHandleList transition_target(maps->length());
// Collect possible transition targets.
MapHandleList possible_transitioned_maps(maps->length());
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
ElementsKind elements_kind = map->elements_kind();
if (IsFastElementsKind(elements_kind) &&
elements_kind != GetInitialFastElementsKind()) {
possible_transitioned_maps.Add(map);
}
if (elements_kind == SLOPPY_ARGUMENTS_ELEMENTS) {
HInstruction* result = BuildKeyedGeneric(access_type, object, key, val);
*has_side_effects = result->HasObservableSideEffects();
return AddInstruction(result);
}
}
// Get transition target for each map (NULL == no transition).
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
Handle<Map> transitioned_map =
map->FindTransitionedMap(&possible_transitioned_maps);
transition_target.Add(transitioned_map);
}
MapHandleList untransitionable_maps(maps->length());
HTransitionElementsKind* transition = NULL;
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
ASSERT(map->IsMap());
if (!transition_target.at(i).is_null()) {
ASSERT(Map::IsValidElementsTransition(
map->elements_kind(),
transition_target.at(i)->elements_kind()));
transition = Add<HTransitionElementsKind>(object, map,
transition_target.at(i));
} else {
untransitionable_maps.Add(map);
}
}
// If only one map is left after transitioning, handle this case
// monomorphically.
ASSERT(untransitionable_maps.length() >= 1);
if (untransitionable_maps.length() == 1) {
Handle<Map> untransitionable_map = untransitionable_maps[0];
HInstruction* instr = NULL;
if (untransitionable_map->has_slow_elements_kind() ||
!untransitionable_map->IsJSObjectMap()) {
instr = AddInstruction(BuildKeyedGeneric(access_type, object, key, val));
} else {
instr = BuildMonomorphicElementAccess(
object, key, val, transition, untransitionable_map, access_type,
store_mode);
}
*has_side_effects |= instr->HasObservableSideEffects();
return access_type == STORE ? NULL : instr;
}
HBasicBlock* join = graph()->CreateBasicBlock();
for (int i = 0; i < untransitionable_maps.length(); ++i) {
Handle<Map> map = untransitionable_maps[i];
if (!map->IsJSObjectMap()) continue;
ElementsKind elements_kind = map->elements_kind();
HBasicBlock* this_map = graph()->CreateBasicBlock();
HBasicBlock* other_map = graph()->CreateBasicBlock();
HCompareMap* mapcompare =
New<HCompareMap>(object, map, this_map, other_map);
FinishCurrentBlock(mapcompare);
set_current_block(this_map);
HInstruction* access = NULL;
if (IsDictionaryElementsKind(elements_kind)) {
access = AddInstruction(BuildKeyedGeneric(access_type, object, key, val));
} else {
ASSERT(IsFastElementsKind(elements_kind) ||
IsExternalArrayElementsKind(elements_kind) ||
IsFixedTypedArrayElementsKind(elements_kind));
LoadKeyedHoleMode load_mode = BuildKeyedHoleMode(map);
// Happily, mapcompare is a checked object.
access = BuildUncheckedMonomorphicElementAccess(
mapcompare, key, val,
map->instance_type() == JS_ARRAY_TYPE,
elements_kind, access_type,
load_mode,
store_mode);
}
*has_side_effects |= access->HasObservableSideEffects();
// The caller will use has_side_effects and add a correct Simulate.
access->SetFlag(HValue::kHasNoObservableSideEffects);
if (access_type == LOAD) {
Push(access);
}
NoObservableSideEffectsScope scope(this);
GotoNoSimulate(join);
set_current_block(other_map);
}
// Ensure that we visited at least one map above that goes to join. This is
// necessary because FinishExitWithHardDeoptimization does an AbnormalExit
// rather than joining the join block. If this becomes an issue, insert a
// generic access in the case length() == 0.
ASSERT(join->predecessors()->length() > 0);
// Deopt if none of the cases matched.
NoObservableSideEffectsScope scope(this);
FinishExitWithHardDeoptimization("Unknown map in polymorphic element access");
set_current_block(join);
return access_type == STORE ? NULL : Pop();
}
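// Top-level handling of keyed element accesses. Picks a monomorphic,
// polymorphic, or generic access based on the receiver type feedback.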
HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
HValue* obj,
HValue* key,
HValue* val,
Expression* expr,
PropertyAccessType access_type,
bool* has_side_effects) {
ASSERT(!expr->IsPropertyName());
HInstruction* instr = NULL;
SmallMapList* types;
bool monomorphic = ComputeReceiverTypes(expr, obj, &types, zone());
bool force_generic = false;
if (access_type == STORE &&
(monomorphic || (types != NULL && !types->is_empty()))) {
// Stores can't be mono/polymorphic if their prototype chain has dictionary
// elements. However, a receiver map that has dictionary elements itself
// should be left to normal mono/poly behavior (the other maps may benefit
// from highly optimized stores).
for (int i = 0; i < types->length(); i++) {
Handle<Map> current_map = types->at(i);
if (current_map->DictionaryElementsInPrototypeChainOnly()) {
force_generic = true;
monomorphic = false;
break;
}
}
}
if (monomorphic) {
Handle<Map> map = types->first();
if (map->has_slow_elements_kind() || !map->IsJSObjectMap()) {
instr = AddInstruction(BuildKeyedGeneric(access_type, obj, key, val));
} else {
BuildCheckHeapObject(obj);
instr = BuildMonomorphicElementAccess(
obj, key, val, NULL, map, access_type, expr->GetStoreMode());
}
} else if (!force_generic && (types != NULL && !types->is_empty())) {
return HandlePolymorphicElementAccess(
obj, key, val, types, access_type,
expr->GetStoreMode(), has_side_effects);
} else {
if (access_type == STORE) {
if (expr->IsAssignment() &&
expr->AsAssignment()->HasNoTypeInformation()) {
Add<HDeoptimize>("Insufficient type feedback for keyed store",
Deoptimizer::SOFT);
}
} else {
if (expr->AsProperty()->HasNoTypeInformation()) {
Add<HDeoptimize>("Insufficient type feedback for keyed load",
Deoptimizer::SOFT);
}
}
instr = AddInstruction(BuildKeyedGeneric(access_type, obj, key, val));
}
*has_side_effects = instr->HasObservableSideEffects();
return instr;
}
void HOptimizedGraphBuilder::EnsureArgumentsArePushedForAccess() {
// Outermost function already has arguments on the stack.
if (function_state()->outer() == NULL) return;
if (function_state()->arguments_pushed()) return;
// Push arguments when entering inlined function.
HEnterInlined* entry = function_state()->entry();
entry->set_arguments_pushed();
HArgumentsObject* arguments = entry->arguments_object();
const ZoneList<HValue*>* arguments_values = arguments->arguments_values();
HInstruction* insert_after = entry;
for (int i = 0; i < arguments_values->length(); i++) {
HValue* argument = arguments_values->at(i);
HInstruction* push_argument = New<HPushArguments>(argument);
push_argument->InsertAfter(insert_after);
insert_after = push_argument;
}
HArgumentsElements* arguments_elements = New<HArgumentsElements>(true);
arguments_elements->ClearFlag(HValue::kUseGVN);
arguments_elements->InsertAfter(insert_after);
function_state()->set_arguments_elements(arguments_elements);
}
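// Tries to turn an access to the arguments object (arguments.length or
// arguments[i]) into an optimized equivalent; returns false if the
// expression is not such an access.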
bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
VariableProxy* proxy = expr->obj()->AsVariableProxy();
if (proxy == NULL) return false;
if (!proxy->var()->IsStackAllocated()) return false;
if (!environment()->Lookup(proxy->var())->CheckFlag(HValue::kIsArguments)) {
return false;
}
HInstruction* result = NULL;
if (expr->key()->IsPropertyName()) {
Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
if (!name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("length"))) return false;
if (function_state()->outer() == NULL) {
HInstruction* elements = Add<HArgumentsElements>(false);
result = New<HArgumentsLength>(elements);
} else {
// Number of arguments without receiver.
int argument_count = environment()->
arguments_environment()->parameter_count() - 1;
result = New<HConstant>(argument_count);
}
} else {
Push(graph()->GetArgumentsObject());
CHECK_ALIVE_OR_RETURN(VisitForValue(expr->key()), true);
HValue* key = Pop();
Drop(1); // Arguments object.
if (function_state()->outer() == NULL) {
HInstruction* elements = Add<HArgumentsElements>(false);
HInstruction* length = Add<HArgumentsLength>(elements);
HInstruction* checked_key = Add<HBoundsCheck>(key, length);
result = New<HAccessArgumentsAt>(elements, length, checked_key);
} else {
EnsureArgumentsArePushedForAccess();
// Number of arguments without receiver.
HInstruction* elements = function_state()->arguments_elements();
int argument_count = environment()->
arguments_environment()->parameter_count() - 1;
HInstruction* length = Add<HConstant>(argument_count);
HInstruction* checked_key = Add<HBoundsCheck>(key, length);
result = New<HAccessArgumentsAt>(elements, length, checked_key);
}
}
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
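// Builds a named property access from the receiver maps gathered by type
// feedback: monomorphic when possible, otherwise polymorphic, with a
// generic access as the fallback when no maps are known.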
HInstruction* HOptimizedGraphBuilder::BuildNamedAccess(
PropertyAccessType access,
BailoutId ast_id,
BailoutId return_id,
Expression* expr,
HValue* object,
Handle<String> name,
HValue* value,
bool is_uninitialized) {
SmallMapList* types;
ComputeReceiverTypes(expr, object, &types, zone());
ASSERT(types != NULL);
if (types->length() > 0) {
PropertyAccessInfo info(this, access, ToType(types->first()), name);
if (!info.CanAccessAsMonomorphic(types)) {
HandlePolymorphicNamedFieldAccess(
access, ast_id, return_id, object, value, types, name);
return NULL;
}
HValue* checked_object;
// Type::Number() is only supported by polymorphic load/call handling.
ASSERT(!info.type()->Is(Type::Number()));
BuildCheckHeapObject(object);
if (AreStringTypes(types)) {
checked_object =
Add<HCheckInstanceType>(object, HCheckInstanceType::IS_STRING);
} else {
checked_object = Add<HCheckMaps>(object, types);
}
return BuildMonomorphicAccess(
&info, object, checked_object, value, ast_id, return_id);
}
return BuildNamedGeneric(access, object, name, value, is_uninitialized);
}
void HOptimizedGraphBuilder::PushLoad(Property* expr,
HValue* object,
HValue* key) {
ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
Push(object);
if (key != NULL) Push(key);
BuildLoad(expr, expr->LoadId());
}
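// Emits the load for a property expression: a string index access, a
// function prototype load, a named access, or a keyed element access.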
void HOptimizedGraphBuilder::BuildLoad(Property* expr,
BailoutId ast_id) {
HInstruction* instr = NULL;
if (expr->IsStringAccess()) {
HValue* index = Pop();
HValue* string = Pop();
HInstruction* char_code = BuildStringCharCodeAt(string, index);
AddInstruction(char_code);
instr = NewUncasted<HStringCharFromCode>(char_code);
} else if (expr->IsFunctionPrototype()) {
HValue* function = Pop();
BuildCheckHeapObject(function);
instr = New<HLoadFunctionPrototype>(function);
} else if (expr->key()->IsPropertyName()) {
Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
HValue* object = Pop();
instr = BuildNamedAccess(LOAD, ast_id, expr->LoadId(), expr,
object, name, NULL, expr->IsUninitialized());
if (instr == NULL) return;
if (instr->IsLinked()) return ast_context()->ReturnValue(instr);
} else {
HValue* key = Pop();
HValue* obj = Pop();
bool has_side_effects = false;
HValue* load = HandleKeyedElementAccess(
obj, key, NULL, expr, LOAD, &has_side_effects);
if (has_side_effects) {
if (ast_context()->IsEffect()) {
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
} else {
Push(load);
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
Drop(1);
}
}
return ast_context()->ReturnValue(load);
}
return ast_context()->ReturnInstruction(instr, ast_id);
}
void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
if (TryArgumentsAccess(expr)) return;
CHECK_ALIVE(VisitForValue(expr->obj()));
if ((!expr->IsFunctionPrototype() && !expr->key()->IsPropertyName()) ||
expr->IsStringAccess()) {
CHECK_ALIVE(VisitForValue(expr->key()));
}
BuildLoad(expr, expr->id());
}
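// Adds a map check against the current map of the given constant object.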
HInstruction* HGraphBuilder::BuildConstantMapCheck(Handle<JSObject> constant) {
HCheckMaps* check = Add<HCheckMaps>(
Add<HConstant>(constant), handle(constant->map()));
check->ClearDependsOnFlag(kElementsKind);
return check;
}
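// Emits map checks for each object on the prototype chain from the given
// prototype up to (and including) the holder. With a null holder the
// whole chain up to the null prototype is checked and NULL is returned.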
HInstruction* HGraphBuilder::BuildCheckPrototypeMaps(Handle<JSObject> prototype,
Handle<JSObject> holder) {
while (holder.is_null() || !prototype.is_identical_to(holder)) {
BuildConstantMapCheck(prototype);
Object* next_prototype = prototype->GetPrototype();
if (next_prototype->IsNull()) return NULL;
CHECK(next_prototype->IsJSObject());
prototype = handle(JSObject::cast(next_prototype));
}
return BuildConstantMapCheck(prototype);
}
void HOptimizedGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
Handle<Map> receiver_map) {
if (!holder.is_null()) {
Handle<JSObject> prototype(JSObject::cast(receiver_map->prototype()));
BuildCheckPrototypeMaps(prototype, holder);
}
}
HInstruction* HOptimizedGraphBuilder::NewPlainFunctionCall(
HValue* fun, int argument_count, bool pass_argument_count) {
return New<HCallJSFunction>(
fun, argument_count, pass_argument_count);
}
HInstruction* HOptimizedGraphBuilder::NewArgumentAdaptorCall(
HValue* fun, HValue* context,
int argument_count, HValue* expected_param_count) {
CallInterfaceDescriptor* descriptor =
isolate()->call_descriptor(Isolate::ArgumentAdaptorCall);
HValue* arity = Add<HConstant>(argument_count - 1);
HValue* op_vals[] = { fun, context, arity, expected_param_count };
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
HConstant* adaptor_value = Add<HConstant>(adaptor);
return New<HCallWithDescriptor>(
adaptor_value, argument_count, descriptor,
Vector<HValue*>(op_vals, descriptor->environment_length()));
}
HInstruction* HOptimizedGraphBuilder::BuildCallConstantFunction(
Handle<JSFunction> jsfun, int argument_count) {
HValue* target = Add<HConstant>(jsfun);
// For constant functions, we try to avoid calling the
// argument adaptor and instead call the function directly.
int formal_parameter_count = jsfun->shared()->formal_parameter_count();
bool dont_adapt_arguments =
(formal_parameter_count ==
SharedFunctionInfo::kDontAdaptArgumentsSentinel);
int arity = argument_count - 1;
bool can_invoke_directly =
dont_adapt_arguments || formal_parameter_count == arity;
if (can_invoke_directly) {
if (jsfun.is_identical_to(current_info()->closure())) {
graph()->MarkRecursive();
}
return NewPlainFunctionCall(target, argument_count, dont_adapt_arguments);
} else {
HValue* param_count_value = Add<HConstant>(formal_parameter_count);
HValue* context = Add<HLoadNamedField>(
target, static_cast<HValue*>(NULL),
HObjectAccess::ForFunctionContextPointer());
return NewArgumentAdaptorCall(target, context,
argument_count, param_count_value);
}
UNREACHABLE();
return NULL;
}
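// Orders polymorphic call targets: more profiler ticks first, then
// smaller function size.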
class FunctionSorter {
public:
FunctionSorter(int index = 0, int ticks = 0, int size = 0)
: index_(index), ticks_(ticks), size_(size) { }
int index() const { return index_; }
int ticks() const { return ticks_; }
int size() const { return size_; }
private:
int index_;
int ticks_;
int size_;
};
inline bool operator<(const FunctionSorter& lhs, const FunctionSorter& rhs) {
int diff = lhs.ticks() - rhs.ticks();
if (diff != 0) return diff > 0;
return lhs.size() < rhs.size();
}
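// Expands a polymorphic named call into a chain of map checks, inlining
// or calling each known target, and either deoptimizes or falls back to
// a generic call for maps that were not handled.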
void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
Call* expr,
HValue* receiver,
SmallMapList* types,
Handle<String> name) {
int argument_count = expr->arguments()->length() + 1; // Includes receiver.
FunctionSorter order[kMaxCallPolymorphism];
bool handle_smi = false;
bool handled_string = false;
int ordered_functions = 0;
for (int i = 0;
i < types->length() && ordered_functions < kMaxCallPolymorphism;
++i) {
PropertyAccessInfo info(this, LOAD, ToType(types->at(i)), name);
if (info.CanAccessMonomorphic() &&
info.lookup()->IsConstant() &&
info.constant()->IsJSFunction()) {
if (info.type()->Is(Type::String())) {
if (handled_string) continue;
handled_string = true;
}
Handle<JSFunction> target = Handle<JSFunction>::cast(info.constant());
if (info.type()->Is(Type::Number())) {
handle_smi = true;
}
expr->set_target(target);
order[ordered_functions++] = FunctionSorter(
i, target->shared()->profiler_ticks(), InliningAstSize(target));
}
}
std::sort(order, order + ordered_functions);
HBasicBlock* number_block = NULL;
HBasicBlock* join = NULL;
handled_string = false;
int count = 0;
for (int fn = 0; fn < ordered_functions; ++fn) {
int i = order[fn].index();
PropertyAccessInfo info(this, LOAD, ToType(types->at(i)), name);
if (info.type()->Is(Type::String())) {
if (handled_string) continue;
handled_string = true;
}
// Reloads the target.
info.CanAccessMonomorphic();
Handle<JSFunction> target = Handle<JSFunction>::cast(info.constant());
expr->set_target(target);
if (count == 0) {
// Only needed once.
join = graph()->CreateBasicBlock();
if (handle_smi) {
HBasicBlock* empty_smi_block = graph()->CreateBasicBlock();
HBasicBlock* not_smi_block = graph()->CreateBasicBlock();
number_block = graph()->CreateBasicBlock();
FinishCurrentBlock(New<HIsSmiAndBranch>(
receiver, empty_smi_block, not_smi_block));
GotoNoSimulate(empty_smi_block, number_block);
set_current_block(not_smi_block);
} else {
BuildCheckHeapObject(receiver);
}
}
++count;
HBasicBlock* if_true = graph()->CreateBasicBlock();
HBasicBlock* if_false = graph()->CreateBasicBlock();
HUnaryControlInstruction* compare;
Handle<Map> map = info.map();
if (info.type()->Is(Type::Number())) {
Handle<Map> heap_number_map = isolate()->factory()->heap_number_map();
compare = New<HCompareMap>(receiver, heap_number_map, if_true, if_false);
} else if (info.type()->Is(Type::String())) {
compare = New<HIsStringAndBranch>(receiver, if_true, if_false);
} else {
compare = New<HCompareMap>(receiver, map, if_true, if_false);
}
FinishCurrentBlock(compare);
if (info.type()->Is(Type::Number())) {
GotoNoSimulate(if_true, number_block);
if_true = number_block;
}
set_current_block(if_true);
AddCheckPrototypeMaps(info.holder(), map);
HValue* function = Add<HConstant>(expr->target());
environment()->SetExpressionStackAt(0, function);
Push(receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments()));
bool needs_wrapping = NeedsWrappingFor(info.type(), target);
bool try_inline = FLAG_polymorphic_inlining && !needs_wrapping;
if (FLAG_trace_inlining && try_inline) {
Handle<JSFunction> caller = current_info()->closure();
SmartArrayPointer<char> caller_name =
caller->shared()->DebugName()->ToCString();
PrintF("Trying to inline the polymorphic call to %s from %s\n",
name->ToCString().get(),
caller_name.get());
}
if (try_inline && TryInlineCall(expr)) {
// Trying to inline will signal that we should bail out from the
// entire compilation by setting stack overflow on the visitor.
if (HasStackOverflow()) return;
} else {
// Since HWrapReceiver currently cannot actually wrap numbers and strings,
// use the regular CallFunctionStub for method calls to wrap the receiver.
// TODO(verwaest): Support creation of value wrappers directly in
// HWrapReceiver.
HInstruction* call = needs_wrapping
? NewUncasted<HCallFunction>(
function, argument_count, WRAP_AND_CALL)
: BuildCallConstantFunction(target, argument_count);
PushArgumentsFromEnvironment(argument_count);
AddInstruction(call);
Drop(1); // Drop the function.
if (!ast_context()->IsEffect()) Push(call);
}
if (current_block() != NULL) Goto(join);
set_current_block(if_false);
}
// Finish up. Unconditionally deoptimize if we've handled all the maps we
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (ordered_functions == types->length() && FLAG_deoptimize_uncommon_cases) {
FinishExitWithHardDeoptimization("Unknown map in polymorphic call");
} else {
Property* prop = expr->expression()->AsProperty();
HInstruction* function = BuildNamedGeneric(
LOAD, receiver, name, NULL, prop->IsUninitialized());
AddInstruction(function);
Push(function);
AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
environment()->SetExpressionStackAt(1, function);
environment()->SetExpressionStackAt(0, receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments()));
CallFunctionFlags flags = receiver->type().IsJSObject()
? NO_CALL_FUNCTION_FLAGS : CALL_AS_METHOD;
HInstruction* call = New<HCallFunction>(
function, argument_count, flags);
PushArgumentsFromEnvironment(argument_count);
Drop(1); // Function.
if (join != NULL) {
AddInstruction(call);
if (!ast_context()->IsEffect()) Push(call);
Goto(join);
} else {
return ast_context()->ReturnInstruction(call, expr->id());
}
}
// We assume that control flow is always live after an expression. So
// even without predecessors to the join block, we set it as the exit
// block and continue by adding instructions there.
ASSERT(join != NULL);
if (join->HasPredecessor()) {
set_current_block(join);
join->SetJoinId(expr->id());
if (!ast_context()->IsEffect()) return ast_context()->ReturnValue(Pop());
} else {
set_current_block(NULL);
}
}
void HOptimizedGraphBuilder::TraceInline(Handle<JSFunction> target,
Handle<JSFunction> caller,
const char* reason) {
if (FLAG_trace_inlining) {
SmartArrayPointer<char> target_name =
target->shared()->DebugName()->ToCString();
SmartArrayPointer<char> caller_name =
caller->shared()->DebugName()->ToCString();
if (reason == NULL) {
PrintF("Inlined %s called from %s.\n", target_name.get(),
caller_name.get());
} else {
PrintF("Did not inline %s called from %s (%s).\n",
target_name.get(), caller_name.get(), reason);
}
}
}
static const int kNotInlinable = 1000000000;
int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
if (!FLAG_use_inlining) return kNotInlinable;
// Precondition: call is monomorphic and we have found a target with the
// appropriate arity.
Handle<JSFunction> caller = current_info()->closure();
Handle<SharedFunctionInfo> target_shared(target->shared());
// Always inline builtins marked for inlining.
if (target->IsBuiltin()) {
return target_shared->inline_builtin() ? 0 : kNotInlinable;
}
if (target_shared->IsApiFunction()) {
TraceInline(target, caller, "target is api function");
return kNotInlinable;
}
// Do a quick check on source code length to avoid parsing large
// inlining candidates.
if (target_shared->SourceSize() >
Min(FLAG_max_inlined_source_size, kUnlimitedMaxInlinedSourceSize)) {
TraceInline(target, caller, "target text too big");
return kNotInlinable;
}
// Target must be inlineable.
if (!target_shared->IsInlineable()) {
TraceInline(target, caller, "target not inlineable");
return kNotInlinable;
}
if (target_shared->dont_inline() || target_shared->dont_optimize()) {
TraceInline(target, caller, "target contains unsupported syntax [early]");
return kNotInlinable;
}
int nodes_added = target_shared->ast_node_count();
return nodes_added;
}
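// Tries to inline the given target at the current call site. Returns
// false when inlining is not attempted; returns true as soon as the
// decision to inline is made, even if graph construction for the inlinee
// later bails out.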
bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
int arguments_count,
HValue* implicit_return_value,
BailoutId ast_id,
BailoutId return_id,
InliningKind inlining_kind,
HSourcePosition position) {
int nodes_added = InliningAstSize(target);
if (nodes_added == kNotInlinable) return false;
Handle<JSFunction> caller = current_info()->closure();
if (nodes_added > Min(FLAG_max_inlined_nodes, kUnlimitedMaxInlinedNodes)) {
TraceInline(target, caller, "target AST is too large [early]");
return false;
}
// Don't inline deeper than the maximum number of inlining levels.
HEnvironment* env = environment();
int current_level = 1;
while (env->outer() != NULL) {
if (current_level == FLAG_max_inlining_levels) {
TraceInline(target, caller, "inline depth limit reached");
return false;
}
if (env->outer()->frame_type() == JS_FUNCTION) {
current_level++;
}
env = env->outer();
}
// Don't inline recursive functions.
for (FunctionState* state = function_state();
state != NULL;
state = state->outer()) {
if (*state->compilation_info()->closure() == *target) {
TraceInline(target, caller, "target is recursive");
return false;
}
}
// We don't want to add more than a certain number of nodes from inlining.
if (inlined_count_ > Min(FLAG_max_inlined_nodes_cumulative,
kUnlimitedMaxInlinedNodesCumulative)) {
TraceInline(target, caller, "cumulative AST node limit reached");
return false;
}
// Parse and allocate variables.
CompilationInfo target_info(target, zone());
// Use the same AstValueFactory for creating strings in the sub-compilation
// step, but don't transfer ownership to target_info.
target_info.SetAstValueFactory(top_info()->ast_value_factory(), false);
Handle<SharedFunctionInfo> target_shared(target->shared());
if (!Parser::Parse(&target_info) || !Scope::Analyze(&target_info)) {
if (target_info.isolate()->has_pending_exception()) {
// Parse or scope error, never optimize this function.
SetStackOverflow();
target_shared->DisableOptimization(kParseScopeError);
}
TraceInline(target, caller, "parse failure");
return false;
}
if (target_info.scope()->num_heap_slots() > 0) {
TraceInline(target, caller, "target has context-allocated variables");
return false;
}
FunctionLiteral* function = target_info.function();
// The following conditions must be checked again after re-parsing, because
// earlier the information might not have been complete due to lazy parsing.
nodes_added = function->ast_node_count();
if (nodes_added > Min(FLAG_max_inlined_nodes, kUnlimitedMaxInlinedNodes)) {
TraceInline(target, caller, "target AST is too large [late]");
return false;
}
AstProperties::Flags* flags(function->flags());
if (flags->Contains(kDontInline) || function->dont_optimize()) {
TraceInline(target, caller, "target contains unsupported syntax [late]");
return false;
}
// If the function uses the arguments object, check that inlining of
// functions with an arguments object is enabled and that the arguments
// variable is stack allocated.
if (function->scope()->arguments() != NULL) {
if (!FLAG_inline_arguments) {
TraceInline(target, caller, "target uses arguments object");
return false;
}
if (!function->scope()->arguments()->IsStackAllocated()) {
TraceInline(target,
caller,
"target uses non-stackallocated arguments object");
return false;
}
}
// All declarations must be inlineable.
ZoneList<Declaration*>* decls = target_info.scope()->declarations();
int decl_count = decls->length();
for (int i = 0; i < decl_count; ++i) {
if (!decls->at(i)->IsInlineable()) {
TraceInline(target, caller, "target has non-trivial declaration");
return false;
}
}
// Generate the deoptimization data for the unoptimized version of
// the target function if we don't already have it.
if (!target_shared->has_deoptimization_support()) {
// Note that we compile here using the same AST that we will use for
// generating the optimized inline code.
target_info.EnableDeoptimizationSupport();
if (!FullCodeGenerator::MakeCode(&target_info)) {
TraceInline(target, caller, "could not generate deoptimization info");
return false;
}
if (target_shared->scope_info() == ScopeInfo::Empty(isolate())) {
// The scope info might not have been set if a lazily compiled
// function is inlined before being called for the first time.
Handle<ScopeInfo> target_scope_info =
ScopeInfo::Create(target_info.scope(), zone());
target_shared->set_scope_info(*target_scope_info);
}
target_shared->EnableDeoptimizationSupport(*target_info.code());
target_shared->set_feedback_vector(*target_info.feedback_vector());
Compiler::RecordFunctionCompilation(Logger::FUNCTION_TAG,
&target_info,
target_shared);
}
// ----------------------------------------------------------------
// After this point, we've made a decision to inline this function (so
// TryInline should always return true).
// Type-check the inlined function.
ASSERT(target_shared->has_deoptimization_support());
AstTyper::Run(&target_info);
int function_id = graph()->TraceInlinedFunction(target_shared, position);
// Save the pending call context. Set up a new one for the inlined
// function. The function state is new-allocated because we need to
// delete it in two different places.
FunctionState* target_state = new FunctionState(
this, &target_info, inlining_kind, function_id);
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner_env =
environment()->CopyForInlining(target,
arguments_count,
function,
undefined,
function_state()->inlining_kind());
HConstant* context = Add<HConstant>(Handle<Context>(target->context()));
inner_env->BindContext(context);
HArgumentsObject* arguments_object = NULL;
// If the function uses the arguments object, create and bind one; also
// copy the current argument values to use them for materialization.
if (function->scope()->arguments() != NULL) {
ASSERT(function->scope()->arguments()->IsStackAllocated());
HEnvironment* arguments_env = inner_env->arguments_environment();
int arguments_count = arguments_env->parameter_count();
arguments_object = Add<HArgumentsObject>(arguments_count);
inner_env->Bind(function->scope()->arguments(), arguments_object);
for (int i = 0; i < arguments_count; i++) {
arguments_object->AddArgument(arguments_env->Lookup(i), zone());
}
}
// Capture the state before invoking the inlined function for deopt in the
// inlined function. This simulate has no bailout-id since it's not directly
// reachable for deopt, and is only used to capture the state. If the simulate
// becomes reachable by merging, the ast id of the simulate merged into it is
// adopted.
Add<HSimulate>(BailoutId::None());
current_block()->UpdateEnvironment(inner_env);
Scope* saved_scope = scope();
set_scope(target_info.scope());
HEnterInlined* enter_inlined =
Add<HEnterInlined>(return_id, target, arguments_count, function,
function_state()->inlining_kind(),
function->scope()->arguments(),
arguments_object);
function_state()->set_entry(enter_inlined);
VisitDeclarations(target_info.scope()->declarations());
VisitStatements(function->body());
set_scope(saved_scope);
if (HasStackOverflow()) {
// Bail out if the inline function did, as we cannot residualize a call
// instead.
TraceInline(target, caller, "inline graph construction failed");
target_shared->DisableOptimization(kInliningBailedOut);
inline_bailout_ = true;
delete target_state;
return true;
}
// Update inlined nodes count.
inlined_count_ += nodes_added;
Handle<Code> unoptimized_code(target_shared->code());
ASSERT(unoptimized_code->kind() == Code::FUNCTION);
Handle<TypeFeedbackInfo> type_info(
TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
graph()->update_type_change_checksum(type_info->own_type_change_checksum());
TraceInline(target, caller, NULL);
if (current_block() != NULL) {
FunctionState* state = function_state();
if (state->inlining_kind() == CONSTRUCT_CALL_RETURN) {
// Falling off the end of an inlined construct call. In a test context
// the return value will always evaluate to true; in a value context
// the return value is the newly allocated receiver.
if (call_context()->IsTest()) {
Goto(inlined_test_context()->if_true(), state);
} else if (call_context()->IsEffect()) {
Goto(function_return(), state);
} else {
ASSERT(call_context()->IsValue());
AddLeaveInlined(implicit_return_value, state);
}
} else if (state->inlining_kind() == SETTER_CALL_RETURN) {
// Falling off the end of an inlined setter call. The returned value is
// never used; the value of an assignment is always the value of the
// RHS of the assignment.
if (call_context()->IsTest()) {
inlined_test_context()->ReturnValue(implicit_return_value);
} else if (call_context()->IsEffect()) {
Goto(function_return(), state);
} else {
ASSERT(call_context()->IsValue());
AddLeaveInlined(implicit_return_value, state);
}
} else {
// Falling off the end of a normal inlined function. This basically means
// returning undefined.
if (call_context()->IsTest()) {
Goto(inlined_test_context()->if_false(), state);
} else if (call_context()->IsEffect()) {
Goto(function_return(), state);
} else {
ASSERT(call_context()->IsValue());
AddLeaveInlined(undefined, state);
}
}
}
// Fix up the function exits.
if (inlined_test_context() != NULL) {
HBasicBlock* if_true = inlined_test_context()->if_true();
HBasicBlock* if_false = inlined_test_context()->if_false();
HEnterInlined* entry = function_state()->entry();
// Pop the return test context from the expression context stack.
ASSERT(ast_context() == inlined_test_context());
ClearInlinedTestContext();
delete target_state;
// Forward to the real test context.
if (if_true->HasPredecessor()) {
entry->RegisterReturnTarget(if_true, zone());
if_true->SetJoinId(ast_id);
HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
Goto(if_true, true_target, function_state());
}
if (if_false->HasPredecessor()) {
entry->RegisterReturnTarget(if_false, zone());
if_false->SetJoinId(ast_id);
HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
Goto(if_false, false_target, function_state());
}
set_current_block(NULL);
return true;
} else if (function_return()->HasPredecessor()) {
function_state()->entry()->RegisterReturnTarget(function_return(), zone());
function_return()->SetJoinId(ast_id);
set_current_block(function_return());
} else {
set_current_block(NULL);
}
delete target_state;
return true;
}
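// Thin wrappers around TryInline for the different kinds of call sites.
// The getter and setter variants first try the API fast path.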
bool HOptimizedGraphBuilder::TryInlineCall(Call* expr) {
return TryInline(expr->target(),
expr->arguments()->length(),
NULL,
expr->id(),
expr->ReturnId(),
NORMAL_RETURN,
ScriptPositionToSourcePosition(expr->position()));
}
bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr,
HValue* implicit_return_value) {
return TryInline(expr->target(),
expr->arguments()->length(),
implicit_return_value,
expr->id(),
expr->ReturnId(),
CONSTRUCT_CALL_RETURN,
ScriptPositionToSourcePosition(expr->position()));
}
bool HOptimizedGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
Handle<Map> receiver_map,
BailoutId ast_id,
BailoutId return_id) {
if (TryInlineApiGetter(getter, receiver_map, ast_id)) return true;
return TryInline(getter,
0,
NULL,
ast_id,
return_id,
GETTER_CALL_RETURN,
source_position());
}
bool HOptimizedGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
Handle<Map> receiver_map,
BailoutId id,
BailoutId assignment_id,
HValue* implicit_return_value) {
if (TryInlineApiSetter(setter, receiver_map, id)) return true;
return TryInline(setter,
1,
implicit_return_value,
id, assignment_id,
SETTER_CALL_RETURN,
source_position());
}
bool HOptimizedGraphBuilder::TryInlineApply(Handle<JSFunction> function,
Call* expr,
int arguments_count) {
return TryInline(function,
arguments_count,
NULL,
expr->id(),
expr->ReturnId(),
NORMAL_RETURN,
ScriptPositionToSourcePosition(expr->position()));
}
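// Tries to replace a direct call to a builtin such as Math.floor with a
// dedicated hydrogen instruction.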
bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr) {
if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
switch (id) {
case kMathExp:
if (!FLAG_fast_math) break;
// Fall through if FLAG_fast_math.
case kMathRound:
case kMathFloor:
case kMathAbs:
case kMathSqrt:
case kMathLog:
case kMathClz32:
if (expr->arguments()->length() == 1) {
HValue* argument = Pop();
Drop(2); // Receiver and function.
HInstruction* op = NewUncasted<HUnaryMathOperation>(argument, id);
ast_context()->ReturnInstruction(op, expr->id());
return true;
}
break;
case kMathImul:
if (expr->arguments()->length() == 2) {
HValue* right = Pop();
HValue* left = Pop();
Drop(2); // Receiver and function.
HInstruction* op = HMul::NewImul(zone(), context(), left, right);
ast_context()->ReturnInstruction(op, expr->id());
return true;
}
break;
default:
// Not supported for inlining yet.
break;
}
return false;
}
bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
Call* expr,
HValue* receiver,
Handle<Map> receiver_map) {
// Try to inline calls like Math.* as operations in the calling function.
if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
int argument_count = expr->arguments()->length() + 1; // Plus receiver.
switch (id) {
case kStringCharCodeAt:
case kStringCharAt:
if (argument_count == 2) {
HValue* index = Pop();
HValue* string = Pop();
Drop(1); // Function.
HInstruction* char_code =
BuildStringCharCodeAt(string, index);
if (id == kStringCharCodeAt) {
ast_context()->ReturnInstruction(char_code, expr->id());
return true;
}
AddInstruction(char_code);
HInstruction* result = NewUncasted<HStringCharFromCode>(char_code);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
break;
case kStringFromCharCode:
if (argument_count == 2) {
HValue* argument = Pop();
Drop(2); // Receiver and function.
HInstruction* result = NewUncasted<HStringCharFromCode>(argument);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
break;
case kMathExp:
if (!FLAG_fast_math) break;
// Fall through if FLAG_fast_math.
case kMathRound:
case kMathFloor:
case kMathAbs:
case kMathSqrt:
case kMathLog:
case kMathClz32:
if (argument_count == 2) {
HValue* argument = Pop();
Drop(2); // Receiver and function.
HInstruction* op = NewUncasted<HUnaryMathOperation>(argument, id);
ast_context()->ReturnInstruction(op, expr->id());
return true;
}
break;
case kMathPow:
if (argument_count == 3) {
HValue* right = Pop();
HValue* left = Pop();
Drop(2); // Receiver and function.
HInstruction* result = NULL;
// Use sqrt() if exponent is 0.5 or -0.5.
if (right->IsConstant() && HConstant::cast(right)->HasDoubleValue()) {
double exponent = HConstant::cast(right)->DoubleValue();
if (exponent == 0.5) {
result = NewUncasted<HUnaryMathOperation>(left, kMathPowHalf);
} else if (exponent == -0.5) {
HValue* one = graph()->GetConstant1();
HInstruction* sqrt = AddUncasted<HUnaryMathOperation>(
left, kMathPowHalf);
// MathPowHalf doesn't have side effects so there's no need for
// an environment simulation here.
ASSERT(!sqrt->HasObservableSideEffects());
result = NewUncasted<HDiv>(one, sqrt);
} else if (exponent == 2.0) {
result = NewUncasted<HMul>(left, left);
}
}
if (result == NULL) {
result = NewUncasted<HPower>(left, right);
}
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
break;
case kMathMax:
case kMathMin:
if (argument_count == 3) {
HValue* right = Pop();
HValue* left = Pop();
Drop(2); // Receiver and function.
HMathMinMax::Operation op = (id == kMathMin) ? HMathMinMax::kMathMin
: HMathMinMax::kMathMax;
HInstruction* result = NewUncasted<HMathMinMax>(left, right, op);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
break;
case kMathImul:
if (argument_count == 3) {
HValue* right = Pop();
HValue* left = Pop();
Drop(2); // Receiver and function.
HInstruction* result = HMul::NewImul(zone(), context(), left, right);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
break;
case kArrayPop: {
if (receiver_map.is_null()) return false;
if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false;
ElementsKind elements_kind = receiver_map->elements_kind();
if (!IsFastElementsKind(elements_kind)) return false;
if (receiver_map->is_observed()) return false;
ASSERT(receiver_map->is_extensible());
Drop(expr->arguments()->length());
HValue* result;
HValue* reduced_length;
HValue* receiver = Pop();
HValue* checked_object = AddCheckMap(receiver, receiver_map);
HValue* length = Add<HLoadNamedField>(
checked_object, static_cast<HValue*>(NULL),
HObjectAccess::ForArrayLength(elements_kind));
Drop(1); // Function.
{ NoObservableSideEffectsScope scope(this);
IfBuilder length_checker(this);
HValue* bounds_check = length_checker.If<HCompareNumericAndBranch>(
length, graph()->GetConstant0(), Token::EQ);
length_checker.Then();
if (!ast_context()->IsEffect()) Push(graph()->GetConstantUndefined());
length_checker.Else();
HValue* elements = AddLoadElements(checked_object);
// Ensure that we aren't popping from a copy-on-write array.
if (IsFastSmiOrObjectElementsKind(elements_kind)) {
elements = BuildCopyElementsOnWrite(checked_object, elements,
elements_kind, length);
}
reduced_length = AddUncasted<HSub>(length, graph()->GetConstant1());
result = AddElementAccess(elements, reduced_length, NULL,
bounds_check, elements_kind, LOAD);
Factory* factory = isolate()->factory();
double nan_double = FixedDoubleArray::hole_nan_as_double();
HValue* hole = IsFastSmiOrObjectElementsKind(elements_kind)
? Add<HConstant>(factory->the_hole_value())
: Add<HConstant>(nan_double);
if (IsFastSmiOrObjectElementsKind(elements_kind)) {
elements_kind = FAST_HOLEY_ELEMENTS;
}
AddElementAccess(
elements, reduced_length, hole, bounds_check, elements_kind, STORE);
Add<HStoreNamedField>(
checked_object, HObjectAccess::ForArrayLength(elements_kind),
reduced_length, STORE_TO_INITIALIZED_ENTRY);
if (!ast_context()->IsEffect()) Push(result);
length_checker.End();
}
result = ast_context()->IsEffect() ? graph()->GetConstant0() : Top();
Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
if (!ast_context()->IsEffect()) Drop(1);
ast_context()->ReturnValue(result);
return true;
}
case kArrayPush: {
if (receiver_map.is_null()) return false;
if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false;
ElementsKind elements_kind = receiver_map->elements_kind();
if (!IsFastElementsKind(elements_kind)) return false;
if (receiver_map->is_observed()) return false;
if (JSArray::IsReadOnlyLengthDescriptor(receiver_map)) return false;
ASSERT(receiver_map->is_extensible());
// If there may be elements accessors in the prototype chain, the fast
// inlined version can't be used.
if (receiver_map->DictionaryElementsInPrototypeChainOnly()) return false;
// Even if there are currently no elements accessors on the prototype
// chain, that doesn't mean there won't be any later. Install a full
// prototype chain check to trap element accessors being installed on
// the prototype chain, which would cause elements to go to dictionary
// mode and result in a map change.
Handle<JSObject> prototype(JSObject::cast(receiver_map->prototype()));
BuildCheckPrototypeMaps(prototype, Handle<JSObject>());
const int argc = expr->arguments()->length();
if (argc != 1) return false;
HValue* value_to_push = Pop();
HValue* array = Pop();
Drop(1); // Drop function.
HInstruction* new_size = NULL;
HValue* length = NULL;
{
NoObservableSideEffectsScope scope(this);
length = Add<HLoadNamedField>(array, static_cast<HValue*>(NULL),
HObjectAccess::ForArrayLength(elements_kind));
new_size = AddUncasted<HAdd>(length, graph()->GetConstant1());
bool is_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
BuildUncheckedMonomorphicElementAccess(array, length,
value_to_push, is_array,
elements_kind, STORE,
NEVER_RETURN_HOLE,
STORE_AND_GROW_NO_TRANSITION);
if (!ast_context()->IsEffect()) Push(new_size);
Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
if (!ast_context()->IsEffect()) Drop(1);
}
ast_context()->ReturnValue(new_size);
return true;
}
case kArrayShift: {
if (receiver_map.is_null()) return false;
if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false;
ElementsKind kind = receiver_map->elements_kind();
if (!IsFastElementsKind(kind)) return false;
if (receiver_map->is_observed()) return false;
ASSERT(receiver_map->is_extensible());
// If there may be elements accessors in the prototype chain, the fast
// inlined version can't be used.
if (receiver_map->DictionaryElementsInPrototypeChainOnly()) return false;
// Even if there are currently no elements accessors on the prototype
// chain, that doesn't mean there won't be any later. Install a full
// prototype chain check to trap element accessors being installed on
// the prototype chain, which would cause elements to go to dictionary
// mode and result in a map change.
BuildCheckPrototypeMaps(
handle(JSObject::cast(receiver_map->prototype()), isolate()),
Handle<JSObject>::null());
// Threshold for fast inlined Array.shift().
HConstant* inline_threshold = Add<HConstant>(static_cast<int32_t>(16));
Drop(expr->arguments()->length());
HValue* receiver = Pop();
HValue* function = Pop();
HValue* result;
{
NoObservableSideEffectsScope scope(this);
HValue* length = Add<HLoadNamedField>(
receiver, static_cast<HValue*>(NULL),
HObjectAccess::ForArrayLength(kind));
IfBuilder if_lengthiszero(this);
HValue* lengthiszero = if_lengthiszero.If<HCompareNumericAndBranch>(
length, graph()->GetConstant0(), Token::EQ);
if_lengthiszero.Then();
{
if (!ast_context()->IsEffect()) Push(graph()->GetConstantUndefined());
}
if_lengthiszero.Else();
{
HValue* elements = AddLoadElements(receiver);
// Check if we can use the fast inlined Array.shift().
IfBuilder if_inline(this);
if_inline.If<HCompareNumericAndBranch>(
length, inline_threshold, Token::LTE);
if (IsFastSmiOrObjectElementsKind(kind)) {
// We cannot handle copy-on-write backing stores here.
if_inline.AndIf<HCompareMap>(
elements, isolate()->factory()->fixed_array_map());
}
if_inline.Then();
{
// Remember the result.
if (!ast_context()->IsEffect()) {
Push(AddElementAccess(elements, graph()->GetConstant0(), NULL,
lengthiszero, kind, LOAD));
}
// Compute the new length.
HValue* new_length = AddUncasted<HSub>(
length, graph()->GetConstant1());
new_length->ClearFlag(HValue::kCanOverflow);
// Copy the remaining elements.
LoopBuilder loop(this, context(), LoopBuilder::kPostIncrement);
{
HValue* new_key = loop.BeginBody(
graph()->GetConstant0(), new_length, Token::LT);
HValue* key = AddUncasted<HAdd>(new_key, graph()->GetConstant1());
key->ClearFlag(HValue::kCanOverflow);
HValue* element = AddUncasted<HLoadKeyed>(
elements, key, lengthiszero, kind, ALLOW_RETURN_HOLE);
HStoreKeyed* store = Add<HStoreKeyed>(
elements, new_key, element, kind);
store->SetFlag(HValue::kAllowUndefinedAsNaN);
}
loop.EndBody();
// Put a hole at the end.
HValue* hole = IsFastSmiOrObjectElementsKind(kind)
? Add<HConstant>(isolate()->factory()->the_hole_value())
: Add<HConstant>(FixedDoubleArray::hole_nan_as_double());
if (IsFastSmiOrObjectElementsKind(kind)) kind = FAST_HOLEY_ELEMENTS;
Add<HStoreKeyed>(
elements, new_length, hole, kind, INITIALIZING_STORE);
// Remember new length.
Add<HStoreNamedField>(
receiver, HObjectAccess::ForArrayLength(kind),
new_length, STORE_TO_INITIALIZED_ENTRY);
}
if_inline.Else();
{
Add<HPushArguments>(receiver);
result = Add<HCallJSFunction>(function, 1, true);
if (!ast_context()->IsEffect()) Push(result);
}
if_inline.End();
}
if_lengthiszero.End();
}
result = ast_context()->IsEffect() ? graph()->GetConstant0() : Top();
Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
if (!ast_context()->IsEffect()) Drop(1);
ast_context()->ReturnValue(result);
return true;
}
case kArrayIndexOf:
case kArrayLastIndexOf: {
if (receiver_map.is_null()) return false;
if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false;
ElementsKind kind = receiver_map->elements_kind();
if (!IsFastElementsKind(kind)) return false;
if (receiver_map->is_observed()) return false;
if (argument_count != 2) return false;
ASSERT(receiver_map->is_extensible());
// If there may be elements accessors in the prototype chain, the fast
// inlined version can't be used.
if (receiver_map->DictionaryElementsInPrototypeChainOnly()) return false;
// Even if there are currently no elements accessors on the prototype
// chain, that doesn't mean there won't be any later. Install a full
// prototype chain check to trap element accessors being installed on
// the prototype chain, which would cause elements to go to dictionary
// mode and result in a map change.
BuildCheckPrototypeMaps(
handle(JSObject::cast(receiver_map->prototype()), isolate()),
Handle<JSObject>::null());
HValue* search_element = Pop();
HValue* receiver = Pop();
Drop(1); // Drop function.
ArrayIndexOfMode mode = (id == kArrayIndexOf)
? kFirstIndexOf : kLastIndexOf;
HValue* index = BuildArrayIndexOf(receiver, search_element, kind, mode);
if (!ast_context()->IsEffect()) Push(index);
Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
if (!ast_context()->IsEffect()) Drop(1);
ast_context()->ReturnValue(index);
return true;
}
default:
// Not yet supported for inlining.
break;
}
return false;
}
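// Wrappers that collect receiver maps and the argument count for the
// different API call flavors before delegating to TryInlineApiCall.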
bool HOptimizedGraphBuilder::TryInlineApiFunctionCall(Call* expr,
HValue* receiver) {
Handle<JSFunction> function = expr->target();
int argc = expr->arguments()->length();
SmallMapList receiver_maps;
return TryInlineApiCall(function,
receiver,
&receiver_maps,
argc,
expr->id(),
kCallApiFunction);
}
bool HOptimizedGraphBuilder::TryInlineApiMethodCall(
Call* expr,
HValue* receiver,
SmallMapList* receiver_maps) {
Handle<JSFunction> function = expr->target();
int argc = expr->arguments()->length();
return TryInlineApiCall(function,
receiver,
receiver_maps,
argc,
expr->id(),
kCallApiMethod);
}
bool HOptimizedGraphBuilder::TryInlineApiGetter(Handle<JSFunction> function,
Handle<Map> receiver_map,
BailoutId ast_id) {
SmallMapList receiver_maps(1, zone());
receiver_maps.Add(receiver_map, zone());
return TryInlineApiCall(function,
NULL, // Receiver is on expression stack.
&receiver_maps,
0,
ast_id,
kCallApiGetter);
}
bool HOptimizedGraphBuilder::TryInlineApiSetter(Handle<JSFunction> function,
Handle<Map> receiver_map,
BailoutId ast_id) {
SmallMapList receiver_maps(1, zone());
receiver_maps.Add(receiver_map, zone());
return TryInlineApiCall(function,
NULL, // Receiver is on expression stack.
&receiver_maps,
1,
ast_id,
kCallApiSetter);
}
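// Common bottleneck for inlining calls through the API function mechanism:
// verifies that the target is a simple API call, establishes the
// receiver/holder relationship, and emits a direct CallApiFunctionStub call.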
bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
HValue* receiver,
SmallMapList* receiver_maps,
int argc,
BailoutId ast_id,
ApiCallType call_type) {
CallOptimization optimization(function);
if (!optimization.is_simple_api_call()) return false;
Handle<Map> holder_map;
if (call_type == kCallApiFunction) {
// Cannot embed a direct reference to the global proxy map
// as it may be dropped on deserialization.
CHECK(!isolate()->serializer_enabled());
ASSERT_EQ(0, receiver_maps->length());
receiver_maps->Add(handle(
function->context()->global_object()->global_receiver()->map()),
zone());
}
CallOptimization::HolderLookup holder_lookup =
CallOptimization::kHolderNotFound;
Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
receiver_maps->first(), &holder_lookup);
if (holder_lookup == CallOptimization::kHolderNotFound) return false;
if (FLAG_trace_inlining) {
PrintF("Inlining api function ");
function->ShortPrint();
PrintF("\n");
}
bool drop_extra = false;
bool is_store = false;
switch (call_type) {
case kCallApiFunction:
case kCallApiMethod:
// Need to check that none of the receiver maps could have changed.
Add<HCheckMaps>(receiver, receiver_maps);
// Need to ensure the chain between receiver and api_holder is intact.
if (holder_lookup == CallOptimization::kHolderFound) {
AddCheckPrototypeMaps(api_holder, receiver_maps->first());
} else {
ASSERT_EQ(holder_lookup, CallOptimization::kHolderIsReceiver);
}
// Includes receiver.
PushArgumentsFromEnvironment(argc + 1);
// Drop function after call.
drop_extra = true;
break;
case kCallApiGetter:
// Receiver and prototype chain cannot have changed.
ASSERT_EQ(0, argc);
ASSERT_EQ(NULL, receiver);
// Receiver is on expression stack.
receiver = Pop();
Add<HPushArguments>(receiver);
break;
case kCallApiSetter:
{
is_store = true;
// Receiver and prototype chain cannot have changed.
ASSERT_EQ(1, argc);
ASSERT_EQ(NULL, receiver);
// Receiver and value are on expression stack.
HValue* value = Pop();
receiver = Pop();
Add<HPushArguments>(receiver, value);
break;
}
}
HValue* holder = NULL;
switch (holder_lookup) {
case CallOptimization::kHolderFound:
holder = Add<HConstant>(api_holder);
break;
case CallOptimization::kHolderIsReceiver:
holder = receiver;
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
break;
}
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data_obj(api_call_info->data(), isolate());
bool call_data_is_undefined = call_data_obj->IsUndefined();
HValue* call_data = Add<HConstant>(call_data_obj);
ApiFunction fun(v8::ToCData<Address>(api_call_info->callback()));
ExternalReference ref = ExternalReference(&fun,
ExternalReference::DIRECT_API_CALL,
isolate());
HValue* api_function_address = Add<HConstant>(ExternalReference(ref));
HValue* op_vals[] = {
Add<HConstant>(function),
call_data,
holder,
api_function_address,
context()
};
CallInterfaceDescriptor* descriptor =
isolate()->call_descriptor(Isolate::ApiFunctionCall);
CallApiFunctionStub stub(isolate(), is_store, call_data_is_undefined, argc);
Handle<Code> code = stub.GetCode();
HConstant* code_value = Add<HConstant>(code);
ASSERT((sizeof(op_vals) / kPointerSize) ==
descriptor->environment_length());
HInstruction* call = New<HCallWithDescriptor>(
code_value, argc + 1, descriptor,
Vector<HValue*>(op_vals, descriptor->environment_length()));
if (drop_extra) Drop(1); // Drop function.
ast_context()->ReturnInstruction(call, ast_id);
return true;
}
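// Recognizes the pattern f.apply(receiver, arguments) and compiles it as a
// direct application of the (materialized or inlined) arguments object.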
bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
ASSERT(expr->expression()->IsProperty());
if (!expr->IsMonomorphic()) {
return false;
}
Handle<Map> function_map = expr->GetReceiverTypes()->first();
if (function_map->instance_type() != JS_FUNCTION_TYPE ||
!expr->target()->shared()->HasBuiltinFunctionId() ||
expr->target()->shared()->builtin_function_id() != kFunctionApply) {
return false;
}
if (current_info()->scope()->arguments() == NULL) return false;
ZoneList<Expression*>* args = expr->arguments();
if (args->length() != 2) return false;
VariableProxy* arg_two = args->at(1)->AsVariableProxy();
if (arg_two == NULL || !arg_two->var()->IsStackAllocated()) return false;
HValue* arg_two_value = LookupAndMakeLive(arg_two->var());
if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
// Found pattern f.apply(receiver, arguments).
CHECK_ALIVE_OR_RETURN(VisitForValue(args->at(0)), true);
HValue* receiver = Pop(); // receiver
HValue* function = Pop(); // f
Drop(1); // apply
HValue* checked_function = AddCheckMap(function, function_map);
if (function_state()->outer() == NULL) {
HInstruction* elements = Add<HArgumentsElements>(false);
HInstruction* length = Add<HArgumentsLength>(elements);
HValue* wrapped_receiver = BuildWrapReceiver(receiver, checked_function);
HInstruction* result = New<HApplyArguments>(function,
wrapped_receiver,
length,
elements);
ast_context()->ReturnInstruction(result, expr->id());
return true;
} else {
// We are inside an inlined function and we know exactly what is inside
// the arguments object. But we need to be able to materialize at deopt.
ASSERT_EQ(environment()->arguments_environment()->parameter_count(),
function_state()->entry()->arguments_object()->arguments_count());
HArgumentsObject* args = function_state()->entry()->arguments_object();
const ZoneList<HValue*>* arguments_values = args->arguments_values();
int arguments_count = arguments_values->length();
Push(function);
Push(BuildWrapReceiver(receiver, checked_function));
for (int i = 1; i < arguments_count; i++) {
Push(arguments_values->at(i));
}
Handle<JSFunction> known_function;
if (function->IsConstant() &&
HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
known_function = Handle<JSFunction>::cast(
HConstant::cast(function)->handle(isolate()));
int args_count = arguments_count - 1; // Excluding receiver.
if (TryInlineApply(known_function, expr, args_count)) return true;
}
PushArgumentsFromEnvironment(arguments_count);
HInvokeFunction* call = New<HInvokeFunction>(
function, known_function, arguments_count);
Drop(1); // Function.
ast_context()->ReturnInstruction(call, expr->id());
return true;
}
}
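// Computes the implicit receiver for a call to the given target: the global
// receiver for sloppy-mode, non-native functions, undefined otherwise.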
HValue* HOptimizedGraphBuilder::ImplicitReceiverFor(HValue* function,
Handle<JSFunction> target) {
SharedFunctionInfo* shared = target->shared();
if (shared->strict_mode() == SLOPPY && !shared->native()) {
// Cannot embed a direct reference to the global proxy
// as it is dropped on deserialization.
CHECK(!isolate()->serializer_enabled());
Handle<JSObject> global_receiver(
target->context()->global_object()->global_receiver());
return Add<HConstant>(global_receiver);
}
return graph()->GetConstantUndefined();
}
void HOptimizedGraphBuilder::BuildArrayCall(Expression* expression,
int arguments_count,
HValue* function,
Handle<AllocationSite> site) {
Add<HCheckValue>(function, array_function());
if (IsCallArrayInlineable(arguments_count, site)) {
BuildInlinedCallArray(expression, arguments_count, site);
return;
}
HInstruction* call = PreProcessCall(New<HCallNewArray>(
function, arguments_count + 1, site->GetElementsKind()));
if (expression->IsCall()) {
Drop(1);
}
ast_context()->ReturnInstruction(call, expression->id());
}
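// Builds the inline search loop for Array.prototype.indexOf/lastIndexOf.
// Fast smi/double arrays use a single comparison loop; for other element
// kinds the search element is dispatched on string, number and generic
// comparisons.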
HValue* HOptimizedGraphBuilder::BuildArrayIndexOf(HValue* receiver,
HValue* search_element,
ElementsKind kind,
ArrayIndexOfMode mode) {
ASSERT(IsFastElementsKind(kind));
NoObservableSideEffectsScope no_effects(this);
HValue* elements = AddLoadElements(receiver);
HValue* length = AddLoadArrayLength(receiver, kind);
HValue* initial;
HValue* terminating;
Token::Value token;
LoopBuilder::Direction direction;
if (mode == kFirstIndexOf) {
initial = graph()->GetConstant0();
terminating = length;
token = Token::LT;
direction = LoopBuilder::kPostIncrement;
} else {
ASSERT_EQ(kLastIndexOf, mode);
initial = length;
terminating = graph()->GetConstant0();
token = Token::GT;
direction = LoopBuilder::kPreDecrement;
}
Push(graph()->GetConstantMinus1());
if (IsFastDoubleElementsKind(kind) || IsFastSmiElementsKind(kind)) {
LoopBuilder loop(this, context(), direction);
{
HValue* index = loop.BeginBody(initial, terminating, token);
HValue* element = AddUncasted<HLoadKeyed>(
elements, index, static_cast<HValue*>(NULL),
kind, ALLOW_RETURN_HOLE);
IfBuilder if_issame(this);
if (IsFastDoubleElementsKind(kind)) {
if_issame.If<HCompareNumericAndBranch>(
element, search_element, Token::EQ_STRICT);
} else {
if_issame.If<HCompareObjectEqAndBranch>(element, search_element);
}
if_issame.Then();
{
Drop(1);
Push(index);
loop.Break();
}
if_issame.End();
}
loop.EndBody();
} else {
IfBuilder if_isstring(this);
if_isstring.If<HIsStringAndBranch>(search_element);
if_isstring.Then();
{
LoopBuilder loop(this, context(), direction);
{
HValue* index = loop.BeginBody(initial, terminating, token);
HValue* element = AddUncasted<HLoadKeyed>(
elements, index, static_cast<HValue*>(NULL),
kind, ALLOW_RETURN_HOLE);
IfBuilder if_issame(this);
if_issame.If<HIsStringAndBranch>(element);
if_issame.AndIf<HStringCompareAndBranch>(
element, search_element, Token::EQ_STRICT);
if_issame.Then();
{
Drop(1);
Push(index);
loop.Break();
}
if_issame.End();
}
loop.EndBody();
}
if_isstring.Else();
{
IfBuilder if_isnumber(this);
if_isnumber.If<HIsSmiAndBranch>(search_element);
if_isnumber.OrIf<HCompareMap>(
search_element, isolate()->factory()->heap_number_map());
if_isnumber.Then();
{
HValue* search_number =
AddUncasted<HForceRepresentation>(search_element,
Representation::Double());
LoopBuilder loop(this, context(), direction);
{
HValue* index = loop.BeginBody(initial, terminating, token);
HValue* element = AddUncasted<HLoadKeyed>(
elements, index, static_cast<HValue*>(NULL),
kind, ALLOW_RETURN_HOLE);
IfBuilder if_element_isnumber(this);
if_element_isnumber.If<HIsSmiAndBranch>(element);
if_element_isnumber.OrIf<HCompareMap>(
element, isolate()->factory()->heap_number_map());
if_element_isnumber.Then();
{
HValue* number =
AddUncasted<HForceRepresentation>(element,
Representation::Double());
IfBuilder if_issame(this);
if_issame.If<HCompareNumericAndBranch>(
number, search_number, Token::EQ_STRICT);
if_issame.Then();
{
Drop(1);
Push(index);
loop.Break();
}
if_issame.End();
}
if_element_isnumber.End();
}
loop.EndBody();
}
if_isnumber.Else();
{
LoopBuilder loop(this, context(), direction);
{
HValue* index = loop.BeginBody(initial, terminating, token);
HValue* element = AddUncasted<HLoadKeyed>(
elements, index, static_cast<HValue*>(NULL),
kind, ALLOW_RETURN_HOLE);
IfBuilder if_issame(this);
if_issame.If<HCompareObjectEqAndBranch>(
element, search_element);
if_issame.Then();
{
Drop(1);
Push(index);
loop.Break();
}
if_issame.End();
}
loop.EndBody();
}
if_isnumber.End();
}
if_isstring.End();
}
return Pop();
}
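// Handles calls to the Array function itself, either by inlining the array
// allocation or by emitting an HCallNewArray instruction.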
bool HOptimizedGraphBuilder::TryHandleArrayCall(Call* expr, HValue* function) {
if (!array_function().is_identical_to(expr->target())) {
return false;
}
Handle<AllocationSite> site = expr->allocation_site();
if (site.is_null()) return false;
BuildArrayCall(expr,
expr->arguments()->length(),
function,
site);
return true;
}
bool HOptimizedGraphBuilder::TryHandleArrayCallNew(CallNew* expr,
HValue* function) {
if (!array_function().is_identical_to(expr->target())) {
return false;
}
BuildArrayCall(expr,
expr->arguments()->length(),
function,
expr->allocation_site());
return true;
}
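// Main entry point for compiling call expressions: dispatches between
// property calls, global calls, monomorphic calls and the generic case,
// attempting the various inlining strategies along the way.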
void HOptimizedGraphBuilder::VisitCall(Call* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
Expression* callee = expr->expression();
int argument_count = expr->arguments()->length() + 1; // Plus receiver.
HInstruction* call = NULL;
Property* prop = callee->AsProperty();
if (prop != NULL) {
CHECK_ALIVE(VisitForValue(prop->obj()));
HValue* receiver = Top();
SmallMapList* types;
ComputeReceiverTypes(expr, receiver, &types, zone());
if (prop->key()->IsPropertyName() && types->length() > 0) {
Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
PropertyAccessInfo info(this, LOAD, ToType(types->first()), name);
if (!info.CanAccessAsMonomorphic(types)) {
HandlePolymorphicCallNamed(expr, receiver, types, name);
return;
}
}
HValue* key = NULL;
if (!prop->key()->IsPropertyName()) {
CHECK_ALIVE(VisitForValue(prop->key()));
key = Pop();
}
CHECK_ALIVE(PushLoad(prop, receiver, key));
HValue* function = Pop();
if (FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
// Push the function under the receiver.
environment()->SetExpressionStackAt(0, function);
Push(receiver);
if (function->IsConstant() &&
HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
Handle<JSFunction> known_function = Handle<JSFunction>::cast(
HConstant::cast(function)->handle(isolate()));
expr->set_target(known_function);
if (TryCallApply(expr)) return;
CHECK_ALIVE(VisitExpressions(expr->arguments()));
Handle<Map> map = types->length() == 1 ? types->first() : Handle<Map>();
if (TryInlineBuiltinMethodCall(expr, receiver, map)) {
if (FLAG_trace_inlining) {
PrintF("Inlining builtin ");
known_function->ShortPrint();
PrintF("\n");
}
return;
}
if (TryInlineApiMethodCall(expr, receiver, types)) return;
// Wrap the receiver if necessary.
if (NeedsWrappingFor(ToType(types->first()), known_function)) {
// Since HWrapReceiver currently cannot actually wrap numbers and
// strings, use the regular CallFunctionStub for method calls to wrap
// the receiver.
// TODO(verwaest): Support creation of value wrappers directly in
// HWrapReceiver.
call = New<HCallFunction>(
function, argument_count, WRAP_AND_CALL);
} else if (TryInlineCall(expr)) {
return;
} else {
call = BuildCallConstantFunction(known_function, argument_count);
}
} else {
CHECK_ALIVE(VisitExpressions(expr->arguments()));
CallFunctionFlags flags = receiver->type().IsJSObject()
? NO_CALL_FUNCTION_FLAGS : CALL_AS_METHOD;
call = New<HCallFunction>(function, argument_count, flags);
}
PushArgumentsFromEnvironment(argument_count);
} else {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
return Bailout(kPossibleDirectCallToEval);
}
// The function is on the stack in the unoptimized code during
// evaluation of the arguments.
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* function = Top();
if (expr->global_call()) {
Variable* var = proxy->var();
bool known_global_function = false;
// If there is a global property cell for the name at compile time and
// the access check is not enabled, we assume that the function will not
// change and generate optimized code for calling the function.
LookupResult lookup(isolate());
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, LOAD);
if (type == kUseCell &&
!current_info()->global_object()->IsAccessCheckNeeded()) {
Handle<GlobalObject> global(current_info()->global_object());
known_global_function = expr->ComputeGlobalTarget(global, &lookup);
}
if (known_global_function) {
Add<HCheckValue>(function, expr->target());
// Placeholder for the receiver.
Push(graph()->GetConstantUndefined());
CHECK_ALIVE(VisitExpressions(expr->arguments()));
// Patch the global object on the stack by the expected receiver.
HValue* receiver = ImplicitReceiverFor(function, expr->target());
const int receiver_index = argument_count - 1;
environment()->SetExpressionStackAt(receiver_index, receiver);
if (TryInlineBuiltinFunctionCall(expr)) {
if (FLAG_trace_inlining) {
PrintF("Inlining builtin ");
expr->target()->ShortPrint();
PrintF("\n");
}
return;
}
if (TryInlineApiFunctionCall(expr, receiver)) return;
if (TryHandleArrayCall(expr, function)) return;
if (TryInlineCall(expr)) return;
PushArgumentsFromEnvironment(argument_count);
call = BuildCallConstantFunction(expr->target(), argument_count);
} else {
Push(graph()->GetConstantUndefined());
CHECK_ALIVE(VisitExpressions(expr->arguments()));
PushArgumentsFromEnvironment(argument_count);
call = New<HCallFunction>(function, argument_count);
}
} else if (expr->IsMonomorphic()) {
Add<HCheckValue>(function, expr->target());
Push(graph()->GetConstantUndefined());
CHECK_ALIVE(VisitExpressions(expr->arguments()));
HValue* receiver = ImplicitReceiverFor(function, expr->target());
const int receiver_index = argument_count - 1;
environment()->SetExpressionStackAt(receiver_index, receiver);
if (TryInlineBuiltinFunctionCall(expr)) {
if (FLAG_trace_inlining) {
PrintF("Inlining builtin ");
expr->target()->ShortPrint();
PrintF("\n");
}
return;
}
if (TryInlineApiFunctionCall(expr, receiver)) return;
if (TryInlineCall(expr)) return;
call = PreProcessCall(New<HInvokeFunction>(
function, expr->target(), argument_count));
} else {
Push(graph()->GetConstantUndefined());
CHECK_ALIVE(VisitExpressions(expr->arguments()));
PushArgumentsFromEnvironment(argument_count);
call = New<HCallFunction>(function, argument_count);
}
}
Drop(1); // Drop the function.
return ast_context()->ReturnInstruction(call, expr->id());
}
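// Emits the inlined allocation for a call to the Array function with zero
// or one argument, switching to a holey elements kind when a non-empty
// packed array would otherwise be created.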
void HOptimizedGraphBuilder::BuildInlinedCallArray(
Expression* expression,
int argument_count,
Handle<AllocationSite> site) {
ASSERT(!site.is_null());
ASSERT(argument_count >= 0 && argument_count <= 1);
NoObservableSideEffectsScope no_effects(this);
// We should at least have the constructor on the expression stack.
HValue* constructor = environment()->ExpressionStackAt(argument_count);
// Register on the site for deoptimization if the transition feedback changes.
AllocationSite::AddDependentCompilationInfo(
site, AllocationSite::TRANSITIONS, top_info());
ElementsKind kind = site->GetElementsKind();
HInstruction* site_instruction = Add<HConstant>(site);
// In the single constant argument case, we may have to adjust elements kind
// to avoid creating a packed non-empty array.
if (argument_count == 1 && !IsHoleyElementsKind(kind)) {
HValue* argument = environment()->Top();
if (argument->IsConstant()) {
HConstant* constant_argument = HConstant::cast(argument);
ASSERT(constant_argument->HasSmiValue());
int constant_array_size = constant_argument->Integer32Value();
if (constant_array_size != 0) {
kind = GetHoleyElementsKind(kind);
}
}
}
// Build the array.
JSArrayBuilder array_builder(this,
kind,
site_instruction,
constructor,
DISABLE_ALLOCATION_SITES);
HValue* new_object = argument_count == 0
? array_builder.AllocateEmptyArray()
: BuildAllocateArrayFromLength(&array_builder, Top());
int args_to_drop = argument_count + (expression->IsCall() ? 2 : 1);
Drop(args_to_drop);
ast_context()->ReturnValue(new_object);
}
// Checks whether allocation using the given constructor can be inlined.
static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
return constructor->has_initial_map() &&
constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
constructor->initial_map()->instance_size() < HAllocate::kMaxInlineSize &&
constructor->initial_map()->InitialPropertiesLength() == 0;
}
bool HOptimizedGraphBuilder::IsCallArrayInlineable(
int argument_count,
Handle<AllocationSite> site) {
Handle<JSFunction> caller = current_info()->closure();
Handle<JSFunction> target = array_function();
// We should have the function plus array arguments on the environment stack.
ASSERT(environment()->length() >= (argument_count + 1));
ASSERT(!site.is_null());
bool inline_ok = false;
if (site->CanInlineCall()) {
// We also want to avoid inlining in certain one-argument scenarios.
if (argument_count == 1) {
HValue* argument = Top();
if (argument->IsConstant()) {
// Do not inline if the constant length argument is not a smi or
// outside the valid range for unrolled loop initialization.
HConstant* constant_argument = HConstant::cast(argument);
if (constant_argument->HasSmiValue()) {
int value = constant_argument->Integer32Value();
inline_ok = value >= 0 && value <= kElementLoopUnrollThreshold;
if (!inline_ok) {
TraceInline(target, caller,
"Constant length outside of valid inlining range.");
}
}
} else {
TraceInline(target, caller,
"Dont inline [new] Array(n) where n isn't constant.");
}
} else if (argument_count == 0) {
inline_ok = true;
} else {
TraceInline(target, caller, "Too many arguments to inline.");
}
} else {
TraceInline(target, caller, "AllocationSite requested no inlining.");
}
if (inline_ok) {
TraceInline(target, caller, NULL);
}
return inline_ok;
}
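// Compiles 'new' expressions. For monomorphic, inlineable constructors the
// receiver is allocated and initialized inline; otherwise a generic
// HCallNew instruction is emitted.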
void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
int argument_count = expr->arguments()->length() + 1; // Plus constructor.
Factory* factory = isolate()->factory();
// The constructor function is on the stack in the unoptimized code
// during evaluation of the arguments.
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* function = Top();
CHECK_ALIVE(VisitExpressions(expr->arguments()));
if (FLAG_inline_construct &&
expr->IsMonomorphic() &&
IsAllocationInlineable(expr->target())) {
Handle<JSFunction> constructor = expr->target();
HValue* check = Add<HCheckValue>(function, constructor);
// Force completion of inobject slack tracking before generating
// allocation code to finalize instance size.
if (constructor->IsInobjectSlackTrackingInProgress()) {
constructor->CompleteInobjectSlackTracking();
}
// Calculate instance size from initial map of constructor.
ASSERT(constructor->has_initial_map());
Handle<Map> initial_map(constructor->initial_map());
int instance_size = initial_map->instance_size();
ASSERT(initial_map->InitialPropertiesLength() == 0);
// Allocate an instance of the implicit receiver object.
HValue* size_in_bytes = Add<HConstant>(instance_size);
HAllocationMode allocation_mode;
if (FLAG_pretenuring_call_new) {
if (FLAG_allocation_site_pretenuring) {
// Try to use pretenuring feedback.
Handle<AllocationSite> allocation_site = expr->allocation_site();
allocation_mode = HAllocationMode(allocation_site);
// Take a dependency on allocation site.
AllocationSite::AddDependentCompilationInfo(allocation_site,
AllocationSite::TENURING,
top_info());
}
}
HAllocate* receiver = BuildAllocate(
size_in_bytes, HType::JSObject(), JS_OBJECT_TYPE, allocation_mode);
receiver->set_known_initial_map(initial_map);
// Initialize map and fields of the newly allocated object.
{ NoObservableSideEffectsScope no_effects(this);
ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
Add<HStoreNamedField>(receiver,
HObjectAccess::ForMapAndOffset(initial_map, JSObject::kMapOffset),
Add<HConstant>(initial_map));
HValue* empty_fixed_array = Add<HConstant>(factory->empty_fixed_array());
Add<HStoreNamedField>(receiver,
HObjectAccess::ForMapAndOffset(initial_map,
JSObject::kPropertiesOffset),
empty_fixed_array);
Add<HStoreNamedField>(receiver,
HObjectAccess::ForMapAndOffset(initial_map,
JSObject::kElementsOffset),
empty_fixed_array);
if (initial_map->inobject_properties() != 0) {
HConstant* undefined = graph()->GetConstantUndefined();
for (int i = 0; i < initial_map->inobject_properties(); i++) {
int property_offset = initial_map->GetInObjectPropertyOffset(i);
Add<HStoreNamedField>(receiver,
HObjectAccess::ForMapAndOffset(initial_map, property_offset),
undefined);
}
}
}
// Replace the constructor function with a newly allocated receiver using
// the index of the receiver from the top of the expression stack.
const int receiver_index = argument_count - 1;
ASSERT(environment()->ExpressionStackAt(receiver_index) == function);
environment()->SetExpressionStackAt(receiver_index, receiver);
if (TryInlineConstruct(expr, receiver)) {
// Inlining worked, add a dependency on the initial map to make sure that
// this code is deoptimized whenever the initial map of the constructor
// changes.
Map::AddDependentCompilationInfo(
initial_map, DependentCode::kInitialMapChangedGroup, top_info());
return;
}
// TODO(mstarzinger): For now we remove the previous HAllocate and all
// corresponding instructions and instead add HPushArguments for the
// arguments in case inlining failed. What we actually should do is for
// inlining to try to build a subgraph without mutating the parent graph.
HInstruction* instr = current_block()->last();
do {
HInstruction* prev_instr = instr->previous();
instr->DeleteAndReplaceWith(NULL);
instr = prev_instr;
} while (instr != check);
environment()->SetExpressionStackAt(receiver_index, function);
HInstruction* call =
PreProcessCall(New<HCallNew>(function, argument_count));
return ast_context()->ReturnInstruction(call, expr->id());
} else {
// The constructor function is both an operand to the instruction and an
// argument to the construct call.
if (TryHandleArrayCallNew(expr, function)) return;
HInstruction* call =
PreProcessCall(New<HCallNew>(function, argument_count));
return ast_context()->ReturnInstruction(call, expr->id());
}
}
// Support for generating inlined runtime functions.
// Lookup table for generators for runtime calls that are generated inline.
// Elements of the table are member pointers to functions of
// HOptimizedGraphBuilder.
#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \
&HOptimizedGraphBuilder::Generate##Name,
const HOptimizedGraphBuilder::InlineFunctionGenerator
HOptimizedGraphBuilder::kInlineFunctionGenerators[] = {
INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
INLINE_OPTIMIZED_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
};
#undef INLINE_FUNCTION_GENERATOR_ADDRESS
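// Initializes the common JSArrayBufferView fields (internal fields, byte
// offset and byte length) and, when a buffer is given, links the view into
// the buffer's weak view list.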
template <class ViewClass>
void HGraphBuilder::BuildArrayBufferViewInitialization(
HValue* obj,
HValue* buffer,
HValue* byte_offset,
HValue* byte_length) {
for (int offset = ViewClass::kSize;
offset < ViewClass::kSizeWithInternalFields;
offset += kPointerSize) {
Add<HStoreNamedField>(obj,
HObjectAccess::ForObservableJSObjectOffset(offset),
graph()->GetConstant0());
}
Add<HStoreNamedField>(
obj,
HObjectAccess::ForJSArrayBufferViewByteOffset(),
byte_offset);
Add<HStoreNamedField>(
obj,
HObjectAccess::ForJSArrayBufferViewByteLength(),
byte_length);
if (buffer != NULL) {
Add<HStoreNamedField>(
obj,
HObjectAccess::ForJSArrayBufferViewBuffer(), buffer);
HObjectAccess weak_first_view_access =
HObjectAccess::ForJSArrayBufferWeakFirstView();
Add<HStoreNamedField>(obj,
HObjectAccess::ForJSArrayBufferViewWeakNext(),
Add<HLoadNamedField>(buffer,
static_cast<HValue*>(NULL),
weak_first_view_access));
Add<HStoreNamedField>(buffer, weak_first_view_access, obj);
} else {
Add<HStoreNamedField>(
obj,
HObjectAccess::ForJSArrayBufferViewBuffer(),
Add<HConstant>(static_cast<int32_t>(0)));
Add<HStoreNamedField>(obj,
HObjectAccess::ForJSArrayBufferViewWeakNext(),
graph()->GetConstantUndefined());
}
}
void HOptimizedGraphBuilder::GenerateDataViewInitialize(
CallRuntime* expr) {
ZoneList<Expression*>* arguments = expr->arguments();
ASSERT(arguments->length() == 4);
CHECK_ALIVE(VisitForValue(arguments->at(0)));
HValue* obj = Pop();
CHECK_ALIVE(VisitForValue(arguments->at(1)));
HValue* buffer = Pop();
CHECK_ALIVE(VisitForValue(arguments->at(2)));
HValue* byte_offset = Pop();
CHECK_ALIVE(VisitForValue(arguments->at(3)));
HValue* byte_length = Pop();
{
NoObservableSideEffectsScope scope(this);
BuildArrayBufferViewInitialization<JSDataView>(
obj, buffer, byte_offset, byte_length);
}
}
static Handle<Map> TypedArrayMap(Isolate* isolate,
ExternalArrayType array_type,
ElementsKind target_kind) {
Handle<Context> native_context = isolate->native_context();
Handle<JSFunction> fun;
switch (array_type) {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case kExternal##Type##Array: \
fun = Handle<JSFunction>(native_context->type##_array_fun()); \
break;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
}
Handle<Map> map(fun->initial_map());
return Map::AsElementsKind(map, target_kind);
}
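// Allocates the external elements object for a typed array backed by
// off-heap memory and points it at the buffer's backing store plus the
// byte offset.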
HValue* HOptimizedGraphBuilder::BuildAllocateExternalElements(
ExternalArrayType array_type,
bool is_zero_byte_offset,
HValue* buffer, HValue* byte_offset, HValue* length) {
Handle<Map> external_array_map(
isolate()->heap()->MapForExternalArrayType(array_type));
// The HForceRepresentation is to prevent possible deopt on int-smi
// conversion after allocation but before the new object fields are set.
length = AddUncasted<HForceRepresentation>(length, Representation::Smi());
HValue* elements =
Add<HAllocate>(
Add<HConstant>(ExternalArray::kAlignedSize),
HType::HeapObject(),
NOT_TENURED,
external_array_map->instance_type());
AddStoreMapConstant(elements, external_array_map);
Add<HStoreNamedField>(elements,
HObjectAccess::ForFixedArrayLength(), length);
HValue* backing_store = Add<HLoadNamedField>(
buffer, static_cast<HValue*>(NULL),
HObjectAccess::ForJSArrayBufferBackingStore());
HValue* typed_array_start;
if (is_zero_byte_offset) {
typed_array_start = backing_store;
} else {
HInstruction* external_pointer =
AddUncasted<HAdd>(backing_store, byte_offset);
// Arguments are checked prior to the call to TypedArrayInitialize,
// including byte_offset.
external_pointer->ClearFlag(HValue::kCanOverflow);
typed_array_start = external_pointer;
}
Add<HStoreNamedField>(elements,
HObjectAccess::ForExternalArrayExternalPointer(),
typed_array_start);
return elements;
}
HValue* HOptimizedGraphBuilder::BuildAllocateFixedTypedArray(
ExternalArrayType array_type, size_t element_size,
ElementsKind fixed_elements_kind,
HValue* byte_length, HValue* length) {
STATIC_ASSERT(
(FixedTypedArrayBase::kHeaderSize & kObjectAlignmentMask) == 0);
HValue* total_size;
// If the fixed array's elements are not aligned to the object alignment,
// we need to align the whole array to the object alignment.
if (element_size % kObjectAlignment != 0) {
total_size = BuildObjectSizeAlignment(
byte_length, FixedTypedArrayBase::kHeaderSize);
} else {
total_size = AddUncasted<HAdd>(byte_length,
Add<HConstant>(FixedTypedArrayBase::kHeaderSize));
total_size->ClearFlag(HValue::kCanOverflow);
}
// The HForceRepresentation is to prevent possible deopt on int-smi
// conversion after allocation but before the new object fields are set.
length = AddUncasted<HForceRepresentation>(length, Representation::Smi());
Handle<Map> fixed_typed_array_map(
isolate()->heap()->MapForFixedTypedArray(array_type));
HValue* elements =
Add<HAllocate>(total_size, HType::HeapObject(),
NOT_TENURED, fixed_typed_array_map->instance_type());
AddStoreMapConstant(elements, fixed_typed_array_map);
Add<HStoreNamedField>(elements,
HObjectAccess::ForFixedArrayLength(),
length);
HValue* filler = Add<HConstant>(static_cast<int32_t>(0));
{
LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
HValue* key = builder.BeginBody(
Add<HConstant>(static_cast<int32_t>(0)),
length, Token::LT);
Add<HStoreKeyed>(elements, key, filler, fixed_elements_kind);
builder.EndBody();
}
return elements;
}
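// Inline code generator for %TypedArrayInitialize. The fast path requires
// a Smi byte offset; otherwise the runtime function is called instead.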
void HOptimizedGraphBuilder::GenerateTypedArrayInitialize(
CallRuntime* expr) {
ZoneList<Expression*>* arguments = expr->arguments();
static const int kObjectArg = 0;
static const int kArrayIdArg = 1;
static const int kBufferArg = 2;
static const int kByteOffsetArg = 3;
static const int kByteLengthArg = 4;
static const int kArgsLength = 5;
ASSERT(arguments->length() == kArgsLength);
CHECK_ALIVE(VisitForValue(arguments->at(kObjectArg)));
HValue* obj = Pop();
if (!arguments->at(kArrayIdArg)->IsLiteral()) {
// This should never happen in real use, but can happen when fuzzing.
// Just bail out.
Bailout(kNeedSmiLiteral);
return;
}
Handle<Object> value =
static_cast<Literal*>(arguments->at(kArrayIdArg))->value();
if (!value->IsSmi()) {
// This should never happen in real use, but can happen when fuzzing.
// Just bail out.
Bailout(kNeedSmiLiteral);
return;
}
int array_id = Smi::cast(*value)->value();
HValue* buffer;
if (!arguments->at(kBufferArg)->IsNullLiteral()) {
CHECK_ALIVE(VisitForValue(arguments->at(kBufferArg)));
buffer = Pop();
} else {
buffer = NULL;
}
HValue* byte_offset;
bool is_zero_byte_offset;
if (arguments->at(kByteOffsetArg)->IsLiteral()
&& Smi::FromInt(0) ==
*static_cast<Literal*>(arguments->at(kByteOffsetArg))->value()) {
byte_offset = Add<HConstant>(static_cast<int32_t>(0));
is_zero_byte_offset = true;
} else {
CHECK_ALIVE(VisitForValue(arguments->at(kByteOffsetArg)));
byte_offset = Pop();
is_zero_byte_offset = false;
ASSERT(buffer != NULL);
}
CHECK_ALIVE(VisitForValue(arguments->at(kByteLengthArg)));
HValue* byte_length = Pop();
NoObservableSideEffectsScope scope(this);
IfBuilder byte_offset_smi(this);
if (!is_zero_byte_offset) {
byte_offset_smi.If<HIsSmiAndBranch>(byte_offset);
byte_offset_smi.Then();
}
ExternalArrayType array_type =
kExternalInt8Array; // Bogus initialization.
size_t element_size = 1; // Bogus initialization.
ElementsKind external_elements_kind = // Bogus initialization.
EXTERNAL_INT8_ELEMENTS;
ElementsKind fixed_elements_kind = // Bogus initialization.
INT8_ELEMENTS;
Runtime::ArrayIdToTypeAndSize(array_id,
&array_type,
&external_elements_kind,
&fixed_elements_kind,
&element_size);
{ // byte_offset is Smi.
BuildArrayBufferViewInitialization<JSTypedArray>(
obj, buffer, byte_offset, byte_length);
HInstruction* length = AddUncasted<HDiv>(byte_length,
Add<HConstant>(static_cast<int32_t>(element_size)));
Add<HStoreNamedField>(obj,
HObjectAccess::ForJSTypedArrayLength(),
length);
HValue* elements;
if (buffer != NULL) {
elements = BuildAllocateExternalElements(
array_type, is_zero_byte_offset, buffer, byte_offset, length);
Handle<Map> obj_map = TypedArrayMap(
isolate(), array_type, external_elements_kind);
AddStoreMapConstant(obj, obj_map);
} else {
ASSERT(is_zero_byte_offset);
elements = BuildAllocateFixedTypedArray(
array_type, element_size, fixed_elements_kind,
byte_length, length);
}
Add<HStoreNamedField>(
obj, HObjectAccess::ForElementsPointer(), elements);
}
if (!is_zero_byte_offset) {
byte_offset_smi.Else();
{ // byte_offset is not Smi.
Push(obj);
CHECK_ALIVE(VisitForValue(arguments->at(kArrayIdArg)));
Push(buffer);
Push(byte_offset);
Push(byte_length);
PushArgumentsFromEnvironment(kArgsLength);
Add<HCallRuntime>(expr->name(), expr->function(), kArgsLength);
}
}
byte_offset_smi.End();
}
void HOptimizedGraphBuilder::GenerateMaxSmi(CallRuntime* expr) {
ASSERT(expr->arguments()->length() == 0);
HConstant* max_smi = New<HConstant>(static_cast<int32_t>(Smi::kMaxValue));
return ast_context()->ReturnInstruction(max_smi, expr->id());
}
void HOptimizedGraphBuilder::GenerateTypedArrayMaxSizeInHeap(
CallRuntime* expr) {
ASSERT(expr->arguments()->length() == 0);
HConstant* result = New<HConstant>(static_cast<int32_t>(
FLAG_typed_array_max_size_in_heap));
return ast_context()->ReturnInstruction(result, expr->id());
}
void HOptimizedGraphBuilder::GenerateArrayBufferGetByteLength(
CallRuntime* expr) {
ASSERT(expr->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
HValue* buffer = Pop();
HInstruction* result = New<HLoadNamedField>(
buffer,
static_cast<HValue*>(NULL),
HObjectAccess::ForJSArrayBufferByteLength());
return ast_context()->ReturnInstruction(result, expr->id());
}
void HOptimizedGraphBuilder::GenerateArrayBufferViewGetByteLength(
CallRuntime* expr) {
ASSERT(expr->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
HValue* buffer = Pop();
HInstruction* result = New<HLoadNamedField>(
buffer,
static_cast<HValue*>(NULL),
HObjectAccess::ForJSArrayBufferViewByteLength());
return ast_context()->ReturnInstruction(result, expr->id());
}
void HOptimizedGraphBuilder::GenerateArrayBufferViewGetByteOffset(
CallRuntime* expr) {
ASSERT(expr->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
HValue* buffer = Pop();
HInstruction* result = New<HLoadNamedField>(
buffer,
static_cast<HValue*>(NULL),
HObjectAccess::ForJSArrayBufferViewByteOffset());
return ast_context()->ReturnInstruction(result, expr->id());
}
void HOptimizedGraphBuilder::GenerateTypedArrayGetLength(
CallRuntime* expr) {
ASSERT(expr->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
HValue* buffer = Pop();
HInstruction* result = New<HLoadNamedField>(
buffer,
static_cast<HValue*>(NULL),
HObjectAccess::ForJSTypedArrayLength());
return ast_context()->ReturnInstruction(result, expr->id());
}
void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
if (expr->is_jsruntime()) {
return Bailout(kCallToAJavaScriptRuntimeFunction);
}
const Runtime::Function* function = expr->function();
ASSERT(function != NULL);
if (function->intrinsic_type == Runtime::INLINE ||
function->intrinsic_type == Runtime::INLINE_OPTIMIZED) {
ASSERT(expr->name()->length() > 0);
ASSERT(expr->name()->Get(0) == '_');
// Call to an inline function.
int lookup_index = static_cast<int>(function->function_id) -
static_cast<int>(Runtime::kFirstInlineFunction);
ASSERT(lookup_index >= 0);
ASSERT(static_cast<size_t>(lookup_index) <
ARRAY_SIZE(kInlineFunctionGenerators));
InlineFunctionGenerator generator = kInlineFunctionGenerators[lookup_index];
// Call the inline code generator using the pointer-to-member.
(this->*generator)(expr);
} else {
ASSERT(function->intrinsic_type == Runtime::RUNTIME);
Handle<String> name = expr->name();
int argument_count = expr->arguments()->length();
CHECK_ALIVE(VisitExpressions(expr->arguments()));
PushArgumentsFromEnvironment(argument_count);
HCallRuntime* call = New<HCallRuntime>(name, function,
argument_count);
return ast_context()->ReturnInstruction(call, expr->id());
}
}
void HOptimizedGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
switch (expr->op()) {
case Token::DELETE: return VisitDelete(expr);
case Token::VOID: return VisitVoid(expr);
case Token::TYPEOF: return VisitTypeof(expr);
case Token::NOT: return VisitNot(expr);
default: UNREACHABLE();
}
}
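// Compiles 'delete': properties go through the DELETE builtin, deleting a
// global variable bails out, deleting a stack- or context-allocated
// variable yields false (true for 'this'), and any other expression is
// evaluated for effect and yields true.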
void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
Property* prop = expr->expression()->AsProperty();
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (prop != NULL) {
CHECK_ALIVE(VisitForValue(prop->obj()));
CHECK_ALIVE(VisitForValue(prop->key()));
HValue* key = Pop();
HValue* obj = Pop();
HValue* function = AddLoadJSBuiltin(Builtins::DELETE);
Add<HPushArguments>(obj, key, Add<HConstant>(function_strict_mode()));
// TODO(olivf) InvokeFunction produces a check for the parameter count,
// even though we are certain to pass the correct number of arguments here.
HInstruction* instr = New<HInvokeFunction>(function, 3);
return ast_context()->ReturnInstruction(instr, expr->id());
} else if (proxy != NULL) {
Variable* var = proxy->var();
if (var->IsUnallocated()) {
Bailout(kDeleteWithGlobalVariable);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global variables is false. 'this' is not
// really a variable, though we implement it as one. The
// subexpression does not have side effects.
HValue* value = var->is_this()
? graph()->GetConstantTrue()
: graph()->GetConstantFalse();
return ast_context()->ReturnValue(value);
} else {
Bailout(kDeleteWithNonGlobalVariable);
}
} else {
// Result of deleting non-property, non-variable reference is true.
// Evaluate the subexpression for side effects.
CHECK_ALIVE(VisitForEffect(expr->expression()));
return ast_context()->ReturnValue(graph()->GetConstantTrue());
}
}
void HOptimizedGraphBuilder::VisitVoid(UnaryOperation* expr) {
CHECK_ALIVE(VisitForEffect(expr->expression()));
return ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
void HOptimizedGraphBuilder::VisitTypeof(UnaryOperation* expr) {
CHECK_ALIVE(VisitForTypeOf(expr->expression()));
HValue* value = Pop();
HInstruction* instr = New<HTypeof>(value);
return ast_context()->ReturnInstruction(instr, expr->id());
}
void HOptimizedGraphBuilder::VisitNot(UnaryOperation* expr) {
if (ast_context()->IsTest()) {
TestContext* context = TestContext::cast(ast_context());
VisitForControl(expr->expression(),
context->if_false(),
context->if_true());
return;
}
if (ast_context()->IsEffect()) {
VisitForEffect(expr->expression());
return;
}
ASSERT(ast_context()->IsValue());
HBasicBlock* materialize_false = graph()->CreateBasicBlock();
HBasicBlock* materialize_true = graph()->CreateBasicBlock();
CHECK_BAILOUT(VisitForControl(expr->expression(),
materialize_false,
materialize_true));
if (materialize_false->HasPredecessor()) {
materialize_false->SetJoinId(expr->MaterializeFalseId());
set_current_block(materialize_false);
Push(graph()->GetConstantFalse());
} else {
materialize_false = NULL;
}
if (materialize_true->HasPredecessor()) {
materialize_true->SetJoinId(expr->MaterializeTrueId());
set_current_block(materialize_true);
Push(graph()->GetConstantTrue());
} else {
materialize_true = NULL;
}
HBasicBlock* join =
CreateJoin(materialize_false, materialize_true, expr->id());
set_current_block(join);
if (join != NULL) return ast_context()->ReturnValue(Pop());
}
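// Builds the increment or decrement for a count operation: adds +1 for
// Token::INC and -1 for Token::DEC to the value on top of the expression
// stack.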
HInstruction* HOptimizedGraphBuilder::BuildIncrement(
bool returns_original_input,
CountOperation* expr) {
// The input to the count operation is on top of the expression stack.
Representation rep = Representation::FromType(expr->type());
if (rep.IsNone() || rep.IsTagged()) {
rep = Representation::Smi();
}
if (returns_original_input) {
// We need an explicit HValue representing ToNumber(input). The
// actual HChange instruction we need is (sometimes) added in a later
// phase, so it is not available now to be used as an input to HAdd and
// as the return value.
HInstruction* number_input = AddUncasted<HForceRepresentation>(Pop(), rep);
if (!rep.IsDouble()) {
number_input->SetFlag(HInstruction::kFlexibleRepresentation);
number_input->SetFlag(HInstruction::kCannotBeTagged);
}
Push(number_input);
}
// The addition has no side effects, so we do not need
// to simulate the expression stack after this instruction.
// Any later failures deopt to the load of the input or earlier.
HConstant* delta = (expr->op() == Token::INC)
? graph()->GetConstant1()
: graph()->GetConstantMinus1();
HInstruction* instr = AddUncasted<HAdd>(Top(), delta);
if (instr->IsAdd()) {
HAdd* add = HAdd::cast(instr);
add->set_observed_input_representation(1, rep);
add->set_observed_input_representation(2, Representation::Smi());
}
instr->SetFlag(HInstruction::kCannotBeTagged);
instr->ClearAllSideEffects();
return instr;
}
void HOptimizedGraphBuilder::BuildStoreForEffect(Expression* expr,
Property* prop,
BailoutId ast_id,
BailoutId return_id,
HValue* object,
HValue* key,
HValue* value) {
EffectContext for_effect(this);
Push(object);
if (key != NULL) Push(key);
Push(value);
BuildStore(expr, prop, ast_id, return_id);
}
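// Compiles prefix and postfix ++/-- on variables and properties, matching
// the full code generator's stack layout for postfix operations in
// non-effect contexts.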
void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
Expression* target = expr->expression();
VariableProxy* proxy = target->AsVariableProxy();
Property* prop = target->AsProperty();
if (proxy == NULL && prop == NULL) {
return Bailout(kInvalidLhsInCountOperation);
}
// Match the full code generator stack by simulating an extra stack
// element for postfix operations in a non-effect context. The return
// value is ToNumber(input).
bool returns_original_input =
expr->is_postfix() && !ast_context()->IsEffect();
HValue* input = NULL; // ToNumber(original_input).
HValue* after = NULL; // The result after incrementing or decrementing.
if (proxy != NULL) {
Variable* var = proxy->var();
if (var->mode() == CONST_LEGACY) {
return Bailout(kUnsupportedCountOperationWithConst);
}
// Argument of the count operation is a variable, not a property.
ASSERT(prop == NULL);
CHECK_ALIVE(VisitForValue(target));
after = BuildIncrement(returns_original_input, expr);
input = returns_original_input ? Top() : Pop();
Push(after);
switch (var->location()) {
case Variable::UNALLOCATED:
HandleGlobalVariableAssignment(var,
after,
expr->AssignmentId());
break;
case Variable::PARAMETER:
case Variable::LOCAL:
BindIfLive(var, after);
break;
case Variable::CONTEXT: {
// Bail out if we try to mutate a parameter value in a function
// using the arguments object. We do not (yet) correctly handle the
// arguments property of the function.
if (current_info()->scope()->arguments() != NULL) {
// Parameters will rewrite to context slots. We have no direct
// way to detect that the variable is a parameter so we use a
// linear search of the parameter list.
int count = current_info()->scope()->num_parameters();
for (int i = 0; i < count; ++i) {
if (var == current_info()->scope()->parameter(i)) {
return Bailout(kAssignmentToParameterInArgumentsObject);
}
}
}
HValue* context = BuildContextChainWalk(var);
HStoreContextSlot::Mode mode = IsLexicalVariableMode(var->mode())
? HStoreContextSlot::kCheckDeoptimize : HStoreContextSlot::kNoCheck;
HStoreContextSlot* instr = Add<HStoreContextSlot>(context, var->index(),
mode, after);
if (instr->HasObservableSideEffects()) {
Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
break;
}
case Variable::LOOKUP:
return Bailout(kLookupVariableInCountOperation);
}
Drop(returns_original_input ? 2 : 1);
return ast_context()->ReturnValue(expr->is_postfix() ? input : after);
}
// Argument of the count operation is a property.
ASSERT(prop != NULL);
if (returns_original_input) Push(graph()->GetConstantUndefined());
CHECK_ALIVE(VisitForValue(prop->obj()));
HValue* object = Top();
HValue* key = NULL;
if ((!prop->IsFunctionPrototype() && !prop->key()->IsPropertyName()) ||
prop->IsStringAccess()) {
CHECK_ALIVE(VisitForValue(prop->key()));
key = Top();
}
CHECK_ALIVE(PushLoad(prop, object, key));
after = BuildIncrement(returns_original_input, expr);
if (returns_original_input) {
input = Pop();
// Drop object and key to push them again in the effect context below.
Drop(key == NULL ? 1 : 2);
environment()->SetExpressionStackAt(0, input);
CHECK_ALIVE(BuildStoreForEffect(
expr, prop, expr->id(), expr->AssignmentId(), object, key, after));
return ast_context()->ReturnValue(Pop());
}
environment()->SetExpressionStackAt(0, after);
return BuildStore(expr, prop, expr->id(), expr->AssignmentId());
}
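// Builds a charCodeAt for the given string and index, constant-folding when
// both operands are constants and yielding NaN for out-of-bounds indices.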
HInstruction* HOptimizedGraphBuilder::BuildStringCharCodeAt(
HValue* string,
HValue* index) {
if (string->IsConstant() && index->IsConstant()) {
HConstant* c_string = HConstant::cast(string);
HConstant* c_index = HConstant::cast(index);
if (c_string->HasStringValue() && c_index->HasNumberValue()) {
int32_t i = c_index->NumberValueAsInteger32();
Handle<String> s = c_string->StringValue();
if (i < 0 || i >= s->length()) {
return New<HConstant>(OS::nan_value());
}
return New<HConstant>(s->Get(i));
}
}
string = BuildCheckString(string);
index = Add<HBoundsCheck>(index, AddLoadStringLength(string));
return New<HStringCharCodeAt>(string, index);
}
// Checks if the given shift amounts have one of the following forms:
// constants (N1) and (N2) with N1 + N2 == 32, or (sa) and (32 - sa).
static bool ShiftAmountsAllowReplaceByRotate(HValue* sa,
HValue* const32_minus_sa) {
if (sa->IsConstant() && const32_minus_sa->IsConstant()) {
const HConstant* c1 = HConstant::cast(sa);
const HConstant* c2 = HConstant::cast(const32_minus_sa);
return c1->HasInteger32Value() && c2->HasInteger32Value() &&
(c1->Integer32Value() + c2->Integer32Value() == 32);
}
if (!const32_minus_sa->IsSub()) return false;
HSub* sub = HSub::cast(const32_minus_sa);
return sub->left()->EqualsInteger32Constant(32) && sub->right() == sa;
}
// Checks whether the left and right operands are shift instructions with
// opposite directions that together can be replaced by a single rotate-right
// instruction, e.g. (x << 3) | (x >>> 29) becomes a rotate right by 29.
// If so, returns the operand and the shift amount for the rotate instruction
// through the out parameters.
bool HGraphBuilder::MatchRotateRight(HValue* left,
HValue* right,
HValue** operand,
HValue** shift_amount) {
HShl* shl;
HShr* shr;
if (left->IsShl() && right->IsShr()) {
shl = HShl::cast(left);
shr = HShr::cast(right);
} else if (left->IsShr() && right->IsShl()) {
shl = HShl::cast(right);
shr = HShr::cast(left);
} else {
return false;
}
if (shl->left() != shr->left()) return false;
if (!ShiftAmountsAllowReplaceByRotate(shl->right(), shr->right()) &&
!ShiftAmountsAllowReplaceByRotate(shr->right(), shl->right())) {
return false;
}
*operand = shr->left();
*shift_amount = shr->right();
return true;
}
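// Conservatively determines whether a shift amount may be zero: returns
// false only for integer constants whose low five bits (the effective
// shift count) are non-zero.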
bool CanBeZero(HValue* right) {
if (right->IsConstant()) {
HConstant* right_const = HConstant::cast(right);
if (right_const->HasInteger32Value() &&
(right_const->Integer32Value() & 0x1f) != 0) {
return false;
}
}
return true;
}
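// Forces the number into the representation implied by the expected type
// (Smi or Int32); other numbers are left untouched.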
HValue* HGraphBuilder::EnforceNumberType(HValue* number,
Type* expected) {
if (expected->Is(Type::SignedSmall())) {
return AddUncasted<HForceRepresentation>(number, Representation::Smi());
}
if (expected->Is(Type::Signed32())) {
return AddUncasted<HForceRepresentation>(number,
Representation::Integer32());
}
return number;
}
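// Attempts to statically convert the value to a number based on type
// feedback, folding constants directly and updating the expected type
// through the out parameter.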
HValue* HGraphBuilder::TruncateToNumber(HValue* value, Type** expected) {
if (value->IsConstant()) {
HConstant* constant = HConstant::cast(value);
Maybe<HConstant*> number = constant->CopyToTruncatedNumber(zone());
if (number.has_value) {
*expected = Type::Number(zone());
return AddInstruction(number.value);
}
}
// We put temporary values on the stack, which don't correspond to anything
// in baseline code. Since nothing is observable we avoid recording those
// pushes with a NoObservableSideEffectsScope.
NoObservableSideEffectsScope no_effects(this);
Type* expected_type = *expected;
// Separate the number type from the rest.
Type* expected_obj =
Type::Intersect(expected_type, Type::NonNumber(zone()), zone());
Type* expected_number =
Type::Intersect(expected_type, Type::Number(zone()), zone());
// We expect to get a number.
// (We need to check first, since Type::None->Is(Type::Any()) == true.)
if (expected_obj->Is(Type::None())) {
ASSERT(!expected_number->Is(Type::None(zone())));
return value;
}
if (expected_obj->Is(Type::Undefined(zone()))) {
// This is already done by HChange.
*expected = Type::Union(expected_number, Type::Number(zone()), zone());
return value;
}
return value;
}
HValue* HOptimizedGraphBuilder::BuildBinaryOperation(
BinaryOperation* expr,
HValue* left,
HValue* right,
PushBeforeSimulateBehavior push_sim_result) {
Type* left_type = expr->left()->bounds().lower;
Type* right_type = expr->right()->bounds().lower;
Type* result_type = expr->bounds().lower;
Maybe<int> fixed_right_arg = expr->fixed_right_arg();
Handle<AllocationSite> allocation_site = expr->allocation_site();
HAllocationMode allocation_mode;
if (FLAG_allocation_site_pretenuring && !allocation_site.is_null()) {
allocation_mode = HAllocationMode(allocation_site);
}
HValue* result = HGraphBuilder::BuildBinaryOperation(
expr->op(), left, right, left_type, right_type, result_type,
fixed_right_arg, allocation_mode);
// Add a simulate after instructions with observable side effects, and
// after phis, which are the result of BuildBinaryOperation when we
// inlined some complex subgraph.
if (result->HasObservableSideEffects() || result->IsPhi()) {
if (push_sim_result == PUSH_BEFORE_SIMULATE) {
Push(result);
Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
Drop(1);
} else {
Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
}
}
return result;
}
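// Shared lowering for binary operations: handles string addition as a
// special case, then emits the arithmetic or bitwise instruction with
// observed input representations taken from type feedback.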
HValue* HGraphBuilder::BuildBinaryOperation(
Token::Value op,
HValue* left,
HValue* right,
Type* left_type,
Type* right_type,
Type* result_type,
Maybe<int> fixed_right_arg,
HAllocationMode allocation_mode) {
Representation left_rep = Representation::FromType(left_type);
Representation right_rep = Representation::FromType(right_type);
bool maybe_string_add = op == Token::ADD &&
(left_type->Maybe(Type::String()) ||
right_type->Maybe(Type::String()));
if (left_type->Is(Type::None())) {
Add<HDeoptimize>("Insufficient type feedback for LHS of binary operation",
Deoptimizer::SOFT);
// TODO(rossberg): we should be able to get rid of non-continuous
// defaults.
left_type = Type::Any(zone());
} else {
if (!maybe_string_add) left = TruncateToNumber(left, &left_type);
left_rep = Representation::FromType(left_type);
}
if (right_type->Is(Type::None())) {
Add<HDeoptimize>("Insufficient type feedback for RHS of binary operation",
Deoptimizer::SOFT);
right_type = Type::Any(zone());
} else {
if (!maybe_string_add) right = TruncateToNumber(right, &right_type);
right_rep = Representation::FromType(right_type);
}
// Special case for string addition here.
if (op == Token::ADD &&
(left_type->Is(Type::String()) || right_type->Is(Type::String()))) {
// Validate type feedback for left argument.
if (left_type->Is(Type::String())) {
left = BuildCheckString(left);
}
// Validate type feedback for right argument.
if (right_type->Is(Type::String())) {
right = BuildCheckString(right);
}
// Convert left argument as necessary.
if (left_type->Is(Type::Number())) {
ASSERT(right_type->Is(Type::String()));
left = BuildNumberToString(left, left_type);
} else if (!left_type->Is(Type::String())) {
ASSERT(right_type->Is(Type::String()));
HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_RIGHT);
Add<HPushArguments>(left, right);
return AddUncasted<HInvokeFunction>(function, 2);
}
// Convert right argument as necessary.
if (right_type->Is(Type::Number())) {
ASSERT(left_type->Is(Type::String()));
right = BuildNumberToString(right, right_type);
} else if (!right_type->Is(Type::String())) {
ASSERT(left_type->Is(Type::String()));
HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_LEFT);
Add<HPushArguments>(left, right);
return AddUncasted<HInvokeFunction>(function, 2);
}
// Fast path for empty constant strings.
if (left->IsConstant() &&
HConstant::cast(left)->HasStringValue() &&
HConstant::cast(left)->StringValue()->length() == 0) {
return right;
}
if (right->IsConstant() &&
HConstant::cast(right)->HasStringValue() &&
HConstant::cast(right)->StringValue()->length() == 0) {
return left;
}
// Register the dependent code with the allocation site.
if (!allocation_mode.feedback_site().is_null()) {
ASSERT(!graph()->info()->IsStub());
Handle<AllocationSite> site(allocation_mode.feedback_site());
AllocationSite::AddDependentCompilationInfo(
site, AllocationSite::TENURING, top_info());
}
// Inline the string addition into the stub when creating allocation
// mementos to gather allocation site feedback, or if we can statically
// infer that we're going to create a cons string.
if ((graph()->info()->IsStub() &&
allocation_mode.CreateAllocationMementos()) ||
(left->IsConstant() &&
HConstant::cast(left)->HasStringValue() &&
HConstant::cast(left)->StringValue()->length() + 1 >=
ConsString::kMinLength) ||
(right->IsConstant() &&
HConstant::cast(right)->HasStringValue() &&
HConstant::cast(right)->StringValue()->length() + 1 >=
ConsString::kMinLength)) {
return BuildStringAdd(left, right, allocation_mode);
}
// Fallback to using the string add stub.
return AddUncasted<HStringAdd>(
left, right, allocation_mode.GetPretenureMode(),
STRING_ADD_CHECK_NONE, allocation_mode.feedback_site());
}
if (graph()->info()->IsStub()) {
left = EnforceNumberType(left, left_type);
right = EnforceNumberType(right, right_type);
}
Representation result_rep = Representation::FromType(result_type);
bool is_non_primitive = (left_rep.IsTagged() && !left_rep.IsSmi()) ||
(right_rep.IsTagged() && !right_rep.IsSmi());
HInstruction* instr = NULL;
// Only the stub is allowed to call into the runtime, since otherwise we would
// inline several instructions (including the two pushes) for every tagged
// operation in optimized code, which is more expensive than a stub call.
if (graph()->info()->IsStub() && is_non_primitive) {
HValue* function = AddLoadJSBuiltin(BinaryOpIC::TokenToJSBuiltin(op));
Add<HPushArguments>(left, right);
instr = AddUncasted<HInvokeFunction>(function, 2);
} else {
switch (op) {
case Token::ADD:
instr = AddUncasted<HAdd>(left, right);
break;
case Token::SUB:
instr = AddUncasted<HSub>(left, right);
break;
case Token::MUL:
instr = AddUncasted<HMul>(left, right);
break;
case Token::MOD: {
if (fixed_right_arg.has_value &&
!right->EqualsInteger32Constant(fixed_right_arg.value)) {
HConstant* fixed_right = Add<HConstant>(
static_cast<int>(fixed_right_arg.value));
IfBuilder if_same(this);
if_same.If<HCompareNumericAndBranch>(right, fixed_right, Token::EQ);
if_same.Then();
if_same.ElseDeopt("Unexpected RHS of binary operation");
right = fixed_right;
}
instr = AddUncasted<HMod>(left, right);
break;
}
case Token::DIV:
instr = AddUncasted<HDiv>(left, right);
break;
case Token::BIT_XOR:
case Token::BIT_AND:
instr = AddUncasted<HBitwise>(op, left, right);
break;
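      // Recognize the rotate-right idiom, e.g. (x >>> n) | (x << (32 - n)),
      // and lower it to a single HRor when both operands are known to be
      // signed 32-bit values.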
case Token::BIT_OR: {
HValue* operand, *shift_amount;
if (left_type->Is(Type::Signed32()) &&
right_type->Is(Type::Signed32()) &&
MatchRotateRight(left, right, &operand, &shift_amount)) {
instr = AddUncasted<HRor>(operand, shift_amount);
} else {
instr = AddUncasted<HBitwise>(op, left, right);
}
break;
}
case Token::SAR:
instr = AddUncasted<HSar>(left, right);
break;
case Token::SHR:
instr = AddUncasted<HShr>(left, right);
if (FLAG_opt_safe_uint32_operations && instr->IsShr() &&
CanBeZero(right)) {
graph()->RecordUint32Instruction(instr);
}
break;
case Token::SHL:
instr = AddUncasted<HShl>(left, right);
break;
default:
UNREACHABLE();
}
}
if (instr->IsBinaryOperation()) {
HBinaryOperation* binop = HBinaryOperation::cast(instr);
binop->set_observed_input_representation(1, left_rep);
binop->set_observed_input_representation(2, right_rep);
binop->initialize_output_representation(result_rep);
if (graph()->info()->IsStub()) {
// Stub should not call into stub.
instr->SetFlag(HValue::kCannotBeTagged);
// And should truncate on HForceRepresentation already.
if (left->IsForceRepresentation()) {
left->CopyFlag(HValue::kTruncatingToSmi, instr);
left->CopyFlag(HValue::kTruncatingToInt32, instr);
}
if (right->IsForceRepresentation()) {
right->CopyFlag(HValue::kTruncatingToSmi, instr);
right->CopyFlag(HValue::kTruncatingToInt32, instr);
}
}
}
return instr;
}
// Check for the form (%_ClassOf(foo) === 'BarClass').
static bool IsClassOfTest(CompareOperation* expr) {
if (expr->op() != Token::EQ_STRICT) return false;
CallRuntime* call = expr->left()->AsCallRuntime();
if (call == NULL) return false;
Literal* literal = expr->right()->AsLiteral();
if (literal == NULL) return false;
if (!literal->value()->IsString()) return false;
if (!call->name()->IsOneByteEqualTo(STATIC_ASCII_VECTOR("_ClassOf"))) {
return false;
}
ASSERT(call->arguments()->length() == 1);
return true;
}
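// Dispatches on the operator: the comma operator and the logical operators
// need control-flow-aware handling; everything else is lowered as an
// arithmetic or bitwise expression.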
void HOptimizedGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
switch (expr->op()) {
case Token::COMMA:
return VisitComma(expr);
case Token::OR:
case Token::AND:
return VisitLogicalExpression(expr);
default:
return VisitArithmeticExpression(expr);
}
}
void HOptimizedGraphBuilder::VisitComma(BinaryOperation* expr) {
CHECK_ALIVE(VisitForEffect(expr->left()));
// Visit the right subexpression in the same AST context as the entire
// expression.
Visit(expr->right());
}
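// Translates short-circuiting && and || according to the surrounding AST
// context: in a test context the operands feed branch targets directly, in
// a value context the left value may be returned without evaluating the
// right side, and in an effect context only control flow and side effects
// are materialized.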
void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
bool is_logical_and = expr->op() == Token::AND;
if (ast_context()->IsTest()) {
TestContext* context = TestContext::cast(ast_context());
// Translate left subexpression.
HBasicBlock* eval_right = graph()->CreateBasicBlock();
if (is_logical_and) {
CHECK_BAILOUT(VisitForControl(expr->left(),
eval_right,
context->if_false()));
} else {
CHECK_BAILOUT(VisitForControl(expr->left(),
context->if_true(),
eval_right));
}
// Translate right subexpression by visiting it in the same AST
// context as the entire expression.
if (eval_right->HasPredecessor()) {
eval_right->SetJoinId(expr->RightId());
set_current_block(eval_right);
Visit(expr->right());
}
} else if (ast_context()->IsValue()) {
CHECK_ALIVE(VisitForValue(expr->left()));
ASSERT(current_block() != NULL);
HValue* left_value = Top();
// Short-circuit left values that always evaluate to the same boolean value.
if (expr->left()->ToBooleanIsTrue() || expr->left()->ToBooleanIsFalse()) {
// l (evals true) && r -> r
// l (evals true) || r -> l
// l (evals false) && r -> l
// l (evals false) || r -> r
if (is_logical_and == expr->left()->ToBooleanIsTrue()) {
Drop(1);
CHECK_ALIVE(VisitForValue(expr->right()));
}
return ast_context()->ReturnValue(Pop());
}
// We need an extra block to maintain edge-split form.
HBasicBlock* empty_block = graph()->CreateBasicBlock();
HBasicBlock* eval_right = graph()->CreateBasicBlock();
ToBooleanStub::Types expected(expr->left()->to_boolean_types());
HBranch* test = is_logical_and
? New<HBranch>(left_value, expected, eval_right, empty_block)
: New<HBranch>(left_value, expected, empty_block, eval_right);
FinishCurrentBlock(test);
set_current_block(eval_right);
Drop(1); // Value of the left subexpression.
CHECK_BAILOUT(VisitForValue(expr->right()));
HBasicBlock* join_block =
CreateJoin(empty_block, current_block(), expr->id());
set_current_block(join_block);
return ast_context()->ReturnValue(Pop());
} else {
ASSERT(ast_context()->IsEffect());
// In an effect context, we don't need the value of the left subexpression,
// only its control flow and side effects. We need an extra block to
// maintain edge-split form.
HBasicBlock* empty_block = graph()->CreateBasicBlock();
HBasicBlock* right_block = graph()->CreateBasicBlock();
if (is_logical_and) {
CHECK_BAILOUT(VisitForControl(expr->left(), right_block, empty_block));
} else {
CHECK_BAILOUT(VisitForControl(expr->left(), empty_block, right_block));
}
    // TODO(kmillikin): Find a way to fix this. It's ugly that there are
    // actually two empty blocks (one here and one inserted by
    // TestContext::BuildBranch), that they both have an HSimulate even though
    // the second one is not a merge node, and that we really have no good AST
    // ID to put on that first HSimulate.
if (empty_block->HasPredecessor()) {
empty_block->SetJoinId(expr->id());
} else {
empty_block = NULL;
}
if (right_block->HasPredecessor()) {
right_block->SetJoinId(expr->RightId());
set_current_block(right_block);
CHECK_BAILOUT(VisitForEffect(expr->right()));
right_block = current_block();
} else {
right_block = NULL;
}
HBasicBlock* join_block =
CreateJoin(empty_block, right_block, expr->id());
set_current_block(join_block);
// We did not materialize any value in the predecessor environments,
// so there is no need to handle it here.
}
}
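// Evaluates both operands for their values and emits the combined binary
// operation, recording operand source positions when position tracking is
// enabled.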
void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
SetSourcePosition(expr->position());
HValue* right = Pop();
HValue* left = Pop();
HValue* result =
BuildBinaryOperation(expr, left, right,
ast_context()->IsEffect() ? NO_PUSH_BEFORE_SIMULATE
: PUSH_BEFORE_SIMULATE);
if (FLAG_hydrogen_track_positions && result->IsBinaryOperation()) {
HBinaryOperation::cast(result)->SetOperandPositions(
zone(),
ScriptPositionToSourcePosition(expr->left()->position()),
ScriptPositionToSourcePosition(expr->right()->position()));
}
return ast_context()->ReturnValue(result);
}
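// Lowers typeof comparisons against a string literal, e.g.
// (typeof x === 'number'), to a dedicated HTypeofIsAndBranch instruction.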
void HOptimizedGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
Expression* sub_expr,
Handle<String> check) {
CHECK_ALIVE(VisitForTypeOf(sub_expr));
SetSourcePosition(expr->position());
HValue* value = Pop();
HTypeofIsAndBranch* instr = New<HTypeofIsAndBranch>(value, check);
return ast_context()->ReturnControl(instr, expr->id());
}
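// Matches a strict equality where one operand is a boolean constant; since
// true and false are heap singletons, such a comparison reduces to a
// reference equality check.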
static bool IsLiteralCompareBool(Isolate* isolate,
HValue* left,
Token::Value op,
HValue* right) {
return op == Token::EQ_STRICT &&
((left->IsConstant() &&
HConstant::cast(left)->handle(isolate)->IsBoolean()) ||
(right->IsConstant() &&
HConstant::cast(right)->handle(isolate)->IsBoolean()));
}
void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
// Check for a few fast cases. The AST visiting behavior must be in sync
// with the full codegen: We don't push both left and right values onto
// the expression stack when one side is a special-case literal.
Expression* sub_expr = NULL;
Handle<String> check;
if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
return HandleLiteralCompareTypeof(expr, sub_expr, check);
}
if (expr->IsLiteralCompareUndefined(&sub_expr, isolate())) {
return HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue);
}
if (expr->IsLiteralCompareNull(&sub_expr)) {
return HandleLiteralCompareNil(expr, sub_expr, kNullValue);
}
if (IsClassOfTest(expr)) {
CallRuntime* call = expr->left()->AsCallRuntime();
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
Literal* literal = expr->right()->AsLiteral();
Handle<String> rhs = Handle<String>::cast(literal->value());
HClassOfTestAndBranch* instr = New<HClassOfTestAndBranch>(value, rhs);
return ast_context()->ReturnControl(instr, expr->id());
}
Type* left_type = expr->left()->bounds().lower;
Type* right_type = expr->right()->bounds().lower;
Type* combined_type = expr->combined_type();
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
if (FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
HValue* right = Pop();
HValue* left = Pop();
Token::Value op = expr->op();
if (IsLiteralCompareBool(isolate(), left, op, right)) {
HCompareObjectEqAndBranch* result =
New<HCompareObjectEqAndBranch>(left, right);
return ast_context()->ReturnControl(result, expr->id());
}
if (op == Token::INSTANCEOF) {
    // Check to see if the rhs of the instanceof is a global function not
    // residing in new space. If it is, we assume that the function will stay
    // the same.
Handle<JSFunction> target = Handle<JSFunction>::null();
VariableProxy* proxy = expr->right()->AsVariableProxy();
bool global_function = (proxy != NULL) && proxy->var()->IsUnallocated();
if (global_function &&
current_info()->has_global_object() &&
!current_info()->global_object()->IsAccessCheckNeeded()) {
Handle<String> name = proxy->name();
Handle<GlobalObject> global(current_info()->global_object());
LookupResult lookup(isolate());
global->Lookup(name, &lookup);
if (lookup.IsNormal() && lookup.GetValue()->IsJSFunction()) {
Handle<JSFunction> candidate(JSFunction::cast(lookup.GetValue()));
        // If the function is in new space, we assume it's more likely to
        // change and thus prefer the general IC code.
if (!isolate()->heap()->InNewSpace(*candidate)) {
target = candidate;
}
}
}
// If the target is not null we have found a known global function that is
// assumed to stay the same for this instanceof.
if (target.is_null()) {
HInstanceOf* result = New<HInstanceOf>(left, right);
return ast_context()->ReturnInstruction(result, expr->id());
} else {
Add<HCheckValue>(right, target);
HInstanceOfKnownGlobal* result =
New<HInstanceOfKnownGlobal>(left, target);
return ast_context()->ReturnInstruction(result, expr->id());
}
// Code below assumes that we don't fall through.
UNREACHABLE();
} else if (op == Token::IN) {
HValue* function = AddLoadJSBuiltin(Builtins::IN);
Add<HPushArguments>(left, right);
// TODO(olivf) InvokeFunction produces a check for the parameter count,
// even though we are certain to pass the correct number of arguments here.
HInstruction* result = New<HInvokeFunction>(function, 2);
return ast_context()->ReturnInstruction(result, expr->id());
}
PushBeforeSimulateBehavior push_behavior =
ast_context()->IsEffect() ? NO_PUSH_BEFORE_SIMULATE
: PUSH_BEFORE_SIMULATE;
HControlInstruction* compare = BuildCompareInstruction(
op, left, right, left_type, right_type, combined_type,
ScriptPositionToSourcePosition(expr->left()->position()),
ScriptPositionToSourcePosition(expr->right()->position()),
push_behavior, expr->id());
if (compare == NULL) return; // Bailed out.
return ast_context()->ReturnControl(compare, expr->id());
}
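// Builds the control instruction for a comparison, specialized on the
// combined type feedback: receivers and internalized strings are compared
// by reference, strings via HStringCompareAndBranch, untagged numbers via
// HCompareNumericAndBranch, and everything else through the generic compare
// followed by a branch.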
HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
Token::Value op,
HValue* left,
HValue* right,
Type* left_type,
Type* right_type,
Type* combined_type,
HSourcePosition left_position,
HSourcePosition right_position,
PushBeforeSimulateBehavior push_sim_result,
BailoutId bailout_id) {
// Cases handled below depend on collected type feedback. They should
// soft deoptimize when there is no type feedback.
if (combined_type->Is(Type::None())) {
Add<HDeoptimize>("Insufficient type feedback for combined type "
"of binary operation",
Deoptimizer::SOFT);
combined_type = left_type = right_type = Type::Any(zone());
}
Representation left_rep = Representation::FromType(left_type);
Representation right_rep = Representation::FromType(right_type);
Representation combined_rep = Representation::FromType(combined_type);
if (combined_type->Is(Type::Receiver())) {
if (Token::IsEqualityOp(op)) {
      // HCompareObjectEqAndBranch can only deal with objects, so
      // exclude numbers.
if ((left->IsConstant() &&
HConstant::cast(left)->HasNumberValue()) ||
(right->IsConstant() &&
HConstant::cast(right)->HasNumberValue())) {
Add<HDeoptimize>("Type mismatch between feedback and constant",
Deoptimizer::SOFT);
// The caller expects a branch instruction, so make it happy.
return New<HBranch>(graph()->GetConstantTrue());
}
      // Can we get away with a map check instead of an instance type check?
HValue* operand_to_check =
left->block()->block_id() < right->block()->block_id() ? left : right;
if (combined_type->IsClass()) {
Handle<Map> map = combined_type->AsClass()->Map();
AddCheckMap(operand_to_check, map);
HCompareObjectEqAndBranch* result =
New<HCompareObjectEqAndBranch>(left, right);
if (FLAG_hydrogen_track_positions) {
result->set_operand_position(zone(), 0, left_position);
result->set_operand_position(zone(), 1, right_position);
}
return result;
} else {
BuildCheckHeapObject(operand_to_check);
Add<HCheckInstanceType>(operand_to_check,
HCheckInstanceType::IS_SPEC_OBJECT);
HCompareObjectEqAndBranch* result =
New<HCompareObjectEqAndBranch>(left, right);
return result;
}
} else {
Bailout(kUnsupportedNonPrimitiveCompare);
return NULL;
}
} else if (combined_type->Is(Type::InternalizedString()) &&
Token::IsEqualityOp(op)) {
// If we have a constant argument, it should be consistent with the type
// feedback (otherwise we fail assertions in HCompareObjectEqAndBranch).
if ((left->IsConstant() &&
!HConstant::cast(left)->HasInternalizedStringValue()) ||
(right->IsConstant() &&
!HConstant::cast(right)->HasInternalizedStringValue())) {
Add<HDeoptimize>("Type mismatch between feedback and constant",
Deoptimizer::SOFT);
// The caller expects a branch instruction, so make it happy.
return New<HBranch>(graph()->GetConstantTrue());
}
BuildCheckHeapObject(left);
Add<HCheckInstanceType>(left, HCheckInstanceType::IS_INTERNALIZED_STRING);
BuildCheckHeapObject(right);
Add<HCheckInstanceType>(right, HCheckInstanceType::IS_INTERNALIZED_STRING);
HCompareObjectEqAndBranch* result =
New<HCompareObjectEqAndBranch>(left, right);
return result;
} else if (combined_type->Is(Type::String())) {
BuildCheckHeapObject(left);
Add<HCheckInstanceType>(left, HCheckInstanceType::IS_STRING);
BuildCheckHeapObject(right);
Add<HCheckInstanceType>(right, HCheckInstanceType::IS_STRING);
HStringCompareAndBranch* result =
New<HStringCompareAndBranch>(left, right, op);
return result;
} else {
if (combined_rep.IsTagged() || combined_rep.IsNone()) {
HCompareGeneric* result = Add<HCompareGeneric>(left, right, op);
result->set_observed_input_representation(1, left_rep);
result->set_observed_input_representation(2, right_rep);
if (result->HasObservableSideEffects()) {
if (push_sim_result == PUSH_BEFORE_SIMULATE) {
Push(result);
AddSimulate(bailout_id, REMOVABLE_SIMULATE);
Drop(1);
} else {
AddSimulate(bailout_id, REMOVABLE_SIMULATE);
}
}
// TODO(jkummerow): Can we make this more efficient?
HBranch* branch = New<HBranch>(result);
return branch;
} else {
HCompareNumericAndBranch* result =
New<HCompareNumericAndBranch>(left, right, op);
result->set_observed_input_representation(left_rep, right_rep);
if (FLAG_hydrogen_track_positions) {
result->SetOperandPositions(zone(), left_position, right_position);
}
return result;
}
}
}
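// Compares a value against null or undefined. Strict equality becomes a
// direct reference comparison against the nil constant; sloppy equality
// goes through BuildCompareNil, which also has to accept the other nil
// value and undetectable objects (per the == semantics of null/undefined).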
void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
Expression* sub_expr,
NilValue nil) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ASSERT(expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT);
if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
CHECK_ALIVE(VisitForValue(sub_expr));
HValue* value = Pop();
if (expr->op() == Token::EQ_STRICT) {
HConstant* nil_constant = nil == kNullValue
? graph()->GetConstantNull()
: graph()->GetConstantUndefined();
HCompareObjectEqAndBranch* instr =
New<HCompareObjectEqAndBranch>(value, nil_constant);
return ast_context()->ReturnControl(instr, expr->id());
} else {
ASSERT_EQ(Token::EQ, expr->op());
Type* type = expr->combined_type()->Is(Type::None())
? Type::Any(zone()) : expr->combined_type();
HIfContinuation continuation;
BuildCompareNil(value, type, &continuation);
return ast_context()->ReturnContinuation(&continuation, expr->id());
}
}
HInstruction* HOptimizedGraphBuilder::BuildThisFunction() {
// If we share optimized code between different closures, the
// this-function is not a constant, except inside an inlined body.
if (function_state()->outer() != NULL) {
return New<HConstant>(
function_state()->compilation_info()->closure());
} else {
return New<HThisFunction>();
}
}
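// Emits inline allocation and initialization code that deep-copies a
// boilerplate object or array literal, recursing into nested JSObject
// values through the allocation-site usage context.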
HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
Handle<JSObject> boilerplate_object,
AllocationSiteUsageContext* site_context) {
NoObservableSideEffectsScope no_effects(this);
InstanceType instance_type = boilerplate_object->map()->instance_type();
ASSERT(instance_type == JS_ARRAY_TYPE || instance_type == JS_OBJECT_TYPE);
HType type = instance_type == JS_ARRAY_TYPE
? HType::JSArray() : HType::JSObject();
HValue* object_size_constant = Add<HConstant>(
boilerplate_object->map()->instance_size());
PretenureFlag pretenure_flag = NOT_TENURED;
if (FLAG_allocation_site_pretenuring) {
pretenure_flag = site_context->current()->GetPretenureMode();
Handle<AllocationSite> site(site_context->current());
AllocationSite::AddDependentCompilationInfo(
site, AllocationSite::TENURING, top_info());
}
HInstruction* object = Add<HAllocate>(object_size_constant, type,
pretenure_flag, instance_type, site_context->current());
  // If allocation folding reaches Page::kMaxRegularHeapObjectSize, the
  // elements array may not get folded into the object. Hence, we set the
  // elements pointer to the empty fixed array and let store elimination
  // remove this store in the folding case.
HConstant* empty_fixed_array = Add<HConstant>(
isolate()->factory()->empty_fixed_array());
Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
empty_fixed_array);
BuildEmitObjectHeader(boilerplate_object, object);
Handle<FixedArrayBase> elements(boilerplate_object->elements());
int elements_size = (elements->length() > 0 &&
elements->map() != isolate()->heap()->fixed_cow_array_map()) ?
elements->Size() : 0;
if (pretenure_flag == TENURED &&
elements->map() == isolate()->heap()->fixed_cow_array_map() &&
isolate()->heap()->InNewSpace(*elements)) {
// If we would like to pretenure a fixed cow array, we must ensure that the
// array is already in old space, otherwise we'll create too many old-to-
// new-space pointers (overflowing the store buffer).
elements = Handle<FixedArrayBase>(
isolate()->factory()->CopyAndTenureFixedCOWArray(
Handle<FixedArray>::cast(elements)));
boilerplate_object->set_elements(*elements);
}
HInstruction* object_elements = NULL;
if (elements_size > 0) {
HValue* object_elements_size = Add<HConstant>(elements_size);
InstanceType instance_type = boilerplate_object->HasFastDoubleElements()
? FIXED_DOUBLE_ARRAY_TYPE : FIXED_ARRAY_TYPE;
object_elements = Add<HAllocate>(
object_elements_size, HType::HeapObject(),
pretenure_flag, instance_type, site_context->current());
}
BuildInitElementsInObjectHeader(boilerplate_object, object, object_elements);
// Copy object elements if non-COW.
if (object_elements != NULL) {
BuildEmitElements(boilerplate_object, elements, object_elements,
site_context);
}
// Copy in-object properties.
if (boilerplate_object->map()->NumberOfFields() != 0) {
BuildEmitInObjectProperties(boilerplate_object, object, site_context,
pretenure_flag);
}
return object;
}
void HOptimizedGraphBuilder::BuildEmitObjectHeader(
Handle<JSObject> boilerplate_object,
HInstruction* object) {
ASSERT(boilerplate_object->properties()->length() == 0);
Handle<Map> boilerplate_object_map(boilerplate_object->map());
AddStoreMapConstant(object, boilerplate_object_map);
Handle<Object> properties_field =
Handle<Object>(boilerplate_object->properties(), isolate());
ASSERT(*properties_field == isolate()->heap()->empty_fixed_array());
HInstruction* properties = Add<HConstant>(properties_field);
HObjectAccess access = HObjectAccess::ForPropertiesPointer();
Add<HStoreNamedField>(object, access, properties);
if (boilerplate_object->IsJSArray()) {
Handle<JSArray> boilerplate_array =
Handle<JSArray>::cast(boilerplate_object);
Handle<Object> length_field =
Handle<Object>(boilerplate_array->length(), isolate());
HInstruction* length = Add<HConstant>(length_field);
ASSERT(boilerplate_array->length()->IsSmi());
Add<HStoreNamedField>(object, HObjectAccess::ForArrayLength(
boilerplate_array->GetElementsKind()), length);
}
}
void HOptimizedGraphBuilder::BuildInitElementsInObjectHeader(
Handle<JSObject> boilerplate_object,
HInstruction* object,
HInstruction* object_elements) {
ASSERT(boilerplate_object->properties()->length() == 0);
if (object_elements == NULL) {
Handle<Object> elements_field =
Handle<Object>(boilerplate_object->elements(), isolate());
object_elements = Add<HConstant>(elements_field);
}
Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
object_elements);
}
void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
Handle<JSObject> boilerplate_object,
HInstruction* object,
AllocationSiteUsageContext* site_context,
PretenureFlag pretenure_flag) {
Handle<Map> boilerplate_map(boilerplate_object->map());
Handle<DescriptorArray> descriptors(boilerplate_map->instance_descriptors());
int limit = boilerplate_map->NumberOfOwnDescriptors();
int copied_fields = 0;
for (int i = 0; i < limit; i++) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.type() != FIELD) continue;
copied_fields++;
int index = descriptors->GetFieldIndex(i);
int property_offset = boilerplate_object->GetInObjectPropertyOffset(index);
Handle<Name> name(descriptors->GetKey(i));
Handle<Object> value =
Handle<Object>(boilerplate_object->InObjectPropertyAt(index),
isolate());
// The access for the store depends on the type of the boilerplate.
HObjectAccess access = boilerplate_object->IsJSArray() ?
HObjectAccess::ForJSArrayOffset(property_offset) :
HObjectAccess::ForMapAndOffset(boilerplate_map, property_offset);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
Handle<AllocationSite> current_site = site_context->EnterNewScope();
HInstruction* result =
BuildFastLiteral(value_object, site_context);
site_context->ExitScope(current_site, value_object);
Add<HStoreNamedField>(object, access, result);
} else {
Representation representation = details.representation();
HInstruction* value_instruction;
if (representation.IsDouble()) {
// Allocate a HeapNumber box and store the value into it.
HValue* heap_number_constant = Add<HConstant>(HeapNumber::kSize);
        // This heap number allocation does not have a corresponding
        // AllocationSite. That is okay because:
        // 1) it's a child object of another object with a valid allocation
        //    site, and
        // 2) we can just use the mode of the parent object for pretenuring.
HInstruction* double_box =
Add<HAllocate>(heap_number_constant, HType::HeapObject(),
pretenure_flag, HEAP_NUMBER_TYPE);
AddStoreMapConstant(double_box,
isolate()->factory()->heap_number_map());
Add<HStoreNamedField>(double_box, HObjectAccess::ForHeapNumberValue(),
Add<HConstant>(value));
value_instruction = double_box;
} else if (representation.IsSmi()) {
value_instruction = value->IsUninitialized()
? graph()->GetConstant0()
: Add<HConstant>(value);
// Ensure that value is stored as smi.
access = access.WithRepresentation(representation);
} else {
value_instruction = Add<HConstant>(value);
}
Add<HStoreNamedField>(object, access, value_instruction);
}
}
int inobject_properties = boilerplate_object->map()->inobject_properties();
HInstruction* value_instruction =
Add<HConstant>(isolate()->factory()->one_pointer_filler_map());
for (int i = copied_fields; i < inobject_properties; i++) {
ASSERT(boilerplate_object->IsJSObject());
int property_offset = boilerplate_object->GetInObjectPropertyOffset(i);
HObjectAccess access =
HObjectAccess::ForMapAndOffset(boilerplate_map, property_offset);
Add<HStoreNamedField>(object, access, value_instruction);
}
}
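// Writes the header of the freshly allocated elements backing store and
// copies the boilerplate's element values, dispatching on whether the
// backing store holds doubles or tagged values.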
void HOptimizedGraphBuilder::BuildEmitElements(
Handle<JSObject> boilerplate_object,
Handle<FixedArrayBase> elements,
HValue* object_elements,
AllocationSiteUsageContext* site_context) {
ElementsKind kind = boilerplate_object->map()->elements_kind();
int elements_length = elements->length();
HValue* object_elements_length = Add<HConstant>(elements_length);
BuildInitializeElementsHeader(object_elements, kind, object_elements_length);
// Copy elements backing store content.
if (elements->IsFixedDoubleArray()) {
BuildEmitFixedDoubleArray(elements, kind, object_elements);
} else if (elements->IsFixedArray()) {
BuildEmitFixedArray(elements, kind, object_elements,
site_context);
} else {
UNREACHABLE();
}
}
void HOptimizedGraphBuilder::BuildEmitFixedDoubleArray(
Handle<FixedArrayBase> elements,
ElementsKind kind,
HValue* object_elements) {
HInstruction* boilerplate_elements = Add<HConstant>(elements);
int elements_length = elements->length();
for (int i = 0; i < elements_length; i++) {
HValue* key_constant = Add<HConstant>(i);
HInstruction* value_instruction =
Add<HLoadKeyed>(boilerplate_elements, key_constant,
static_cast<HValue*>(NULL), kind,
ALLOW_RETURN_HOLE);
HInstruction* store = Add<HStoreKeyed>(object_elements, key_constant,
value_instruction, kind);
store->SetFlag(HValue::kAllowUndefinedAsNaN);
}
}
void HOptimizedGraphBuilder::BuildEmitFixedArray(
Handle<FixedArrayBase> elements,
ElementsKind kind,
HValue* object_elements,
AllocationSiteUsageContext* site_context) {
HInstruction* boilerplate_elements = Add<HConstant>(elements);
int elements_length = elements->length();
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
for (int i = 0; i < elements_length; i++) {
Handle<Object> value(fast_elements->get(i), isolate());
HValue* key_constant = Add<HConstant>(i);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
Handle<AllocationSite> current_site = site_context->EnterNewScope();
HInstruction* result =
BuildFastLiteral(value_object, site_context);
site_context->ExitScope(current_site, value_object);
Add<HStoreKeyed>(object_elements, key_constant, result, kind);
} else {
HInstruction* value_instruction =
Add<HLoadKeyed>(boilerplate_elements, key_constant,
static_cast<HValue*>(NULL), kind,
ALLOW_RETURN_HOLE);
Add<HStoreKeyed>(object_elements, key_constant, value_instruction, kind);
}
}
}
void HOptimizedGraphBuilder::VisitThisFunction(ThisFunction* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
HInstruction* instr = BuildThisFunction();
return ast_context()->ReturnInstruction(instr, expr->id());
}
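// Global declarations are batched up while visiting and then emitted as a
// single HDeclareGlobals call carrying the eval/native/strict-mode flags.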
void HOptimizedGraphBuilder::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
ASSERT(globals_.is_empty());
AstVisitor::VisitDeclarations(declarations);
if (!globals_.is_empty()) {
Handle<FixedArray> array =
isolate()->factory()->NewFixedArray(globals_.length(), TENURED);
for (int i = 0; i < globals_.length(); ++i) array->set(i, *globals_.at(i));
int flags = DeclareGlobalsEvalFlag::encode(current_info()->is_eval()) |
DeclareGlobalsNativeFlag::encode(current_info()->is_native()) |
DeclareGlobalsStrictMode::encode(current_info()->strict_mode());
Add<HDeclareGlobals>(array, flags);
globals_.Rewind(0);
}
}
void HOptimizedGraphBuilder::VisitVariableDeclaration(
VariableDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case Variable::UNALLOCATED:
globals_.Add(variable->name(), zone());
globals_.Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
: isolate()->factory()->undefined_value(), zone());
return;
case Variable::PARAMETER:
case Variable::LOCAL:
if (hole_init) {
HValue* value = graph()->GetConstantHole();
environment()->Bind(variable, value);
}
break;
case Variable::CONTEXT:
if (hole_init) {
HValue* value = graph()->GetConstantHole();
HValue* context = environment()->context();
HStoreContextSlot* store = Add<HStoreContextSlot>(
context, variable->index(), HStoreContextSlot::kNoCheck, value);
if (store->HasObservableSideEffects()) {
Add<HSimulate>(proxy->id(), REMOVABLE_SIMULATE);
}
}
break;
case Variable::LOOKUP:
return Bailout(kUnsupportedLookupSlotInDeclaration);
}
}
void HOptimizedGraphBuilder::VisitFunctionDeclaration(
FunctionDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
case Variable::UNALLOCATED: {
globals_.Add(variable->name(), zone());
Handle<SharedFunctionInfo> function = Compiler::BuildFunctionInfo(
declaration->fun(), current_info()->script());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_.Add(function, zone());
return;
}
case Variable::PARAMETER:
case Variable::LOCAL: {
CHECK_ALIVE(VisitForValue(declaration->fun()));
HValue* value = Pop();
BindIfLive(variable, value);
break;
}
case Variable::CONTEXT: {
CHECK_ALIVE(VisitForValue(declaration->fun()));
HValue* value = Pop();
HValue* context = environment()->context();
HStoreContextSlot* store = Add<HStoreContextSlot>(
context, variable->index(), HStoreContextSlot::kNoCheck, value);
if (store->HasObservableSideEffects()) {
Add<HSimulate>(proxy->id(), REMOVABLE_SIMULATE);
}
break;
}
case Variable::LOOKUP:
return Bailout(kUnsupportedLookupSlotInDeclaration);
}
}
void HOptimizedGraphBuilder::VisitModuleDeclaration(
ModuleDeclaration* declaration) {
UNREACHABLE();
}
void HOptimizedGraphBuilder::VisitImportDeclaration(
ImportDeclaration* declaration) {
UNREACHABLE();
}
void HOptimizedGraphBuilder::VisitExportDeclaration(
ExportDeclaration* declaration) {
UNREACHABLE();
}
void HOptimizedGraphBuilder::VisitModuleLiteral(ModuleLiteral* module) {
UNREACHABLE();
}
void HOptimizedGraphBuilder::VisitModuleVariable(ModuleVariable* module) {
UNREACHABLE();
}
void HOptimizedGraphBuilder::VisitModulePath(ModulePath* module) {
UNREACHABLE();
}
void HOptimizedGraphBuilder::VisitModuleUrl(ModuleUrl* module) {
UNREACHABLE();
}
void HOptimizedGraphBuilder::VisitModuleStatement(ModuleStatement* stmt) {
UNREACHABLE();
}
// Generators for inline runtime functions.
// Support for types.
void HOptimizedGraphBuilder::GenerateIsSmi(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HIsSmiAndBranch* result = New<HIsSmiAndBranch>(value);
return ast_context()->ReturnControl(result, call->id());
}
void HOptimizedGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasInstanceTypeAndBranch* result =
New<HHasInstanceTypeAndBranch>(value,
FIRST_SPEC_OBJECT_TYPE,
LAST_SPEC_OBJECT_TYPE);
return ast_context()->ReturnControl(result, call->id());
}
void HOptimizedGraphBuilder::GenerateIsFunction(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasInstanceTypeAndBranch* result =
New<HHasInstanceTypeAndBranch>(value, JS_FUNCTION_TYPE);
return ast_context()->ReturnControl(result, call->id());
}
void HOptimizedGraphBuilder::GenerateIsMinusZero(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HCompareMinusZeroAndBranch* result = New<HCompareMinusZeroAndBranch>(value);
return ast_context()->ReturnControl(result, call->id());
}
void HOptimizedGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasCachedArrayIndexAndBranch* result =
New<HHasCachedArrayIndexAndBranch>(value);
return ast_context()->ReturnControl(result, call->id());
}
void HOptimizedGraphBuilder::GenerateIsArray(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasInstanceTypeAndBranch* result =
New<HHasInstanceTypeAndBranch>(value, JS_ARRAY_TYPE);
return ast_context()->ReturnControl(result, call->id());
}
void HOptimizedGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasInstanceTypeAndBranch* result =
New<HHasInstanceTypeAndBranch>(value, JS_REGEXP_TYPE);
return ast_context()->ReturnControl(result, call->id());
}
void HOptimizedGraphBuilder::GenerateIsObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HIsObjectAndBranch* result = New<HIsObjectAndBranch>(value);
return ast_context()->ReturnControl(result, call->id());
}
void HOptimizedGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
return Bailout(kInlinedRuntimeFunctionIsNonNegativeSmi);
}
void HOptimizedGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HIsUndetectableAndBranch* result = New<HIsUndetectableAndBranch>(value);
return ast_context()->ReturnControl(result, call->id());
}
void HOptimizedGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
CallRuntime* call) {
return Bailout(kInlinedRuntimeFunctionIsStringWrapperSafeForDefaultValueOf);
}
// Support for construct call checks.
void HOptimizedGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
ASSERT(call->arguments()->length() == 0);
if (function_state()->outer() != NULL) {
    // We are generating the graph for an inlined function.
HValue* value = function_state()->inlining_kind() == CONSTRUCT_CALL_RETURN
? graph()->GetConstantTrue()
: graph()->GetConstantFalse();
return ast_context()->ReturnValue(value);
} else {
return ast_context()->ReturnControl(New<HIsConstructCallAndBranch>(),
call->id());
}
}
// Support for arguments.length and arguments[?].
void HOptimizedGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
// Our implementation of arguments (based on this stack frame or an
// adapter below it) does not work for inlined functions. This runtime
// function is blacklisted by AstNode::IsInlineable.
ASSERT(function_state()->outer() == NULL);
ASSERT(call->arguments()->length() == 0);
HInstruction* elements = Add<HArgumentsElements>(false);
HArgumentsLength* result = New<HArgumentsLength>(elements);
return ast_context()->ReturnInstruction(result, call->id());
}
void HOptimizedGraphBuilder::GenerateArguments(CallRuntime* call) {
// Our implementation of arguments (based on this stack frame or an
// adapter below it) does not work for inlined functions. This runtime
// function is blacklisted by AstNode::IsInlineable.
ASSERT(function_state()->outer() == NULL);
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* index = Pop();
HInstruction* elements = Add<HArgumentsElements>(false);
HInstruction* length = Add<HArgumentsLength>(elements);
HInstruction* checked_index = Add<HBoundsCheck>(index, length);
HAccessArgumentsAt* result = New<HAccessArgumentsAt>(
elements, length, checked_index);
return ast_context()->ReturnInstruction(result, call->id());
}
// Support for accessing the class and value fields of an object.
void HOptimizedGraphBuilder::GenerateClassOf(CallRuntime* call) {
  // The special form recognized by IsClassOfTest is handled before we get
  // here and does not cause a bailout.
return Bailout(kInlinedRuntimeFunctionClassOf);
}
void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* object = Pop();
IfBuilder if_objectisvalue(this);
HValue* objectisvalue = if_objectisvalue.If<HHasInstanceTypeAndBranch>(
object, JS_VALUE_TYPE);
if_objectisvalue.Then();
{
// Return the actual value.
Push(Add<HLoadNamedField>(
object, objectisvalue,
HObjectAccess::ForObservableJSObjectOffset(
JSValue::kValueOffset)));
Add<HSimulate>(call->id(), FIXED_SIMULATE);
}
if_objectisvalue.Else();
{
// If the object is not a value return the object.
Push(object);
Add<HSimulate>(call->id(), FIXED_SIMULATE);
}
if_objectisvalue.End();
return ast_context()->ReturnValue(Pop());
}
void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
ASSERT_NE(NULL, call->arguments()->at(1)->AsLiteral());
Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->value()));
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* date = Pop();
HDateField* result = New<HDateField>(date, index);
return ast_context()->ReturnInstruction(result, call->id());
}
void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
CallRuntime* call) {
ASSERT(call->arguments()->length() == 3);
// We need to follow the evaluation order of full codegen.
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* string = Pop();
HValue* value = Pop();
HValue* index = Pop();
Add<HSeqStringSetChar>(String::ONE_BYTE_ENCODING, string,
index, value);
Add<HSimulate>(call->id(), FIXED_SIMULATE);
return ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar(
CallRuntime* call) {
ASSERT(call->arguments()->length() == 3);
// We need to follow the evaluation order of full codegen.
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* string = Pop();
HValue* value = Pop();
HValue* index = Pop();
Add<HSeqStringSetChar>(String::TWO_BYTE_ENCODING, string,
index, value);
Add<HSimulate>(call->id(), FIXED_SIMULATE);
return ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* value = Pop();
HValue* object = Pop();
// Check if object is a JSValue.
IfBuilder if_objectisvalue(this);
if_objectisvalue.If<HHasInstanceTypeAndBranch>(object, JS_VALUE_TYPE);
if_objectisvalue.Then();
{
// Create in-object property store to kValueOffset.
Add<HStoreNamedField>(object,
HObjectAccess::ForObservableJSObjectOffset(JSValue::kValueOffset),
value);
if (!ast_context()->IsEffect()) {
Push(value);
}
Add<HSimulate>(call->id(), FIXED_SIMULATE);
}
if_objectisvalue.Else();
{
// Nothing to do in this case.
if (!ast_context()->IsEffect()) {
Push(value);
}
Add<HSimulate>(call->id(), FIXED_SIMULATE);
}
if_objectisvalue.End();
if (!ast_context()->IsEffect()) {
Drop(1);
}
return ast_context()->ReturnValue(value);
}
// Fast support for charCodeAt(n).
void HOptimizedGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* index = Pop();
HValue* string = Pop();
HInstruction* result = BuildStringCharCodeAt(string, index);
return ast_context()->ReturnInstruction(result, call->id());
}
// Fast support for string.charAt(n) and string[n].
void HOptimizedGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* char_code = Pop();
HInstruction* result = NewUncasted<HStringCharFromCode>(char_code);
return ast_context()->ReturnInstruction(result, call->id());
}
// Fast support for string.charAt(n) and string[n].
void HOptimizedGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* index = Pop();
HValue* string = Pop();
HInstruction* char_code = BuildStringCharCodeAt(string, index);
AddInstruction(char_code);
HInstruction* result = NewUncasted<HStringCharFromCode>(char_code);
return ast_context()->ReturnInstruction(result, call->id());
}
// Fast support for object equality testing.
void HOptimizedGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* right = Pop();
HValue* left = Pop();
HCompareObjectEqAndBranch* result =
New<HCompareObjectEqAndBranch>(left, right);
return ast_context()->ReturnControl(result, call->id());
}
// Fast support for StringAdd.
void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* right = Pop();
HValue* left = Pop();
HInstruction* result = NewUncasted<HStringAdd>(left, right);
return ast_context()->ReturnInstruction(result, call->id());
}
// Fast support for SubString.
void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
ASSERT_EQ(3, call->arguments()->length());
CHECK_ALIVE(VisitExpressions(call->arguments()));
PushArgumentsFromEnvironment(call->arguments()->length());
HCallStub* result = New<HCallStub>(CodeStub::SubString, 3);
return ast_context()->ReturnInstruction(result, call->id());
}
// Fast support for StringCompare.
void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
CHECK_ALIVE(VisitExpressions(call->arguments()));
PushArgumentsFromEnvironment(call->arguments()->length());
HCallStub* result = New<HCallStub>(CodeStub::StringCompare, 2);
return ast_context()->ReturnInstruction(result, call->id());
}
// Support for direct calls from JavaScript to native RegExp code.
void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
ASSERT_EQ(4, call->arguments()->length());
CHECK_ALIVE(VisitExpressions(call->arguments()));
PushArgumentsFromEnvironment(call->arguments()->length());
HCallStub* result = New<HCallStub>(CodeStub::RegExpExec, 4);
return ast_context()->ReturnInstruction(result, call->id());
}
void HOptimizedGraphBuilder::GenerateDoubleLo(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HInstruction* result = NewUncasted<HDoubleBits>(value, HDoubleBits::LOW);
return ast_context()->ReturnInstruction(result, call->id());
}
void HOptimizedGraphBuilder::GenerateDoubleHi(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HInstruction* result = NewUncasted<HDoubleBits>(value, HDoubleBits::HIGH);
return ast_context()->ReturnInstruction(result, call->id());
}
void HOptimizedGraphBuilder::GenerateConstructDouble(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* lo = Pop();
HValue* hi = Pop();
HInstruction* result = NewUncasted<HConstructDouble>(hi, lo);
return ast_context()->ReturnInstruction(result, call->id());
}
// Construct a RegExp exec result with two in-object properties.
void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
ASSERT_EQ(3, call->arguments()->length());
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
HValue* input = Pop();
HValue* index = Pop();
HValue* length = Pop();
HValue* result = BuildRegExpConstructResult(length, index, input);
return ast_context()->ReturnValue(result);
}
// Support for fast native caches.
void HOptimizedGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
return Bailout(kInlinedRuntimeFunctionGetFromCache);
}
// Fast support for number to string.
void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* number = Pop();
HValue* result = BuildNumberToString(number, Type::Any(zone()));
return ast_context()->ReturnValue(result);
}
// Fast call for custom callbacks.
void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
  // Subtract 1: the function to call is not itself an argument to the call.
int arg_count = call->arguments()->length() - 1;
ASSERT(arg_count >= 1); // There's always at least a receiver.
CHECK_ALIVE(VisitExpressions(call->arguments()));
  // The function is the last argument.
  HValue* function = Pop();
  // Push the arguments to the stack.
PushArgumentsFromEnvironment(arg_count);
IfBuilder if_is_jsfunction(this);
if_is_jsfunction.If<HHasInstanceTypeAndBranch>(function, JS_FUNCTION_TYPE);
if_is_jsfunction.Then();
{
HInstruction* invoke_result =
Add<HInvokeFunction>(function, arg_count);
if (!ast_context()->IsEffect()) {
Push(invoke_result);
}
Add<HSimulate>(call->id(), FIXED_SIMULATE);
}
if_is_jsfunction.Else();
{
HInstruction* call_result =
Add<HCallFunction>(function, arg_count);
if (!ast_context()->IsEffect()) {
Push(call_result);
}
Add<HSimulate>(call->id(), FIXED_SIMULATE);
}
if_is_jsfunction.End();
if (ast_context()->IsEffect()) {
// EffectContext::ReturnValue ignores the value, so we can just pass
// 'undefined' (as we do not have the call result anymore).
return ast_context()->ReturnValue(graph()->GetConstantUndefined());
} else {
return ast_context()->ReturnValue(Pop());
}
}
// Fast call to math functions.
void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* right = Pop();
HValue* left = Pop();
HInstruction* result = NewUncasted<HPower>(left, right);
return ast_context()->ReturnInstruction(result, call->id());
}
void HOptimizedGraphBuilder::GenerateMathLogRT(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathLog);
return ast_context()->ReturnInstruction(result, call->id());
}
void HOptimizedGraphBuilder::GenerateMathSqrtRT(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathSqrt);
return ast_context()->ReturnInstruction(result, call->id());
}
void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HGetCachedArrayIndex* result = New<HGetCachedArrayIndex>(value);
return ast_context()->ReturnInstruction(result, call->id());
}
void HOptimizedGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
return Bailout(kInlinedRuntimeFunctionFastAsciiArrayJoin);
}
// Support for generators.
void HOptimizedGraphBuilder::GenerateGeneratorNext(CallRuntime* call) {
return Bailout(kInlinedRuntimeFunctionGeneratorNext);
}
void HOptimizedGraphBuilder::GenerateGeneratorThrow(CallRuntime* call) {
return Bailout(kInlinedRuntimeFunctionGeneratorThrow);
}
void HOptimizedGraphBuilder::GenerateDebugBreakInOptimizedCode(
CallRuntime* call) {
Add<HDebugBreak>();
return ast_context()->ReturnValue(graph()->GetConstant0());
}
void HOptimizedGraphBuilder::GenerateDebugIsActive(CallRuntime* call) {
ASSERT(call->arguments()->length() == 0);
HValue* ref =
Add<HConstant>(ExternalReference::debug_is_active_address(isolate()));
HValue* value = Add<HLoadNamedField>(
ref, static_cast<HValue*>(NULL), HObjectAccess::ForExternalUInteger8());
return ast_context()->ReturnValue(value);
}
#undef CHECK_BAILOUT
#undef CHECK_ALIVE
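// HEnvironment models the abstract state of a JavaScript frame while the
// graph is built: parameters, specials (the context slot), locals, and the
// simulated expression stack, together with the push/pop bookkeeping used
// to emit deoptimization data.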
HEnvironment::HEnvironment(HEnvironment* outer,
Scope* scope,
Handle<JSFunction> closure,
Zone* zone)
: closure_(closure),
values_(0, zone),
frame_type_(JS_FUNCTION),
parameter_count_(0),
specials_count_(1),
local_count_(0),
outer_(outer),
entry_(NULL),
pop_count_(0),
push_count_(0),
ast_id_(BailoutId::None()),
zone_(zone) {
Scope* declaration_scope = scope->DeclarationScope();
Initialize(declaration_scope->num_parameters() + 1,
declaration_scope->num_stack_slots(), 0);
}
HEnvironment::HEnvironment(Zone* zone, int parameter_count)
: values_(0, zone),
frame_type_(STUB),
parameter_count_(parameter_count),
specials_count_(1),
local_count_(0),
outer_(NULL),
entry_(NULL),
pop_count_(0),
push_count_(0),
ast_id_(BailoutId::None()),
zone_(zone) {
Initialize(parameter_count, 0, 0);
}
HEnvironment::HEnvironment(const HEnvironment* other, Zone* zone)
: values_(0, zone),
frame_type_(JS_FUNCTION),
parameter_count_(0),
specials_count_(0),
local_count_(0),
outer_(NULL),
entry_(NULL),
pop_count_(0),
push_count_(0),
ast_id_(other->ast_id()),
zone_(zone) {
Initialize(other);
}
HEnvironment::HEnvironment(HEnvironment* outer,
Handle<JSFunction> closure,
FrameType frame_type,
int arguments,
Zone* zone)
: closure_(closure),
values_(arguments, zone),
frame_type_(frame_type),
parameter_count_(arguments),
specials_count_(0),
local_count_(0),
outer_(outer),
entry_(NULL),
pop_count_(0),
push_count_(0),
ast_id_(BailoutId::None()),
zone_(zone) {
}
void HEnvironment::Initialize(int parameter_count,
int local_count,
int stack_height) {
parameter_count_ = parameter_count;
local_count_ = local_count;
// Avoid reallocating the temporaries' backing store on the first Push.
int total = parameter_count + specials_count_ + local_count + stack_height;
values_.Initialize(total + 4, zone());
for (int i = 0; i < total; ++i) values_.Add(NULL, zone());
}
void HEnvironment::Initialize(const HEnvironment* other) {
closure_ = other->closure();
values_.AddAll(other->values_, zone());
assigned_variables_.Union(other->assigned_variables_, zone());
frame_type_ = other->frame_type_;
parameter_count_ = other->parameter_count_;
local_count_ = other->local_count_;
if (other->outer_ != NULL) outer_ = other->outer_->Copy(); // Deep copy.
entry_ = other->entry_;
pop_count_ = other->pop_count_;
push_count_ = other->push_count_;
specials_count_ = other->specials_count_;
ast_id_ = other->ast_id_;
}
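// Merges the values of another environment flowing into 'block': slots
// whose values differ across incoming edges are joined with phis, and phis
// already owned by the block simply receive an additional input.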
void HEnvironment::AddIncomingEdge(HBasicBlock* block, HEnvironment* other) {
ASSERT(!block->IsLoopHeader());
ASSERT(values_.length() == other->values_.length());
int length = values_.length();
for (int i = 0; i < length; ++i) {
HValue* value = values_[i];
if (value != NULL && value->IsPhi() && value->block() == block) {
// There is already a phi for the i'th value.
HPhi* phi = HPhi::cast(value);
// Assert index is correct and that we haven't missed an incoming edge.
ASSERT(phi->merged_index() == i || !phi->HasMergedIndex());
ASSERT(phi->OperandCount() == block->predecessors()->length());
phi->AddInput(other->values_[i]);
} else if (values_[i] != other->values_[i]) {
      // There is a fresh value on the incoming edge; a phi is needed.
ASSERT(values_[i] != NULL && other->values_[i] != NULL);
HPhi* phi = block->AddNewPhi(i);
HValue* old_value = values_[i];
for (int j = 0; j < block->predecessors()->length(); j++) {
phi->AddInput(old_value);
}
phi->AddInput(other->values_[i]);
this->values_[i] = phi;
}
}
}
void HEnvironment::Bind(int index, HValue* value) {
ASSERT(value != NULL);
assigned_variables_.Add(index, zone());
values_[index] = value;
}
bool HEnvironment::HasExpressionAt(int index) const {
return index >= parameter_count_ + specials_count_ + local_count_;
}
bool HEnvironment::ExpressionStackIsEmpty() const {
ASSERT(length() >= first_expression_index());
return length() == first_expression_index();
}
void HEnvironment::SetExpressionStackAt(int index_from_top, HValue* value) {
int count = index_from_top + 1;
int index = values_.length() - count;
ASSERT(HasExpressionAt(index));
// The push count must include at least the element in question or else
// the new value will not be included in this environment's history.
if (push_count_ < count) {
// This is the same effect as popping then re-pushing 'count' elements.
pop_count_ += (count - push_count_);
push_count_ = count;
}
values_[index] = value;
}
void HEnvironment::Drop(int count) {
for (int i = 0; i < count; ++i) {
Pop();
}
}
HEnvironment* HEnvironment::Copy() const {
return new(zone()) HEnvironment(this, zone());
}
HEnvironment* HEnvironment::CopyWithoutHistory() const {
HEnvironment* result = Copy();
result->ClearHistory();
return result;
}
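// A loop-header copy conservatively inserts a phi for every slot, since any
// value may be redefined on the back edge; redundant phis are expected to
// be eliminated by later passes.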
HEnvironment* HEnvironment::CopyAsLoopHeader(HBasicBlock* loop_header) const {
HEnvironment* new_env = Copy();
for (int i = 0; i < values_.length(); ++i) {
HPhi* phi = loop_header->AddNewPhi(i);
phi->AddInput(values_[i]);
new_env->values_[i] = phi;
}
new_env->ClearHistory();
return new_env;
}
HEnvironment* HEnvironment::CreateStubEnvironment(HEnvironment* outer,
Handle<JSFunction> target,
FrameType frame_type,
int arguments) const {
HEnvironment* new_env =
new(zone()) HEnvironment(outer, target, frame_type,
arguments + 1, zone());
for (int i = 0; i <= arguments; ++i) { // Include receiver.
new_env->Push(ExpressionStackAt(arguments - i));
}
new_env->ClearHistory();
return new_env;
}
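// Builds the environment for an inlined function body: the outer
// environment is this one minus the call's arguments, artificial stub
// environments model constructor/getter/setter frames and argument
// adaptation, and the inner environment is seeded with the receiver, the
// (possibly undefined-padded) arguments, and the context.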
HEnvironment* HEnvironment::CopyForInlining(
Handle<JSFunction> target,
int arguments,
FunctionLiteral* function,
HConstant* undefined,
InliningKind inlining_kind) const {
ASSERT(frame_type() == JS_FUNCTION);
// Outer environment is a copy of this one without the arguments.
int arity = function->scope()->num_parameters();
HEnvironment* outer = Copy();
outer->Drop(arguments + 1); // Including receiver.
outer->ClearHistory();
if (inlining_kind == CONSTRUCT_CALL_RETURN) {
    // Create artificial constructor stub environment. The receiver should
    // actually be the constructor function, but we pass the newly allocated
    // object instead; DoComputeConstructStubFrame() relies on that.
outer = CreateStubEnvironment(outer, target, JS_CONSTRUCT, arguments);
} else if (inlining_kind == GETTER_CALL_RETURN) {
// We need an additional StackFrame::INTERNAL frame for restoring the
// correct context.
outer = CreateStubEnvironment(outer, target, JS_GETTER, arguments);
} else if (inlining_kind == SETTER_CALL_RETURN) {
// We need an additional StackFrame::INTERNAL frame for temporarily saving
// the argument of the setter, see StoreStubCompiler::CompileStoreViaSetter.
outer = CreateStubEnvironment(outer, target, JS_SETTER, arguments);
}
if (arity != arguments) {
// Create artificial arguments adaptation environment.
outer = CreateStubEnvironment(outer, target, ARGUMENTS_ADAPTOR, arguments);
}
HEnvironment* inner =
new(zone()) HEnvironment(outer, function->scope(), target, zone());
// Get the argument values from the original environment.
for (int i = 0; i <= arity; ++i) { // Include receiver.
HValue* push = (i <= arguments) ?
ExpressionStackAt(arguments - i) : undefined;
inner->SetValueAt(i, push);
}
inner->SetValueAt(arity + 1, context());
for (int i = arity + 2; i < inner->length(); ++i) {
inner->SetValueAt(i, undefined);
}
inner->set_ast_id(BailoutId::FunctionEntry());
return inner;
}
void HEnvironment::PrintTo(StringStream* stream) {
for (int i = 0; i < length(); i++) {
if (i == 0) stream->Add("parameters\n");
if (i == parameter_count()) stream->Add("specials\n");
if (i == parameter_count() + specials_count()) stream->Add("locals\n");
if (i == parameter_count() + specials_count() + local_count()) {
stream->Add("expressions\n");
}
HValue* val = values_.at(i);
stream->Add("%d: ", i);
if (val != NULL) {
val->PrintNameTo(stream);
} else {
stream->Add("NULL");
}
stream->Add("\n");
}
PrintF("\n");
}
void HEnvironment::PrintToStd() {
HeapStringAllocator string_allocator;
StringStream trace(&string_allocator);
PrintTo(&trace);
PrintF("%s", trace.ToCString().get());
}
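// The HTracer methods below emit the --trace-hydrogen output, a textual
// CFG dump in a format readable by the C1Visualizer tool.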
void HTracer::TraceCompilation(CompilationInfo* info) {
Tag tag(this, "compilation");
if (info->IsOptimizing()) {
Handle<String> name = info->function()->debug_name();
PrintStringProperty("name", name->ToCString().get());
PrintIndent();
trace_.Add("method \"%s:%d\"\n",
name->ToCString().get(),
info->optimization_id());
} else {
CodeStub::Major major_key = info->code_stub()->MajorKey();
PrintStringProperty("name", CodeStub::MajorName(major_key, false));
PrintStringProperty("method", "stub");
}
PrintLongProperty("date", static_cast<int64_t>(OS::TimeCurrentMillis()));
}
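
// Trace the Lithium (low-level IR) chunk together with its Hydrogen graph.
// Handle dereferences are only safe here because concurrent recompilation
// is disabled.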
void HTracer::TraceLithium(const char* name, LChunk* chunk) {
ASSERT(!chunk->isolate()->concurrent_recompilation_enabled());
AllowHandleDereference allow_deref;
AllowDeferredHandleDereference allow_deferred_deref;
Trace(name, chunk->graph(), chunk);
}
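
// Trace the Hydrogen (high-level IR) graph on its own.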
void HTracer::TraceHydrogen(const char* name, HGraph* graph) {
ASSERT(!graph->isolate()->concurrent_recompilation_enabled());
AllowHandleDereference allow_deref;
AllowDeferredHandleDereference allow_deferred_deref;
Trace(name, graph, NULL);
}
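
// Emit one "cfg" section: for every basic block, print its predecessors,
// successors, flags, dominator, loop depth, phis and HIR instructions, and,
// when a Lithium chunk is given, the corresponding LIR instructions.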
void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
Tag tag(this, "cfg");
PrintStringProperty("name", name);
const ZoneList<HBasicBlock*>* blocks = graph->blocks();
for (int i = 0; i < blocks->length(); i++) {
HBasicBlock* current = blocks->at(i);
Tag block_tag(this, "block");
PrintBlockProperty("name", current->block_id());
PrintIntProperty("from_bci", -1);
PrintIntProperty("to_bci", -1);
if (!current->predecessors()->is_empty()) {
PrintIndent();
trace_.Add("predecessors");
for (int j = 0; j < current->predecessors()->length(); ++j) {
trace_.Add(" \"B%d\"", current->predecessors()->at(j)->block_id());
}
trace_.Add("\n");
} else {
PrintEmptyProperty("predecessors");
}
if (current->end()->SuccessorCount() == 0) {
PrintEmptyProperty("successors");
} else {
PrintIndent();
trace_.Add("successors");
for (HSuccessorIterator it(current->end()); !it.Done(); it.Advance()) {
trace_.Add(" \"B%d\"", it.Current()->block_id());
}
trace_.Add("\n");
}
PrintEmptyProperty("xhandlers");
{
PrintIndent();
trace_.Add("flags");
if (current->IsLoopSuccessorDominator()) {
trace_.Add(" \"dom-loop-succ\"");
}
if (current->IsUnreachable()) {
trace_.Add(" \"dead\"");
}
if (current->is_osr_entry()) {
trace_.Add(" \"osr\"");
}
trace_.Add("\n");
}
if (current->dominator() != NULL) {
PrintBlockProperty("dominator", current->dominator()->block_id());
}
PrintIntProperty("loop_depth", current->LoopNestingDepth());
if (chunk != NULL) {
int first_index = current->first_instruction_index();
int last_index = current->last_instruction_index();
PrintIntProperty(
"first_lir_id",
LifetimePosition::FromInstructionIndex(first_index).Value());
PrintIntProperty(
"last_lir_id",
LifetimePosition::FromInstructionIndex(last_index).Value());
}
{
Tag states_tag(this, "states");
Tag locals_tag(this, "locals");
int total = current->phis()->length();
      PrintIntProperty("size", total);
PrintStringProperty("method", "None");
for (int j = 0; j < total; ++j) {
HPhi* phi = current->phis()->at(j);
PrintIndent();
trace_.Add("%d ", phi->merged_index());
phi->PrintNameTo(&trace_);
trace_.Add(" ");
phi->PrintTo(&trace_);
trace_.Add("\n");
}
}
{
Tag HIR_tag(this, "HIR");
for (HInstructionIterator it(current); !it.Done(); it.Advance()) {
HInstruction* instruction = it.Current();
int uses = instruction->UseCount();
PrintIndent();
trace_.Add("0 %d ", uses);
instruction->PrintNameTo(&trace_);
trace_.Add(" ");
instruction->PrintTo(&trace_);
if (FLAG_hydrogen_track_positions &&
instruction->has_position() &&
instruction->position().raw() != 0) {
const HSourcePosition pos = instruction->position();
trace_.Add(" pos:");
if (pos.inlining_id() != 0) {
trace_.Add("%d_", pos.inlining_id());
}
trace_.Add("%d", pos.position());
}
trace_.Add(" <|@\n");
}
}
if (chunk != NULL) {
Tag LIR_tag(this, "LIR");
int first_index = current->first_instruction_index();
int last_index = current->last_instruction_index();
if (first_index != -1 && last_index != -1) {
const ZoneList<LInstruction*>* instructions = chunk->instructions();
for (int i = first_index; i <= last_index; ++i) {
LInstruction* linstr = instructions->at(i);
if (linstr != NULL) {
PrintIndent();
trace_.Add("%d ",
LifetimePosition::FromInstructionIndex(i).Value());
linstr->PrintTo(&trace_);
trace_.Add(" [hir:");
linstr->hydrogen_value()->PrintNameTo(&trace_);
trace_.Add("]");
trace_.Add(" <|@\n");
}
}
}
}
}
}
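
// Emit an "intervals" section describing the register allocator's live
// ranges: fixed double registers, fixed general registers, then the ranges
// of virtual registers.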
void HTracer::TraceLiveRanges(const char* name, LAllocator* allocator) {
Tag tag(this, "intervals");
PrintStringProperty("name", name);
const Vector<LiveRange*>* fixed_d = allocator->fixed_double_live_ranges();
for (int i = 0; i < fixed_d->length(); ++i) {
TraceLiveRange(fixed_d->at(i), "fixed", allocator->zone());
}
const Vector<LiveRange*>* fixed = allocator->fixed_live_ranges();
for (int i = 0; i < fixed->length(); ++i) {
TraceLiveRange(fixed->at(i), "fixed", allocator->zone());
}
const ZoneList<LiveRange*>* live_ranges = allocator->live_ranges();
for (int i = 0; i < live_ranges->length(); ++i) {
TraceLiveRange(live_ranges->at(i), "object", allocator->zone());
}
}
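
// Print a single live range: its id and kind, the assigned register or
// spill slot, parent and hint ids, the covered use intervals, and the use
// positions where having a register is beneficial.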
void HTracer::TraceLiveRange(LiveRange* range, const char* type,
Zone* zone) {
if (range != NULL && !range->IsEmpty()) {
PrintIndent();
trace_.Add("%d %s", range->id(), type);
if (range->HasRegisterAssigned()) {
LOperand* op = range->CreateAssignedOperand(zone);
int assigned_reg = op->index();
if (op->IsDoubleRegister()) {
trace_.Add(" \"%s\"",
DoubleRegister::AllocationIndexToString(assigned_reg));
} else {
ASSERT(op->IsRegister());
trace_.Add(" \"%s\"", Register::AllocationIndexToString(assigned_reg));
}
} else if (range->IsSpilled()) {
LOperand* op = range->TopLevel()->GetSpillOperand();
if (op->IsDoubleStackSlot()) {
trace_.Add(" \"double_stack:%d\"", op->index());
} else {
ASSERT(op->IsStackSlot());
trace_.Add(" \"stack:%d\"", op->index());
}
}
int parent_index = -1;
if (range->IsChild()) {
parent_index = range->parent()->id();
} else {
parent_index = range->id();
}
LOperand* op = range->FirstHint();
int hint_index = -1;
if (op != NULL && op->IsUnallocated()) {
hint_index = LUnallocated::cast(op)->virtual_register();
}
trace_.Add(" %d %d", parent_index, hint_index);
UseInterval* cur_interval = range->first_interval();
while (cur_interval != NULL && range->Covers(cur_interval->start())) {
trace_.Add(" [%d, %d[",
cur_interval->start().Value(),
cur_interval->end().Value());
cur_interval = cur_interval->next();
}
UsePosition* current_pos = range->first_pos();
while (current_pos != NULL) {
if (current_pos->RegisterIsBeneficial() || FLAG_trace_all_uses) {
trace_.Add(" %d M", current_pos->pos().Value());
}
current_pos = current_pos->next();
}
trace_.Add(" \"\"\n");
}
}
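
// Append the accumulated trace to the trace file and reset the buffer.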
void HTracer::FlushToFile() {
AppendChars(filename_.start(), trace_.ToCString().get(), trace_.length(),
false);
trace_.Reset();
}
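
// Accumulate the source size of the function being compiled.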
void HStatistics::Initialize(CompilationInfo* info) {
if (info->shared_info().is_null()) return;
source_size_ += info->shared_info()->SourceSize();
}
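
// Print a per-phase timing and code-size table, a summary of the three
// top-level phases, a comparison against full code generation, and an
// average per kB of source.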
void HStatistics::Print() {
PrintF("Timing results:\n");
TimeDelta sum;
for (int i = 0; i < times_.length(); ++i) {
sum += times_[i];
}
for (int i = 0; i < names_.length(); ++i) {
PrintF("%32s", names_[i]);
double ms = times_[i].InMillisecondsF();
double percent = times_[i].PercentOf(sum);
PrintF(" %8.3f ms / %4.1f %% ", ms, percent);
unsigned size = sizes_[i];
double size_percent = static_cast<double>(size) * 100 / total_size_;
PrintF(" %9u bytes / %4.1f %%\n", size, size_percent);
}
PrintF("----------------------------------------"
"---------------------------------------\n");
TimeDelta total = create_graph_ + optimize_graph_ + generate_code_;
PrintF("%32s %8.3f ms / %4.1f %% \n",
"Create graph",
create_graph_.InMillisecondsF(),
create_graph_.PercentOf(total));
PrintF("%32s %8.3f ms / %4.1f %% \n",
"Optimize graph",
optimize_graph_.InMillisecondsF(),
optimize_graph_.PercentOf(total));
PrintF("%32s %8.3f ms / %4.1f %% \n",
"Generate and install code",
generate_code_.InMillisecondsF(),
generate_code_.PercentOf(total));
PrintF("----------------------------------------"
"---------------------------------------\n");
PrintF("%32s %8.3f ms (%.1f times slower than full code gen)\n",
"Total",
total.InMillisecondsF(),
total.TimesOf(full_code_gen_));
double source_size_in_kb = static_cast<double>(source_size_) / 1024;
double normalized_time = source_size_in_kb > 0
? total.InMillisecondsF() / source_size_in_kb
: 0;
double normalized_size_in_kb = source_size_in_kb > 0
? total_size_ / 1024 / source_size_in_kb
: 0;
PrintF("%32s %8.3f ms %7.3f kB allocated\n",
"Average per kB source",
normalized_time, normalized_size_in_kb);
}
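
// Record |time| and |size| under |name|, merging with an existing entry of
// the same name. The name is stored by pointer, so callers presumably have
// to pass string literals (or otherwise keep the storage alive).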
void HStatistics::SaveTiming(const char* name, TimeDelta time, unsigned size) {
total_size_ += size;
for (int i = 0; i < names_.length(); ++i) {
if (strcmp(names_[i], name) == 0) {
times_[i] += time;
sizes_[i] += size;
return;
}
}
names_.Add(name);
times_.Add(time);
sizes_.Add(size);
}
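
// On phase exit, trace the resulting graph if tracing is enabled and, in
// debug builds, run a lightweight verification of the graph.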
HPhase::~HPhase() {
if (ShouldProduceTraceOutput()) {
isolate()->GetHTracer()->TraceHydrogen(name(), graph_);
}
#ifdef DEBUG
graph_->Verify(false); // No full verify.
#endif
}
} } // namespace v8::internal