body | body_hash | docstring | path | name | repository_name | lang | body_without_docstring |
---|---|---|---|---|---|---|---|
def _make_fake_dataset_fn():
'Returns a dataset that emulates a remote storage data source.\n\n Returns a dataset factory which creates a dataset with 100 elements that\n emulates the performance characteristic of a file-based dataset stored in a\n remote storage. In particular, the first element will take an order of\n magnitude longer to produce than the remaining elements (1s vs. 1ms).\n '
def fake_dataset_fn(unused):
del unused
def make_dataset(time_us, num_elements):
return dataset_ops.Dataset.range(num_elements).apply(sleep.sleep(time_us))
return make_dataset((1000 * 1000), 0).concatenate(make_dataset(1000, 100)).take(100)
return fake_dataset_fn | -4,219,913,051,384,786,400 | Returns a dataset that emulates a remote storage data source.
Returns a dataset factory which creates a dataset with 100 elements that
emulates the performance characteristic of a file-based dataset stored in a
remote storage. In particular, the first element will take an order of
magnitude longer to produce than the remaining elements (1s vs. 1ms). | tensorflow/python/data/experimental/benchmarks/parallel_interleave_benchmark.py | _make_fake_dataset_fn | 1244783394/tensorflow | python | def _make_fake_dataset_fn():
'Returns a dataset that emulates a remote storage data source.\n\n Returns a dataset factory which creates a dataset with 100 elements that\n emulates the performance characteristic of a file-based dataset stored in a\n remote storage. In particular, the first element will take an order of\n magnitude longer to produce than the remaining elements (1s vs. 1ms).\n '
def fake_dataset_fn(unused):
del unused
def make_dataset(time_us, num_elements):
return dataset_ops.Dataset.range(num_elements).apply(sleep.sleep(time_us))
return make_dataset((1000 * 1000), 0).concatenate(make_dataset(1000, 100)).take(100)
return fake_dataset_fn |
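The `sleep.sleep` transformation used in this row is an internal tf.data test op, so the snippet is not runnable outside the TensorFlow source tree. Below is a minimal sketch of the same latency profile using only public APIs; `tf.py_function` plus `time.sleep` is an assumed stand-in, not what the benchmark itself uses.

```python
# Sketch (assumption): emulate a remote-storage dataset whose first element
# is ~1000x slower than the rest, using public tf.data APIs only.
import time
import tensorflow as tf

def make_fake_dataset():
    def _sleep(idx):
        # First element emulates a ~1s remote open(); the rest take ~1ms.
        time.sleep(1.0 if int(idx) == 0 else 0.001)
        return idx
    return tf.data.Dataset.range(100).map(
        lambda i: tf.py_function(_sleep, [i], tf.int64))

for x in make_fake_dataset().take(3):
    print(x.numpy())  # 0 arrives late; 1 and 2 follow quickly
```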
def benchmark_parallel_interleave_v1(self):
'Benchmark for parallel interleave that does not support autotuning.'
def dataset_fn():
return dataset_ops.Dataset.range(1).repeat().apply(interleave_ops.parallel_interleave(_make_fake_dataset_fn(), cycle_length=10))
self._benchmark(dataset_fn=dataset_fn, iters=100, num_elements=1000) | 6,597,294,758,033,310,000 | Benchmark for parallel interleave that does not support autotuning. | tensorflow/python/data/experimental/benchmarks/parallel_interleave_benchmark.py | benchmark_parallel_interleave_v1 | 1244783394/tensorflow | python | def benchmark_parallel_interleave_v1(self):
def dataset_fn():
return dataset_ops.Dataset.range(1).repeat().apply(interleave_ops.parallel_interleave(_make_fake_dataset_fn(), cycle_length=10))
self._benchmark(dataset_fn=dataset_fn, iters=100, num_elements=1000) |
def benchmark_parallel_interleave_v2(self):
'Benchmark for parallel interleave that supports autotuning.'
def dataset_fn():
return dataset_ops.Dataset.range(1).repeat().interleave(_make_fake_dataset_fn(), cycle_length=10, num_parallel_calls=dataset_ops.AUTOTUNE)
self._benchmark(dataset_fn=dataset_fn, iters=100, num_elements=1000) | -5,564,878,944,003,852,000 | Benchmark for parallel interleave that supports autotuning. | tensorflow/python/data/experimental/benchmarks/parallel_interleave_benchmark.py | benchmark_parallel_interleave_v2 | 1244783394/tensorflow | python | def benchmark_parallel_interleave_v2(self):
def dataset_fn():
return dataset_ops.Dataset.range(1).repeat().interleave(_make_fake_dataset_fn(), cycle_length=10, num_parallel_calls=dataset_ops.AUTOTUNE)
self._benchmark(dataset_fn=dataset_fn, iters=100, num_elements=1000) |
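Both benchmark rows reduce to the same public-API pattern; a side-by-side sketch, assuming TF 2.x names and the hypothetical `make_fake_dataset` helper from the previous sketch:

```python
import tensorflow as tf

source = tf.data.Dataset.range(1).repeat()

# v1 style: the deprecated experimental transform with fixed parallelism.
v1 = source.apply(tf.data.experimental.parallel_interleave(
    lambda _: make_fake_dataset(), cycle_length=10))

# v2 style: core Dataset.interleave with autotuned parallelism.
v2 = source.interleave(lambda _: make_fake_dataset(),
                       cycle_length=10,
                       num_parallel_calls=tf.data.AUTOTUNE)
```

The v2 form lets the runtime tune how many inner datasets are fetched in parallel, which is exactly the autotuning difference these two benchmarks measure.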
def MA(ma, close):
'Compute Moving Average\n\n Args:\n ma (float): MA value\n close (float): Close value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if (ma < close):
return Recommendation.buy
elif (ma > close):
return Recommendation.sell
else:
return Recommendation.neutral | -8,902,239,209,966,890,000 | Compute Moving Average
Args:
ma (float): MA value
close (float): Close value
Returns:
string: "BUY", "SELL", or "NEUTRAL" | tradingview_ta/technicals.py | MA | Chizkiyahu/python-tradingview-ta | python | def MA(ma, close):
'Compute Moving Average\n\n Args:\n ma (float): MA value\n close (float): Close value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if (ma < close):
return Recommendation.buy
elif (ma > close):
return Recommendation.sell
else:
return Recommendation.neutral |
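The tradingview_ta rows that follow all return attributes of a `Recommendation` class that is not part of this dump. A minimal stand-in (an assumption, mirroring the strings named in the docstrings) makes the snippets runnable as plain Python:

```python
# Hypothetical stand-in for tradingview_ta's Recommendation container.
class Recommendation:
    buy = "BUY"
    sell = "SELL"
    neutral = "NEUTRAL"
    strong_buy = "STRONG_BUY"
    strong_sell = "STRONG_SELL"
    error = "ERROR"

print(MA(ma=98.5, close=100.0))   # "BUY": close above the moving average
print(MA(ma=101.2, close=100.0))  # "SELL": close below the moving average
```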
def RSI(rsi, rsi1):
'Compute Relative Strength Index\n\n Args:\n rsi (float): RSI value\n rsi1 (float): RSI[1] value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if ((rsi < 30) and (rsi1 > rsi)):
return Recommendation.buy
elif ((rsi > 70) and (rsi1 < rsi)):
return Recommendation.sell
else:
return Recommendation.neutral | -4,906,149,037,489,024,000 | Compute Relative Strength Index
Args:
rsi (float): RSI value
rsi1 (float): RSI[1] value
Returns:
string: "BUY", "SELL", or "NEUTRAL" | tradingview_ta/technicals.py | RSI | Chizkiyahu/python-tradingview-ta | python | def RSI(rsi, rsi1):
'Compute Relative Strength Index\n\n Args:\n rsi (float): RSI value\n rsi1 (float): RSI[1] value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if ((rsi < 30) and (rsi1 > rsi)):
return Recommendation.buy
elif ((rsi > 70) and (rsi1 < rsi)):
return Recommendation.sell
else:
return Recommendation.neutral |
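Reusing the hypothetical `Recommendation` stub above, the RSI rule only fires inside an extreme zone and only with the required relation to the previous bar's value:

```python
print(RSI(rsi=25.0, rsi1=27.0))  # "BUY": rsi < 30 and RSI[1] above RSI
print(RSI(rsi=25.0, rsi1=24.0))  # "NEUTRAL": oversold, but RSI[1] < RSI
print(RSI(rsi=75.0, rsi1=72.0))  # "SELL": rsi > 70 and RSI[1] below RSI
```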
def Stoch(k, d, k1, d1):
'Compute Stochastic\n\n Args:\n k (float): Stoch.K value\n d (float): Stoch.D value\n k1 (float): Stoch.K[1] value\n d1 (float): Stoch.D[1] value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if ((k < 20) and (d < 20) and (k > d) and (k1 < d1)):
return Recommendation.buy
elif ((k > 80) and (d > 80) and (k < d) and (k1 > d1)):
return Recommendation.sell
else:
return Recommendation.neutral | -4,613,719,220,800,345,000 | Compute Stochastic
Args:
k (float): Stoch.K value
d (float): Stoch.D value
k1 (float): Stoch.K[1] value
d1 (float): Stoch.D[1] value
Returns:
string: "BUY", "SELL", or "NEUTRAL" | tradingview_ta/technicals.py | Stoch | Chizkiyahu/python-tradingview-ta | python | def Stoch(k, d, k1, d1):
'Compute Stochastic\n\n Args:\n k (float): Stoch.K value\n d (float): Stoch.D value\n k1 (float): Stoch.K[1] value\n d1 (float): Stoch.D[1] value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if ((k < 20) and (d < 20) and (k > d) and (k1 < d1)):
return Recommendation.buy
elif ((k > 80) and (d > 80) and (k < d) and (k1 > d1)):
return Recommendation.sell
else:
return Recommendation.neutral |
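Concrete values for the Stochastic rule: both lines must sit in the oversold (<20) or overbought (>80) zone, with a fresh %K/%D crossover between the previous and current bar:

```python
print(Stoch(k=15.0, d=12.0, k1=10.0, d1=11.0))  # "BUY": oversold bullish cross
print(Stoch(k=85.0, d=88.0, k1=90.0, d1=89.0))  # "SELL": overbought bearish cross
print(Stoch(k=50.0, d=48.0, k1=45.0, d1=46.0))  # "NEUTRAL": mid-range, no signal
```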
def CCI20(cci20, cci201):
'Compute Commodity Channel Index 20\n\n Args:\n cci20 (float): CCI20 value\n cci201 (float): CCI20[1] value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if ((cci20 < (- 100)) and (cci20 > cci201)):
return Recommendation.buy
elif ((cci20 > 100) and (cci20 < cci201)):
return Recommendation.sell
else:
return Recommendation.neutral | 7,493,960,804,903,274,000 | Compute Commodity Channel Index 20
Args:
cci20 (float): CCI20 value
cci201 (float): CCI20[1] value
Returns:
string: "BUY", "SELL", or "NEUTRAL" | tradingview_ta/technicals.py | CCI20 | Chizkiyahu/python-tradingview-ta | python | def CCI20(cci20, cci201):
'Compute Commodity Channel Index 20\n\n Args:\n cci20 (float): CCI20 value\n cci201 (float): CCI20[1] value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if ((cci20 < (- 100)) and (cci20 > cci201)):
return Recommendation.buy
elif ((cci20 > 100) and (cci20 < cci201)):
return Recommendation.sell
else:
return Recommendation.neutral |
def ADX(adx, adxpdi, adxndi, adxpdi1, adxndi1):
'Compute Average Directional Index\n\n Args:\n adx (float): ADX value\n adxpdi (float): ADX+DI value\n adxndi (float): ADX-DI value\n adxpdi1 (float): ADX+DI[1] value\n adxndi1 (float): ADX-DI[1] value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if ((adx > 20) and (adxpdi1 < adxndi1) and (adxpdi > adxndi)):
return Recommendation.buy
elif ((adx > 20) and (adxpdi1 > adxndi1) and (adxpdi < adxndi)):
return Recommendation.sell
else:
return Recommendation.neutral | 8,872,061,564,006,650,000 | Compute Average Directional Index
Args:
adx (float): ADX value
adxpdi (float): ADX+DI value
adxndi (float): ADX-DI value
adxpdi1 (float): ADX+DI[1] value
adxndi1 (float): ADX-DI[1] value
Returns:
string: "BUY", "SELL", or "NEUTRAL" | tradingview_ta/technicals.py | ADX | Chizkiyahu/python-tradingview-ta | python | def ADX(adx, adxpdi, adxndi, adxpdi1, adxndi1):
'Compute Average Directional Index\n\n Args:\n adx (float): ADX value\n adxpdi (float): ADX+DI value\n adxndi (float): ADX-DI value\n adxpdi1 (float): ADX+DI[1] value\n adxndi1 (float): ADX-DI[1] value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if ((adx > 20) and (adxpdi1 < adxndi1) and (adxpdi > adxndi)):
return Recommendation.buy
elif ((adx > 20) and (adxpdi1 > adxndi1) and (adxpdi < adxndi)):
return Recommendation.sell
else:
return Recommendation.neutral |
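For ADX, the indicator itself only gates on trend strength (adx > 20); the direction comes from a fresh +DI/-DI crossover between the previous and current bar:

```python
print(ADX(adx=25.0, adxpdi=22.0, adxndi=18.0, adxpdi1=17.0, adxndi1=19.0))  # "BUY"
print(ADX(adx=25.0, adxpdi=18.0, adxndi=22.0, adxpdi1=19.0, adxndi1=17.0))  # "SELL"
print(ADX(adx=15.0, adxpdi=22.0, adxndi=18.0, adxpdi1=17.0, adxndi1=19.0))  # "NEUTRAL": trend too weak
```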
def AO(ao, ao1):
'Compute Awesome Oscillator\n\n Args:\n ao (float): AO value\n ao1 (float): AO[1] value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if (((ao > 0) and (ao1 < 0)) or ((ao > 0) and (ao1 > 0) and (ao > ao1))):
return Recommendation.buy
elif (((ao < 0) and (ao1 > 0)) or ((ao < 0) and (ao1 < 0) and (ao < ao1))):
return Recommendation.sell
else:
return Recommendation.neutral | -6,713,277,984,516,991,000 | Compute Awesome Oscillator
Args:
ao (float): AO value
ao1 (float): AO[1] value
Returns:
string: "BUY", "SELL", or "NEUTRAL" | tradingview_ta/technicals.py | AO | Chizkiyahu/python-tradingview-ta | python | def AO(ao, ao1):
'Compute Awesome Oscillator\n\n Args:\n ao (float): AO value\n ao1 (float): AO[1] value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if (((ao > 0) and (ao1 < 0)) or ((ao > 0) and (ao1 > 0) and (ao > ao1))):
return Recommendation.buy
elif (((ao < 0) and (ao1 > 0)) or ((ao < 0) and (ao1 < 0) and (ao < ao1))):
return Recommendation.sell
else:
return Recommendation.neutral |
def Mom(mom, mom1):
'Compute Momentum\n\n Args:\n mom (float): Mom value\n mom1 (float): Mom[1] value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if (mom < mom1):
return Recommendation.sell
elif (mom > mom1):
return Recommendation.buy
else:
return Recommendation.neutral | -8,346,860,407,921,247,000 | Compute Momentum
Args:
mom (float): Mom value
mom1 (float): Mom[1] value
Returns:
string: "BUY", "SELL", or "NEUTRAL" | tradingview_ta/technicals.py | Mom | Chizkiyahu/python-tradingview-ta | python | def Mom(mom, mom1):
'Compute Momentum\n\n Args:\n mom (float): Mom value\n mom1 (float): Mom[1] value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if (mom < mom1):
return Recommendation.sell
elif (mom > mom1):
return Recommendation.buy
else:
return Recommendation.neutral |
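Under TradingView's published rating rules, rising momentum (mom above mom1, the previous bar's value) reads as BUY; a quick check of the rule:

```python
print(Mom(mom=1.5, mom1=1.2))  # "BUY": momentum above the previous bar
print(Mom(mom=0.8, mom1=1.2))  # "SELL": momentum below the previous bar
print(Mom(mom=1.0, mom1=1.0))  # "NEUTRAL": unchanged
```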
def MACD(macd, signal):
'Compute Moving Average Convergence/Divergence\n\n Args:\n macd (float): MACD.macd value\n signal (float): MACD.signal value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if (macd > signal):
return Recommendation.buy
elif (macd < signal):
return Recommendation.sell
else:
return Recommendation.neutral | -7,220,129,794,616,158,000 | Compute Moving Average Convergence/Divergence
Args:
macd (float): MACD.macd value
signal (float): MACD.signal value
Returns:
string: "BUY", "SELL", or "NEUTRAL" | tradingview_ta/technicals.py | MACD | Chizkiyahu/python-tradingview-ta | python | def MACD(macd, signal):
'Compute Moving Average Convergence/Divergence\n\n Args:\n macd (float): MACD.macd value\n signal (float): MACD.signal value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if (macd > signal):
return Recommendation.buy
elif (macd < signal):
return Recommendation.sell
else:
return Recommendation.neutral |
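MACD has no zone gating at all; the recommendation is a plain comparison of the MACD line against its signal line:

```python
print(MACD(macd=0.5, signal=0.2))    # "BUY": MACD line above signal line
print(MACD(macd=-0.4, signal=-0.1))  # "SELL": MACD line below signal line
print(MACD(macd=0.3, signal=0.3))    # "NEUTRAL": lines equal
```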
def BBBuy(close, bblower):
'Compute Bollinger Bands Buy\n\n Args:\n close (float): close value\n bblower (float): BB.lower value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if (close < bblower):
return Recommendation.buy
else:
return Recommendation.neutral | 3,170,778,078,137,031,000 | Compute Bollinger Bands Buy
Args:
close (float): close value
bblower (float): BB.lower value
Returns:
string: "BUY", "SELL", or "NEUTRAL" | tradingview_ta/technicals.py | BBBuy | Chizkiyahu/python-tradingview-ta | python | def BBBuy(close, bblower):
'Compute Bollinger Bands Buy\n\n Args:\n close (float): close value\n bblower (float): BB.lower value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if (close < bblower):
return Recommendation.buy
else:
return Recommendation.neutral |
def BBSell(close, bbupper):
'Compute Bollinger Bands Sell\n\n Args:\n close (float): close value\n bbupper (float): BB.upper value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if (close > bbupper):
return Recommendation.sell
else:
return Recommendation.neutral | -1,134,133,646,265,981,600 | Compute Bollinger Bands Sell
Args:
close (float): close value
bbupper (float): BB.upper value
Returns:
string: "BUY", "SELL", or "NEUTRAL" | tradingview_ta/technicals.py | BBSell | Chizkiyahu/python-tradingview-ta | python | def BBSell(close, bbupper):
'Compute Bollinger Bands Sell\n\n Args:\n close (float): close value\n bbupper (float): BB.upper value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if (close > bbupper):
return Recommendation.sell
else:
return Recommendation.neutral |
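The two band rules are one-sided: BBBuy can only return BUY or NEUTRAL, and BBSell only SELL or NEUTRAL, so they are meant to be evaluated as a pair:

```python
print(BBBuy(close=95.0, bblower=97.0))     # "BUY": close under BB.lower
print(BBBuy(close=100.0, bblower=97.0))    # "NEUTRAL"
print(BBSell(close=108.0, bbupper=105.0))  # "SELL": close over BB.upper
print(BBSell(close=100.0, bbupper=105.0))  # "NEUTRAL"
```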
def PSAR(psar, open):
'Compute Parabolic Stop-And-Reverse\n\n Args:\n psar (float): P.SAR value\n open (float): open value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if (psar < open):
return Recommendation.buy
elif (psar > open):
return Recommendation.sell
else:
return Recommendation.neutral | 2,251,847,869,837,323,800 | Compute Parabolic Stop-And-Reverse
Args:
psar (float): P.SAR value
open (float): open value
Returns:
string: "BUY", "SELL", or "NEUTRAL" | tradingview_ta/technicals.py | PSAR | Chizkiyahu/python-tradingview-ta | python | def PSAR(psar, open):
'Compute Parabolic Stop-And-Reverse\n\n Args:\n psar (float): P.SAR value\n open (float): open value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if (psar < open):
return Recommendation.buy
elif (psar > open):
return Recommendation.sell
else:
return Recommendation.neutral |
def Recommend(value):
'Compute Recommend\n\n Args:\n value (float): recommend value\n\n Returns:\n string: "STRONG_BUY", "BUY", "NEUTRAL", "SELL", "STRONG_SELL", or "ERROR"\n '
if ((value >= (- 1)) and (value < (- 0.5))):
return Recommendation.strong_sell
elif ((value >= (- 0.5)) and (value < 0)):
return Recommendation.sell
elif (value == 0):
return Recommendation.neutral
elif ((value > 0) and (value <= 0.5)):
return Recommendation.buy
elif ((value > 0.5) and (value <= 1)):
return Recommendation.strong_buy
else:
return Recommendation.error | -4,807,892,968,998,064,000 | Compute Recommend
Args:
value (float): recommend value
Returns:
string: "STRONG_BUY", "BUY", "NEUTRAL", "SELL", "STRONG_SELL", or "ERROR" | tradingview_ta/technicals.py | Recommend | Chizkiyahu/python-tradingview-ta | python | def Recommend(value):
'Compute Recommend\n\n Args:\n value (float): recommend value\n\n Returns:\n string: "STRONG_BUY", "BUY", "NEUTRAL", "SELL", "STRONG_SELL", or "ERROR"\n '
if ((value >= (- 1)) and (value < (- 0.5))):
return Recommendation.strong_sell
elif ((value >= (- 0.5)) and (value < 0)):
return Recommendation.sell
elif (value == 0):
return Recommendation.neutral
elif ((value > 0) and (value <= 0.5)):
return Recommendation.buy
elif ((value > 0.5) and (value <= 1)):
return Recommendation.strong_buy
else:
return Recommendation.error |
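The aggregate score maps [-1, 1] onto five bands, with anything outside that range falling through to ERROR:

```python
for v in (-0.8, -0.2, 0.0, 0.3, 0.9, 2.0):
    print(v, Recommend(v))
# -0.8 STRONG_SELL, -0.2 SELL, 0.0 NEUTRAL, 0.3 BUY, 0.9 STRONG_BUY, 2.0 ERROR
```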
def Simple(value):
'Compute Simple\n\n Args:\n value (float): Rec.X value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if (value == (- 1)):
return Recommendation.sell
elif (value == 1):
return Recommendation.buy
else:
return Recommendation.neutral | -4,654,071,787,963,070,000 | Compute Simple
Args:
value (float): Rec.X value
Returns:
string: "BUY", "SELL", or "NEUTRAL" | tradingview_ta/technicals.py | Simple | Chizkiyahu/python-tradingview-ta | python | def Simple(value):
'Compute Simple\n\n Args:\n value (float): Rec.X value\n\n Returns:\n string: "BUY", "SELL", or "NEUTRAL"\n '
if (value == (- 1)):
return Recommendation.sell
elif (value == 1):
return Recommendation.buy
else:
return Recommendation.neutral |
def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
"\n Applies matrix multiplication to two tensors. `matmul` follows\n the complete broadcast rules,\n and its behavior is consistent with `np.matmul`.\n\n Currently, the input tensors' number of dimensions can be any, `matmul` can be used to\n achieve the `dot`, `matmul` and `batchmatmul`.\n\n The actual behavior depends on the shapes of :math:`x`, :math:`y` and the\n flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically:\n\n - If a transpose flag is specified, the last two dimensions of the tensor\n are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor\n is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas\n for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`.\n\n The multiplication behavior depends on the dimensions of `x` and `y`. Specifically:\n\n - If both tensors are 1-dimensional, the dot product result is obtained.\n\n - If both tensors are 2-dimensional, the matrix-matrix product is obtained.\n\n - If the `x` is 1-dimensional and the `y` is 2-dimensional,\n a `1` is prepended to its dimension in order to conduct the matrix multiply.\n After the matrix multiply, the prepended dimension is removed.\n\n - If the `x` is 2-dimensional and `y` is 1-dimensional,\n the matrix-vector product is obtained.\n\n - If both arguments are at least 1-dimensional and at least one argument\n is N-dimensional (where N > 2), then a batched matrix multiply is obtained.\n If the first argument is 1-dimensional, a 1 is prepended to its dimension\n in order to conduct the batched matrix multiply and removed after.\n If the second argument is 1-dimensional, a 1 is appended to its\n dimension for the purpose of the batched matrix multiple and removed after.\n The non-matrix (exclude the last two dimensions) dimensions are\n broadcasted according the broadcast rule.\n For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor,\n out will be a (j, k, n, p) tensor.\n\n Args:\n x (Tensor): The input tensor which is a Tensor.\n y (Tensor): The input tensor which is a Tensor.\n transpose_x (bool): Whether to transpose :math:`x` before multiplication.\n transpose_y (bool): Whether to transpose :math:`y` before multiplication.\n name(str|None): A name for this layer(optional). If set None, the layer\n will be named automatically.\n\n Returns:\n Tensor: The output Tensor.\n\n Examples:\n\n .. 
code-block:: python\n\n import paddle\n import numpy as np\n\n # vector * vector\n x_data = np.random.random([10]).astype(np.float32)\n y_data = np.random.random([10]).astype(np.float32)\n x = paddle.to_tensor(x_data)\n y = paddle.to_tensor(y_data)\n z = paddle.matmul(x, y)\n print(z.numpy().shape)\n # [1]\n\n # matrix * vector\n x_data = np.random.random([10, 5]).astype(np.float32)\n y_data = np.random.random([5]).astype(np.float32)\n x = paddle.to_tensor(x_data)\n y = paddle.to_tensor(y_data)\n z = paddle.matmul(x, y)\n print(z.numpy().shape)\n # [10]\n\n # batched matrix * broadcasted vector\n x_data = np.random.random([10, 5, 2]).astype(np.float32)\n y_data = np.random.random([2]).astype(np.float32)\n x = paddle.to_tensor(x_data)\n y = paddle.to_tensor(y_data)\n z = paddle.matmul(x, y)\n print(z.numpy().shape)\n # [10, 5]\n\n # batched matrix * batched matrix\n x_data = np.random.random([10, 5, 2]).astype(np.float32)\n y_data = np.random.random([10, 2, 5]).astype(np.float32)\n x = paddle.to_tensor(x_data)\n y = paddle.to_tensor(y_data)\n z = paddle.matmul(x, y)\n print(z.numpy().shape)\n # [10, 5, 5]\n\n # batched matrix * broadcasted matrix\n x_data = np.random.random([10, 1, 5, 2]).astype(np.float32)\n y_data = np.random.random([1, 3, 2, 5]).astype(np.float32)\n x = paddle.to_tensor(x_data)\n y = paddle.to_tensor(y_data)\n z = paddle.matmul(x, y)\n print(z.numpy().shape)\n # [10, 3, 5, 5]\n\n "
if in_dygraph_mode():
return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y)
if _in_legacy_dygraph():
op_type = 'matmul_v2'
op = getattr(_C_ops, op_type)
return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y)
attrs = {'trans_x': transpose_x, 'trans_y': transpose_y}
def __check_input(x, y):
var_names = {'x': x, 'y': y}
for (name, val) in var_names.items():
check_variable_and_dtype(val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul')
__check_input(x, y)
helper = LayerHelper('matmul_v2', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs)
return out | -2,043,395,450,413,527,600 | Applies matrix multiplication to two tensors. `matmul` follows
the complete broadcast rules,
and its behavior is consistent with `np.matmul`.
Currently, the input tensors' number of dimensions can be any, `matmul` can be used to
achieve the `dot`, `matmul` and `batchmatmul`.
The actual behavior depends on the shapes of :math:`x`, :math:`y` and the
flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically:
- If a transpose flag is specified, the last two dimensions of the tensor
are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor
is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas
for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`.
The multiplication behavior depends on the dimensions of `x` and `y`. Specifically:
- If both tensors are 1-dimensional, the dot product result is obtained.
- If both tensors are 2-dimensional, the matrix-matrix product is obtained.
- If the `x` is 1-dimensional and the `y` is 2-dimensional,
a `1` is prepended to its dimension in order to conduct the matrix multiply.
After the matrix multiply, the prepended dimension is removed.
- If the `x` is 2-dimensional and `y` is 1-dimensional,
the matrix-vector product is obtained.
- If both arguments are at least 1-dimensional and at least one argument
is N-dimensional (where N > 2), then a batched matrix multiply is obtained.
If the first argument is 1-dimensional, a 1 is prepended to its dimension
in order to conduct the batched matrix multiply and removed after.
If the second argument is 1-dimensional, a 1 is appended to its
dimension for the purpose of the batched matrix multiply and removed after.
The non-matrix (exclude the last two dimensions) dimensions are
broadcasted according to the broadcast rule.
For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor,
out will be a (j, k, n, p) tensor.
Args:
x (Tensor): The input tensor which is a Tensor.
y (Tensor): The input tensor which is a Tensor.
transpose_x (bool): Whether to transpose :math:`x` before multiplication.
transpose_y (bool): Whether to transpose :math:`y` before multiplication.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
Returns:
Tensor: The output Tensor.
Examples:
.. code-block:: python
import paddle
import numpy as np
# vector * vector
x_data = np.random.random([10]).astype(np.float32)
y_data = np.random.random([10]).astype(np.float32)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
z = paddle.matmul(x, y)
print(z.numpy().shape)
# [1]
# matrix * vector
x_data = np.random.random([10, 5]).astype(np.float32)
y_data = np.random.random([5]).astype(np.float32)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
z = paddle.matmul(x, y)
print(z.numpy().shape)
# [10]
# batched matrix * broadcasted vector
x_data = np.random.random([10, 5, 2]).astype(np.float32)
y_data = np.random.random([2]).astype(np.float32)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
z = paddle.matmul(x, y)
print(z.numpy().shape)
# [10, 5]
# batched matrix * batched matrix
x_data = np.random.random([10, 5, 2]).astype(np.float32)
y_data = np.random.random([10, 2, 5]).astype(np.float32)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
z = paddle.matmul(x, y)
print(z.numpy().shape)
# [10, 5, 5]
# batched matrix * broadcasted matrix
x_data = np.random.random([10, 1, 5, 2]).astype(np.float32)
y_data = np.random.random([1, 3, 2, 5]).astype(np.float32)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
z = paddle.matmul(x, y)
print(z.numpy().shape)
# [10, 3, 5, 5] | python/paddle/tensor/linalg.py | matmul | DevilCarp/Paddle | python | def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
"\n Applies matrix multiplication to two tensors. `matmul` follows\n the complete broadcast rules,\n and its behavior is consistent with `np.matmul`.\n\n Currently, the input tensors' number of dimensions can be any, `matmul` can be used to\n achieve the `dot`, `matmul` and `batchmatmul`.\n\n The actual behavior depends on the shapes of :math:`x`, :math:`y` and the\n flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically:\n\n - If a transpose flag is specified, the last two dimensions of the tensor\n are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor\n is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas\n for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`.\n\n The multiplication behavior depends on the dimensions of `x` and `y`. Specifically:\n\n - If both tensors are 1-dimensional, the dot product result is obtained.\n\n - If both tensors are 2-dimensional, the matrix-matrix product is obtained.\n\n - If the `x` is 1-dimensional and the `y` is 2-dimensional,\n a `1` is prepended to its dimension in order to conduct the matrix multiply.\n After the matrix multiply, the prepended dimension is removed.\n\n - If the `x` is 2-dimensional and `y` is 1-dimensional,\n the matrix-vector product is obtained.\n\n - If both arguments are at least 1-dimensional and at least one argument\n is N-dimensional (where N > 2), then a batched matrix multiply is obtained.\n If the first argument is 1-dimensional, a 1 is prepended to its dimension\n in order to conduct the batched matrix multiply and removed after.\n If the second argument is 1-dimensional, a 1 is appended to its\n dimension for the purpose of the batched matrix multiple and removed after.\n The non-matrix (exclude the last two dimensions) dimensions are\n broadcasted according the broadcast rule.\n For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor,\n out will be a (j, k, n, p) tensor.\n\n Args:\n x (Tensor): The input tensor which is a Tensor.\n y (Tensor): The input tensor which is a Tensor.\n transpose_x (bool): Whether to transpose :math:`x` before multiplication.\n transpose_y (bool): Whether to transpose :math:`y` before multiplication.\n name(str|None): A name for this layer(optional). If set None, the layer\n will be named automatically.\n\n Returns:\n Tensor: The output Tensor.\n\n Examples:\n\n .. 
code-block:: python\n\n import paddle\n import numpy as np\n\n # vector * vector\n x_data = np.random.random([10]).astype(np.float32)\n y_data = np.random.random([10]).astype(np.float32)\n x = paddle.to_tensor(x_data)\n y = paddle.to_tensor(y_data)\n z = paddle.matmul(x, y)\n print(z.numpy().shape)\n # [1]\n\n # matrix * vector\n x_data = np.random.random([10, 5]).astype(np.float32)\n y_data = np.random.random([5]).astype(np.float32)\n x = paddle.to_tensor(x_data)\n y = paddle.to_tensor(y_data)\n z = paddle.matmul(x, y)\n print(z.numpy().shape)\n # [10]\n\n # batched matrix * broadcasted vector\n x_data = np.random.random([10, 5, 2]).astype(np.float32)\n y_data = np.random.random([2]).astype(np.float32)\n x = paddle.to_tensor(x_data)\n y = paddle.to_tensor(y_data)\n z = paddle.matmul(x, y)\n print(z.numpy().shape)\n # [10, 5]\n\n # batched matrix * batched matrix\n x_data = np.random.random([10, 5, 2]).astype(np.float32)\n y_data = np.random.random([10, 2, 5]).astype(np.float32)\n x = paddle.to_tensor(x_data)\n y = paddle.to_tensor(y_data)\n z = paddle.matmul(x, y)\n print(z.numpy().shape)\n # [10, 5, 5]\n\n # batched matrix * broadcasted matrix\n x_data = np.random.random([10, 1, 5, 2]).astype(np.float32)\n y_data = np.random.random([1, 3, 2, 5]).astype(np.float32)\n x = paddle.to_tensor(x_data)\n y = paddle.to_tensor(y_data)\n z = paddle.matmul(x, y)\n print(z.numpy().shape)\n # [10, 3, 5, 5]\n\n "
if in_dygraph_mode():
return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y)
if _in_legacy_dygraph():
op_type = 'matmul_v2'
op = getattr(_C_ops, op_type)
return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y)
attrs = {'trans_x': transpose_x, 'trans_y': transpose_y}
def __check_input(x, y):
var_names = {'x': x, 'y': y}
for (name, val) in var_names.items():
check_variable_and_dtype(val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul')
__check_input(x, y)
helper = LayerHelper('matmul_v2', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs)
return out |
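The docstring describes the transpose flags but its examples never exercise them; a short sketch in the same style as the examples above, checking shapes only:

```python
import numpy as np
import paddle

x = paddle.to_tensor(np.random.random([3, 4]).astype(np.float32))
y = paddle.to_tensor(np.random.random([5, 4]).astype(np.float32))
# transpose_y=True multiplies by y's transpose: (3, 4) x (4, 5) -> (3, 5)
z = paddle.matmul(x, y, transpose_y=True)
print(z.shape)  # [3, 5]
```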
def norm(x, p='fro', axis=None, keepdim=False, name=None):
"\n\n Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean\n or 2-norm, and in general the p-norm for p > 0) of a given tensor.\n\n .. note::\n This norm API is different from `numpy.linalg.norm`.\n This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm.\n But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor.\n For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM.\n\n Args:\n x (Tensor): The input tensor could be N-D tensor, and the input data\n type could be float32 or float64.\n p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`,\n `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm.\n Default value is `fro`.\n axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int\n or list(int)/tuple(int) with only one element, the vector norm is computed over the axis.\n If `axis < 0`, the dimension to norm operation is rank(input) + axis.\n If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis.\n Defalut value is `None`.\n keepdim (bool, optional): Whether to reserve the reduced dimension in the\n output Tensor. The result tensor will have fewer dimension\n than the :attr:`input` unless :attr:`keepdim` is true, default\n value is False.\n name (str, optional): The default value is None. Normally there is no need for\n user to set this property. For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: results of norm operation on the specified axis of input tensor,\n it's data type is the same as input's Tensor.\n\n Examples:\n .. code-block:: python\n\n import paddle\n import numpy as np\n shape=[2, 3, 4]\n np_input = np.arange(24).astype('float32') - 12\n np_input = np_input.reshape(shape)\n x = paddle.to_tensor(np_input)\n #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]]\n # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]]\n\n # compute frobenius norm along last two dimensions.\n out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1])\n # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535]\n\n # compute 2-order vector norm along last dimension.\n out_pnorm = paddle.linalg.norm(x, p=2, axis=-1)\n #out_pnorm.numpy(): [[21.118711 13.190906 5.477226]\n # [ 3.7416575 11.224972 19.131126]]\n\n # compute 2-order norm along [0,1] dimension.\n out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1])\n #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535]\n\n # compute inf-order norm\n out_pnorm = paddle.linalg.norm(x, p=np.inf)\n #out_pnorm.numpy() = [12.]\n out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0)\n #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]]\n\n # compute -inf-order norm\n out_pnorm = paddle.linalg.norm(x, p=-np.inf)\n #out_pnorm.numpy(): [0.]\n out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0)\n #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]]\n "
def frobenius_norm(input, dim=None, keepdim=False, name=None):
'\n The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`.\n Args:\n input (Variable): Tensor, data type float32, float64.\n dim (list, optional): None for last two dimensions.\n keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.\n '
if ((dim is not None) and (not (isinstance(dim, list) and (len(dim) == 2)))):
raise ValueError('The dim of frobenius norm op should be None or two elements list!')
if paddle.in_dynamic_mode():
if (dim is None):
return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True)
return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False)
attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False}
if (dim is None):
attrs['reduce_all'] = True
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm')
helper = LayerHelper('frobenius_norm', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs)
return out
def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None):
'\n Calculate the p-order vector norm for certain dimension of Tensor `input`.\n Args:\n input (Variable): Tensor, data type float32, float64.\n porder (float, optional): None for porder=2.0.\n axis (int, optional): None for last dimension.\n keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.\n '
if paddle.in_dynamic_mode():
if (axis is None):
axis = (- 1)
return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector)
if (porder is not None):
check_type(porder, 'porder', (float, int), 'p_norm')
if (axis is not None):
check_type(axis, 'axis', int, 'p_norm')
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm')
attrs = {'axis': (axis if (axis is not None) else (- 1)), 'porder': (float(porder) if (porder is not None) else 2.0), 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12}
helper = LayerHelper('p_norm', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs)
return out
def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None):
helper = LayerHelper('frobenius_norm', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out})
reduce_out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
reduce_all = (True if ((axis is None) or (axis == []) or (asvector == True)) else False)
axis = (axis if ((axis is not None) and (axis != [])) else [0])
reduce_type = ('reduce_max' if (porder == float('inf')) else 'reduce_min')
helper.append_op(type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
return reduce_out
def p_matrix_norm(input, porder=1.0, axis=axis, keepdim=False, name=None):
'\n NOTE:\n This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm.\n '
block = LayerHelper('norm', **locals())
out = block.create_variable_for_type_inference(dtype=block.input_dtype())
abs_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='abs', inputs={'X': input}, outputs={'Out': abs_out})
pow_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder})
sum_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': (True if (axis is None) else False)})
block.append_op(type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float((1.0 / porder))})
return out
if ((axis is None) and (p is not None)):
if isinstance(p, str):
if (p == 'fro'):
return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name)
else:
raise ValueError("only valid string values are 'fro', found {}".format(p))
elif isinstance(p, (int, float)):
return vector_norm(x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name)
else:
raise ValueError('only valid p type is string or float, found {}'.format(type(p)))
if isinstance(axis, tuple):
axis = list(axis)
if (isinstance(axis, list) and (len(axis) == 1)):
axis = axis[0]
if isinstance(axis, int):
if isinstance(p, str):
if (p == 'fro'):
return vector_norm(x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name)
else:
raise ValueError("only valid string values are 'fro', found {}".format(p))
elif isinstance(p, (int, float)):
return vector_norm(x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name)
else:
raise ValueError('unsupported p for p-order vector norm, expect float, found {}'.format(p))
elif (isinstance(axis, list) and (len(axis) == 2)):
if (p == 'fro'):
return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name)
elif ((p == np.inf) or (p == (- np.inf))):
return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name)
elif (p == 0):
raise ValueError('just support axis type int or list (length of list <=1) if p = 0, found {}'.format(axis))
else:
return p_matrix_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name)
else:
raise ValueError('expect axis type int or list (length of list <=2), found {}'.format(axis)) | 2,276,984,511,793,815,300 | Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean
or 2-norm, and in general the p-norm for p > 0) of a given tensor.
.. note::
This norm API is different from `numpy.linalg.norm`.
This API supports high-order input tensors (rank >= 3), and the axis along which the norm is computed must be specified.
But `numpy.linalg.norm` only supports a 1-D vector or 2-D matrix as the input tensor.
For a p-order matrix norm, this API actually treats the matrix as a flattened vector to calculate the vector norm, NOT A REAL MATRIX NORM.
Args:
x (Tensor): The input tensor could be N-D tensor, and the input data
type could be float32 or float64.
p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`,
`inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm.
Default value is `fro`.
axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int
or list(int)/tuple(int) with only one element, the vector norm is computed over the axis.
If `axis < 0`, the dimension to norm operation is rank(input) + axis.
If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis.
Default value is `None`.
keepdim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have fewer dimension
than the :attr:`input` unless :attr:`keepdim` is true, default
value is False.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: results of norm operation on the specified axis of input tensor,
it's data type is the same as input's Tensor.
Examples:
.. code-block:: python
import paddle
import numpy as np
shape=[2, 3, 4]
np_input = np.arange(24).astype('float32') - 12
np_input = np_input.reshape(shape)
x = paddle.to_tensor(np_input)
#[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]]
# [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]]
# compute frobenius norm along last two dimensions.
out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1])
# out_fro.numpy() [17.435596 16.911535 16.7332 16.911535]
# compute 2-order vector norm along last dimension.
out_pnorm = paddle.linalg.norm(x, p=2, axis=-1)
#out_pnorm.numpy(): [[21.118711 13.190906 5.477226]
# [ 3.7416575 11.224972 19.131126]]
# compute 2-order norm along [0,1] dimension.
out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1])
#out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535]
# compute inf-order norm
out_pnorm = paddle.linalg.norm(x, p=np.inf)
#out_pnorm.numpy() = [12.]
out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0)
#out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]]
# compute -inf-order norm
out_pnorm = paddle.linalg.norm(x, p=-np.inf)
#out_pnorm.numpy(): [0.]
out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0)
#out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] | python/paddle/tensor/linalg.py | norm | DevilCarp/Paddle | python | def norm(x, p='fro', axis=None, keepdim=False, name=None):
"\n\n Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean\n or 2-norm, and in general the p-norm for p > 0) of a given tensor.\n\n .. note::\n This norm API is different from `numpy.linalg.norm`.\n This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm.\n But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor.\n For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM.\n\n Args:\n x (Tensor): The input tensor could be N-D tensor, and the input data\n type could be float32 or float64.\n p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`,\n `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm.\n Default value is `fro`.\n axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int\n or list(int)/tuple(int) with only one element, the vector norm is computed over the axis.\n If `axis < 0`, the dimension to norm operation is rank(input) + axis.\n If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis.\n Defalut value is `None`.\n keepdim (bool, optional): Whether to reserve the reduced dimension in the\n output Tensor. The result tensor will have fewer dimension\n than the :attr:`input` unless :attr:`keepdim` is true, default\n value is False.\n name (str, optional): The default value is None. Normally there is no need for\n user to set this property. For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: results of norm operation on the specified axis of input tensor,\n it's data type is the same as input's Tensor.\n\n Examples:\n .. code-block:: python\n\n import paddle\n import numpy as np\n shape=[2, 3, 4]\n np_input = np.arange(24).astype('float32') - 12\n np_input = np_input.reshape(shape)\n x = paddle.to_tensor(np_input)\n #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]]\n # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]]\n\n # compute frobenius norm along last two dimensions.\n out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1])\n # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535]\n\n # compute 2-order vector norm along last dimension.\n out_pnorm = paddle.linalg.norm(x, p=2, axis=-1)\n #out_pnorm.numpy(): [[21.118711 13.190906 5.477226]\n # [ 3.7416575 11.224972 19.131126]]\n\n # compute 2-order norm along [0,1] dimension.\n out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1])\n #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535]\n\n # compute inf-order norm\n out_pnorm = paddle.linalg.norm(x, p=np.inf)\n #out_pnorm.numpy() = [12.]\n out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0)\n #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]]\n\n # compute -inf-order norm\n out_pnorm = paddle.linalg.norm(x, p=-np.inf)\n #out_pnorm.numpy(): [0.]\n out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0)\n #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]]\n "
def frobenius_norm(input, dim=None, keepdim=False, name=None):
'\n The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`.\n Args:\n input (Variable): Tensor, data type float32, float64.\n dim (list, optional): None for last two dimensions.\n keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.\n '
if ((dim is not None) and (not (isinstance(dim, list) and (len(dim) == 2)))):
raise ValueError('The dim of frobenius norm op should be None or two elements list!')
if paddle.in_dynamic_mode():
if (dim is None):
return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True)
return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False)
attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False}
if (dim is None):
attrs['reduce_all'] = True
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm')
helper = LayerHelper('frobenius_norm', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs)
return out
def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None):
'\n Calculate the p-order vector norm for certain dimension of Tensor `input`.\n Args:\n input (Variable): Tensor, data type float32, float64.\n porder (float, optional): None for porder=2.0.\n axis (int, optional): None for last dimension.\n keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.\n '
if paddle.in_dynamic_mode():
if (axis is None):
axis = (- 1)
return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector)
if (porder is not None):
check_type(porder, 'porder', (float, int), 'p_norm')
if (axis is not None):
check_type(axis, 'axis', int, 'p_norm')
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm')
attrs = {'axis': (axis if (axis is not None) else (- 1)), 'porder': (float(porder) if (porder is not None) else 2.0), 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12}
helper = LayerHelper('p_norm', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs)
return out
def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None):
helper = LayerHelper('frobenius_norm', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out})
reduce_out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
reduce_all = (True if ((axis is None) or (axis == []) or (asvector == True)) else False)
axis = (axis if ((axis is not None) and (axis != [])) else [0])
reduce_type = ('reduce_max' if (porder == float('inf')) else 'reduce_min')
helper.append_op(type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
return reduce_out
def p_matrix_norm(input, porder=1.0, axis=axis, keepdim=False, name=None):
'\n NOTE:\n This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm.\n '
block = LayerHelper('norm', **locals())
out = block.create_variable_for_type_inference(dtype=block.input_dtype())
abs_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='abs', inputs={'X': input}, outputs={'Out': abs_out})
pow_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder})
sum_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': (True if (axis is None) else False)})
block.append_op(type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float((1.0 / porder))})
return out
if ((axis is None) and (p is not None)):
if isinstance(p, str):
if (p == 'fro'):
return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name)
else:
raise ValueError("only valid string values are 'fro', found {}".format(p))
elif isinstance(p, (int, float)):
return vector_norm(x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name)
else:
raise ValueError('only valid p type is string or float, found {}'.format(type(p)))
if isinstance(axis, tuple):
axis = list(axis)
if (isinstance(axis, list) and (len(axis) == 1)):
axis = axis[0]
if isinstance(axis, int):
if isinstance(p, str):
if (p == 'fro'):
return vector_norm(x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name)
else:
raise ValueError("only valid string values are 'fro', found {}".format(p))
elif isinstance(p, (int, float)):
return vector_norm(x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name)
else:
raise ValueError('unsupported p for p-order vector norm, expect float, found {}'.format(p))
elif (isinstance(axis, list) and (len(axis) == 2)):
if (p == 'fro'):
return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name)
elif ((p == np.inf) or (p == (- np.inf))):
return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name)
elif (p == 0):
raise ValueError('just support axis type int or list (length of list <=1) if p = 0, found {}'.format(axis))
else:
return p_matrix_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name)
else:
raise ValueError('expect axis type int or list (length of list <=2), found {}'.format(axis)) |
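The docstring's note (a p-order "matrix norm" here is really a flattened vector norm) is easy to verify with plain numpy, which mirrors what `p_matrix_norm` computes step by step (abs, pow, reduce_sum, pow 1/p):

```python
import numpy as np

a = np.arange(6, dtype='float32').reshape(2, 3)
p = 3.0
# What p_matrix_norm computes over axis=[0, 1]: an elementwise p-norm.
flattened = (np.abs(a) ** p).sum() ** (1.0 / p)
# A genuine matrix norm (the spectral norm) is a different quantity.
spectral = np.linalg.norm(a, ord=2)
print(flattened, spectral)  # generally different values
```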
def dist(x, y, p=2, name=None):
'\n\n This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure\n of distance. The shapes of x and y must be broadcastable. The definition is as follows, for\n details, please refer to the `numpy\'s broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_:\n\n - Each input has at least one dimension.\n - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist.\n\n Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be\n obtained as follows:\n\n 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the\n tensor with fewer dimensions.\n\n For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the\n dimension of y.\n\n x (4-D Tensor): 8 x 1 x 6 x 1\n\n y (4-D Tensor): 1 x 7 x 1 x 5\n\n 2. Determine the size of each dimension of the output z: choose the maximum value from the\n two input dimensions.\n\n z (4-D Tensor): 8 x 7 x 6 x 5\n\n If the number of dimensions of the two inputs are the same, the size of the output can be\n directly determined in step 2. When p takes different values, the norm formula is as follows:\n\n When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z.\n\n .. math::\n\n ||z||_{0}=\\lim_{p \\\\rightarrow 0}\\sum_{i=1}^{m}|z_i|^{p}\n\n When p = inf, the inf-norm of z is the maximum element of z.\n\n .. math::\n\n ||z||_\\infty=\\max_i |z_i|\n\n When p = -inf, the negative-inf-norm of z is the minimum element of z.\n\n .. math::\n\n ||z||_{-\\infty}=\\min_i |z_i|\n\n Otherwise, the p-norm of z follows the formula,\n\n .. math::\n\n ||z||_{p}=(\\sum_{i=1}^{m}|z_i|^p)^{\\\\frac{1}{p}}\n\n Args:\n x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64.\n y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64.\n p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2.\n\n Returns:\n Tensor: Tensor that is the p-norm of (x - y).\n\n Examples:\n .. code-block:: python\n\n import paddle\n import numpy as np\n\n x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32")\n y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32")\n out = paddle.dist(x, y, 0)\n print(out) # out = [1.]\n\n out = paddle.dist(x, y, 2)\n print(out) # out = [2.]\n\n out = paddle.dist(x, y, float("inf"))\n print(out) # out = [2.]\n\n out = paddle.dist(x, y, float("-inf"))\n print(out) # out = [0.]\n '
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist')
check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist')
check_type(p, 'p', (float, int), 'dist')
helper = LayerHelper('dist', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
inputs = {'X': [x], 'Y': [y]}
outputs = {'Out': [out]}
attrs = {'p': float(p)}
helper.append_op(type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out | -1,655,034,432,683,055,400 | This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure
of distance. The shapes of x and y must be broadcastable. The definition is as follows, for
details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_:
- Each input has at least one dimension.
- Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist.
Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be
obtained as follows:
1. If x and y have different numbers of dimensions, prepend 1 to the dimensions of the
tensor with fewer dimensions.
For example, the shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the
dimension of y.
x (4-D Tensor): 8 x 1 x 6 x 1
y (4-D Tensor): 1 x 7 x 1 x 5
2. Determine the size of each dimension of the output z: choose the maximum value from the
two input dimensions.
z (4-D Tensor): 8 x 7 x 6 x 5
If the two inputs have the same number of dimensions, the size of the output can be
directly determined in step 2. When p takes different values, the norm formula is as follows:
When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z.
.. math::
||z||_{0}=\lim_{p \rightarrow 0}\sum_{i=1}^{m}|z_i|^{p}
When p = inf, the inf-norm of z is the maximum element of z.
.. math::
||z||_\infty=\max_i |z_i|
When p = -inf, the negative-inf-norm of z is the minimum element of z.
.. math::
||z||_{-\infty}=\min_i |z_i|
Otherwise, the p-norm of z follows the formula,
.. math::
||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\frac{1}{p}}
Args:
x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64.
y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64.
p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2.
Returns:
Tensor: Tensor that is the p-norm of (x - y).
Examples:
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32")
y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32")
out = paddle.dist(x, y, 0)
print(out) # out = [1.]
out = paddle.dist(x, y, 2)
print(out) # out = [2.]
out = paddle.dist(x, y, float("inf"))
print(out) # out = [2.]
out = paddle.dist(x, y, float("-inf"))
print(out) # out = [0.] | python/paddle/tensor/linalg.py | dist | DevilCarp/Paddle | python | def dist(x, y, p=2, name=None):
'\n\n This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure\n of distance. The shapes of x and y must be broadcastable. The definition is as follows, for\n details, please refer to the `numpy\'s broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_:\n\n - Each input has at least one dimension.\n - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist.\n\n Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be\n obtained as follows:\n\n 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the\n tensor with fewer dimensions.\n\n For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the\n dimension of y.\n\n x (4-D Tensor): 8 x 1 x 6 x 1\n\n y (4-D Tensor): 1 x 7 x 1 x 5\n\n 2. Determine the size of each dimension of the output z: choose the maximum value from the\n two input dimensions.\n\n z (4-D Tensor): 8 x 7 x 6 x 5\n\n If the number of dimensions of the two inputs are the same, the size of the output can be\n directly determined in step 2. When p takes different values, the norm formula is as follows:\n\n When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z.\n\n .. math::\n\n ||z||_{0}=\\lim_{p \\\\rightarrow 0}\\sum_{i=1}^{m}|z_i|^{p}\n\n When p = inf, the inf-norm of z is the maximum element of z.\n\n .. math::\n\n ||z||_\\infty=\\max_i |z_i|\n\n When p = -inf, the negative-inf-norm of z is the minimum element of z.\n\n .. math::\n\n ||z||_{-\\infty}=\\min_i |z_i|\n\n Otherwise, the p-norm of z follows the formula,\n\n .. math::\n\n ||z||_{p}=(\\sum_{i=1}^{m}|z_i|^p)^{\\\\frac{1}{p}}\n\n Args:\n x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64.\n y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64.\n p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2.\n\n Returns:\n Tensor: Tensor that is the p-norm of (x - y).\n\n Examples:\n .. code-block:: python\n\n import paddle\n import numpy as np\n\n x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32")\n y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32")\n out = paddle.dist(x, y, 0)\n print(out) # out = [1.]\n\n out = paddle.dist(x, y, 2)\n print(out) # out = [2.]\n\n out = paddle.dist(x, y, float("inf"))\n print(out) # out = [2.]\n\n out = paddle.dist(x, y, float("-inf"))\n print(out) # out = [0.]\n '
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist')
check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist')
check_type(p, 'p', (float, int), 'dist')
helper = LayerHelper('dist', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
inputs = {'X': [x], 'Y': [y]}
outputs = {'Out': [out]}
attrs = {'p': float(p)}
helper.append_op(type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out |
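# A minimal sanity-check sketch (assumes paddle is installed): dist(x, y, p)
# should match the p-norm of the flattened, broadcasted difference x - y.
import paddle

x = paddle.to_tensor([[3., 3.], [3., 3.]])
y = paddle.to_tensor([[3., 3.], [3., 1.]])
# z = x - y = [[0, 0], [0, 2]], so ||z||_2 = sqrt(0 + 0 + 0 + 4) = 2.
print(paddle.dist(x, y, 2))                        # [2.]
print(paddle.linalg.norm((x - y).flatten(), p=2))  # also 2.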
def cond(x, p=None, name=None):
"\n\n Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``.\n\n Args:\n x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions\n for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``.\n And the input data type could be ``float32`` or ``float64``.\n p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`,\n `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`.\n name (str, optional): The default value is `None`. Normally there is no need for\n user to set this property. For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: computing results of condition number, its data type is the same as input Tensor ``x``.\n\n Examples:\n .. code-block:: python\n\n import paddle\n import numpy as np\n\n x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]])\n\n # compute conditional number when p is None\n out = paddle.linalg.cond(x)\n # out.numpy() [1.4142135]\n\n # compute conditional number when order of the norm is 'fro'\n out_fro = paddle.linalg.cond(x, p='fro')\n # out_fro.numpy() [3.1622777]\n\n # compute conditional number when order of the norm is 'nuc'\n out_nuc = paddle.linalg.cond(x, p='nuc')\n # out_nuc.numpy() [9.2426405]\n\n # compute conditional number when order of the norm is 1\n out_1 = paddle.linalg.cond(x, p=1)\n # out_1.numpy() [2.]\n\n # compute conditional number when order of the norm is -1\n out_minus_1 = paddle.linalg.cond(x, p=-1)\n # out_minus_1.numpy() [1.]\n\n # compute conditional number when order of the norm is 2\n out_2 = paddle.linalg.cond(x, p=2)\n # out_2.numpy() [1.4142135]\n\n # compute conditional number when order of the norm is -1\n out_minus_2 = paddle.linalg.cond(x, p=-2)\n # out_minus_2.numpy() [0.70710677]\n\n # compute conditional number when order of the norm is inf\n out_inf = paddle.linalg.cond(x, p=np.inf)\n # out_inf.numpy() [2.]\n\n # compute conditional number when order of the norm is -inf\n out_minus_inf = paddle.linalg.cond(x, p=-np.inf)\n # out_minus_inf.numpy() [1.]\n\n a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32'))\n # a.numpy()\n # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543]\n # [-0.16303636 1.5534962 -0.49919784 -0.04402903]\n # [-1.1341571 -0.6022629 0.5445269 0.29154757]\n # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]]\n # [[-0.58081484 0.12402827 0.7229862 -0.55046535]\n # [-0.15178485 -1.1604939 0.75810957 0.30971205]\n # [-0.9669573 1.0940945 -0.27363303 -0.35416734]\n # [-1.216529 2.0018666 -0.7773689 -0.17556527]]]\n a_cond_fro = paddle.linalg.cond(a, p='fro')\n # a_cond_fro.numpy() [31.572273 28.120834]\n\n b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64'))\n # b.numpy()\n # [[[ 1.61707487 0.46829144 0.38130416 0.82546736]\n # [-1.72710298 0.08866375 -0.62518804 0.16128892]\n # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]]\n # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523]\n # [-0.76620689 0.56673047 0.85064753 -0.45158196]\n # [ 1.47595418 2.23646462 1.5701758 0.10497519]]]\n b_cond_2 = paddle.linalg.cond(b, p=2)\n # b_cond_2.numpy() [3.30064451 2.51976252]\n\n "
def mat_norm(input, porder=1.0, axis=None):
'\n NOTE:\n Calculate the matrix norm of a square matrix or batches of square matrices,\n when porder is in (1, -1, inf, -inf)\n '
reduce_all = (True if ((axis is None) or (axis == [])) else False)
axis = (axis if ((axis != None) and (axis != [])) else [0])
keepdim = False
if paddle.in_dynamic_mode():
abs_out = _C_ops.abs(input)
sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
if ((porder == 1) or (porder == np.inf)):
return _C_ops.reduce_max(sum_out, 'dim', [(- 1)], 'keepdim', keepdim, 'reduce_all', reduce_all)
if ((porder == (- 1)) or (porder == (- np.inf))):
return _C_ops.reduce_min(sum_out, 'dim', [(- 1)], 'keepdim', keepdim, 'reduce_all', reduce_all)
block = LayerHelper('norm', **locals())
abs_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
sum_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='abs', inputs={'X': input}, outputs={'Out': abs_out})
block.append_op(type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
if ((porder == 1) or (porder == np.inf)):
block.append_op(type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'dim': [(- 1)], 'keep_dim': keepdim, 'reduce_all': reduce_all})
if ((porder == (- 1)) or (porder == (- np.inf))):
block.append_op(type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'dim': [(- 1)], 'keep_dim': keepdim, 'reduce_all': reduce_all})
return out
def fro_norm(input, porder=2, axis=[(- 1)]):
'\n NOTE:\n Calculate the frobenius norm of a square matrix or batches of square matrices.\n '
reduce_all = (True if ((axis is None) or (axis == [])) else False)
keepdim = False
if paddle.in_dynamic_mode():
pow_out = _C_ops.pow(input, 'factor', porder)
sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
return _C_ops.pow(sum_out_2, 'factor', float((1.0 / porder)))
block = LayerHelper('norm', **locals())
pow_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
sum_out_1 = block.create_variable_for_type_inference(dtype=block.input_dtype())
sum_out_2 = block.create_variable_for_type_inference(dtype=block.input_dtype())
out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder})
block.append_op(type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
block.append_op(type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
block.append_op(type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float((1.0 / porder))})
return out
def svd_norm(input, porder, axis=[(- 1)]):
'\n NOTE:\n Calculate the matrix norm, which is related to singular values, of a matrix\n or batches of matrices, including nuclear norm, 2-norm and (-2)-norm.\n '
reduce_all = (True if ((axis is None) or (axis == [])) else False)
keepdim = False
(u, s, vh) = svd(input, full_matrices=False)
if paddle.in_dynamic_mode():
if (porder == 'nuc'):
return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
if (porder == 2):
return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False)
if (porder == (- 2)):
return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False)
block = LayerHelper('norm', **locals())
out = block.create_variable_for_type_inference(dtype=block.input_dtype())
if (porder == 'nuc'):
block.append_op(type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
return out
max_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
min_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
block.append_op(type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
if (porder == 2):
block.append_op(type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False})
return out
if (porder == (- 2)):
block.append_op(type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False})
return out
def empty_tensor(input, shape):
if paddle.in_dynamic_mode():
return input.reshape(shape)
raise ValueError('only support x is nonempty tensor in static mode')
x_shape = list(x.shape)
if (not (len(x_shape) >= 2)):
raise ValueError(('input should be a matrix or batches of matrices, ' + 'but the dimension of received input is {}'.format(len(x_shape))))
if (p == None):
p = 2
x_size = (0 if (0 in x_shape) else 1)
if (p in ('fro', 'nuc', 1, (- 1), np.inf, (- np.inf))):
if (x_shape[(len(x_shape) - 1)] == x_shape[(len(x_shape) - 2)]):
if (x_size == 0):
return empty_tensor(x, x_shape[:(- 2)])
x_inv = x.inverse()
if (p == 'fro'):
return (fro_norm(x) * fro_norm(x_inv))
if (p == 'nuc'):
return (svd_norm(x, p) * svd_norm(x_inv, p))
if (p in (1, (- 1))):
return (mat_norm(x, porder=p, axis=[(- 2)]) * mat_norm(x_inv, porder=p, axis=[(- 2)]))
if (p in (np.inf, (- np.inf))):
return (mat_norm(x, porder=p, axis=[(- 1)]) * mat_norm(x_inv, porder=p, axis=[(- 1)]))
else:
raise ValueError(('p={} is only supported when input is a '.format(p) + 'square matrix or batches of square matrices'))
elif (p in (2, (- 2))):
if (x_size == 0):
return empty_tensor(x, x_shape[:(- 2)])
return svd_norm(x, porder=p)
else:
raise ValueError(("unsupported {} for p, only supporting ('fro', 'nuc', ".format(p) + '1, -1, 2, -2, inf, -inf) or none')) | -8,999,208,571,252,141,000 | Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``.
Args:
x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions
for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``.
And the input data type could be ``float32`` or ``float64``.
p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`,
`inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`.
name (str, optional): The default value is `None`. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: computing results of condition number, its data type is the same as input Tensor ``x``.
Examples:
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]])
# compute condition number when p is None
out = paddle.linalg.cond(x)
# out.numpy() [1.4142135]
# compute condition number when order of the norm is 'fro'
out_fro = paddle.linalg.cond(x, p='fro')
# out_fro.numpy() [3.1622777]
# compute condition number when order of the norm is 'nuc'
out_nuc = paddle.linalg.cond(x, p='nuc')
# out_nuc.numpy() [9.2426405]
# compute condition number when order of the norm is 1
out_1 = paddle.linalg.cond(x, p=1)
# out_1.numpy() [2.]
# compute condition number when order of the norm is -1
out_minus_1 = paddle.linalg.cond(x, p=-1)
# out_minus_1.numpy() [1.]
# compute condition number when order of the norm is 2
out_2 = paddle.linalg.cond(x, p=2)
# out_2.numpy() [1.4142135]
# compute condition number when order of the norm is -2
out_minus_2 = paddle.linalg.cond(x, p=-2)
# out_minus_2.numpy() [0.70710677]
# compute condition number when order of the norm is inf
out_inf = paddle.linalg.cond(x, p=np.inf)
# out_inf.numpy() [2.]
# compute condition number when order of the norm is -inf
out_minus_inf = paddle.linalg.cond(x, p=-np.inf)
# out_minus_inf.numpy() [1.]
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32'))
# a.numpy()
# [[[ 0.14063153 -0.996288 0.7996131 -0.02571543]
# [-0.16303636 1.5534962 -0.49919784 -0.04402903]
# [-1.1341571 -0.6022629 0.5445269 0.29154757]
# [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]]
# [[-0.58081484 0.12402827 0.7229862 -0.55046535]
# [-0.15178485 -1.1604939 0.75810957 0.30971205]
# [-0.9669573 1.0940945 -0.27363303 -0.35416734]
# [-1.216529 2.0018666 -0.7773689 -0.17556527]]]
a_cond_fro = paddle.linalg.cond(a, p='fro')
# a_cond_fro.numpy() [31.572273 28.120834]
b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64'))
# b.numpy()
# [[[ 1.61707487 0.46829144 0.38130416 0.82546736]
# [-1.72710298 0.08866375 -0.62518804 0.16128892]
# [-0.02822879 -1.67764516 0.11141444 0.3220113 ]]
# [[ 0.22524372 0.62474921 -0.85503233 -1.03960523]
# [-0.76620689 0.56673047 0.85064753 -0.45158196]
# [ 1.47595418 2.23646462 1.5701758 0.10497519]]]
b_cond_2 = paddle.linalg.cond(b, p=2)
# b_cond_2.numpy() [3.30064451 2.51976252] | python/paddle/tensor/linalg.py | cond | DevilCarp/Paddle | python | def cond(x, p=None, name=None):
"\n\n Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``.\n\n Args:\n x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions\n for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``.\n And the input data type could be ``float32`` or ``float64``.\n p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`,\n `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`.\n name (str, optional): The default value is `None`. Normally there is no need for\n user to set this property. For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: computing results of condition number, its data type is the same as input Tensor ``x``.\n\n Examples:\n .. code-block:: python\n\n import paddle\n import numpy as np\n\n x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]])\n\n # compute conditional number when p is None\n out = paddle.linalg.cond(x)\n # out.numpy() [1.4142135]\n\n # compute conditional number when order of the norm is 'fro'\n out_fro = paddle.linalg.cond(x, p='fro')\n # out_fro.numpy() [3.1622777]\n\n # compute conditional number when order of the norm is 'nuc'\n out_nuc = paddle.linalg.cond(x, p='nuc')\n # out_nuc.numpy() [9.2426405]\n\n # compute conditional number when order of the norm is 1\n out_1 = paddle.linalg.cond(x, p=1)\n # out_1.numpy() [2.]\n\n # compute conditional number when order of the norm is -1\n out_minus_1 = paddle.linalg.cond(x, p=-1)\n # out_minus_1.numpy() [1.]\n\n # compute conditional number when order of the norm is 2\n out_2 = paddle.linalg.cond(x, p=2)\n # out_2.numpy() [1.4142135]\n\n # compute conditional number when order of the norm is -1\n out_minus_2 = paddle.linalg.cond(x, p=-2)\n # out_minus_2.numpy() [0.70710677]\n\n # compute conditional number when order of the norm is inf\n out_inf = paddle.linalg.cond(x, p=np.inf)\n # out_inf.numpy() [2.]\n\n # compute conditional number when order of the norm is -inf\n out_minus_inf = paddle.linalg.cond(x, p=-np.inf)\n # out_minus_inf.numpy() [1.]\n\n a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32'))\n # a.numpy()\n # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543]\n # [-0.16303636 1.5534962 -0.49919784 -0.04402903]\n # [-1.1341571 -0.6022629 0.5445269 0.29154757]\n # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]]\n # [[-0.58081484 0.12402827 0.7229862 -0.55046535]\n # [-0.15178485 -1.1604939 0.75810957 0.30971205]\n # [-0.9669573 1.0940945 -0.27363303 -0.35416734]\n # [-1.216529 2.0018666 -0.7773689 -0.17556527]]]\n a_cond_fro = paddle.linalg.cond(a, p='fro')\n # a_cond_fro.numpy() [31.572273 28.120834]\n\n b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64'))\n # b.numpy()\n # [[[ 1.61707487 0.46829144 0.38130416 0.82546736]\n # [-1.72710298 0.08866375 -0.62518804 0.16128892]\n # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]]\n # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523]\n # [-0.76620689 0.56673047 0.85064753 -0.45158196]\n # [ 1.47595418 2.23646462 1.5701758 0.10497519]]]\n b_cond_2 = paddle.linalg.cond(b, p=2)\n # b_cond_2.numpy() [3.30064451 2.51976252]\n\n "
def mat_norm(input, porder=1.0, axis=None):
'\n NOTE:\n Calculate the matrix norm of a square matrix or batches of square matrices,\n when porder is in (1, -1, inf, -inf)\n '
reduce_all = (True if ((axis is None) or (axis == [])) else False)
axis = (axis if ((axis != None) and (axis != [])) else [0])
keepdim = False
if paddle.in_dynamic_mode():
abs_out = _C_ops.abs(input)
sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
if ((porder == 1) or (porder == np.inf)):
return _C_ops.reduce_max(sum_out, 'dim', [(- 1)], 'keepdim', keepdim, 'reduce_all', reduce_all)
if ((porder == (- 1)) or (porder == (- np.inf))):
return _C_ops.reduce_min(sum_out, 'dim', [(- 1)], 'keepdim', keepdim, 'reduce_all', reduce_all)
block = LayerHelper('norm', **locals())
abs_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
sum_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='abs', inputs={'X': input}, outputs={'Out': abs_out})
block.append_op(type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
if ((porder == 1) or (porder == np.inf)):
block.append_op(type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'dim': [(- 1)], 'keep_dim': keepdim, 'reduce_all': reduce_all})
if ((porder == (- 1)) or (porder == (- np.inf))):
block.append_op(type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'dim': [(- 1)], 'keep_dim': keepdim, 'reduce_all': reduce_all})
return out
def fro_norm(input, porder=2, axis=[(- 1)]):
'\n NOTE:\n Calculate the frobenius norm of a square matrix or batches of square matrices.\n '
reduce_all = (True if ((axis is None) or (axis == [])) else False)
keepdim = False
if paddle.in_dynamic_mode():
pow_out = _C_ops.pow(input, 'factor', porder)
sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
return _C_ops.pow(sum_out_2, 'factor', float((1.0 / porder)))
block = LayerHelper('norm', **locals())
pow_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
sum_out_1 = block.create_variable_for_type_inference(dtype=block.input_dtype())
sum_out_2 = block.create_variable_for_type_inference(dtype=block.input_dtype())
out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder})
block.append_op(type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
block.append_op(type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
block.append_op(type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float((1.0 / porder))})
return out
def svd_norm(input, porder, axis=[(- 1)]):
'\n NOTE:\n Calculate the matrix norm, which is related to singular values, of a matrix\n or batches of matrices, including nuclear norm, 2-norm and (-2)-norm.\n '
reduce_all = (True if ((axis is None) or (axis == [])) else False)
keepdim = False
(u, s, vh) = svd(input, full_matrices=False)
if paddle.in_dynamic_mode():
if (porder == 'nuc'):
return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
if (porder == 2):
return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False)
if (porder == (- 2)):
return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False)
block = LayerHelper('norm', **locals())
out = block.create_variable_for_type_inference(dtype=block.input_dtype())
if (porder == 'nuc'):
block.append_op(type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
return out
max_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
min_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
block.append_op(type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
if (porder == 2):
block.append_op(type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False})
return out
if (porder == (- 2)):
block.append_op(type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False})
return out
def empty_tensor(input, shape):
if paddle.in_dynamic_mode():
return input.reshape(shape)
raise ValueError('only support x is nonempty tensor in static mode')
x_shape = list(x.shape)
if (not (len(x_shape) >= 2)):
raise ValueError(('input should be a matrix or batches of matrices, ' + 'but the dimension of received input is {}'.format(len(x_shape))))
if (p == None):
p = 2
x_size = (0 if (0 in x_shape) else 1)
if (p in ('fro', 'nuc', 1, (- 1), np.inf, (- np.inf))):
if (x_shape[(len(x_shape) - 1)] == x_shape[(len(x_shape) - 2)]):
if (x_size == 0):
return empty_tensor(x, x_shape[:(- 2)])
x_inv = x.inverse()
if (p == 'fro'):
return (fro_norm(x) * fro_norm(x_inv))
if (p == 'nuc'):
return (svd_norm(x, p) * svd_norm(x_inv, p))
if (p in (1, (- 1))):
return (mat_norm(x, porder=p, axis=[(- 2)]) * mat_norm(x_inv, porder=p, axis=[(- 2)]))
if (p in (np.inf, (- np.inf))):
return (mat_norm(x, porder=p, axis=[(- 1)]) * mat_norm(x_inv, porder=p, axis=[(- 1)]))
else:
raise ValueError(('p={} is only supported when input is a '.format(p) + 'square matrix or batches of square matrices'))
elif (p in (2, (- 2))):
if (x_size == 0):
return empty_tensor(x, x_shape[:(- 2)])
return svd_norm(x, porder=p)
else:
raise ValueError(("unsupported {} for p, only supporting ('fro', 'nuc', ".format(p) + '1, -1, 2, -2, inf, -inf) or none')) |
def dot(x, y, name=None):
"\n This operator calculates inner product for vectors.\n\n .. note::\n Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix\n is the batch dimension, which means that the vectors of multiple batches are dotted.\n\n Parameters:\n x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64``\n y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64``\n name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name`\n\n Returns:\n Tensor: the calculated result Tensor.\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n import numpy as np\n\n x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32)\n y_data = np.random.uniform(1, 3, [10]).astype(np.float32)\n x = paddle.to_tensor(x_data)\n y = paddle.to_tensor(y_data)\n z = paddle.dot(x, y)\n print(z)\n\n "
op_type = 'dot'
if paddle.in_dynamic_mode():
op = getattr(_C_ops, op_type)
return op(x, y)
assert (x is not None), 'x cannot be None in {}'.format(op_type)
assert (y is not None), 'y cannot be None in {}'.format(op_type)
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type)
check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type)
helper = LayerHelper(op_type, **locals())
if (name is None):
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
out = helper.create_variable(name=name, dtype=x.dtype, persistable=False)
helper.append_op(type='dot', inputs={'X': x, 'Y': y}, attrs={}, outputs={'Out': out})
return out | -8,485,889,732,085,839,000 | This operator calculates inner product for vectors.
.. note::
Supports 1-D and 2-D Tensors. When the input is 2-D, the first dimension of the matrix
is the batch dimension, which means that the vectors of multiple batches are dotted.
Parameters:
x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64``
y(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64``
name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name`
Returns:
Tensor: the calculated result Tensor.
Examples:
.. code-block:: python
import paddle
import numpy as np
x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32)
y_data = np.random.uniform(1, 3, [10]).astype(np.float32)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
z = paddle.dot(x, y)
print(z) | python/paddle/tensor/linalg.py | dot | DevilCarp/Paddle | python | def dot(x, y, name=None):
"\n This operator calculates inner product for vectors.\n\n .. note::\n Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix\n is the batch dimension, which means that the vectors of multiple batches are dotted.\n\n Parameters:\n x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64``\n y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64``\n name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name`\n\n Returns:\n Tensor: the calculated result Tensor.\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n import numpy as np\n\n x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32)\n y_data = np.random.uniform(1, 3, [10]).astype(np.float32)\n x = paddle.to_tensor(x_data)\n y = paddle.to_tensor(y_data)\n z = paddle.dot(x, y)\n print(z)\n\n "
op_type = 'dot'
if paddle.in_dynamic_mode():
op = getattr(_C_ops, op_type)
return op(x, y)
assert (x is not None), 'x cannot be None in {}'.format(op_type)
assert (y is not None), 'y cannot be None in {}'.format(op_type)
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type)
check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type)
helper = LayerHelper(op_type, **locals())
if (name is None):
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
out = helper.create_variable(name=name, dtype=x.dtype, persistable=False)
helper.append_op(type='dot', inputs={'X': x, 'Y': y}, attrs={}, outputs={'Out': out})
return out |
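# A sketch of the batched 2-D case described in the docstring above: each row
# pair is dotted independently (illustrative values, assumes paddle is installed).
import paddle

x = paddle.to_tensor([[1., 2., 3.], [4., 5., 6.]])
y = paddle.to_tensor([[1., 1., 1.], [2., 2., 2.]])
print(paddle.dot(x, y))  # one inner product per row: 6 and 30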
def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None):
"\n Estimate the covariance matrix of the input variables, given data and weights.\n\n A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix.\n For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix \n element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself.\n\n Parameters:\n x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below.\n rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True\n ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True\n fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None\n aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None\n name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name`\n\n Returns:\n Tensor: The covariance matrix Tensor of the variables.\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n\n xt = paddle.rand((3,4))\n paddle.linalg.cov(xt)\n\n '''\n Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True,\n [[0.07918842, 0.06127326, 0.01493049],\n [0.06127326, 0.06166256, 0.00302668],\n [0.01493049, 0.00302668, 0.01632146]])\n '''\n "
op_type = 'cov'
if ((len(x.shape) > 2) or (len(x.shape) < 1)):
raise ValueError(('Input(x) only supports N-D (1<=N<=2) tensors in cov, but the dimension of Input(x) is %s.' % len(x.shape)))
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov')
nx = x
if (len(x.shape) == 1):
nx = x.reshape((1, (- 1)))
if ((not rowvar) and (nx.shape[0] != 1)):
nx = nx.t()
w = None
observation_num = nx.shape[1]
if (fweights is not None):
w = fweights.astype(nx.dtype)
if (len(w.shape) > 1):
raise ValueError(('Input(fweights) only supports N-D (N<=1) tensors in cov, but the dimension of Input(fweights) is %s.' % len(fweights.shape)))
if (fweights.shape[0] != observation_num):
raise ValueError("The number of Input(fweights) should equal to x's dim[1]: {}, but received size of Input(fweights) is {}.".format(observation_num, fweights.shape[0]))
if (fweights.min() < 0):
raise ValueError('The value of Input(fweights) cannot be negative, but received min of Input(fweights) is {}.'.format(fweights.min()))
if (not paddle.all((fweights == paddle.round(fweights.astype('float64'))))):
raise ValueError('The values of Input(fweights) must be integers.')
if (aweights is not None):
aw = aweights.astype(nx.dtype)
if (len(aw.shape) > 1):
raise ValueError(('Input(aweights) only supports N-D (N<=1) tensors in cov, but the dimension of Input(aweights) is %s.' % len(aweights.shape)))
check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov')
if (aweights.shape[0] != observation_num):
raise ValueError("The number of Input(aweights) should equal to x's dim[1]: {}, but received size of Input(aweights) is {}.".format(observation_num, aweights.shape[0]))
if (aweights.min() < 0):
raise ValueError('The value of Input(aweights) cannot be negative, but received min of Input(aweights) is {}.'.format(aweights.min()))
if (w is not None):
w = (w * aw)
else:
w = aw
w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype)
if ((fweights is not None) or (aweights is not None)):
w_sum = w.sum()
if (w_sum.item() == 0):
raise ValueError("The sum of weights is zero, can't be normalized.")
if (w is not None):
nx_w = (nx * w)
avg = (nx_w.sum(axis=1) / w_sum)
else:
avg = (nx.sum(axis=1) / w_sum)
nx_w = nx
if ((w is not None) and (aweights is not None) and (ddof == True)):
norm_factor = (w_sum - ((w * aweights).sum() / w_sum))
else:
norm_factor = (w_sum - ddof)
if (norm_factor <= 0):
norm_factor = paddle.to_tensor(0, dtype=nx.dtype)
nx = (nx - avg.unsqueeze(1))
xxt = paddle.mm(nx, nx_w.t().conj())
cov = paddle.divide(xxt, norm_factor).squeeze()
return cov | 4,961,629,382,206,763,000 | Estimate the covariance matrix of the input variables, given data and weights.
A covariance matrix is a square matrix that indicates the covariance of each pair of variables in the input matrix.
For example, for N-dimensional samples X=[x1,x2,…,xN]^T, the covariance matrix
element Cij is the covariance of xi and xj, and the element Cii is the variance of xi itself.
Parameters:
x(Tensor): An N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below.
rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True
ddof(Bool, optional): If ddof=True, returns the unbiased estimate; if ddof=False, returns the simple average. Default: True
fweights(Tensor, optional): 1-D Tensor of integer frequency weights; the number of times each observation vector should be repeated. Default: None
aweights(Tensor, optional): 1-D Tensor of observation vector weights. These indicate how important each observation vector is; a larger weight marks the corresponding observation as more important. Default: None
name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name`
Returns:
Tensor: The covariance matrix Tensor of the variables.
Examples:
.. code-block:: python
import paddle
xt = paddle.rand((3,4))
paddle.linalg.cov(xt)
'''
Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
[[0.07918842, 0.06127326, 0.01493049],
[0.06127326, 0.06166256, 0.00302668],
[0.01493049, 0.00302668, 0.01632146]])
''' | python/paddle/tensor/linalg.py | cov | DevilCarp/Paddle | python | def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None):
"\n Estimate the covariance matrix of the input variables, given data and weights.\n\n A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix.\n For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix \n element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself.\n\n Parameters:\n x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below.\n rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True\n ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True\n fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None\n aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None\n name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name`\n\n Returns:\n Tensor: The covariance matrix Tensor of the variables.\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n\n xt = paddle.rand((3,4))\n paddle.linalg.cov(xt)\n\n '\n Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True,\n [[0.07918842, 0.06127326, 0.01493049],\n [0.06127326, 0.06166256, 0.00302668],\n [0.01493049, 0.00302668, 0.01632146]])\n '\n "
op_type = 'cov'
if ((len(x.shape) > 2) or (len(x.shape) < 1)):
raise ValueError(('Input(x) only supports N-D (1<=N<=2) tensors in cov, but the dimension of Input(x) is %s.' % len(x.shape)))
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov')
nx = x
if (len(x.shape) == 1):
nx = x.reshape((1, (- 1)))
if ((not rowvar) and (nx.shape[0] != 1)):
nx = nx.t()
w = None
observation_num = nx.shape[1]
if (fweights is not None):
w = fweights.astype(nx.dtype)
if (len(w.shape) > 1):
raise ValueError(('Input(fweights) only supports N-D (N<=1) tensors in cov, but the dimension of Input(fweights) is %s.' % len(fweights.shape)))
if (fweights.shape[0] != observation_num):
raise ValueError("The number of Input(fweights) should equal to x's dim[1]: {}, but received size of Input(fweights) is {}.".format(observation_num, fweights.shape[0]))
if (fweights.min() < 0):
raise ValueError('The value of Input(fweights) cannot be negative, but received min of Input(fweights) is {}.'.format(fweights.min()))
if (not paddle.all((fweights == paddle.round(fweights.astype('float64'))))):
raise ValueError('The values of Input(fweights) must be integers.')
if (aweights is not None):
aw = aweights.astype(nx.dtype)
if (len(aw.shape) > 1):
raise ValueError(('Input(aweights) only supports N-D (N<=1) tensors in cov, but the dimension of Input(aweights) is %s.' % len(aweights.shape)))
check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov')
if (aweights.shape[0] != observation_num):
raise ValueError("The number of Input(aweights) should equal to x's dim[1]: {}, but received size of Input(aweights) is {}.".format(observation_num, aweights.shape[0]))
if (aweights.min() < 0):
raise ValueError('The value of Input(aweights) cannot be negative, but received min of Input(aweights) is {}.'.format(aweights.min()))
if (w is not None):
w = (w * aw)
else:
w = aw
w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype)
if ((fweights is not None) or (aweights is not None)):
w_sum = w.sum()
if (w_sum.item() == 0):
raise ValueError("The sum of weights is zero, can't be normalized.")
if (w is not None):
nx_w = (nx * w)
avg = (nx_w.sum(axis=1) / w_sum)
else:
avg = (nx.sum(axis=1) / w_sum)
nx_w = nx
if ((w is not None) and (aweights is not None) and (ddof == True)):
norm_factor = (w_sum - ((w * aweights).sum() / w_sum))
else:
norm_factor = (w_sum - ddof)
if (norm_factor <= 0):
norm_factor = paddle.to_tensor(0, dtype=nx.dtype)
nx = (nx - avg.unsqueeze(1))
xxt = paddle.mm(nx, nx_w.t().conj())
cov = paddle.divide(xxt, norm_factor).squeeze()
return cov |
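# A sketch relating cov above to the textbook estimator: with the default
# ddof=True the normalization factor is N - 1, so a single variable with two
# observations reduces to the sample variance (assumes paddle is installed).
import paddle

x = paddle.to_tensor([[1., 3.]])  # one row (variable), two observations
print(paddle.linalg.cov(x))       # ((1-2)^2 + (3-2)^2) / (2 - 1) = 2.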
def t(input, name=None):
"\n Transpose <=2-D tensor.\n 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to\n the paddle.transpose function which perm dimensions set 0 and 1.\n\n Args:\n input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32.\n name(str, optional): The default value is None. Normally there is no need for\n user to set this property. For more information, please refer to :ref:`api_guide_Name`\n Returns:\n Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64.\n\n For Example:\n\n .. code-block:: text\n\n # Example 1 (0-D tensor)\n x = tensor([0.79])\n paddle.t(x) = tensor([0.79])\n\n # Example 2 (1-D tensor)\n x = tensor([0.79, 0.84, 0.32])\n paddle.t(x) = tensor([0.79, 0.84, 0.32])\n\n # Example 3 (2-D tensor)\n x = tensor([0.79, 0.84, 0.32],\n [0.64, 0.14, 0.57])\n paddle.t(x) = tensor([0.79, 0.64],\n [0.84, 0.14],\n [0.32, 0.57])\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n x = paddle.ones(shape=[2, 3], dtype='int32')\n x_transposed = paddle.t(x)\n print(x_transposed.shape)\n # [3, 2]\n "
if (len(input.shape) > 2):
raise ValueError(('Input(input) only supports N-D (N<=2) tensors, but the dimension of Input(input) is %s. Perhaps you can use paddle.tensor.transpose() instead.' % len(input.shape)))
if paddle.in_dynamic_mode():
if (len(input.shape) == 1):
return input
perm = [1, 0]
(out, _) = _C_ops.transpose2(input, 'axis', perm)
return out
check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose')
helper = LayerHelper('t', **locals())
out = helper.create_variable_for_type_inference(input.dtype)
input_shape = helper.create_variable_for_type_inference(input.dtype)
if (len(input.shape) == 1):
out = input
else:
helper.append_op(type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]})
return out | 3,878,725,866,431,119,400 | Transpose <=2-D tensor.
0-D and 1-D tensors are returned as is, and a 2-D tensor is equivalent to
the paddle.transpose function with perm dimensions set to 0 and 1.
Args:
input (Tensor): The input Tensor. It is an N-D (N<=2) Tensor of data types float16, float32, float64, int32, int64.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64.
For Example:
.. code-block:: text
# Example 1 (0-D tensor)
x = tensor([0.79])
paddle.t(x) = tensor([0.79])
# Example 2 (1-D tensor)
x = tensor([0.79, 0.84, 0.32])
paddle.t(x) = tensor([0.79, 0.84, 0.32])
# Example 3 (2-D tensor)
x = tensor([0.79, 0.84, 0.32],
[0.64, 0.14, 0.57])
paddle.t(x) = tensor([0.79, 0.64],
[0.84, 0.14],
[0.32, 0.57])
Examples:
.. code-block:: python
import paddle
x = paddle.ones(shape=[2, 3], dtype='int32')
x_transposed = paddle.t(x)
print(x_transposed.shape)
# [3, 2] | python/paddle/tensor/linalg.py | t | DevilCarp/Paddle | python | def t(input, name=None):
"\n Transpose <=2-D tensor.\n 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to\n the paddle.transpose function which perm dimensions set 0 and 1.\n\n Args:\n input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32.\n name(str, optional): The default value is None. Normally there is no need for\n user to set this property. For more information, please refer to :ref:`api_guide_Name`\n Returns:\n Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64.\n\n For Example:\n\n .. code-block:: text\n\n # Example 1 (0-D tensor)\n x = tensor([0.79])\n paddle.t(x) = tensor([0.79])\n\n # Example 2 (1-D tensor)\n x = tensor([0.79, 0.84, 0.32])\n paddle.t(x) = tensor([0.79, 0.84, 0.32])\n\n # Example 3 (2-D tensor)\n x = tensor([0.79, 0.84, 0.32],\n [0.64, 0.14, 0.57])\n paddle.t(x) = tensor([0.79, 0.64],\n [0.84, 0.14],\n [0.32, 0.57])\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n x = paddle.ones(shape=[2, 3], dtype='int32')\n x_transposed = paddle.t(x)\n print(x_transposed.shape)\n # [3, 2]\n "
if (len(input.shape) > 2):
raise ValueError(('Input(input) only supports N-D (N<=2) tensors, but the dimension of Input(input) is %s. Perhaps you can use paddle.tensor.transpose() instead.' % len(input.shape)))
if paddle.in_dynamic_mode():
if (len(input.shape) == 1):
return input
perm = [1, 0]
(out, _) = _C_ops.transpose2(input, 'axis', perm)
return out
check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose')
helper = LayerHelper('t', **locals())
out = helper.create_variable_for_type_inference(input.dtype)
input_shape = helper.create_variable_for_type_inference(input.dtype)
if (len(input.shape) == 1):
out = input
else:
helper.append_op(type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]})
return out |
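# A sketch of the <=2-D restriction enforced above: t simply swaps the two axes
# of a matrix; higher-rank inputs must use paddle.transpose instead (assumes
# paddle is installed).
import paddle

x = paddle.to_tensor([[0.79, 0.84, 0.32], [0.64, 0.14, 0.57]])
print(paddle.t(x).shape)  # [3, 2]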
def cross(x, y, axis=None, name=None):
'\n Computes the cross product between two tensors along an axis.\n\n Inputs must have the same shape, and the length of their axes should be equal to 3.\n If `axis` is not given, it defaults to the first axis found with the length 3.\n\n Args:\n x (Tensor): The first input tensor.\n y (Tensor): The second input tensor.\n axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor. A Tensor with same data type as `x`.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([[1.0, 1.0, 1.0],\n [2.0, 2.0, 2.0],\n [3.0, 3.0, 3.0]])\n y = paddle.to_tensor([[1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0]])\n\n z1 = paddle.cross(x, y)\n # [[-1. -1. -1.]\n # [ 2. 2. 2.]\n # [-1. -1. -1.]]\n\n z2 = paddle.cross(x, y, axis=1)\n # [[0. 0. 0.]\n # [0. 0. 0.]\n # [0. 0. 0.]]\n '
if in_dygraph_mode():
return _C_ops.final_state_cross(x, y, axis)
elif _in_legacy_dygraph():
if (axis is not None):
return _C_ops.cross(x, y, 'dim', axis)
else:
return _C_ops.cross(x, y)
else:
helper = LayerHelper('cross', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
attrs = dict()
attrs['dim'] = axis
helper.append_op(type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs)
return out | -527,705,269,943,561,300 | Computes the cross product between two tensors along an axis.
Inputs must have the same shape, and the axis along which the product is computed must have length 3.
If `axis` is not given, it defaults to the first axis found with length 3.
Args:
x (Tensor): The first input tensor.
y (Tensor): The second input tensor.
axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor. A Tensor with same data type as `x`.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1.0, 1.0, 1.0],
[2.0, 2.0, 2.0],
[3.0, 3.0, 3.0]])
y = paddle.to_tensor([[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0]])
z1 = paddle.cross(x, y)
# [[-1. -1. -1.]
# [ 2. 2. 2.]
# [-1. -1. -1.]]
z2 = paddle.cross(x, y, axis=1)
# [[0. 0. 0.]
# [0. 0. 0.]
# [0. 0. 0.]] | python/paddle/tensor/linalg.py | cross | DevilCarp/Paddle | python | def cross(x, y, axis=None, name=None):
'\n Computes the cross product between two tensors along an axis.\n\n Inputs must have the same shape, and the length of their axes should be equal to 3.\n If `axis` is not given, it defaults to the first axis found with the length 3.\n\n Args:\n x (Tensor): The first input tensor.\n y (Tensor): The second input tensor.\n axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor. A Tensor with same data type as `x`.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([[1.0, 1.0, 1.0],\n [2.0, 2.0, 2.0],\n [3.0, 3.0, 3.0]])\n y = paddle.to_tensor([[1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0]])\n\n z1 = paddle.cross(x, y)\n # [[-1. -1. -1.]\n # [ 2. 2. 2.]\n # [-1. -1. -1.]]\n\n z2 = paddle.cross(x, y, axis=1)\n # [[0. 0. 0.]\n # [0. 0. 0.]\n # [0. 0. 0.]]\n '
if in_dygraph_mode():
return _C_ops.final_state_cross(x, y, axis)
elif _in_legacy_dygraph():
if (axis is not None):
return _C_ops.cross(x, y, 'dim', axis)
else:
return _C_ops.cross(x, y)
else:
helper = LayerHelper('cross', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
attrs = dict()
attrs['dim'] = axis
helper.append_op(type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs)
return out |
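# A sketch of an explicit row-wise cross product with the function above;
# axis=1 is passed explicitly because for a 3x3 input the default would pick
# axis 0, the first axis of length 3 (assumes paddle is installed).
import paddle

x = paddle.to_tensor([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
y = paddle.to_tensor([[0., 1., 0.], [0., 0., 1.], [1., 0., 0.]])
print(paddle.cross(x, y, axis=1))  # rows: e1 x e2 = e3, e2 x e3 = e1, e3 x e1 = e2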
def cholesky(x, upper=False, name=None):
'\n Computes the Cholesky decomposition of one symmetric positive-definite\n matrix or batches of symmetric positive-definite matrices.\n\n If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` ,\n and the returned matrix :math:`U` is upper-triangular. Otherwise, the\n decomposition has the form :math:`A = LL^{T}` , and the returned matrix\n :math:`L` is lower-triangular.\n\n Args:\n x (Tensor): The input tensor. Its shape should be `[*, M, M]`,\n where * is zero or more batch dimensions, and matrices on the\n inner-most 2 dimensions all should be symmetric positive-definite.\n Its data type should be float32 or float64.\n upper (bool): The flag indicating whether to return upper or lower\n triangular matrices. Default: False.\n\n Returns:\n Tensor: A Tensor with same shape and data type as `x`. It represents \\\n triangular matrices generated by Cholesky decomposition.\n\n Examples:\n .. code-block:: python\n\n import paddle\n import numpy as np\n\n a = np.random.rand(3, 3)\n a_t = np.transpose(a, [1, 0])\n x_data = np.matmul(a, a_t) + 1e-03\n x = paddle.to_tensor(x_data)\n out = paddle.linalg.cholesky(x, upper=False)\n print(out)\n # [[1.190523 0. 0. ]\n # [0.9906703 0.27676893 0. ]\n # [1.25450498 0.05600871 0.06400121]]\n\n '
if paddle.in_dynamic_mode():
return _C_ops.cholesky(x, 'upper', upper)
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky')
check_type(upper, 'upper', bool, 'cholesky')
helper = LayerHelper('cholesky', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='cholesky', inputs={'X': [x]}, outputs={'Out': out}, attrs={'upper': upper})
return out | -1,156,870,987,024,628,700 | Computes the Cholesky decomposition of one symmetric positive-definite
matrix or batches of symmetric positive-definite matrices.
If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` ,
and the returned matrix :math:`U` is upper-triangular. Otherwise, the
decomposition has the form :math:`A = LL^{T}` , and the returned matrix
:math:`L` is lower-triangular.
Args:
x (Tensor): The input tensor. Its shape should be `[*, M, M]`,
where * is zero or more batch dimensions, and matrices on the
inner-most 2 dimensions all should be symmetric positive-definite.
Its data type should be float32 or float64.
upper (bool): The flag indicating whether to return upper or lower
triangular matrices. Default: False.
Returns:
Tensor: A Tensor with same shape and data type as `x`. It represents \
triangular matrices generated by Cholesky decomposition.
Examples:
.. code-block:: python
import paddle
import numpy as np
a = np.random.rand(3, 3)
a_t = np.transpose(a, [1, 0])
x_data = np.matmul(a, a_t) + 1e-03
x = paddle.to_tensor(x_data)
out = paddle.linalg.cholesky(x, upper=False)
print(out)
# [[1.190523 0. 0. ]
# [0.9906703 0.27676893 0. ]
# [1.25450498 0.05600871 0.06400121]] | python/paddle/tensor/linalg.py | cholesky | DevilCarp/Paddle | python | def cholesky(x, upper=False, name=None):
'\n Computes the Cholesky decomposition of one symmetric positive-definite\n matrix or batches of symmetric positive-definite matrices.\n\n If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` ,\n and the returned matrix :math:`U` is upper-triangular. Otherwise, the\n decomposition has the form :math:`A = LL^{T}` , and the returned matrix\n :math:`L` is lower-triangular.\n\n Args:\n x (Tensor): The input tensor. Its shape should be `[*, M, M]`,\n where * is zero or more batch dimensions, and matrices on the\n inner-most 2 dimensions all should be symmetric positive-definite.\n Its data type should be float32 or float64.\n upper (bool): The flag indicating whether to return upper or lower\n triangular matrices. Default: False.\n\n Returns:\n Tensor: A Tensor with same shape and data type as `x`. It represents \\\n triangular matrices generated by Cholesky decomposition.\n\n Examples:\n .. code-block:: python\n\n import paddle\n import numpy as np\n\n a = np.random.rand(3, 3)\n a_t = np.transpose(a, [1, 0])\n x_data = np.matmul(a, a_t) + 1e-03\n x = paddle.to_tensor(x_data)\n out = paddle.linalg.cholesky(x, upper=False)\n print(out)\n # [[1.190523 0. 0. ]\n # [0.9906703 0.27676893 0. ]\n # [1.25450498 0.05600871 0.06400121]]\n\n '
if paddle.in_dynamic_mode():
return _C_ops.cholesky(x, 'upper', upper)
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky')
check_type(upper, 'upper', bool, 'cholesky')
helper = LayerHelper('cholesky', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='cholesky', inputs={'X': [x]}, outputs={'Out': out}, attrs={'upper': upper})
return out |
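# A sketch verifying the factorization computed above: with upper=False the
# factor L satisfies L @ L^T == x up to floating-point tolerance (assumes
# paddle is installed).
import paddle

x = paddle.to_tensor([[4., 2.], [2., 3.]])  # symmetric positive-definite
L = paddle.linalg.cholesky(x, upper=False)  # [[2., 0.], [1., sqrt(2)]]
print(paddle.matmul(L, L.t()))              # reconstructs x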
def matrix_rank(x, tol=None, hermitian=False, name=None):
'\n Computes the rank of a matrix.\n\n The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False,\n or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True.\n\n Args:\n x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch\n of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64.\n tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest\n singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed\n with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch.\n hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian,\n enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use\n the lower triangular of the matrix to compute.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: Rank of tensor x.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n a = paddle.eye(10)\n b = paddle.linalg.matrix_rank(a)\n print(b)\n # b = [10]\n\n c = paddle.ones(shape=[3, 4, 5, 5])\n d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True)\n print(d)\n # d = [[1, 1, 1, 1],\n # [1, 1, 1, 1],\n # [1, 1, 1, 1]]\n\n '
if paddle.in_dynamic_mode():
if (tol is None):
tol_tensor = None
tol_attr = 0.0
use_default_tol = True
elif isinstance(tol, Variable):
if (tol.dtype != x.dtype):
tol_tensor = cast(tol, x.dtype)
else:
tol_tensor = tol
tol_attr = 0.0
use_default_tol = False
else:
tol_tensor = None
tol_attr = float(tol)
use_default_tol = False
return _C_ops.matrix_rank(x, tol_tensor, 'tol', tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol)
inputs = {}
attrs = {}
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank')
inputs['X'] = x
if (tol is None):
attrs['use_default_tol'] = True
elif isinstance(tol, Variable):
check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank')
attrs['use_default_tol'] = False
if (tol.dtype != x.dtype):
inputs['TolTensor'] = cast(tol, x.dtype)
else:
inputs['TolTensor'] = tol
else:
check_type(tol, 'tol', float, 'matrix_rank')
attrs['use_default_tol'] = False
attrs['tol'] = tol
check_type(hermitian, 'hermitian', bool, 'matrix_rank')
attrs['hermitian'] = hermitian
helper = LayerHelper('matrix_rank', **locals())
out = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out | 1,160,058,177,991,084,800 | Computes the rank of a matrix.
The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False,
or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True.
Args:
x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch
of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64.
tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest
singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed
with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch.
hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian,
enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, only
the lower triangular part of the matrix is used in the computation.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: Rank of tensor x.
Examples:
.. code-block:: python
import paddle
a = paddle.eye(10)
b = paddle.linalg.matrix_rank(a)
print(b)
# b = [10]
c = paddle.ones(shape=[3, 4, 5, 5])
d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True)
print(d)
# d = [[1, 1, 1, 1],
# [1, 1, 1, 1],
# [1, 1, 1, 1]] | python/paddle/tensor/linalg.py | matrix_rank | DevilCarp/Paddle | python | def matrix_rank(x, tol=None, hermitian=False, name=None):
'\n Computes the rank of a matrix.\n\n The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False,\n or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True.\n\n Args:\n x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch\n of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64.\n tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest\n singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed\n with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch.\n hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian,\n enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use\n the lower triangular of the matrix to compute.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: Rank of tensor x.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n a = paddle.eye(10)\n b = paddle.linalg.matrix_rank(a)\n print(b)\n # b = [10]\n\n c = paddle.ones(shape=[3, 4, 5, 5])\n d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True)\n print(d)\n # d = [[1, 1, 1, 1],\n # [1, 1, 1, 1],\n # [1, 1, 1, 1]]\n\n '
if paddle.in_dynamic_mode():
if (tol is None):
tol_tensor = None
tol_attr = 0.0
use_default_tol = True
elif isinstance(tol, Variable):
if (tol.dtype != x.dtype):
tol_tensor = cast(tol, x.dtype)
else:
tol_tensor = tol
tol_attr = 0.0
use_default_tol = False
else:
tol_tensor = None
tol_attr = float(tol)
use_default_tol = False
return _C_ops.matrix_rank(x, tol_tensor, 'tol', tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol)
inputs = {}
attrs = {}
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank')
inputs['X'] = x
if (tol is None):
attrs['use_default_tol'] = True
elif isinstance(tol, Variable):
check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank')
attrs['use_default_tol'] = False
if (tol.dtype != x.dtype):
inputs['TolTensor'] = cast(tol, x.dtype)
else:
inputs['TolTensor'] = tol
else:
check_type(tol, 'tol', float, 'matrix_rank')
attrs['use_default_tol'] = False
attrs['tol'] = tol
check_type(hermitian, 'hermitian', bool, 'matrix_rank')
attrs['hermitian'] = hermitian
helper = LayerHelper('matrix_rank', **locals())
out = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out |
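A quick sanity check of the default-tolerance formula above (a minimal sketch; assumes NumPy is available for the float32 epsilon):

.. code-block:: python

    import numpy as np
    import paddle

    x = paddle.rand([4, 6])
    s = paddle.linalg.svd(x)[1]                   # singular values of x
    tol = float(s.max()) * max(x.shape) * np.finfo(np.float32).eps
    print(paddle.linalg.matrix_rank(x))           # rank with the default tolerance
    print(paddle.linalg.matrix_rank(x, tol=tol))  # should agree with the default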
def bmm(x, y, name=None):
'\n Applies batched matrix multiplication to two tensors.\n\n Both of the two input tensors must be three-dementional and share the same batch size.\n\n if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor.\n\n Args:\n x (Tensor): The input Tensor.\n y (Tensor): The input Tensor.\n name(str|None): A name for this layer(optional). If set None, the layer\n will be named automatically.\n\n Returns:\n Tensor: The product Tensor.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n # In imperative mode:\n # size x: (2, 2, 3) and y: (2, 3, 2)\n x = paddle.to_tensor([[[1.0, 1.0, 1.0],\n [2.0, 2.0, 2.0]],\n [[3.0, 3.0, 3.0],\n [4.0, 4.0, 4.0]]])\n y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]],\n [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]])\n out = paddle.bmm(x, y)\n #output size: (2, 2, 2)\n #output value:\n #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]]\n out_np = out.numpy()\n\n '
x_shape = x.shape
y_shape = y.shape
if (not (len(x_shape) == len(y_shape) == 3)):
raise ValueError("x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}".format(x_shape, y_shape))
if (x_shape[2] != y_shape[1]):
raise ValueError("x's width must be equal with y's height. But received x's shape: {}, y's shape: {}".format(x_shape, y_shape))
if (x_shape[0] != y_shape[0]):
raise ValueError("x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}".format(x_shape, y_shape))
if paddle.in_dynamic_mode():
return _C_ops.bmm(x, y)
helper = LayerHelper('bmm', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out})
return out | -4,711,183,802,654,545,000 | Applies batched matrix multiplication to two tensors.
Both of the two input tensors must be three-dimensional and share the same batch size.
If x is a (b, m, k) tensor and y is a (b, k, n) tensor, the output will be a (b, m, n) tensor.
Args:
x (Tensor): The input Tensor.
y (Tensor): The input Tensor.
name (str|None): A name for this layer (optional). If set to None, the layer
will be named automatically.
Returns:
Tensor: The product Tensor.
Examples:
.. code-block:: python
import paddle
# In imperative mode:
# size x: (2, 2, 3) and y: (2, 3, 2)
x = paddle.to_tensor([[[1.0, 1.0, 1.0],
[2.0, 2.0, 2.0]],
[[3.0, 3.0, 3.0],
[4.0, 4.0, 4.0]]])
y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]],
[[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]])
out = paddle.bmm(x, y)
#output size: (2, 2, 2)
#output value:
#[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]]
out_np = out.numpy() | python/paddle/tensor/linalg.py | bmm | DevilCarp/Paddle | python | def bmm(x, y, name=None):
'\n Applies batched matrix multiplication to two tensors.\n\n Both of the two input tensors must be three-dementional and share the same batch size.\n\n if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor.\n\n Args:\n x (Tensor): The input Tensor.\n y (Tensor): The input Tensor.\n name(str|None): A name for this layer(optional). If set None, the layer\n will be named automatically.\n\n Returns:\n Tensor: The product Tensor.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n # In imperative mode:\n # size x: (2, 2, 3) and y: (2, 3, 2)\n x = paddle.to_tensor([[[1.0, 1.0, 1.0],\n [2.0, 2.0, 2.0]],\n [[3.0, 3.0, 3.0],\n [4.0, 4.0, 4.0]]])\n y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]],\n [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]])\n out = paddle.bmm(x, y)\n #output size: (2, 2, 2)\n #output value:\n #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]]\n out_np = out.numpy()\n\n '
x_shape = x.shape
y_shape = y.shape
if (not (len(x_shape) == len(y_shape) == 3)):
raise ValueError("x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}".format(x_shape, y_shape))
if (x_shape[2] != y_shape[1]):
raise ValueError("x's width must be equal with y's height. But received x's shape: {}, y's shape: {}".format(x_shape, y_shape))
if (x_shape[0] != y_shape[0]):
raise ValueError("x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}".format(x_shape, y_shape))
if paddle.in_dynamic_mode():
return _C_ops.bmm(x, y)
helper = LayerHelper('bmm', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out})
return out |
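For 3-D inputs with matching batch sizes, bmm should agree with the more general paddle.matmul:

.. code-block:: python

    import paddle

    x = paddle.rand([2, 2, 3])
    y = paddle.rand([2, 3, 2])
    print(paddle.allclose(paddle.bmm(x, y), paddle.matmul(x, y)))  # True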
def histogram(input, bins=100, min=0, max=0, name=None):
'\n Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max.\n If min and max are both zero, the minimum and maximum values of the data are used.\n\n Args:\n input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . The data type of the input Tensor\n should be float32, float64, int32, int64.\n bins (int): number of histogram bins\n min (int): lower end of the range (inclusive)\n max (int): upper end of the range (inclusive)\n\n Returns:\n Tensor: data type is int64, shape is (nbins,).\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n inputs = paddle.to_tensor([1, 2, 1])\n result = paddle.histogram(inputs, bins=4, min=0, max=3)\n print(result) # [0, 2, 1, 0]\n '
if paddle.in_dynamic_mode():
return _C_ops.histogram(input, 'bins', bins, 'min', min, 'max', max)
helper = LayerHelper('histogram', **locals())
check_variable_and_dtype(input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram')
out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
helper.append_op(type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max})
return out | 8,785,959,902,747,494,000 | Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max.
If min and max are both zero, the minimum and maximum values of the data are used.
Args:
input (Tensor): A Tensor (or LoDTensor) with shape :math:`[N_1, N_2, ..., N_k]`. The data type of the input Tensor
should be float32, float64, int32 or int64.
bins (int): number of histogram bins
min (int): lower end of the range (inclusive)
max (int): upper end of the range (inclusive)
Returns:
Tensor: data type is int64, shape is (nbins,).
Examples:
.. code-block:: python
import paddle
inputs = paddle.to_tensor([1, 2, 1])
result = paddle.histogram(inputs, bins=4, min=0, max=3)
print(result) # [0, 2, 1, 0] | python/paddle/tensor/linalg.py | histogram | DevilCarp/Paddle | python | def histogram(input, bins=100, min=0, max=0, name=None):
'\n Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max.\n If min and max are both zero, the minimum and maximum values of the data are used.\n\n Args:\n input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . The data type of the input Tensor\n should be float32, float64, int32, int64.\n bins (int): number of histogram bins\n min (int): lower end of the range (inclusive)\n max (int): upper end of the range (inclusive)\n\n Returns:\n Tensor: data type is int64, shape is (nbins,).\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n inputs = paddle.to_tensor([1, 2, 1])\n result = paddle.histogram(inputs, bins=4, min=0, max=3)\n print(result) # [0, 2, 1, 0]\n '
if paddle.in_dynamic_mode():
return _C_ops.histogram(input, 'bins', bins, 'min', min, 'max', max)
helper = LayerHelper('histogram', **locals())
check_variable_and_dtype(input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram')
out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
helper.append_op(type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max})
return out |
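The result should match NumPy's equal-width histogram over the same range (a minimal cross-check; handling of values that fall exactly on a bin edge may differ slightly between the two libraries):

.. code-block:: python

    import numpy as np
    import paddle

    data = np.array([0.0, 0.5, 1.5, 2.0, 3.0], dtype="float32")
    pd_hist = paddle.histogram(paddle.to_tensor(data), bins=4, min=0, max=3)
    np_hist, _ = np.histogram(data, bins=4, range=(0, 3))
    print(pd_hist.numpy())  # expected [2, 0, 2, 1]
    print(np_hist)          # [2, 0, 2, 1]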
def bincount(x, weights=None, minlength=0, name=None):
'\n Computes frequency of each value in the input tensor. \n\n Args:\n x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor.\n weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None.\n minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0.\n name(str, optional): The default value is None. Normally there is no need for user to set this\n property. For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: The tensor of frequency.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([1, 2, 1, 4, 5])\n result1 = paddle.bincount(x)\n print(result1) # [0, 2, 1, 0, 1, 1]\n\n w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5])\n result2 = paddle.bincount(x, weights=w)\n print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000]\n '
if (x.dtype not in [paddle.int32, paddle.int64]):
raise TypeError('Elements in Input(x) should all be integers')
if paddle.in_dynamic_mode():
return _C_ops.bincount(x, weights, 'minlength', minlength)
helper = LayerHelper('bincount', **locals())
check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount')
if (weights is not None):
check_variable_and_dtype(weights, 'Weights', ['int32', 'int64', 'float32', 'float64'], 'bincount')
out = helper.create_variable_for_type_inference(dtype=weights.dtype)
else:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='bincount', inputs={'X': x, 'Weights': weights}, outputs={'Out': out}, attrs={'minlength': minlength})
return out | -7,411,482,120,546,404,000 | Computes frequency of each value in the input tensor.
Args:
x (Tensor): A Tensor of non-negative integers. Should be a 1-D tensor.
weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None.
minlength (int, optional): Minimum number of bins. Should be a non-negative integer. Default is 0.
name(str, optional): The default value is None. Normally there is no need for user to set this
property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: The tensor of frequency.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 1, 4, 5])
result1 = paddle.bincount(x)
print(result1) # [0, 2, 1, 0, 1, 1]
w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5])
result2 = paddle.bincount(x, weights=w)
print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] | python/paddle/tensor/linalg.py | bincount | DevilCarp/Paddle | python | def bincount(x, weights=None, minlength=0, name=None):
'\n Computes frequency of each value in the input tensor. \n\n Args:\n x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor.\n weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None.\n minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0.\n name(str, optional): The default value is None. Normally there is no need for user to set this\n property. For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: The tensor of frequency.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([1, 2, 1, 4, 5])\n result1 = paddle.bincount(x)\n print(result1) # [0, 2, 1, 0, 1, 1]\n\n w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5])\n result2 = paddle.bincount(x, weights=w)\n print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000]\n '
if (x.dtype not in [paddle.int32, paddle.int64]):
raise TypeError('Elements in Input(x) should all be integers')
if paddle.in_dynamic_mode():
return _C_ops.bincount(x, weights, 'minlength', minlength)
helper = LayerHelper('bincount', **locals())
check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount')
if (weights is not None):
check_variable_and_dtype(weights, 'Weights', ['int32', 'int64', 'float32', 'float64'], 'bincount')
out = helper.create_variable_for_type_inference(dtype=weights.dtype)
else:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='bincount', inputs={'X': x, 'Weights': weights}, outputs={'Out': out}, attrs={'minlength': minlength})
return out |
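minlength only pads the tail of the result with empty bins; a minimal illustration:

.. code-block:: python

    import paddle

    x = paddle.to_tensor([1, 2, 1])
    print(paddle.bincount(x).numpy())               # [0, 2, 1]
    print(paddle.bincount(x, minlength=6).numpy())  # [0, 2, 1, 0, 0, 0]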
def mv(x, vec, name=None):
'\n Performs a matrix-vector product of the matrix x and the vector vec.\n\n Args:\n x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x\n should be one of float32, float64.\n vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x\n should be one of float32, float64.\n name(str, optional): The default value is None. Normally there is no need for user to set this\n property. For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: The tensor which is producted by x and vec.\n\n Examples:\n .. code-block:: python\n\n # x: [M, N], vec: [N]\n # paddle.mv(x, vec) # out: [M]\n\n import numpy as np\n import paddle\n\n x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64")\n x = paddle.to_tensor(x_data)\n vec_data = np.array([3, 5, 1])\n vec = paddle.to_tensor(vec_data).astype("float64")\n out = paddle.mv(x, vec)\n '
if in_dygraph_mode():
return _C_ops.final_state_mv(x, vec)
elif _in_legacy_dygraph():
out = _C_ops.mv(x, vec)
return out
else:
def __check_input(x, vec):
var_names = {'x': x, 'vec': vec}
for (name, val) in var_names.items():
check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv')
x_shape = list(x.shape)
vec_shape = list(vec.shape)
if (len(x_shape) != 2):
raise ValueError("x should be 2-dimensional. But received x's dimention: {}".format(x_shape))
if (len(vec_shape) != 1):
raise ValueError("vec should be 1-dimensional. But received vec's dimention: {}".format(vec_shape))
__check_input(x, vec)
helper = LayerHelper('mv', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out})
return out | 7,252,601,793,221,310,000 | Performs a matrix-vector product of the matrix x and the vector vec.
Args:
x (Tensor): A tensor with shape :math:`[M, N]`. The data type of the input Tensor x
should be one of float32, float64.
vec (Tensor): A tensor with shape :math:`[N]`. The data type of the input Tensor vec
should be one of float32, float64.
name(str, optional): The default value is None. Normally there is no need for user to set this
property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: The tensor produced by multiplying x and vec.
Examples:
.. code-block:: python
# x: [M, N], vec: [N]
# paddle.mv(x, vec) # out: [M]
import numpy as np
import paddle
x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64")
x = paddle.to_tensor(x_data)
vec_data = np.array([3, 5, 1])
vec = paddle.to_tensor(vec_data).astype("float64")
out = paddle.mv(x, vec) | python/paddle/tensor/linalg.py | mv | DevilCarp/Paddle | python | def mv(x, vec, name=None):
'\n Performs a matrix-vector product of the matrix x and the vector vec.\n\n Args:\n x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x\n should be one of float32, float64.\n vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x\n should be one of float32, float64.\n name(str, optional): The default value is None. Normally there is no need for user to set this\n property. For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: The tensor which is producted by x and vec.\n\n Examples:\n .. code-block:: python\n\n # x: [M, N], vec: [N]\n # paddle.mv(x, vec) # out: [M]\n\n import numpy as np\n import paddle\n\n x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64")\n x = paddle.to_tensor(x_data)\n vec_data = np.array([3, 5, 1])\n vec = paddle.to_tensor(vec_data).astype("float64")\n out = paddle.mv(x, vec)\n '
if in_dygraph_mode():
return _C_ops.final_state_mv(x, vec)
elif _in_legacy_dygraph():
out = _C_ops.mv(x, vec)
return out
else:
def __check_input(x, vec):
var_names = {'x': x, 'vec': vec}
for (name, val) in var_names.items():
check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv')
x_shape = list(x.shape)
vec_shape = list(vec.shape)
if (len(x_shape) != 2):
raise ValueError("x should be 2-dimensional. But received x's dimention: {}".format(x_shape))
if (len(vec_shape) != 1):
raise ValueError("vec should be 1-dimensional. But received vec's dimention: {}".format(vec_shape))
__check_input(x, vec)
helper = LayerHelper('mv', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out})
return out |
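mv is the matrix-vector special case of paddle.matmul, so the two should agree:

.. code-block:: python

    import paddle

    x = paddle.rand([2, 3])
    vec = paddle.rand([3])
    print(paddle.allclose(paddle.mv(x, vec), paddle.matmul(x, vec)))  # True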
def det(x, name=None):
'\n Calculates determinant value of a square matrix or batches of square matrices.\n Args:\n x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size\n `(*, n, n)` where `*` is one or more batch dimensions.\n Returns:\n y (Tensor):the determinant value of a square matrix or batches of square matrices.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n x = paddle.randn([3,3,3])\n\n A = paddle.linalg.det(x)\n\n print(A)\n\n # [ 0.02547996, 2.52317095, -6.15900707])\n\n\n '
if paddle.in_dynamic_mode():
return _C_ops.determinant(x)
check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det')
input_shape = list(x.shape)
assert (len(input_shape) >= 2), ("The x must be at least 2-dimensional, but received Input x's dimensions: %s.\n" % len(input_shape))
assert (input_shape[(- 1)] == input_shape[(- 2)]), ('Expect square input, but received %s by %s matrix.\n' % (input_shape[(- 2)], input_shape[(- 1)]))
helper = LayerHelper('determinant', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]})
return out | 4,513,317,354,980,321,000 | Calculates determinant value of a square matrix or batches of square matrices.
Args:
x (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size
`(*, n, n)` where `*` is one or more batch dimensions.
Returns:
y (Tensor): the determinant value of a square matrix or batches of square matrices.
Examples:
.. code-block:: python
import paddle
x = paddle.randn([3,3,3])
A = paddle.linalg.det(x)
print(A)
# [ 0.02547996, 2.52317095, -6.15900707]) | python/paddle/tensor/linalg.py | det | DevilCarp/Paddle | python | def det(x, name=None):
'\n Calculates determinant value of a square matrix or batches of square matrices.\n Args:\n x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size\n `(*, n, n)` where `*` is one or more batch dimensions.\n Returns:\n y (Tensor):the determinant value of a square matrix or batches of square matrices.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n x = paddle.randn([3,3,3])\n\n A = paddle.linalg.det(x)\n\n print(A)\n\n # [ 0.02547996, 2.52317095, -6.15900707])\n\n\n '
if paddle.in_dynamic_mode():
return _C_ops.determinant(x)
check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det')
input_shape = list(x.shape)
assert (len(input_shape) >= 2), ("The x must be at least 2-dimensional, but received Input x's dimensions: %s.\n" % len(input_shape))
assert (input_shape[(- 1)] == input_shape[(- 2)]), ('Expect square input, but received %s by %s matrix.\n' % (input_shape[(- 2)], input_shape[(- 1)]))
helper = LayerHelper('determinant', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]})
return out |
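For a 2x2 matrix the result reduces to the familiar ad - bc formula:

.. code-block:: python

    import paddle

    a = paddle.to_tensor([[4.0, 7.0], [2.0, 6.0]])
    # det([[a, b], [c, d]]) = a*d - b*c = 4*6 - 7*2 = 10
    print(paddle.linalg.det(a))  # 10.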
def slogdet(x, name=None):
"\n Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant.\n The determinant can be computed with ``sign * exp(logabsdet)\n\n Supports input of float, double\n\n Note that for matrices that have zero determinant, this returns ``(0, -inf)``\n Args:\n x (Tensor): the batch of matrices of size :math:`(*, n, n)`\n where math:`*` is one or more batch dimensions.\n\n Returns:\n y (Tensor): A tensor containing the sign of the determinant and the natural logarithm\n of the absolute value of determinant, respectively.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n x = paddle.randn([3,3,3])\n\n A = paddle.linalg.slogdet(x)\n\n print(A)\n\n # [[ 1. , 1. , -1. ],\n # [-0.98610914, -0.43010661, -0.10872950]])\n\n "
if paddle.in_dynamic_mode():
return _C_ops.slogdeterminant(x)
check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet')
input_shape = list(x.shape)
assert (len(input_shape) >= 2), ("The x must be at least 2-dimensional, but received Input x's dimensions: %s.\n" % len(input_shape))
assert (input_shape[(- 1)] == input_shape[(- 2)]), ('Expect square input, but received %s by %s matrix.\n' % (input_shape[(- 2)], input_shape[(- 1)]))
helper = LayerHelper('slogdeterminant', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]})
return out | 9,101,923,281,703,332,000 | Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant.
The determinant can be computed with ``sign * exp(logabsdet)``.
Supports inputs of float and double.
Note that for matrices that have zero determinant, this returns ``(0, -inf)``
Args:
x (Tensor): the batch of matrices of size :math:`(*, n, n)`
where :math:`*` is one or more batch dimensions.
Returns:
y (Tensor): A tensor containing the sign of the determinant and the natural logarithm
of the absolute value of the determinant, respectively.
Examples:
.. code-block:: python
import paddle
x = paddle.randn([3,3,3])
A = paddle.linalg.slogdet(x)
print(A)
# [[ 1. , 1. , -1. ],
# [-0.98610914, -0.43010661, -0.10872950]]) | python/paddle/tensor/linalg.py | slogdet | DevilCarp/Paddle | python | def slogdet(x, name=None):
"\n Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant.\n The determinant can be computed with ``sign * exp(logabsdet)\n\n Supports input of float, double\n\n Note that for matrices that have zero determinant, this returns ``(0, -inf)``\n Args:\n x (Tensor): the batch of matrices of size :math:`(*, n, n)`\n where math:`*` is one or more batch dimensions.\n\n Returns:\n y (Tensor): A tensor containing the sign of the determinant and the natural logarithm\n of the absolute value of determinant, respectively.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n x = paddle.randn([3,3,3])\n\n A = paddle.linalg.slogdet(x)\n\n print(A)\n\n # [[ 1. , 1. , -1. ],\n # [-0.98610914, -0.43010661, -0.10872950]])\n\n "
if paddle.in_dynamic_mode():
return _C_ops.slogdeterminant(x)
check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet')
input_shape = list(x.shape)
assert (len(input_shape) >= 2), ("The x must be at least 2-dimensional, but received Input x's dimensions: %s.\n" % len(input_shape))
assert (input_shape[(- 1)] == input_shape[(- 2)]), ('Expect square input, but received %s by %s matrix.\n' % (input_shape[(- 2)], input_shape[(- 1)]))
helper = LayerHelper('slogdeterminant', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]})
return out |
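The two outputs recombine into the plain determinant (a minimal sketch; assumes the stacked [sign, logabsdet] layout shown in the example above):

.. code-block:: python

    import paddle

    x = paddle.to_tensor([[4.0, 7.0], [2.0, 6.0]])
    out = paddle.linalg.slogdet(x)      # out[0] is the sign, out[1] is logabsdet
    print(out[0] * paddle.exp(out[1]))  # 10., same as paddle.linalg.det(x)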
def svd(x, full_matrices=False, name=None):
"\n Computes the singular value decomposition of one matrix or a batch of regular matrices.\n\n Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfies:\n\n .. math::\n X = U * diag(S) * VT\n\n Args:\n x (Tensor): The input tensor. Its shape should be `[..., N, M]`,\n where `...` is zero or more batch dimensions. N and M can be arbitraty\n positive number. Note that if x is sigular matrices, the grad is numerical\n instable. The data type of x should be float32 or float64.\n full_matrices (bool): A flag to control the behavor of svd.\n If full_matrices = True, svd op will compute full U and V matrics,\n which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N).\n If full_matrices = False, svd op will use a economic method to store U and V.\n which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N).\n name (str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singlar value vectors of matrics with shape `[..., K]`\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64')\n x = x.reshape([3, 2])\n u, s, vh = paddle.linalg.svd(x)\n print (u)\n #U = [[ 0.27364809, -0.21695147 ],\n # [ 0.37892198, -0.87112408 ],\n # [ 0.8840446 , 0.44053933 ]]\n\n print (s)\n #S = [8.14753743, 0.78589688]\n print (vh)\n #VT= [[ 0.51411221, 0.85772294],\n # [ 0.85772294, -0.51411221]]\n\n # one can verify : U * S * VT == X\n # U * UH == I\n # V * VH == I\n "
if paddle.in_dynamic_mode():
return _C_ops.svd(x, 'full_matrices', full_matrices)
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd')
check_type(full_matrices, 'full_matrices', bool, 'svd')
helper = LayerHelper('svd', **locals())
u = helper.create_variable_for_type_inference(dtype=x.dtype)
vh = helper.create_variable_for_type_inference(dtype=x.dtype)
s = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = dict()
attrs['full_matrices'] = full_matrices
helper.append_op(type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs)
return (u, s, vh) | 7,655,087,064,594,550,000 | Computes the singular value decomposition of one matrix or a batch of regular matrices.
Let :math:`X` be the input matrix or a batch of input matrices; the output satisfies:
.. math::
X = U * diag(S) * VT
Args:
x (Tensor): The input tensor. Its shape should be `[..., N, M]`,
where `...` is zero or more batch dimensions. N and M can be arbitrary
positive numbers. Note that if x contains singular matrices, the gradient is numerically
unstable. The data type of x should be float32 or float64.
full_matrices (bool): A flag to control the behavior of svd.
If full_matrices = True, svd op will compute full U and V matrices,
which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N).
If full_matrices = False, svd op will use an economic method to store U and V,
which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N).
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the vector of singular values of the matrices, with shape `[..., K]`.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64')
x = x.reshape([3, 2])
u, s, vh = paddle.linalg.svd(x)
print (u)
#U = [[ 0.27364809, -0.21695147 ],
# [ 0.37892198, -0.87112408 ],
# [ 0.8840446 , 0.44053933 ]]
print (s)
#S = [8.14753743, 0.78589688]
print (vh)
#VT= [[ 0.51411221, 0.85772294],
# [ 0.85772294, -0.51411221]]
# one can verify : U * S * VT == X
# U * UH == I
# V * VH == I | python/paddle/tensor/linalg.py | svd | DevilCarp/Paddle | python | def svd(x, full_matrices=False, name=None):
"\n Computes the singular value decomposition of one matrix or a batch of regular matrices.\n\n Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfies:\n\n .. math::\n X = U * diag(S) * VT\n\n Args:\n x (Tensor): The input tensor. Its shape should be `[..., N, M]`,\n where `...` is zero or more batch dimensions. N and M can be arbitraty\n positive number. Note that if x is sigular matrices, the grad is numerical\n instable. The data type of x should be float32 or float64.\n full_matrices (bool): A flag to control the behavor of svd.\n If full_matrices = True, svd op will compute full U and V matrics,\n which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N).\n If full_matrices = False, svd op will use a economic method to store U and V.\n which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N).\n name (str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singlar value vectors of matrics with shape `[..., K]`\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64')\n x = x.reshape([3, 2])\n u, s, vh = paddle.linalg.svd(x)\n print (u)\n #U = [[ 0.27364809, -0.21695147 ],\n # [ 0.37892198, -0.87112408 ],\n # [ 0.8840446 , 0.44053933 ]]\n\n print (s)\n #S = [8.14753743, 0.78589688]\n print (vh)\n #VT= [[ 0.51411221, 0.85772294],\n # [ 0.85772294, -0.51411221]]\n\n # one can verify : U * S * VT == X\n # U * UH == I\n # V * VH == I\n "
if paddle.in_dynamic_mode():
return _C_ops.svd(x, 'full_matrices', full_matrices)
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd')
check_type(full_matrices, 'full_matrices', bool, 'svd')
helper = LayerHelper('svd', **locals())
u = helper.create_variable_for_type_inference(dtype=x.dtype)
vh = helper.create_variable_for_type_inference(dtype=x.dtype)
s = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = dict()
attrs['full_matrices'] = full_matrices
helper.append_op(type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs)
return (u, s, vh) |
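The factors should reconstruct the input as U @ diag(S) @ VH:

.. code-block:: python

    import paddle

    x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]], dtype="float64")
    u, s, vh = paddle.linalg.svd(x)
    # scaling the columns of U by S is the same as U @ diag(S)
    print(paddle.allclose(paddle.matmul(u * s, vh), x))  # True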
def matrix_power(x, n, name=None):
"\n Computes the n-th power of a square matrix or a batch of square matrices.\n\n Let :math:`X` be a sqaure matrix or a batch of square matrices, :math:`n` be\n an exponent, the equation should be:\n\n .. math::\n Out = X ^ {n}\n\n Specifically,\n\n - If `n > 0`, it returns the matrix or a batch of matrices raised to the power\n of `n`.\n\n - If `n = 0`, it returns the identity matrix or a batch of identity matrices.\n\n - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to\n the power of `abs(n)`.\n\n Args:\n x (Tensor): A square matrix or a batch of square matrices to be raised\n to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or\n more batch dimensions. Its data type should be float32 or float64.\n n (int): The exponent. It can be any positive, negative integer or zero.\n name (str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its\n data type should be the same as that of `x`.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([[1, 2, 3],\n [1, 4, 9],\n [1, 8, 27]], dtype='float64')\n print(paddle.linalg.matrix_power(x, 2))\n # [[6. , 34. , 102.],\n # [14. , 90. , 282.],\n # [36. , 250., 804.]]\n\n print(paddle.linalg.matrix_power(x, 0))\n # [[1., 0., 0.],\n # [0., 1., 0.],\n # [0., 0., 1.]]\n\n print(paddle.linalg.matrix_power(x, -2))\n # [[ 12.91666667, -12.75000000, 2.83333333 ],\n # [-7.66666667 , 8. , -1.83333333 ],\n # [ 1.80555556 , -1.91666667 , 0.44444444 ]]\n "
if paddle.in_dynamic_mode():
return _C_ops.matrix_power(x, 'n', n)
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power')
check_type(n, 'n', int, 'matrix_power')
helper = LayerHelper('matrix_power', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n})
return out | -4,744,617,923,970,205,000 | Computes the n-th power of a square matrix or a batch of square matrices.
Let :math:`X` be a square matrix or a batch of square matrices and :math:`n` be
an exponent; the equation is:
.. math::
Out = X ^ {n}
Specifically,
- If `n > 0`, it returns the matrix or a batch of matrices raised to the power
of `n`.
- If `n = 0`, it returns the identity matrix or a batch of identity matrices.
- If `n < 0`, it returns the inverse of each matrix (if invertible) raised to
the power of `abs(n)`.
Args:
x (Tensor): A square matrix or a batch of square matrices to be raised
to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or
more batch dimensions. Its data type should be float32 or float64.
n (int): The exponent. It can be any positive, negative integer or zero.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its
data type should be the same as that of `x`.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1, 2, 3],
[1, 4, 9],
[1, 8, 27]], dtype='float64')
print(paddle.linalg.matrix_power(x, 2))
# [[6. , 34. , 102.],
# [14. , 90. , 282.],
# [36. , 250., 804.]]
print(paddle.linalg.matrix_power(x, 0))
# [[1., 0., 0.],
# [0., 1., 0.],
# [0., 0., 1.]]
print(paddle.linalg.matrix_power(x, -2))
# [[ 12.91666667, -12.75000000, 2.83333333 ],
# [-7.66666667 , 8. , -1.83333333 ],
# [ 1.80555556 , -1.91666667 , 0.44444444 ]] | python/paddle/tensor/linalg.py | matrix_power | DevilCarp/Paddle | python | def matrix_power(x, n, name=None):
"\n Computes the n-th power of a square matrix or a batch of square matrices.\n\n Let :math:`X` be a sqaure matrix or a batch of square matrices, :math:`n` be\n an exponent, the equation should be:\n\n .. math::\n Out = X ^ {n}\n\n Specifically,\n\n - If `n > 0`, it returns the matrix or a batch of matrices raised to the power\n of `n`.\n\n - If `n = 0`, it returns the identity matrix or a batch of identity matrices.\n\n - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to\n the power of `abs(n)`.\n\n Args:\n x (Tensor): A square matrix or a batch of square matrices to be raised\n to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or\n more batch dimensions. Its data type should be float32 or float64.\n n (int): The exponent. It can be any positive, negative integer or zero.\n name (str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its\n data type should be the same as that of `x`.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([[1, 2, 3],\n [1, 4, 9],\n [1, 8, 27]], dtype='float64')\n print(paddle.linalg.matrix_power(x, 2))\n # [[6. , 34. , 102.],\n # [14. , 90. , 282.],\n # [36. , 250., 804.]]\n\n print(paddle.linalg.matrix_power(x, 0))\n # [[1., 0., 0.],\n # [0., 1., 0.],\n # [0., 0., 1.]]\n\n print(paddle.linalg.matrix_power(x, -2))\n # [[ 12.91666667, -12.75000000, 2.83333333 ],\n # [-7.66666667 , 8. , -1.83333333 ],\n # [ 1.80555556 , -1.91666667 , 0.44444444 ]]\n "
if paddle.in_dynamic_mode():
return _C_ops.matrix_power(x, 'n', n)
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power')
check_type(n, 'n', int, 'matrix_power')
helper = LayerHelper('matrix_power', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n})
return out |
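For a positive exponent the result is plain repeated matrix multiplication:

.. code-block:: python

    import paddle

    x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
    print(paddle.allclose(paddle.linalg.matrix_power(x, 3),
                          paddle.matmul(paddle.matmul(x, x), x)))  # True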
def qr(x, mode='reduced', name=None):
'\n Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now).\n\n Args:\n x (Tensor): The input tensor. Its shape should be `[..., M, N]`,\n where ... is zero or more batch dimensions. M and N can be arbitrary\n positive number. The data type of x should be float32 or float64. \n mode (str, optional): A flag to control the behavior of qr, the default is "reduced". \n Suppose x\'s shape is `[..., M, N]` and denoting `K = min(M, N)`:\n If mode = "reduced", qr op will return reduced Q and R matrices, \n which means Q\'s shape is `[..., M, K]` and R\'s shape is `[..., K, N]`.\n If mode = "complete", qr op will return complete Q and R matrices, \n which means Q\'s shape is `[..., M, M]` and R\'s shape is `[..., M, N]`.\n If mode = "r", qr op will only return reduced R matrix, which means\n R\'s shape is `[..., K, N]`.\n name (str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n \n Returns:\n If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. \n If mode = "r", qr will return a tensor which represents R.\n \n Examples: \n .. code-block:: python\n\n import paddle \n\n x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype(\'float64\')\n q, r = paddle.linalg.qr(x)\n print (q)\n print (r)\n\n # Q = [[-0.16903085, 0.89708523],\n # [-0.50709255, 0.27602622],\n # [-0.84515425, -0.34503278]])\n\n # R = [[-5.91607978, -7.43735744],\n # [ 0. , 0.82807867]])\n \n # one can verify : X = Q * R ; \n '
if paddle.in_dynamic_mode():
(q, r) = _C_ops.qr(x, 'mode', mode)
if (mode == 'r'):
return r
else:
return (q, r)
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr')
check_type(mode, 'mode', str, 'qr')
helper = LayerHelper('qr', **locals())
q = helper.create_variable_for_type_inference(dtype=x.dtype)
r = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = dict()
attrs['mode'] = mode
helper.append_op(type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs)
if (mode == 'r'):
return r
else:
return (q, r) | -753,448,886,194,518,700 | Computes the QR decomposition of one matrix or batches of matrices (backward is unsupported now).
Args:
x (Tensor): The input tensor. Its shape should be `[..., M, N]`,
where ... is zero or more batch dimensions. M and N can be arbitrary
positive numbers. The data type of x should be float32 or float64.
mode (str, optional): A flag to control the behavior of qr, the default is "reduced".
Suppose x's shape is `[..., M, N]` and denote `K = min(M, N)`:
If mode = "reduced", qr op will return reduced Q and R matrices,
which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`.
If mode = "complete", qr op will return complete Q and R matrices,
which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`.
If mode = "r", qr op will only return reduced R matrix, which means
R's shape is `[..., K, N]`.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R.
If mode = "r", qr will return a tensor which represents R.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64')
q, r = paddle.linalg.qr(x)
print (q)
print (r)
# Q = [[-0.16903085, 0.89708523],
# [-0.50709255, 0.27602622],
# [-0.84515425, -0.34503278]])
# R = [[-5.91607978, -7.43735744],
# [ 0. , 0.82807867]])
# one can verify : X = Q * R ; | python/paddle/tensor/linalg.py | qr | DevilCarp/Paddle | python | def qr(x, mode='reduced', name=None):
'\n Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now).\n\n Args:\n x (Tensor): The input tensor. Its shape should be `[..., M, N]`,\n where ... is zero or more batch dimensions. M and N can be arbitrary\n positive number. The data type of x should be float32 or float64. \n mode (str, optional): A flag to control the behavior of qr, the default is "reduced". \n Suppose x\'s shape is `[..., M, N]` and denoting `K = min(M, N)`:\n If mode = "reduced", qr op will return reduced Q and R matrices, \n which means Q\'s shape is `[..., M, K]` and R\'s shape is `[..., K, N]`.\n If mode = "complete", qr op will return complete Q and R matrices, \n which means Q\'s shape is `[..., M, M]` and R\'s shape is `[..., M, N]`.\n If mode = "r", qr op will only return reduced R matrix, which means\n R\'s shape is `[..., K, N]`.\n name (str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n \n Returns:\n If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. \n If mode = "r", qr will return a tensor which represents R.\n \n Examples: \n .. code-block:: python\n\n import paddle \n\n x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype(\'float64\')\n q, r = paddle.linalg.qr(x)\n print (q)\n print (r)\n\n # Q = [[-0.16903085, 0.89708523],\n # [-0.50709255, 0.27602622],\n # [-0.84515425, -0.34503278]])\n\n # R = [[-5.91607978, -7.43735744],\n # [ 0. , 0.82807867]])\n \n # one can verify : X = Q * R ; \n '
if paddle.in_dynamic_mode():
(q, r) = _C_ops.qr(x, 'mode', mode)
if (mode == 'r'):
return r
else:
return (q, r)
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr')
check_type(mode, 'mode', str, 'qr')
helper = LayerHelper('qr', **locals())
q = helper.create_variable_for_type_inference(dtype=x.dtype)
r = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = dict()
attrs['mode'] = mode
helper.append_op(type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs)
if (mode == 'r'):
return r
else:
return (q, r) |
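In the default reduced mode, Q should have orthonormal columns and Q @ R should reproduce the input:

.. code-block:: python

    import paddle

    x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype="float64")
    q, r = paddle.linalg.qr(x)
    eye2 = paddle.eye(2, dtype="float64")
    print(paddle.allclose(paddle.matmul(q, q, transpose_x=True), eye2))  # Q^T Q == I
    print(paddle.allclose(paddle.matmul(q, r), x))                      # Q @ R == X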
def lu(x, pivot=True, get_infos=False, name=None):
"\n Computes the LU factorization of an N-D(N>=2) matrix x. \n\n Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and \n upper triangular matrix U are combined to a single LU matrix.\n\n Pivoting is done if pivot is set to True.\n P mat can be get by pivots:\n # ones = eye(rows) #eye matrix of rank rows\n # for i in range(cols):\n # swap(ones[i], ones[pivots[i]])\n # return ones\n\n Args:\n\n X (Tensor): the tensor to factor of N-dimensions(N>=2).\n\n pivot (bool, optional): controls whether pivoting is done. Default: True.\n\n get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False.\n\n name (str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n \n Returns:\n factorization (Tensor): LU matrix, the factorization of input X.\n\n pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the \n intermediate transpositions of rows. The final permutation `perm` could be \n reconstructed by this, details refer to upper example.\n\n infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) \n where non-zero values indicate whether factorization for the matrix or each minibatch \n has succeeded or failed.\n\n \n Examples: \n .. code-block:: python\n\n import paddle \n\n x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64')\n lu,p,info = paddle.linalg.lu(x, get_infos=True)\n\n # >>> lu:\n # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,\n # [[5. , 6. ],\n # [0.20000000, 0.80000000],\n # [0.60000000, 0.50000000]])\n # >>> p\n # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True,\n # [3, 3])\n # >>> info\n # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True,\n # 0)\n \n P,L,U = paddle.linalg.lu_unpack(lu,p)\n\n # >>> P\n # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True,\n # [[0., 1., 0.],\n # [0., 0., 1.],\n # [1., 0., 0.]]), \n # >>> L\n # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,\n # [[1. , 0. ],\n # [0.20000000, 1. ],\n # [0.60000000, 0.50000000]]), \n # >>> U\n # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,\n # [[5. , 6. ],\n # [0. , 0.80000000]]))\n \n\n # one can verify : X = P @ L @ U ; \n "
if paddle.in_dynamic_mode():
(LU, Piv, Info) = _C_ops.lu(x, 'pivots', pivot)
if get_infos:
return (LU, Piv, Info)
else:
return (LU, Piv)
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu')
helper = LayerHelper('lu', **locals())
lu = helper.create_variable_for_type_inference(dtype=x.dtype)
p = helper.create_variable_for_type_inference(dtype='int')
info = helper.create_variable_for_type_inference(dtype='int')
attrs = dict()
attrs['pivots'] = pivot
helper.append_op(type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs)
if get_infos:
return (lu, p, info)
else:
return (lu, p) | -1,885,591,820,543,059,200 | Computes the LU factorization of an N-D(N>=2) matrix x.
Returns the LU factorization (in place on x) and pivots. The lower triangular matrix L and the
upper triangular matrix U are combined into a single LU matrix.
Pivoting is done if pivot is set to True.
The permutation matrix P can be reconstructed from the pivots:
# ones = eye(rows) #eye matrix of rank rows
# for i in range(cols):
# swap(ones[i], ones[pivots[i]])
# return ones
Args:
X (Tensor): the N-dimensional (N>=2) tensor to factor.
pivot (bool, optional): controls whether pivoting is done. Default: True.
get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
factorization (Tensor): LU matrix, the factorization of input X.
pivots (IntTensor): the pivots of size (∗(N-2), min(m,n)). `pivots` stores all the
intermediate transpositions of rows. The final permutation `perm` can be
reconstructed from them; see the example above for details.
infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2))
where non-zero values indicate whether factorization for the matrix or each minibatch
has succeeded or failed.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64')
lu,p,info = paddle.linalg.lu(x, get_infos=True)
# >>> lu:
# Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
# [[5. , 6. ],
# [0.20000000, 0.80000000],
# [0.60000000, 0.50000000]])
# >>> p
# Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True,
# [3, 3])
# >>> info
# Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True,
# 0)
P,L,U = paddle.linalg.lu_unpack(lu,p)
# >>> P
# (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
# [[0., 1., 0.],
# [0., 0., 1.],
# [1., 0., 0.]]),
# >>> L
# Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
# [[1. , 0. ],
# [0.20000000, 1. ],
# [0.60000000, 0.50000000]]),
# >>> U
# Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
# [[5. , 6. ],
# [0. , 0.80000000]]))
# one can verify : X = P @ L @ U ; | python/paddle/tensor/linalg.py | lu | DevilCarp/Paddle | python | def lu(x, pivot=True, get_infos=False, name=None):
"\n Computes the LU factorization of an N-D(N>=2) matrix x. \n\n Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and \n upper triangular matrix U are combined to a single LU matrix.\n\n Pivoting is done if pivot is set to True.\n P mat can be get by pivots:\n # ones = eye(rows) #eye matrix of rank rows\n # for i in range(cols):\n # swap(ones[i], ones[pivots[i]])\n # return ones\n\n Args:\n\n X (Tensor): the tensor to factor of N-dimensions(N>=2).\n\n pivot (bool, optional): controls whether pivoting is done. Default: True.\n\n get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False.\n\n name (str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n \n Returns:\n factorization (Tensor): LU matrix, the factorization of input X.\n\n pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the \n intermediate transpositions of rows. The final permutation `perm` could be \n reconstructed by this, details refer to upper example.\n\n infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) \n where non-zero values indicate whether factorization for the matrix or each minibatch \n has succeeded or failed.\n\n \n Examples: \n .. code-block:: python\n\n import paddle \n\n x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64')\n lu,p,info = paddle.linalg.lu(x, get_infos=True)\n\n # >>> lu:\n # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,\n # [[5. , 6. ],\n # [0.20000000, 0.80000000],\n # [0.60000000, 0.50000000]])\n # >>> p\n # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True,\n # [3, 3])\n # >>> info\n # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True,\n # 0)\n \n P,L,U = paddle.linalg.lu_unpack(lu,p)\n\n # >>> P\n # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True,\n # [[0., 1., 0.],\n # [0., 0., 1.],\n # [1., 0., 0.]]), \n # >>> L\n # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,\n # [[1. , 0. ],\n # [0.20000000, 1. ],\n # [0.60000000, 0.50000000]]), \n # >>> U\n # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,\n # [[5. , 6. ],\n # [0. , 0.80000000]]))\n \n\n # one can verify : X = P @ L @ U ; \n "
if paddle.in_dynamic_mode():
(LU, Piv, Info) = _C_ops.lu(x, 'pivots', pivot)
if get_infos:
return (LU, Piv, Info)
else:
return (LU, Piv)
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu')
helper = LayerHelper('lu', **locals())
lu = helper.create_variable_for_type_inference(dtype=x.dtype)
p = helper.create_variable_for_type_inference(dtype='int')
info = helper.create_variable_for_type_inference(dtype='int')
attrs = dict()
attrs['pivots'] = pivot
helper.append_op(type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs)
if get_infos:
return (lu, p, info)
else:
return (lu, p) |
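Together with lu_unpack below, the factors should reproduce the input as P @ L @ U:

.. code-block:: python

    import paddle

    x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype="float64")
    lu, piv = paddle.linalg.lu(x)
    p, l, u = paddle.linalg.lu_unpack(lu, piv)
    print(paddle.allclose(paddle.matmul(paddle.matmul(p, l), u), x))  # True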
def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None):
"\n Unpack L U and P to single matrix tensor . \n unpack L and U matrix from LU, unpack permutation matrix P from Pivtos .\n\n P mat can be get by pivots:\n # ones = eye(rows) #eye matrix of rank rows\n # for i in range(cols):\n # swap(ones[i], ones[pivots[i]])\n\n\n Args:\n x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U.\n\n y (Tensor): Pivots get from paddle.linalg.lu.\n\n unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True.\n\n unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True.\n\n name (str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n \n Returns:\n P (Tensor): Permutation matrix P of lu factorization.\n\n L (Tensor): The lower triangular matrix tensor of lu factorization.\n\n U (Tensor): The upper triangular matrix tensor of lu factorization.\n\n \n Examples: \n .. code-block:: python\n\n import paddle \n\n x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64')\n lu,p,info = paddle.linalg.lu(x, get_infos=True)\n\n # >>> lu:\n # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,\n # [[5. , 6. ],\n # [0.20000000, 0.80000000],\n # [0.60000000, 0.50000000]])\n # >>> p\n # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True,\n # [3, 3])\n # >>> info\n # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True,\n # 0)\n \n P,L,U = paddle.linalg.lu_unpack(lu,p)\n\n # >>> P\n # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True,\n # [[0., 1., 0.],\n # [0., 0., 1.],\n # [1., 0., 0.]]), \n # >>> L\n # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,\n # [[1. , 0. ],\n # [0.20000000, 1. ],\n # [0.60000000, 0.50000000]]), \n # >>> U\n # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,\n # [[5. , 6. ],\n # [0. , 0.80000000]]))\n\n # one can verify : X = P @ L @ U ; \n "
if paddle.in_dynamic_mode():
(P, L, U) = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots)
return (P, L, U)
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack')
helper = LayerHelper('lu_unpack', **locals())
p = helper.create_variable_for_type_inference(dtype=x.dtype)
l = helper.create_variable_for_type_inference(dtype=x.dtype)
u = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = dict()
attrs['unpack_ludata'] = unpack_ludata
attrs['unpack_pivots'] = unpack_pivots
helper.append_op(type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs)
return (p, l, u) | -8,822,091,043,951,989,000 | Unpack L, U and P to individual matrix tensors.
Unpack the L and U matrices from LU, and the permutation matrix P from Pivots.
The permutation matrix P can be reconstructed from the pivots:
# ones = eye(rows) #eye matrix of rank rows
# for i in range(cols):
# swap(ones[i], ones[pivots[i]])
Args:
x (Tensor): The LU tensor obtained from paddle.linalg.lu, which combines L and U.
y (Tensor): Pivots obtained from paddle.linalg.lu.
unpack_ludata (bool, optional): whether to unpack L and U from x. Default: True.
unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivots. Default: True.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
P (Tensor): Permutation matrix P of lu factorization.
L (Tensor): The lower triangular matrix tensor of lu factorization.
U (Tensor): The upper triangular matrix tensor of lu factorization.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64')
lu,p,info = paddle.linalg.lu(x, get_infos=True)
# >>> lu:
# Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
# [[5. , 6. ],
# [0.20000000, 0.80000000],
# [0.60000000, 0.50000000]])
# >>> p
# Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True,
# [3, 3])
# >>> info
# Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True,
# 0)
P,L,U = paddle.linalg.lu_unpack(lu,p)
# >>> P
# (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
# [[0., 1., 0.],
# [0., 0., 1.],
# [1., 0., 0.]]),
# >>> L
# Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
# [[1. , 0. ],
# [0.20000000, 1. ],
# [0.60000000, 0.50000000]]),
# >>> U
# Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
# [[5. , 6. ],
# [0. , 0.80000000]]))
# one can verify : X = P @ L @ U ; | python/paddle/tensor/linalg.py | lu_unpack | DevilCarp/Paddle | python | def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None):
"\n Unpack L U and P to single matrix tensor . \n unpack L and U matrix from LU, unpack permutation matrix P from Pivtos .\n\n P mat can be get by pivots:\n # ones = eye(rows) #eye matrix of rank rows\n # for i in range(cols):\n # swap(ones[i], ones[pivots[i]])\n\n\n Args:\n x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U.\n\n y (Tensor): Pivots get from paddle.linalg.lu.\n\n unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True.\n\n unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True.\n\n name (str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n \n Returns:\n P (Tensor): Permutation matrix P of lu factorization.\n\n L (Tensor): The lower triangular matrix tensor of lu factorization.\n\n U (Tensor): The upper triangular matrix tensor of lu factorization.\n\n \n Examples: \n .. code-block:: python\n\n import paddle \n\n x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64')\n lu,p,info = paddle.linalg.lu(x, get_infos=True)\n\n # >>> lu:\n # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,\n # [[5. , 6. ],\n # [0.20000000, 0.80000000],\n # [0.60000000, 0.50000000]])\n # >>> p\n # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True,\n # [3, 3])\n # >>> info\n # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True,\n # 0)\n \n P,L,U = paddle.linalg.lu_unpack(lu,p)\n\n # >>> P\n # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True,\n # [[0., 1., 0.],\n # [0., 0., 1.],\n # [1., 0., 0.]]), \n # >>> L\n # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,\n # [[1. , 0. ],\n # [0.20000000, 1. ],\n # [0.60000000, 0.50000000]]), \n # >>> U\n # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,\n # [[5. , 6. ],\n # [0. , 0.80000000]]))\n\n # one can verify : X = P @ L @ U ; \n "
if paddle.in_dynamic_mode():
(P, L, U) = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots)
return (P, L, U)
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack')
helper = LayerHelper('lu_unpack', **locals())
p = helper.create_variable_for_type_inference(dtype=x.dtype)
l = helper.create_variable_for_type_inference(dtype=x.dtype)
u = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = dict()
attrs['unpack_ludata'] = unpack_ludata
attrs['unpack_pivots'] = unpack_pivots
helper.append_op(type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs)
return (p, l, u) |
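The trailing comment in the lu_unpack example invites the reader to verify X = P @ L @ U. The following is a minimal, self-contained sketch of that check (an editorial illustration, not part of the Paddle source above; it reuses the 3x2 input from the docstring example):

import paddle

x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64')
lu, p = paddle.linalg.lu(x)               # combined LU factors plus pivots
P, L, U = paddle.linalg.lu_unpack(lu, p)  # unpack into explicit P, L, U

# P @ L @ U should reproduce the original matrix up to rounding error.
print(paddle.allclose(P @ L @ U, x))      # expected: True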
def eig(x, name=None):
'\n This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices.\n\n .. note::\n If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster.\n If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead.\n If the matrix is of any shape, please use :ref:`paddle.linalg.svd`.\n This API is only supported on CPU device.\n The output datatype is always complex for both real and complex input.\n\n Args:\n x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``,\n ``float64``, ``compplex64`` or ``complex128``.\n name (str, optional): The default value is `None`. Normally there is no need for user to set \n this property. For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values.\n Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors.\n\n Examples:\n .. code-block:: python\n\n import paddle\n import numpy as np\n\n paddle.device.set_device("cpu")\n\n x_data = np.array([[1.6707249, 7.2249975, 6.5045543],\n [9.956216, 8.749598, 6.066444 ],\n [4.4251957, 1.7983172, 0.370647 ]]).astype("float32")\n x = paddle.to_tensor(x_data)\n w, v = paddle.linalg.eig(x)\n print(w)\n # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False,\n # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) ,\n # (0.18518077798279986+0j)],\n # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) ,\n # (-0.6837005269141947+0j) ],\n # [(-0.23142567697893396+0j), (0.4944999840400175+0j) ,\n # (0.7058765252952796+0j) ]])\n\n print(v)\n # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False,\n # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) ,\n # (-0.21026087843552282+0j)])\n '
if paddle.in_dynamic_mode():
(w, v) = _C_ops.eig(x)
return (w, v)
check_variable_and_dtype(x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig')
helper = LayerHelper('eig', **locals())
w = helper.create_variable_for_type_inference(x.dtype)
v = helper.create_variable_for_type_inference(x.dtype)
inputs = {'X': x}
outputs = {'Eigenvalues': w, 'Eigenvectors': v}
helper.append_op(type='eig', inputs=inputs, outputs=outputs)
return (w, v) | 4,681,175,119,224,986,000 | This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices.
.. note::
If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster.
If only the eigenvalues are needed, please use :ref:`paddle.linalg.eigvals` instead.
If the matrix is of any shape, please use :ref:`paddle.linalg.svd`.
This API is only supported on CPU device.
The output datatype is always complex for both real and complex input.
Args:
x (Tensor): A tensor with shape math:`[*, N, N]`. The data type of x should be one of ``float32``,
``float64``, ``complex64`` or ``complex128``.
name (str, optional): The default value is `None`. Normally there is no need for user to set
this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Eigenvalues(Tensor): A tensor with shape math:`[*, N]` refers to the eigenvalues.
Eigenvectors(Tensor): A tensor with shape math:`[*, N, N]` refers to the eigenvectors.
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.device.set_device("cpu")
x_data = np.array([[1.6707249, 7.2249975, 6.5045543],
[9.956216, 8.749598, 6.066444 ],
[4.4251957, 1.7983172, 0.370647 ]]).astype("float32")
x = paddle.to_tensor(x_data)
w, v = paddle.linalg.eig(x)
print(w)
# Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False,
# [ (16.50471283351188+0j) , (-5.5034820550763515+0j) ,
# (-0.21026087843552282+0j)])
print(v)
# Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False,
# [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) ,
# (0.18518077798279986+0j)],
# [(-0.8308237755993192+0j) , (0.3463813401919749+0j) ,
# (-0.6837005269141947+0j) ],
# [(-0.23142567697893396+0j), (0.4944999840400175+0j) ,
# (0.7058765252952796+0j) ]]) | python/paddle/tensor/linalg.py | eig | DevilCarp/Paddle | python | def eig(x, name=None):
'\n This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices.\n\n .. note::\n If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster.\n If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead.\n If the matrix is of any shape, please use :ref:`paddle.linalg.svd`.\n This API is only supported on CPU device.\n The output datatype is always complex for both real and complex input.\n\n Args:\n x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``,\n ``float64``, ``compplex64`` or ``complex128``.\n name (str, optional): The default value is `None`. Normally there is no need for user to set \n this property. For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values.\n Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors.\n\n Examples:\n .. code-block:: python\n\n import paddle\n import numpy as np\n\n paddle.device.set_device("cpu")\n\n x_data = np.array([[1.6707249, 7.2249975, 6.5045543],\n [9.956216, 8.749598, 6.066444 ],\n [4.4251957, 1.7983172, 0.370647 ]]).astype("float32")\n x = paddle.to_tensor(x_data)\n w, v = paddle.linalg.eig(x)\n print(w)\n # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False,\n # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) ,\n # (0.18518077798279986+0j)],\n # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) ,\n # (-0.6837005269141947+0j) ],\n # [(-0.23142567697893396+0j), (0.4944999840400175+0j) ,\n # (0.7058765252952796+0j) ]])\n\n print(v)\n # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False,\n # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) ,\n # (-0.21026087843552282+0j)])\n '
if paddle.in_dynamic_mode():
(w, v) = _C_ops.eig(x)
return (w, v)
check_variable_and_dtype(x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig')
helper = LayerHelper('eig', **locals())
w = helper.create_variable_for_type_inference(x.dtype)
v = helper.create_variable_for_type_inference(x.dtype)
inputs = {'X': x}
outputs = {'Eigenvalues': w, 'Eigenvectors': v}
helper.append_op(type='eig', inputs=inputs, outputs=outputs)
return (w, v) |
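A hedged sanity check for eig: each returned pair should satisfy A @ v[:, i] == w[i] * v[:, i]. The sketch below verifies this in numpy to sidestep mixed real/complex dtypes (an editorial illustration, not part of the Paddle source above):

import numpy as np
import paddle

paddle.device.set_device("cpu")   # eig is CPU-only, per the note above
A = paddle.to_tensor(np.random.rand(3, 3).astype("float32"))
w, v = paddle.linalg.eig(A)

# Columns of v are eigenvectors; broadcasting w scales column i by w[i].
lhs = A.numpy().astype(np.complex128) @ v.numpy()
rhs = v.numpy() * w.numpy()
print(np.allclose(lhs, rhs, atol=1e-5))   # expected: True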
def eigvals(x, name=None):
'\n Compute the eigenvalues of one or more general matrices.\n\n Warning:\n The gradient kernel of this operator does not yet developed.\n If you need back propagation through this operator, please replace it with paddle.linalg.eig.\n\n Args:\n x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed.\n Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions.\n Its data type should be float32, float64, complex64, or complex128.\n name (str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n \n Returns:\n Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`.\n The eigenvalues are complex-valued even when `x` is real.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n paddle.set_device("cpu")\n paddle.seed(1234)\n\n x = paddle.rand(shape=[3, 3], dtype=\'float64\')\n # [[0.02773777, 0.93004224, 0.06911496],\n # [0.24831591, 0.45733623, 0.07717843],\n # [0.48016702, 0.14235102, 0.42620817]])\n\n print(paddle.linalg.eigvals(x))\n # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128\n '
check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals')
x_shape = list(x.shape)
if (len(x_shape) < 2):
raise ValueError("The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}".format(len(x_shape), x_shape))
if (x_shape[(- 1)] != x_shape[(- 2)]):
raise ValueError("The last two dimensions of Input(x) should be equal, but received x's shape = {}".format(x_shape))
if paddle.in_dynamic_mode():
return _C_ops.eigvals(x)
helper = LayerHelper('eigvals', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out})
return out | -910,280,352,137,155,100 | Compute the eigenvalues of one or more general matrices.
Warning:
The gradient kernel of this operator has not been developed yet.
If you need back propagation through this operator, please replace it with paddle.linalg.eig.
Args:
x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed.
Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions.
Its data type should be float32, float64, complex64, or complex128.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions as `x`.
The eigenvalues are complex-valued even when `x` is real.
Examples:
.. code-block:: python
import paddle
paddle.set_device("cpu")
paddle.seed(1234)
x = paddle.rand(shape=[3, 3], dtype='float64')
# [[0.02773777, 0.93004224, 0.06911496],
# [0.24831591, 0.45733623, 0.07717843],
# [0.48016702, 0.14235102, 0.42620817]])
print(paddle.linalg.eigvals(x))
# [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 | python/paddle/tensor/linalg.py | eigvals | DevilCarp/Paddle | python | def eigvals(x, name=None):
'\n Compute the eigenvalues of one or more general matrices.\n\n Warning:\n The gradient kernel of this operator does not yet developed.\n If you need back propagation through this operator, please replace it with paddle.linalg.eig.\n\n Args:\n x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed.\n Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions.\n Its data type should be float32, float64, complex64, or complex128.\n name (str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n \n Returns:\n Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`.\n The eigenvalues are complex-valued even when `x` is real.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n paddle.set_device("cpu")\n paddle.seed(1234)\n\n x = paddle.rand(shape=[3, 3], dtype=\'float64\')\n # [[0.02773777, 0.93004224, 0.06911496],\n # [0.24831591, 0.45733623, 0.07717843],\n # [0.48016702, 0.14235102, 0.42620817]])\n\n print(paddle.linalg.eigvals(x))\n # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128\n '
check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals')
x_shape = list(x.shape)
if (len(x_shape) < 2):
raise ValueError("The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}".format(len(x_shape), x_shape))
if (x_shape[(- 1)] != x_shape[(- 2)]):
raise ValueError("The last two dimensions of Input(x) should be equal, but received x's shape = {}".format(x_shape))
if paddle.in_dynamic_mode():
return _C_ops.eigvals(x)
helper = LayerHelper('eigvals', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out})
return out |
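Since eigvals has no gradient kernel yet, a natural consistency check is against the eigenvalue half of eig. Both return unsorted eigenvalues, so the hedged sketch below compares them after sorting (an editorial illustration, not part of the Paddle source above):

import numpy as np
import paddle

paddle.set_device("cpu")
x = paddle.rand(shape=[3, 3], dtype='float64')

vals = paddle.linalg.eigvals(x)
w, _ = paddle.linalg.eig(x)   # eigenvectors discarded

# Compare the two unsorted spectra as sorted complex arrays.
print(np.allclose(np.sort_complex(vals.numpy()), np.sort_complex(w.numpy())))
# expected: True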
def multi_dot(x, name=None):
'\n Multi_dot is an operator that calculates multiple matrix multiplications.\n\n Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not\n support batched inputs.\n\n The input tensor in [x] must be 2-D except for the first and last can be 1-D.\n If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector\n of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it\n is treated as a column vector of shape(n, 1).\n\n If the first and last tensor are 2-D matrix, then the output is also 2-D matrix,\n otherwise the output is a 1-D vector.\n\n Multi_dot will select the lowest cost multiplication order for calculation. The\n cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c.\n Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively,\n we can calculate the cost of different multiplication orders as follows:\n - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000\n - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000\n\n In this case, multiplying B and C first, then multiply A, which is 5 times faster\n than sequential calculation.\n\n Args:\n x ([Tensor]): The input tensors which is a list Tensor.\n name(str|None): A name for this layer(optional). If set None, the layer\n will be named automatically.\n\n Returns:\n Tensor: The output Tensor.\n\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n import numpy as np\n\n # A * B\n A_data = np.random.random([3, 4]).astype(np.float32)\n B_data = np.random.random([4, 5]).astype(np.float32)\n A = paddle.to_tensor(A_data)\n B = paddle.to_tensor(B_data)\n out = paddle.linalg.multi_dot([A, B])\n print(out.numpy().shape)\n # [3, 5]\n\n # A * B * C\n A_data = np.random.random([10, 5]).astype(np.float32)\n B_data = np.random.random([5, 8]).astype(np.float32)\n C_data = np.random.random([8, 7]).astype(np.float32)\n A = paddle.to_tensor(A_data)\n B = paddle.to_tensor(B_data)\n C = paddle.to_tensor(C_data)\n out = paddle.linalg.multi_dot([A, B, C])\n print(out.numpy().shape)\n # [10, 7]\n\n '
if paddle.in_dynamic_mode():
return _C_ops.multi_dot(x)
check_type(x, 'x', (list, tuple), 'multi_dot')
for (id, item) in enumerate(x):
check_variable_and_dtype(item, (('x[' + str(id)) + ']'), ['float16', 'float32', 'float64'], 'multi_dot')
if (item.dtype != x[0].dtype):
raise TypeError('All the Tensors in the input must have the same data type.')
helper = LayerHelper('multi_dot', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type='multi_dot', inputs={'X': x}, outputs={'Out': out})
return out | 1,592,148,319,123,424,000 | Multi_dot is an operator that calculates multiple matrix multiplications.
Supports inputs of float16 (GPU only), float32 and float64 dtypes. This function does not
support batched inputs.
Every tensor in [x] must be 2-D, except that the first and the last may be 1-D.
If the first tensor is a 1-D vector of shape (n, ), it is treated as a row vector
of shape (1, n); similarly, if the last tensor is a 1-D vector of shape (n, ), it
is treated as a column vector of shape (n, 1).
If the first and last tensor are 2-D matrix, then the output is also 2-D matrix,
otherwise the output is a 1-D vector.
Multi_dot will select the lowest cost multiplication order for calculation. The
cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c.
Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively,
we can calculate the cost of different multiplication orders as follows:
- Cost((AB)C) = 20x5x100 + 20x100x10 = 30000
- Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000
In this case, multiplying B and C first and then multiplying the result by A is 5 times faster
than the sequential left-to-right calculation.
Args:
x (list[Tensor]): The input tensors, given as a list of Tensors.
name (str|None): A name for this layer (optional). If set to None, the layer
will be named automatically.
Returns:
Tensor: The output Tensor.
Examples:
.. code-block:: python
import paddle
import numpy as np
# A * B
A_data = np.random.random([3, 4]).astype(np.float32)
B_data = np.random.random([4, 5]).astype(np.float32)
A = paddle.to_tensor(A_data)
B = paddle.to_tensor(B_data)
out = paddle.linalg.multi_dot([A, B])
print(out.numpy().shape)
# [3, 5]
# A * B * C
A_data = np.random.random([10, 5]).astype(np.float32)
B_data = np.random.random([5, 8]).astype(np.float32)
C_data = np.random.random([8, 7]).astype(np.float32)
A = paddle.to_tensor(A_data)
B = paddle.to_tensor(B_data)
C = paddle.to_tensor(C_data)
out = paddle.linalg.multi_dot([A, B, C])
print(out.numpy().shape)
# [10, 7] | python/paddle/tensor/linalg.py | multi_dot | DevilCarp/Paddle | python | def multi_dot(x, name=None):
'\n Multi_dot is an operator that calculates multiple matrix multiplications.\n\n Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not\n support batched inputs.\n\n The input tensor in [x] must be 2-D except for the first and last can be 1-D.\n If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector\n of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it\n is treated as a column vector of shape(n, 1).\n\n If the first and last tensor are 2-D matrix, then the output is also 2-D matrix,\n otherwise the output is a 1-D vector.\n\n Multi_dot will select the lowest cost multiplication order for calculation. The\n cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c.\n Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively,\n we can calculate the cost of different multiplication orders as follows:\n - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000\n - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000\n\n In this case, multiplying B and C first, then multiply A, which is 5 times faster\n than sequential calculation.\n\n Args:\n x ([Tensor]): The input tensors which is a list Tensor.\n name(str|None): A name for this layer(optional). If set None, the layer\n will be named automatically.\n\n Returns:\n Tensor: The output Tensor.\n\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n import numpy as np\n\n # A * B\n A_data = np.random.random([3, 4]).astype(np.float32)\n B_data = np.random.random([4, 5]).astype(np.float32)\n A = paddle.to_tensor(A_data)\n B = paddle.to_tensor(B_data)\n out = paddle.linalg.multi_dot([A, B])\n print(out.numpy().shape)\n # [3, 5]\n\n # A * B * C\n A_data = np.random.random([10, 5]).astype(np.float32)\n B_data = np.random.random([5, 8]).astype(np.float32)\n C_data = np.random.random([8, 7]).astype(np.float32)\n A = paddle.to_tensor(A_data)\n B = paddle.to_tensor(B_data)\n C = paddle.to_tensor(C_data)\n out = paddle.linalg.multi_dot([A, B, C])\n print(out.numpy().shape)\n # [10, 7]\n\n '
if paddle.in_dynamic_mode():
return _C_ops.multi_dot(x)
check_type(x, 'x', (list, tuple), 'multi_dot')
for (id, item) in enumerate(x):
check_variable_and_dtype(item, (('x[' + str(id)) + ']'), ['float16', 'float32', 'float64'], 'multi_dot')
if (item.dtype != x[0].dtype):
raise TypeError('All the Tensors in the input must have the same data type.')
helper = LayerHelper('multi_dot', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type='multi_dot', inputs={'X': x}, outputs={'Out': out})
return out |
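The docstring's 30000 vs. 6000 comparison is plain arithmetic over the a*b*c cost model. A small, hypothetical helper (not part of the Paddle API) makes that accounting explicit:

def matmul_cost(a, b, c):
    # multiplying an (a, b) matrix by a (b, c) matrix costs a*b*c multiplications
    return a * b * c

# Shapes from the docstring: A:(20, 5), B:(5, 100), C:(100, 10)
cost_AB_C = matmul_cost(20, 5, 100) + matmul_cost(20, 100, 10)   # (AB)C
cost_A_BC = matmul_cost(5, 100, 10) + matmul_cost(20, 5, 10)     # A(BC)
print(cost_AB_C, cost_A_BC)   # 30000 6000, so A(BC) is 5x cheaper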
def eigh(x, UPLO='L', name=None):
'\n Compute the eigenvalues and eigenvectors of a\n complex Hermitian (conjugate symmetric) or a real symmetric matrix.\n\n Args:\n x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x\n should be one of float32, float64, complex64, complex128.\n UPLO(str, optional): (string, default \'L\'), \'L\' represents the lower triangular matrix,\n "\'U\' represents the upper triangular matrix.".\n name(str, optional): The default value is None. Normally there is no need for user to set this\n property. For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n\n out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op.\n out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op.\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n import paddle\n\n x_data = np.array([[1, -2j], [2j, 5]])\n x = paddle.to_tensor(x_data)\n out_value, out_vector = paddle.linalg.eigh(x, UPLO=\'L\')\n print(out_value)\n #[0.17157288, 5.82842712]\n print(out_vector)\n #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)],\n #[ 0.3826834323650898j , -0.9238795325112867j ]]\n\n '
if paddle.in_dynamic_mode():
return _C_ops.eigh(x, 'UPLO', UPLO)
def __check_input(x, UPLO):
x_shape = list(x.shape)
if (len(x.shape) < 2):
raise ValueError(('Input(input) only supports tensors with at least 2 dimensions, but received length of Input(input) is %s.' % len(x.shape)))
if (x_shape[(- 1)] != x_shape[(- 2)]):
raise ValueError("The input matrix must be batches of square matrices. But received x's dimention: {}".format(x_shape))
if ((UPLO != 'L') and (UPLO != 'U')):
raise ValueError('UPLO must be L or U. But received UPLO is: {}'.format(UPLO))
__check_input(x, UPLO)
helper = LayerHelper('eigh', **locals())
check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh')
out_value = helper.create_variable_for_type_inference(dtype=x.dtype)
out_vector = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='eigh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO})
return (out_value, out_vector) | -1,568,547,505,044,493,800 | Compute the eigenvalues and eigenvectors of a
complex Hermitian (conjugate symmetric) or a real symmetric matrix.
Args:
x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x
should be one of float32, float64, complex64, complex128.
UPLO (str, optional): 'L' (default) uses the lower triangular part of the matrix;
'U' uses the upper triangular part.
name(str, optional): The default value is None. Normally there is no need for user to set this
property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
out_value (Tensor): A Tensor with shape [*, N] and data type of float32 or float64. The eigenvalues of eigh op.
out_vector (Tensor): A Tensor with shape [*, N, N] and data type of float32, float64, complex64 or complex128. The eigenvectors of eigh op.
Examples:
.. code-block:: python
import numpy as np
import paddle
x_data = np.array([[1, -2j], [2j, 5]])
x = paddle.to_tensor(x_data)
out_value, out_vector = paddle.linalg.eigh(x, UPLO='L')
print(out_value)
#[0.17157288, 5.82842712]
print(out_vector)
#[[(-0.9238795325112867+0j), (-0.3826834323650898+0j)],
# [ 0.3826834323650898j , -0.9238795325112867j ]] | python/paddle/tensor/linalg.py | eigh | DevilCarp/Paddle | python | def eigh(x, UPLO='L', name=None):
'\n Compute the eigenvalues and eigenvectors of a\n complex Hermitian (conjugate symmetric) or a real symmetric matrix.\n\n Args:\n x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x\n should be one of float32, float64, complex64, complex128.\n UPLO(str, optional): (string, default \'L\'), \'L\' represents the lower triangular matrix,\n "\'U\' represents the upper triangular matrix.".\n name(str, optional): The default value is None. Normally there is no need for user to set this\n property. For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n\n out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op.\n out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op.\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n import paddle\n\n x_data = np.array([[1, -2j], [2j, 5]])\n x = paddle.to_tensor(x_data)\n out_value, out_vector = paddle.linalg.eigh(x, UPLO=\'L\')\n print(out_value)\n #[0.17157288, 5.82842712]\n print(out_vector)\n #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)],\n #[ 0.3826834323650898j , -0.9238795325112867j ]]\n\n '
if paddle.in_dynamic_mode():
return _C_ops.eigh(x, 'UPLO', UPLO)
def __check_input(x, UPLO):
x_shape = list(x.shape)
if (len(x.shape) < 2):
raise ValueError(('Input(input) only supports tensors with at least 2 dimensions, but received length of Input(input) is %s.' % len(x.shape)))
if (x_shape[(- 1)] != x_shape[(- 2)]):
raise ValueError("The input matrix must be batches of square matrices. But received x's dimention: {}".format(x_shape))
if ((UPLO != 'L') and (UPLO != 'U')):
raise ValueError('UPLO must be L or U. But received UPLO is: {}'.format(UPLO))
__check_input(x, UPLO)
helper = LayerHelper('eigh', **locals())
check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh')
out_value = helper.create_variable_for_type_inference(dtype=x.dtype)
out_vector = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='eigh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO})
return (out_value, out_vector) |
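For a Hermitian input, eigh's outputs should reconstruct the matrix as x == V @ diag(w) @ V^H. A minimal sketch of that check, done in numpy and reusing the docstring's 2x2 example (an editorial illustration, not part of the Paddle source above):

import numpy as np
import paddle

x = paddle.to_tensor(np.array([[1, -2j], [2j, 5]]))
w, V = paddle.linalg.eigh(x, UPLO='L')

Vn, wn = V.numpy(), w.numpy()
recon = Vn @ np.diag(wn) @ Vn.conj().T   # V diag(w) V^H
print(np.allclose(recon, x.numpy()))     # expected: True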
def pinv(x, rcond=1e-15, hermitian=False, name=None):
"\n Calculate pseudo inverse via SVD(singular value decomposition)\n of one matrix or batches of regular matrix.\n\n .. math::\n\n if hermitian == False:\n x = u * s * vt (SVD)\n out = v * 1/s * ut\n else:\n x = u * s * ut (eigh)\n out = u * 1/s * u.conj().transpose(-2,-1)\n\n If x is hermitian or symmetric matrix, svd will be replaced with eigh.\n\n Args:\n x(Tensor): The input tensor. Its shape should be (*, m, n)\n where * is zero or more batch dimensions. m and n can be\n arbitraty positive number. The data type of x should be\n float32 or float64 or complex64 or complex128. When data\n type is complex64 or cpmplex128, hermitian should be set\n True.\n\n rcond(Tensor, optional): the tolerance value to determine\n when is a singular value zero. Defalut:1e-15.\n\n hermitian(bool, optional): indicates whether x is Hermitian\n if complex or symmetric if real. Default: False.\n\n name(str|None): A name for this layer(optional). If set None,\n the layer will be named automatically.\n\n Returns:\n Tensor: The tensor with same data type with x. it represents\n pseudo inverse of x. Its shape should be (*, n, m).\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n x = paddle.arange(15).reshape((3, 5)).astype('float64')\n input = paddle.to_tensor(x)\n out = paddle.linalg.pinv(input)\n print(input)\n print(out)\n\n # input:\n # [[0. , 1. , 2. , 3. , 4. ],\n # [5. , 6. , 7. , 8. , 9. ],\n # [10., 11., 12., 13., 14.]]\n\n # out:\n # [[-0.22666667, -0.06666667, 0.09333333],\n # [-0.12333333, -0.03333333, 0.05666667],\n # [-0.02000000, 0.00000000, 0.02000000],\n # [ 0.08333333, 0.03333333, -0.01666667],\n # [ 0.18666667, 0.06666667, -0.05333333]]\n\n # one can verify : x * out * x = x ;\n # or out * x * out = x ;\n "
if paddle.in_dynamic_mode():
if (not hermitian):
(u, s, vt) = _C_ops.svd(x, 'full_matrices', False)
max_singular_val = _C_ops.reduce_max(s, 'dim', [(- 1)], 'keep_dim', True, 'reduce_all', False)
rcond = paddle.to_tensor(rcond, dtype=x.dtype)
cutoff = (rcond * max_singular_val)
y = float('inf')
y = paddle.to_tensor(y, dtype=x.dtype)
condition = (s > cutoff)
cond_int = layers.cast(condition, s.dtype)
cond_not_int = layers.cast(layers.logical_not(condition), s.dtype)
out1 = layers.elementwise_mul((1 / s), cond_int)
out2 = layers.elementwise_mul((1 / y), cond_not_int)
singular = layers.elementwise_add(out1, out2)
(st, _) = _C_ops.unsqueeze2(singular, 'axes', [(- 2)])
dims = list(range(len(vt.shape)))
perm = ((dims[:(- 2)] + [dims[(- 1)]]) + [dims[(- 2)]])
(v, _) = _C_ops.transpose2(vt, 'axis', perm)
out_1 = (v * st)
out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True)
return out_2
else:
(s, u) = _C_ops.eigh(x, 'UPLO', 'L')
s_abs = paddle.abs(s)
max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [(- 1)], 'keep_dim', True, 'reduce_all', False)
rcond = paddle.to_tensor(rcond, dtype=s.dtype)
cutoff = (rcond * max_singular_val)
y = float('inf')
y = paddle.to_tensor(y, dtype=s.dtype)
condition = (s_abs > cutoff)
cond_int = layers.cast(condition, s.dtype)
cond_not_int = layers.cast(layers.logical_not(condition), s.dtype)
out1 = layers.elementwise_mul((1 / s), cond_int)
out2 = layers.elementwise_mul((1 / y), cond_not_int)
singular = layers.elementwise_add(out1, out2)
(st, _) = _C_ops.unsqueeze2(singular, 'axes', [(- 2)])
out_1 = (u * st)
u_conj = _C_ops.conj(u)
out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True)
return out_2
elif (not hermitian):
helper = LayerHelper('pinv', **locals())
dtype = x.dtype
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv')
u = helper.create_variable_for_type_inference(dtype)
s = helper.create_variable_for_type_inference(dtype)
vt = helper.create_variable_for_type_inference(dtype)
helper.append_op(type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False})
max_singular_val = helper.create_variable_for_type_inference(dtype)
helper.append_op(type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [(- 1)], 'keep_dim': True, 'reduce_all': False})
rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype)
cutoff = (rcond * max_singular_val)
y = float('inf')
y = layers.fill_constant(shape=[1], value=y, dtype=dtype)
condition = (s > cutoff)
cond_int = layers.cast(condition, dtype)
cond_not_int = layers.cast(layers.logical_not(condition), dtype)
out1 = layers.elementwise_mul((1 / s), cond_int)
out2 = layers.elementwise_mul((1 / y), cond_not_int)
singular = layers.elementwise_add(out1, out2)
st = helper.create_variable_for_type_inference(dtype=dtype)
st_shape = helper.create_variable_for_type_inference(dtype=dtype)
helper.append_op(type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [(- 2)]}, outputs={'Out': st, 'XShape': st_shape})
dims = list(range(len(vt.shape)))
perm = ((dims[:(- 2)] + [dims[(- 1)]]) + [dims[(- 2)]])
v = helper.create_variable_for_type_inference(dtype)
v_shape = helper.create_variable_for_type_inference(dtype)
helper.append_op(type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm})
out_1 = helper.create_variable_for_type_inference(dtype)
helper.append_op(type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': (- 1), 'use_mkldnn': False})
out_1 = helper.append_activation(out_1)
out_2 = helper.create_variable_for_type_inference(dtype)
helper.append_op(type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True})
return out_2
else:
helper = LayerHelper('pinv', **locals())
dtype = x.dtype
check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv')
if (dtype == paddle.complex128):
s_type = 'float64'
elif (dtype == paddle.complex64):
s_type = 'float32'
else:
s_type = dtype
u = helper.create_variable_for_type_inference(dtype)
s = helper.create_variable_for_type_inference(s_type)
helper.append_op(type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'})
s_abs = helper.create_variable_for_type_inference(s_type)
helper.append_op(type='abs', inputs={'X': s}, outputs={'Out': s_abs})
max_singular_val = helper.create_variable_for_type_inference(s_type)
helper.append_op(type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [(- 1)], 'keep_dim': True, 'reduce_all': False})
rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type)
cutoff = (rcond * max_singular_val)
y = float('inf')
y = layers.fill_constant(shape=[1], value=y, dtype=s_type)
condition = (s_abs > cutoff)
cond_int = layers.cast(condition, s_type)
cond_not_int = layers.cast(layers.logical_not(condition), s_type)
out1 = layers.elementwise_mul((1 / s), cond_int)
out2 = layers.elementwise_mul((1 / y), cond_not_int)
singular = layers.elementwise_add(out1, out2)
st = helper.create_variable_for_type_inference(dtype=s_type)
st_shape = helper.create_variable_for_type_inference(dtype=s_type)
helper.append_op(type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [(- 2)]}, outputs={'Out': st, 'XShape': st_shape})
out_1 = helper.create_variable_for_type_inference(dtype)
helper.append_op(type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': (- 1), 'use_mkldnn': False})
out_1 = helper.append_activation(out_1)
u_conj = helper.create_variable_for_type_inference(dtype)
helper.append_op(type='conj', inputs={'X': u}, outputs={'Out': [u_conj]})
out_2 = helper.create_variable_for_type_inference(dtype)
helper.append_op(type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True})
return out_2 | 9,053,780,694,763,835,000 | Calculate pseudo inverse via SVD(singular value decomposition)
of one matrix or batches of regular matrices.
.. math::
if hermitian == False:
x = u * s * vt (SVD)
out = v * 1/s * ut
else:
x = u * s * ut (eigh)
out = u * 1/s * u.conj().transpose(-2,-1)
If x is hermitian or symmetric matrix, svd will be replaced with eigh.
Args:
x (Tensor): The input tensor. Its shape should be (*, m, n)
where * is zero or more batch dimensions. m and n can be
arbitrary positive numbers. The data type of x should be
float32, float64, complex64 or complex128. When the data
type is complex64 or complex128, hermitian should be set
to True.
rcond (Tensor, optional): the tolerance value used to determine
when a singular value is treated as zero. Default: 1e-15.
hermitian(bool, optional): indicates whether x is Hermitian
if complex or symmetric if real. Default: False.
name(str|None): A name for this layer(optional). If set None,
the layer will be named automatically.
Returns:
Tensor: A tensor with the same data type as x. It represents the
pseudo inverse of x. Its shape should be (*, n, m).
Examples:
.. code-block:: python
import paddle
x = paddle.arange(15).reshape((3, 5)).astype('float64')
input = paddle.to_tensor(x)
out = paddle.linalg.pinv(input)
print(input)
print(out)
# input:
# [[0. , 1. , 2. , 3. , 4. ],
# [5. , 6. , 7. , 8. , 9. ],
# [10., 11., 12., 13., 14.]]
# out:
# [[-0.22666667, -0.06666667, 0.09333333],
# [-0.12333333, -0.03333333, 0.05666667],
# [-0.02000000, 0.00000000, 0.02000000],
# [ 0.08333333, 0.03333333, -0.01666667],
# [ 0.18666667, 0.06666667, -0.05333333]]
# one can verify : x * out * x = x ;
# or out * x * out = x ; | python/paddle/tensor/linalg.py | pinv | DevilCarp/Paddle | python | def pinv(x, rcond=1e-15, hermitian=False, name=None):
"\n Calculate pseudo inverse via SVD(singular value decomposition)\n of one matrix or batches of regular matrix.\n\n .. math::\n\n if hermitian == False:\n x = u * s * vt (SVD)\n out = v * 1/s * ut\n else:\n x = u * s * ut (eigh)\n out = u * 1/s * u.conj().transpose(-2,-1)\n\n If x is hermitian or symmetric matrix, svd will be replaced with eigh.\n\n Args:\n x(Tensor): The input tensor. Its shape should be (*, m, n)\n where * is zero or more batch dimensions. m and n can be\n arbitraty positive number. The data type of x should be\n float32 or float64 or complex64 or complex128. When data\n type is complex64 or cpmplex128, hermitian should be set\n True.\n\n rcond(Tensor, optional): the tolerance value to determine\n when is a singular value zero. Defalut:1e-15.\n\n hermitian(bool, optional): indicates whether x is Hermitian\n if complex or symmetric if real. Default: False.\n\n name(str|None): A name for this layer(optional). If set None,\n the layer will be named automatically.\n\n Returns:\n Tensor: The tensor with same data type with x. it represents\n pseudo inverse of x. Its shape should be (*, n, m).\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n x = paddle.arange(15).reshape((3, 5)).astype('float64')\n input = paddle.to_tensor(x)\n out = paddle.linalg.pinv(input)\n print(input)\n print(out)\n\n # input:\n # [[0. , 1. , 2. , 3. , 4. ],\n # [5. , 6. , 7. , 8. , 9. ],\n # [10., 11., 12., 13., 14.]]\n\n # out:\n # [[-0.22666667, -0.06666667, 0.09333333],\n # [-0.12333333, -0.03333333, 0.05666667],\n # [-0.02000000, 0.00000000, 0.02000000],\n # [ 0.08333333, 0.03333333, -0.01666667],\n # [ 0.18666667, 0.06666667, -0.05333333]]\n\n # one can verify : x * out * x = x ;\n # or out * x * out = x ;\n "
if paddle.in_dynamic_mode():
if (not hermitian):
(u, s, vt) = _C_ops.svd(x, 'full_matrices', False)
max_singular_val = _C_ops.reduce_max(s, 'dim', [(- 1)], 'keep_dim', True, 'reduce_all', False)
rcond = paddle.to_tensor(rcond, dtype=x.dtype)
cutoff = (rcond * max_singular_val)
y = float('inf')
y = paddle.to_tensor(y, dtype=x.dtype)
condition = (s > cutoff)
cond_int = layers.cast(condition, s.dtype)
cond_not_int = layers.cast(layers.logical_not(condition), s.dtype)
out1 = layers.elementwise_mul((1 / s), cond_int)
out2 = layers.elementwise_mul((1 / y), cond_not_int)
singular = layers.elementwise_add(out1, out2)
(st, _) = _C_ops.unsqueeze2(singular, 'axes', [(- 2)])
dims = list(range(len(vt.shape)))
perm = ((dims[:(- 2)] + [dims[(- 1)]]) + [dims[(- 2)]])
(v, _) = _C_ops.transpose2(vt, 'axis', perm)
out_1 = (v * st)
out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True)
return out_2
else:
(s, u) = _C_ops.eigh(x, 'UPLO', 'L')
s_abs = paddle.abs(s)
max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [(- 1)], 'keep_dim', True, 'reduce_all', False)
rcond = paddle.to_tensor(rcond, dtype=s.dtype)
cutoff = (rcond * max_singular_val)
y = float('inf')
y = paddle.to_tensor(y, dtype=s.dtype)
condition = (s_abs > cutoff)
cond_int = layers.cast(condition, s.dtype)
cond_not_int = layers.cast(layers.logical_not(condition), s.dtype)
out1 = layers.elementwise_mul((1 / s), cond_int)
out2 = layers.elementwise_mul((1 / y), cond_not_int)
singular = layers.elementwise_add(out1, out2)
(st, _) = _C_ops.unsqueeze2(singular, 'axes', [(- 2)])
out_1 = (u * st)
u_conj = _C_ops.conj(u)
out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True)
return out_2
elif (not hermitian):
helper = LayerHelper('pinv', **locals())
dtype = x.dtype
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv')
u = helper.create_variable_for_type_inference(dtype)
s = helper.create_variable_for_type_inference(dtype)
vt = helper.create_variable_for_type_inference(dtype)
helper.append_op(type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False})
max_singular_val = helper.create_variable_for_type_inference(dtype)
helper.append_op(type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [(- 1)], 'keep_dim': True, 'reduce_all': False})
rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype)
cutoff = (rcond * max_singular_val)
y = float('inf')
y = layers.fill_constant(shape=[1], value=y, dtype=dtype)
condition = (s > cutoff)
cond_int = layers.cast(condition, dtype)
cond_not_int = layers.cast(layers.logical_not(condition), dtype)
out1 = layers.elementwise_mul((1 / s), cond_int)
out2 = layers.elementwise_mul((1 / y), cond_not_int)
singular = layers.elementwise_add(out1, out2)
st = helper.create_variable_for_type_inference(dtype=dtype)
st_shape = helper.create_variable_for_type_inference(dtype=dtype)
helper.append_op(type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [(- 2)]}, outputs={'Out': st, 'XShape': st_shape})
dims = list(range(len(vt.shape)))
perm = ((dims[:(- 2)] + [dims[(- 1)]]) + [dims[(- 2)]])
v = helper.create_variable_for_type_inference(dtype)
v_shape = helper.create_variable_for_type_inference(dtype)
helper.append_op(type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm})
out_1 = helper.create_variable_for_type_inference(dtype)
helper.append_op(type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': (- 1), 'use_mkldnn': False})
out_1 = helper.append_activation(out_1)
out_2 = helper.create_variable_for_type_inference(dtype)
helper.append_op(type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True})
return out_2
else:
helper = LayerHelper('pinv', **locals())
dtype = x.dtype
check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv')
if (dtype == paddle.complex128):
s_type = 'float64'
elif (dtype == paddle.complex64):
s_type = 'float32'
else:
s_type = dtype
u = helper.create_variable_for_type_inference(dtype)
s = helper.create_variable_for_type_inference(s_type)
helper.append_op(type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'})
s_abs = helper.create_variable_for_type_inference(s_type)
helper.append_op(type='abs', inputs={'X': s}, outputs={'Out': s_abs})
max_singular_val = helper.create_variable_for_type_inference(s_type)
helper.append_op(type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [(- 1)], 'keep_dim': True, 'reduce_all': False})
rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type)
cutoff = (rcond * max_singular_val)
y = float('inf')
y = layers.fill_constant(shape=[1], value=y, dtype=s_type)
condition = (s_abs > cutoff)
cond_int = layers.cast(condition, s_type)
cond_not_int = layers.cast(layers.logical_not(condition), s_type)
out1 = layers.elementwise_mul((1 / s), cond_int)
out2 = layers.elementwise_mul((1 / y), cond_not_int)
singular = layers.elementwise_add(out1, out2)
st = helper.create_variable_for_type_inference(dtype=s_type)
st_shape = helper.create_variable_for_type_inference(dtype=s_type)
helper.append_op(type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [(- 2)]}, outputs={'Out': st, 'XShape': st_shape})
out_1 = helper.create_variable_for_type_inference(dtype)
helper.append_op(type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': (- 1), 'use_mkldnn': False})
out_1 = helper.append_activation(out_1)
u_conj = helper.create_variable_for_type_inference(dtype)
helper.append_op(type='conj', inputs={'X': u}, outputs={'Out': [u_conj]})
out_2 = helper.create_variable_for_type_inference(dtype)
helper.append_op(type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True})
return out_2 |
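The pinv docstring ends by noting that x * out * x = x and out * x * out = out. These are two of the four Moore-Penrose conditions; the hedged sketch below checks both on the docstring's 3x5 example (an editorial illustration, not part of the Paddle source above):

import paddle

x = paddle.arange(15).reshape((3, 5)).astype('float64')
x_pinv = paddle.linalg.pinv(x)

# Both checks should print True up to floating-point tolerance.
print(paddle.allclose(x @ x_pinv @ x, x))            # x * out * x == x
print(paddle.allclose(x_pinv @ x @ x_pinv, x_pinv))  # out * x * out == out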
def solve(x, y, name=None):
'\n Computes the solution of a square system of linear equations with a unique solution for input \'X\' and \'Y\'.\n Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be\n a vector/matrix or a batch of vectors/matrices, the equation should be:\n\n .. math::\n Out = X^-1 * Y\n Specifically,\n - This system of linear equations has one solution if and only if input \'X\' is invertible.\n\n Args:\n x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or\n more batch dimensions. Its data type should be float32 or float64.\n y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or\n more batch dimensions. Its data type should be float32 or float64.\n name(str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: The solution of a square system of linear equations with a unique solution for input \'x\' and \'y\'.\n Its data type should be the same as that of `x`.\n\n Examples:\n .. code-block:: python\n\n # a square system of linear equations:\n # 2*X0 + X1 = 9\n # X0 + 2*X1 = 8\n\n import paddle\n import numpy as np\n\n np_x = np.array([[3, 1],[1, 2]])\n np_y = np.array([9, 8])\n x = paddle.to_tensor(np_x, dtype="float64")\n y = paddle.to_tensor(np_y, dtype="float64")\n out = paddle.linalg.solve(x, y)\n\n print(out)\n # [2., 3.])\n '
if paddle.in_dynamic_mode():
return _C_ops.solve(x, y)
inputs = {'X': [x], 'Y': [y]}
helper = LayerHelper('solve', **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve')
check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='solve', inputs={'X': x, 'Y': y}, outputs={'Out': out})
return out | -3,942,150,556,993,506,300 | Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'.
Let :math:`X` be a square matrix or a batch of square matrices, and :math:`Y` be
a vector/matrix or a batch of vectors/matrices; the equation is:
.. math::
Out = X^{-1} * Y
Specifically,
- This system of linear equations has one solution if and only if input 'X' is invertible.
Args:
x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or
more batch dimensions. Its data type should be float32 or float64.
y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or
more batch dimensions. Its data type should be float32 or float64.
name(str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'.
Its data type should be the same as that of `x`.
Examples:
.. code-block:: python
# a square system of linear equations:
# 2*X0 + X1 = 9
# X0 + 2*X1 = 8
import paddle
import numpy as np
np_x = np.array([[3, 1],[1, 2]])
np_y = np.array([9, 8])
x = paddle.to_tensor(np_x, dtype="float64")
y = paddle.to_tensor(np_y, dtype="float64")
out = paddle.linalg.solve(x, y)
print(out)
# [2., 3.]) | python/paddle/tensor/linalg.py | solve | DevilCarp/Paddle | python | def solve(x, y, name=None):
'\n Computes the solution of a square system of linear equations with a unique solution for input \'X\' and \'Y\'.\n Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be\n a vector/matrix or a batch of vectors/matrices, the equation should be:\n\n .. math::\n Out = X^-1 * Y\n Specifically,\n - This system of linear equations has one solution if and only if input \'X\' is invertible.\n\n Args:\n x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or\n more batch dimensions. Its data type should be float32 or float64.\n y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or\n more batch dimensions. Its data type should be float32 or float64.\n name(str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: The solution of a square system of linear equations with a unique solution for input \'x\' and \'y\'.\n Its data type should be the same as that of `x`.\n\n Examples:\n .. code-block:: python\n\n # a square system of linear equations:\n # 2*X0 + X1 = 9\n # X0 + 2*X1 = 8\n\n import paddle\n import numpy as np\n\n np_x = np.array([[3, 1],[1, 2]])\n np_y = np.array([9, 8])\n x = paddle.to_tensor(np_x, dtype="float64")\n y = paddle.to_tensor(np_y, dtype="float64")\n out = paddle.linalg.solve(x, y)\n\n print(out)\n # [2., 3.])\n '
if paddle.in_dynamic_mode():
return _C_ops.solve(x, y)
inputs = {'X': [x], 'Y': [y]}
helper = LayerHelper('solve', **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve')
check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='solve', inputs={'X': x, 'Y': y}, outputs={'Out': out})
return out |
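A solution returned by solve can be validated by substituting it back into the system. A minimal sketch using the docstring's 2x2 system (an editorial illustration, not part of the Paddle source above):

import paddle

x = paddle.to_tensor([[3.0, 1.0], [1.0, 2.0]], dtype="float64")
y = paddle.to_tensor([9.0, 8.0], dtype="float64")
out = paddle.linalg.solve(x, y)

# x @ out should reproduce the right-hand side y.
print(paddle.allclose(x @ out, y))   # expected: True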
def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None):
'\n Computes the solution of a system of equations with a triangular coefficient matrix `x` and\n multiple right-hand sides `y` .\n\n Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs\n is also batches.\n\n Args:\n x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or\n more batch dimensions. Its data type should be float32 or float64.\n y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is \n zero or more batch dimensions. Its data type should be float32 or float64.\n upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular \n system of equations. Default: True.\n transpose (bool, optional): whether `x` should be transposed before calculation. Default: False.\n unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed \n to be 1 and not referenced from `x` . Default: False.\n name(str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: The solution of the system of equations. Its data type should be the same as that of `x`.\n\n Examples:\n .. code-block:: python\n\n # a square system of linear equations:\n # x1 + x2 + x3 = 0\n # 2*x2 + x3 = -9\n # -x3 = 5\n\n import paddle\n import numpy as np\n\n x = paddle.to_tensor([[1, 1, 1], \n [0, 2, 1],\n [0, 0,-1]], dtype="float64")\n y = paddle.to_tensor([[0], [-9], [5]], dtype="float64")\n out = paddle.linalg.triangular_solve(x, y, upper=True)\n\n print(out)\n # [7, -2, -5]\n '
if paddle.in_dynamic_mode():
return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular)
inputs = {'X': [x], 'Y': [y]}
helper = LayerHelper('triangular_solve', **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve')
check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular})
return out | -3,320,546,878,268,656,600 | Computes the solution of a system of equations with a triangular coefficient matrix `x` and
multiple right-hand sides `y` .
Inputs `x` and `y` are 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs
are also batches.
Args:
x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or
more batch dimensions. Its data type should be float32 or float64.
y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is
zero or more batch dimensions. Its data type should be float32 or float64.
upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular
system of equations. Default: True.
transpose (bool, optional): whether `x` should be transposed before calculation. Default: False.
unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed
to be 1 and not referenced from `x` . Default: False.
name(str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: The solution of the system of equations. Its data type should be the same as that of `x`.
Examples:
.. code-block:: python
# a square system of linear equations:
# x1 + x2 + x3 = 0
# 2*x2 + x3 = -9
# -x3 = 5
import paddle
import numpy as np
x = paddle.to_tensor([[1, 1, 1],
[0, 2, 1],
[0, 0,-1]], dtype="float64")
y = paddle.to_tensor([[0], [-9], [5]], dtype="float64")
out = paddle.linalg.triangular_solve(x, y, upper=True)
print(out)
# [7, -2, -5] | python/paddle/tensor/linalg.py | triangular_solve | DevilCarp/Paddle | python | def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None):
'\n Computes the solution of a system of equations with a triangular coefficient matrix `x` and\n multiple right-hand sides `y` .\n\n Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs\n is also batches.\n\n Args:\n x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or\n more batch dimensions. Its data type should be float32 or float64.\n y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is \n zero or more batch dimensions. Its data type should be float32 or float64.\n upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular \n system of equations. Default: True.\n transpose (bool, optional): whether `x` should be transposed before calculation. Default: False.\n unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed \n to be 1 and not referenced from `x` . Default: False.\n name(str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: The solution of the system of equations. Its data type should be the same as that of `x`.\n\n Examples:\n .. code-block:: python\n\n # a square system of linear equations:\n # x1 + x2 + x3 = 0\n # 2*x2 + x3 = -9\n # -x3 = 5\n\n import paddle\n import numpy as np\n\n x = paddle.to_tensor([[1, 1, 1], \n [0, 2, 1],\n [0, 0,-1]], dtype="float64")\n y = paddle.to_tensor([[0], [-9], [5]], dtype="float64")\n out = paddle.linalg.triangular_solve(x, y, upper=True)\n\n print(out)\n # [7, -2, -5]\n '
if paddle.in_dynamic_mode():
return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular)
inputs = {'X': [x], 'Y': [y]}
helper = LayerHelper('triangular_solve', **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve')
check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular})
return out |
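The same back-substitution check applies to triangular_solve; since upper=True, only the upper triangle of x participates. A hedged sketch reusing the docstring's system (an editorial illustration, not part of the Paddle source above):

import paddle

x = paddle.to_tensor([[1.0, 1.0, 1.0],
                      [0.0, 2.0, 1.0],
                      [0.0, 0.0, -1.0]], dtype="float64")
y = paddle.to_tensor([[0.0], [-9.0], [5.0]], dtype="float64")
out = paddle.linalg.triangular_solve(x, y, upper=True)

# Substituting the solution back should reproduce y.
print(paddle.allclose(x @ out, y))   # expected: True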
def cholesky_solve(x, y, upper=False, name=None):
'\n Solves a linear system of equations A @ X = B, given A\'s Cholesky factor matrix u and matrix B.\n\n Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs\n is also batches.\n\n Args:\n x (Tensor): The input matrix which is upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or\n more batch dimensions. Its data type should be float32 or float64.\n y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is \n zero or more batch dimensions. Its data type should be float32 or float64.\n upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False.\n name(str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: The solution of the system of equations. Its data type is the same as that of `x`.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n u = paddle.to_tensor([[1, 1, 1], \n [0, 2, 1],\n [0, 0,-1]], dtype="float64")\n b = paddle.to_tensor([[0], [-9], [5]], dtype="float64")\n out = paddle.linalg.cholesky_solve(b, u, upper=True)\n\n print(out)\n # [-2.5, -7, 9.5]\n '
if paddle.in_dynamic_mode():
return _C_ops.cholesky_solve(x, y, 'upper', upper)
helper = LayerHelper('cholesky_solve', **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve')
check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper})
return out | -8,255,322,614,350,314,000 | Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B.
Inputs `x` and `y` are 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs
are also batches.
Args:
x (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or
more batch dimensions. Its data type should be float32 or float64.
y (Tensor): The input matrix which is the upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is
zero or more batch dimensions. Its data type should be float32 or float64.
upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False.
name(str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: The solution of the system of equations. Its data type is the same as that of `x`.
Examples:
.. code-block:: python
import paddle
u = paddle.to_tensor([[1, 1, 1],
[0, 2, 1],
[0, 0,-1]], dtype="float64")
b = paddle.to_tensor([[0], [-9], [5]], dtype="float64")
out = paddle.linalg.cholesky_solve(b, u, upper=True)
print(out)
# [-2.5, -7, 9.5] | python/paddle/tensor/linalg.py | cholesky_solve | DevilCarp/Paddle | python | def cholesky_solve(x, y, upper=False, name=None):
'\n Solves a linear system of equations A @ X = B, given A\'s Cholesky factor matrix u and matrix B.\n\n Inputs `x` and `y` are 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs\n are also batches.\n\n Args:\n x (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or\n more batch dimensions. Its data type should be float32 or float64.\n y (Tensor): The input matrix which is the upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is \n zero or more batch dimensions. Its data type should be float32 or float64.\n upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False.\n name(str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: The solution of the system of equations. Its data type is the same as that of `x`.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n u = paddle.to_tensor([[1, 1, 1], \n [0, 2, 1],\n [0, 0,-1]], dtype="float64")\n b = paddle.to_tensor([[0], [-9], [5]], dtype="float64")\n out = paddle.linalg.cholesky_solve(b, u, upper=True)\n\n print(out)\n # [-2.5, -7, 9.5]\n '
if paddle.in_dynamic_mode():
return _C_ops.cholesky_solve(x, y, 'upper', upper)
helper = LayerHelper('cholesky_solve', **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve')
check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper})
return out |
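A short consistency check for cholesky_solve, reusing only values from the record's own example: with upper=True the factor u satisfies A = u^T @ u, so A @ out should reproduce the right-hand side b. The paddle.matmul call is a standard paddle API assumed available here.

import paddle

u = paddle.to_tensor([[1., 1., 1.], [0., 2., 1.], [0., 0., -1.]], dtype="float64")
b = paddle.to_tensor([[0.], [-9.], [5.]], dtype="float64")
out = paddle.linalg.cholesky_solve(b, u, upper=True)  # right-hand side first, factor second

A = paddle.matmul(u, u, transpose_x=True)  # A = u^T @ u for an upper factor
print(paddle.matmul(A, out))               # ~[[0.], [-9.], [5.]], recovering b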
def eigvalsh(x, UPLO='L', name=None):
"\n Computes the eigenvalues of a \n complex Hermitian (conjugate symmetric) or a real symmetric matrix.\n\n Args:\n x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x\n should be one of float32, float64, complex64, complex128.\n UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’).\n name(str, optional): The default value is None. Normally there is no need for user to set this\n property. For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: The tensor eigenvalues in ascending order.\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n import paddle\n\n x_data = np.array([[1, -2j], [2j, 5]])\n x = paddle.to_tensor(x_data)\n out_value = paddle.eigvalsh(x, UPLO='L')\n print(out_value)\n #[0.17157288, 5.82842712]\n "
if paddle.in_dynamic_mode():
is_test = x.stop_gradient
(values, _) = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test)
return values
def __check_input(x, UPLO):
x_shape = list(x.shape)
if (len(x.shape) < 2):
raise ValueError(('Input(input) only supports tensors with ndim >= 2, but the received ndim of Input(input) is %s.' % len(x.shape)))
if (x_shape[(- 1)] != x_shape[(- 2)]):
raise ValueError("The input matrix must be batches of square matrices. But received x's dimention: {}".format(x_shape))
if ((UPLO != 'L') and (UPLO != 'U')):
raise ValueError('UPLO must be L or U. But received UPLO is: {}'.format(UPLO))
__check_input(x, UPLO)
helper = LayerHelper('eigvalsh', **locals())
check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvalsh')
out_value = helper.create_variable_for_type_inference(dtype=x.dtype)
out_vector = helper.create_variable_for_type_inference(dtype=x.dtype)
is_test = x.stop_gradient
helper.append_op(type='eigvalsh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO, 'is_test': is_test})
return out_value | -352,417,534,785,145,600 | Computes the eigenvalues of a
complex Hermitian (conjugate symmetric) or a real symmetric matrix.
Args:
x (Tensor): A tensor with shape :math:`[_, M, M]`. The data type of the input Tensor x
should be one of float32, float64, complex64, complex128.
UPLO(str, optional): Whether to use the lower triangular part of the matrix (‘L’, default) or the upper triangular part (‘U’).
name(str, optional): The default value is None. Normally there is no need for user to set this
property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: The tensor eigenvalues in ascending order.
Examples:
.. code-block:: python
import numpy as np
import paddle
x_data = np.array([[1, -2j], [2j, 5]])
x = paddle.to_tensor(x_data)
out_value = paddle.eigvalsh(x, UPLO='L')
print(out_value)
#[0.17157288, 5.82842712] | python/paddle/tensor/linalg.py | eigvalsh | DevilCarp/Paddle | python | def eigvalsh(x, UPLO='L', name=None):
"\n Computes the eigenvalues of a \n complex Hermitian (conjugate symmetric) or a real symmetric matrix.\n\n Args:\n x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x\n should be one of float32, float64, complex64, complex128.\n UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’).\n name(str, optional): The default value is None. Normally there is no need for user to set this\n property. For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: The tensor eigenvalues in ascending order.\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n import paddle\n\n x_data = np.array([[1, -2j], [2j, 5]])\n x = paddle.to_tensor(x_data)\n out_value = paddle.eigvalsh(x, UPLO='L')\n print(out_value)\n #[0.17157288, 5.82842712]\n "
if paddle.in_dynamic_mode():
is_test = x.stop_gradient
(values, _) = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test)
return values
def __check_input(x, UPLO):
x_shape = list(x.shape)
if (len(x.shape) < 2):
raise ValueError(('Input(input) only supports tensors with ndim >= 2, but the received ndim of Input(input) is %s.' % len(x.shape)))
if (x_shape[(- 1)] != x_shape[(- 2)]):
raise ValueError("The input matrix must be batches of square matrices. But received x's dimention: {}".format(x_shape))
if ((UPLO != 'L') and (UPLO != 'U')):
raise ValueError('UPLO must be L or U. But received UPLO is: {}'.format(UPLO))
__check_input(x, UPLO)
helper = LayerHelper('eigvalsh', **locals())
check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvalsh')
out_value = helper.create_variable_for_type_inference(dtype=x.dtype)
out_vector = helper.create_variable_for_type_inference(dtype=x.dtype)
is_test = x.stop_gradient
helper.append_op(type='eigvalsh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO, 'is_test': is_test})
return out_value |
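A quick cross-check of eigvalsh against NumPy's reference implementation, reusing the matrix from the record's example; np.linalg.eigvalsh also returns eigenvalues in ascending order.

import numpy as np
import paddle

x_data = np.array([[1, -2j], [2j, 5]])
vals = paddle.eigvalsh(paddle.to_tensor(x_data), UPLO='L')
ref = np.linalg.eigvalsh(x_data)
print(vals.numpy(), ref)  # both ~[0.17157288, 5.82842712]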
def lstsq(x, y, rcond=None, driver=None, name=None):
'\n Computes a solution to\n the least squares problem of a system of linear equations.\n\n Args:\n x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x``\n should be one of float32, float64.\n y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` \n should be one of float32, float64.\n rcond(float, optional): The default value is None. A floating point number used to determine \n the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the \n machine precision of x_dtype.\n driver(str, optional): The default value is None. The name of LAPACK method to be used. For \n CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd’, ‘gelss’. For CUDA input, the only \n valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ \n for CUDA inputs.\n name(str, optional): The default value is None. Normally there is no need for user to set \n this property. For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). \n ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` \n is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed \n when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor \n with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in \n (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with \n shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when \n ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n paddle.set_device("cpu")\n x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]])\n y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]])\n results = paddle.linalg.lstsq(x, y, driver="gelsd")\n print(results[0])\n # [[ 0.78350395, -0.22165027, -0.62371236],\n # [-0.11340097, 0.78866047, 1.14948535]]\n print(results[1])\n # [19.81443405, 10.43814468, 30.56185532]\n print(results[2])\n # 2\n print(results[3])\n # [9.03455734, 1.54167950]\n\n x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]])\n y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]])\n results = paddle.linalg.lstsq(x, y, driver="gels")\n print(results[0])\n # [[ 0.39386186, 0.10230173, 0.93606132],\n # [ 0.10741687, -0.29028133, 0.11892585],\n # [-0.05115091, 0.51918161, -0.19948854]]\n print(results[1])\n # []\n '
device = paddle.get_device()
if (device == 'cpu'):
if (driver not in (None, 'gels', 'gelss', 'gelsd', 'gelsy')):
raise ValueError("Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}".format(driver))
driver = ('gelsy' if (driver is None) else driver)
elif ('gpu' in device):
if (driver not in (None, 'gels')):
raise ValueError("Only support valid driver is 'gels' or None for CUDA inputs. But got {}".format(driver))
driver = ('gels' if (driver is None) else driver)
else:
raise RuntimeError('Only support lstsq api for CPU or CUDA device.')
if ((x.dtype == y.dtype) and (x.dtype in (paddle.float32, paddle.float64))):
pass
else:
raise ValueError("Only support x and y have the same dtype such as 'float32' and 'float64'.")
if (rcond is None):
if (x.dtype == paddle.float32):
rcond = (1e-07 * max(x.shape[(- 2)], x.shape[(- 1)]))
elif (x.dtype == paddle.float64):
rcond = (1e-15 * max(x.shape[(- 2)], x.shape[(- 1)]))
if paddle.in_dynamic_mode():
(solution, rank, singular_values) = _C_ops.lstsq(x, y, 'rcond', rcond, 'driver', driver)
if (x.shape[(- 2)] > x.shape[(- 1)]):
matmul_out = _varbase_creator(dtype=x.dtype)
_C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y', False)
minus_out = _C_ops.elementwise_sub(matmul_out, y)
pow_out = _C_ops.pow(minus_out, 'factor', 2)
residuals = _C_ops.reduce_sum(pow_out, 'dim', [(- 2)], 'keepdim', False, 'reduce_all', False)
else:
residuals = paddle.empty(shape=[0], dtype=x.dtype)
if (driver == 'gels'):
rank = paddle.empty(shape=[0], dtype=paddle.int32)
singular_values = paddle.empty(shape=[0], dtype=x.dtype)
elif (driver == 'gelsy'):
singular_values = paddle.empty(shape=[0], dtype=x.dtype)
return (solution, residuals, rank, singular_values)
helper = LayerHelper('lstsq', **locals())
check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq')
check_variable_and_dtype(y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq')
solution = helper.create_variable_for_type_inference(dtype=x.dtype)
residuals = helper.create_variable_for_type_inference(dtype=x.dtype)
rank = helper.create_variable_for_type_inference(dtype=paddle.int32)
singular_values = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='lstsq', inputs={'X': x, 'Y': y}, outputs={'Solution': solution, 'Rank': rank, 'SingularValues': singular_values}, attrs={'rcond': rcond, 'driver': driver})
matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype)
minus_out = helper.create_variable_for_type_inference(dtype=x.dtype)
pow_out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='matmul_v2', inputs={'X': x, 'Y': solution}, outputs={'Out': matmul_out}, attrs={'trans_x': False, 'trans_y': False})
helper.append_op(type='elementwise_sub', inputs={'X': matmul_out, 'Y': y}, outputs={'Out': minus_out})
helper.append_op(type='pow', inputs={'X': minus_out}, outputs={'Out': pow_out}, attrs={'factor': 2})
helper.append_op(type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': residuals}, attrs={'dim': [(- 2)], 'keep_dim': False, 'reduce_all': False})
if (driver == 'gels'):
rank = paddle.static.data(name='rank', shape=[0])
singular_values = paddle.static.data(name='singular_values', shape=[0])
elif (driver == 'gelsy'):
singular_values = paddle.static.data(name='singular_values', shape=[0])
return (solution, residuals, rank, singular_values) | 7,536,901,320,083,422,000 | Computes a solution to
the least squares problem of a system of linear equations.
Args:
x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x``
should be one of float32, float64.
y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y``
should be one of float32, float64.
rcond(float, optional): The default value is None. A floating point number used to determine
the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the
machine precision of x_dtype.
driver(str, optional): The default value is None. The name of LAPACK method to be used. For
CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd’, ‘gelss’. For CUDA input, the only
valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’
for CUDA inputs.
name(str, optional): The default value is None. Normally there is no need for user to set
this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``).
``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals``
is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed
when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor
with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in
(‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with
shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when
``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor.
Examples:
.. code-block:: python
import paddle
paddle.set_device("cpu")
x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]])
y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]])
results = paddle.linalg.lstsq(x, y, driver="gelsd")
print(results[0])
# [[ 0.78350395, -0.22165027, -0.62371236],
# [-0.11340097, 0.78866047, 1.14948535]]
print(results[1])
# [19.81443405, 10.43814468, 30.56185532]
print(results[2])
# 2
print(results[3])
# [9.03455734, 1.54167950]
x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]])
y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]])
results = paddle.linalg.lstsq(x, y, driver="gels")
print(results[0])
# [[ 0.39386186, 0.10230173, 0.93606132],
# [ 0.10741687, -0.29028133, 0.11892585],
# [-0.05115091, 0.51918161, -0.19948854]]
print(results[1])
# [] | python/paddle/tensor/linalg.py | lstsq | DevilCarp/Paddle | python | def lstsq(x, y, rcond=None, driver=None, name=None):
'\n Computes a solution to\n the least squares problem of a system of linear equations.\n\n Args:\n x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x``\n should be one of float32, float64.\n y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` \n should be one of float32, float64.\n rcond(float, optional): The default value is None. A floating point number used to determine \n the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the \n machine precision of x_dtype.\n driver(str, optional): The default value is None. The name of LAPACK method to be used. For \n CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd’, ‘gelss’. For CUDA input, the only \n valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ \n for CUDA inputs.\n name(str, optional): The default value is None. Normally there is no need for user to set \n this property. For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). \n ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` \n is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed \n when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor \n with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in \n (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with \n shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when \n ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n paddle.set_device("cpu")\n x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]])\n y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]])\n results = paddle.linalg.lstsq(x, y, driver="gelsd")\n print(results[0])\n # [[ 0.78350395, -0.22165027, -0.62371236],\n # [-0.11340097, 0.78866047, 1.14948535]]\n print(results[1])\n # [19.81443405, 10.43814468, 30.56185532]\n print(results[2])\n # 2\n print(results[3])\n # [9.03455734, 1.54167950]\n\n x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]])\n y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]])\n results = paddle.linalg.lstsq(x, y, driver="gels")\n print(results[0])\n # [[ 0.39386186, 0.10230173, 0.93606132],\n # [ 0.10741687, -0.29028133, 0.11892585],\n # [-0.05115091, 0.51918161, -0.19948854]]\n print(results[1])\n # []\n '
device = paddle.get_device()
if (device == 'cpu'):
if (driver not in (None, 'gels', 'gelss', 'gelsd', 'gelsy')):
raise ValueError("Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}".format(driver))
driver = ('gelsy' if (driver is None) else driver)
elif ('gpu' in device):
if (driver not in (None, 'gels')):
raise ValueError("Only support valid driver is 'gels' or None for CUDA inputs. But got {}".format(driver))
driver = ('gels' if (driver is None) else driver)
else:
raise RuntimeError('Only support lstsq api for CPU or CUDA device.')
if ((x.dtype == y.dtype) and (x.dtype in (paddle.float32, paddle.float64))):
pass
else:
raise ValueError("Only support x and y have the same dtype such as 'float32' and 'float64'.")
if (rcond is None):
if (x.dtype == paddle.float32):
rcond = (1e-07 * max(x.shape[(- 2)], x.shape[(- 1)]))
elif (x.dtype == paddle.float64):
rcond = (1e-15 * max(x.shape[(- 2)], x.shape[(- 1)]))
if paddle.in_dynamic_mode():
(solution, rank, singular_values) = _C_ops.lstsq(x, y, 'rcond', rcond, 'driver', driver)
if (x.shape[(- 2)] > x.shape[(- 1)]):
matmul_out = _varbase_creator(dtype=x.dtype)
_C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y', False)
minus_out = _C_ops.elementwise_sub(matmul_out, y)
pow_out = _C_ops.pow(minus_out, 'factor', 2)
residuals = _C_ops.reduce_sum(pow_out, 'dim', [(- 2)], 'keepdim', False, 'reduce_all', False)
else:
residuals = paddle.empty(shape=[0], dtype=x.dtype)
if (driver == 'gels'):
rank = paddle.empty(shape=[0], dtype=paddle.int32)
singular_values = paddle.empty(shape=[0], dtype=x.dtype)
elif (driver == 'gelsy'):
singular_values = paddle.empty(shape=[0], dtype=x.dtype)
return (solution, residuals, rank, singular_values)
helper = LayerHelper('lstsq', **locals())
check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq')
check_variable_and_dtype(y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq')
solution = helper.create_variable_for_type_inference(dtype=x.dtype)
residuals = helper.create_variable_for_type_inference(dtype=x.dtype)
rank = helper.create_variable_for_type_inference(dtype=paddle.int32)
singular_values = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='lstsq', inputs={'X': x, 'Y': y}, outputs={'Solution': solution, 'Rank': rank, 'SingularValues': singular_values}, attrs={'rcond': rcond, 'driver': driver})
matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype)
minus_out = helper.create_variable_for_type_inference(dtype=x.dtype)
pow_out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='matmul_v2', inputs={'X': x, 'Y': solution}, outputs={'Out': matmul_out}, attrs={'trans_x': False, 'trans_y': False})
helper.append_op(type='elementwise_sub', inputs={'X': matmul_out, 'Y': y}, outputs={'Out': minus_out})
helper.append_op(type='pow', inputs={'X': minus_out}, outputs={'Out': pow_out}, attrs={'factor': 2})
helper.append_op(type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': residuals}, attrs={'dim': [(- 2)], 'keep_dim': False, 'reduce_all': False})
if (driver == 'gels'):
rank = paddle.static.data(name='rank', shape=[0])
singular_values = paddle.static.data(name='singular_values', shape=[0])
elif (driver == 'gelsy'):
singular_values = paddle.static.data(name='singular_values', shape=[0])
return (solution, residuals, rank, singular_values) |
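The record's first lstsq example (driver="gelsd") can be reproduced with NumPy's reference least-squares routine, which returns the same (solution, squared residuals, rank, singular values) tuple for this overdetermined 3x2 system.

import numpy as np

x = np.array([[1., 3.], [3., 2.], [5., 6.]])
y = np.array([[3., 4., 6.], [5., 3., 4.], [1., 2., 1.]])
sol, res, rank, sv = np.linalg.lstsq(x, y, rcond=None)
print(sol)   # ~[[0.7835, -0.2217, -0.6237], [-0.1134, 0.7887, 1.1495]]
print(res)   # squared residuals per column, ~[19.8144, 10.4381, 30.5619]
print(rank)  # 2
print(sv)    # ~[9.0346, 1.5417]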
def frobenius_norm(input, dim=None, keepdim=False, name=None):
'\n The Frobenius norm op calculates the Frobenius norm over two given dimensions of Tensor `input`.\n Args:\n input (Variable): Tensor, data type float32, float64.\n dim (list, optional): None for last two dimensions.\n keepdim (bool, optional): Whether to keep the dimensions of the output the same as `input`. Default: False.\n '
if ((dim is not None) and (not (isinstance(dim, list) and (len(dim) == 2)))):
raise ValueError('The dim of frobenius norm op should be None or two elements list!')
if paddle.in_dynamic_mode():
if (dim is None):
return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True)
return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False)
attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False}
if (dim is None):
attrs['reduce_all'] = True
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm')
helper = LayerHelper('frobenius_norm', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs)
return out | 8,133,598,796,588,167,000 | The Frobenius norm op calculates the Frobenius norm over two given dimensions of Tensor `input`.
Args:
input (Variable): Tensor, data type float32, float64.
dim (list, optional): None for last two dimensions.
keepdim (bool, optional): Whether to keep the dimensions of the output the same as `input`. Default: False. | python/paddle/tensor/linalg.py | frobenius_norm | DevilCarp/Paddle | python | def frobenius_norm(input, dim=None, keepdim=False, name=None):
'\n The Frobenius norm op calculates the Frobenius norm over two given dimensions of Tensor `input`.\n Args:\n input (Variable): Tensor, data type float32, float64.\n dim (list, optional): None for last two dimensions.\n keepdim (bool, optional): Whether to keep the dimensions of the output the same as `input`. Default: False.\n '
if ((dim is not None) and (not (isinstance(dim, list) and (len(dim) == 2)))):
raise ValueError('The dim of frobenius norm op should be None or two elements list!')
if paddle.in_dynamic_mode():
if (dim is None):
return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True)
return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False)
attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False}
if (dim is None):
attrs['reduce_all'] = True
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm')
helper = LayerHelper('frobenius_norm', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs)
return out |
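The Frobenius norm computed above is the entrywise 2-norm, so it can be sanity-checked with plain NumPy via either the elementwise definition or the trace identity ||A||_F = sqrt(trace(A^T A)).

import numpy as np

a = np.array([[1., 2.], [3., 4.]])
print(np.sqrt((a ** 2).sum()))     # ~5.4772, the definition the op implements
print(np.linalg.norm(a, 'fro'))    # same value from NumPy's built-in
print(np.sqrt(np.trace(a.T @ a)))  # same value via the trace identity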
def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None):
'\n Calculate the p-order vector norm along a given dimension of Tensor `input`.\n Args:\n input (Variable): Tensor, data type float32, float64.\n porder (float, optional): None for porder=2.0.\n axis (int, optional): None for last dimension.\n keepdim (bool, optional): Whether to keep the dimensions of the output the same as `input`. Default: False.\n '
if paddle.in_dynamic_mode():
if (axis is None):
axis = (- 1)
return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector)
if (porder is not None):
check_type(porder, 'porder', (float, int), 'p_norm')
if (axis is not None):
check_type(axis, 'axis', int, 'p_norm')
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm')
attrs = {'axis': (axis if (axis is not None) else (- 1)), 'porder': (float(porder) if (porder is not None) else 2.0), 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12}
helper = LayerHelper('p_norm', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs)
return out | -1,317,694,213,258,792,200 | Calculate the p-order vector norm along a given dimension of Tensor `input`.
Args:
input (Variable): Tensor, data type float32, float64.
porder (float, optional): None for porder=2.0.
axis (int, optional): None for last dimension.
keepdim (bool, optional): Whether to keep the dimensions of the output the same as `input`. Default: False. | python/paddle/tensor/linalg.py | vector_norm | DevilCarp/Paddle | python | def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None):
'\n Calculate the p-order vector norm along a given dimension of Tensor `input`.\n Args:\n input (Variable): Tensor, data type float32, float64.\n porder (float, optional): None for porder=2.0.\n axis (int, optional): None for last dimension.\n keepdim (bool, optional): Whether to keep the dimensions of the output the same as `input`. Default: False.\n '
if paddle.in_dynamic_mode():
if (axis is None):
axis = (- 1)
return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector)
if (porder is not None):
check_type(porder, 'porder', (float, int), 'p_norm')
if (axis is not None):
check_type(axis, 'axis', int, 'p_norm')
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm')
attrs = {'axis': (axis if (axis is not None) else (- 1)), 'porder': (float(porder) if (porder is not None) else 2.0), 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12}
helper = LayerHelper('p_norm', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs)
return out |
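The p-norm that vector_norm evaluates is (sum_i |x_i|^p)^(1/p); a NumPy sketch of the same computation, with p=2 reducing to the familiar Euclidean norm.

import numpy as np

v = np.array([3., -4., 12.])
p = 3.0
print((np.abs(v) ** p).sum() ** (1.0 / p))  # the p-norm formula
print(np.linalg.norm(v, ord=p))             # identical result
print(np.linalg.norm(v))                    # p=2 case: sqrt(9 + 16 + 144) = 13.0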
def p_matrix_norm(input, porder=1.0, axis=None, keepdim=False, name=None):
'\n NOTE:\n This function actually treats the matrix as a flattened vector and calculates a vector norm instead of a matrix norm.\n '
block = LayerHelper('norm', **locals())
out = block.create_variable_for_type_inference(dtype=block.input_dtype())
abs_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='abs', inputs={'X': input}, outputs={'Out': abs_out})
pow_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder})
sum_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': (True if (axis is None) else False)})
block.append_op(type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float((1.0 / porder))})
return out | -4,288,087,778,360,846,000 | NOTE:
This function actually treats the matrix as a flattened vector and calculates a vector norm instead of a matrix norm. | python/paddle/tensor/linalg.py | p_matrix_norm | DevilCarp/Paddle | python | def p_matrix_norm(input, porder=1.0, axis=None, keepdim=False, name=None):
'\n NOTE:\n This function actually treats the matrix as a flattened vector and calculates a vector norm instead of a matrix norm.\n '
block = LayerHelper('norm', **locals())
out = block.create_variable_for_type_inference(dtype=block.input_dtype())
abs_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='abs', inputs={'X': input}, outputs={'Out': abs_out})
pow_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder})
sum_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': (True if (axis is None) else False)})
block.append_op(type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float((1.0 / porder))})
return out |
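As the NOTE above warns, p_matrix_norm returns the entrywise norm of the flattened matrix, not an induced matrix norm; the NumPy sketch below makes the distinction concrete.

import numpy as np

a = np.array([[1., -2.], [3., 4.]])
entrywise = (np.abs(a) ** 3).sum() ** (1. / 3.)  # what p_matrix_norm computes for porder=3
flattened = np.linalg.norm(a.ravel(), ord=3)     # vector 3-norm of the flattened matrix
print(entrywise, flattened)                      # equal; neither is an induced matrix norm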
def mat_norm(input, porder=1.0, axis=None):
'\n NOTE:\n Calculate the matrix norm of a square matrix or batches of square matrices,\n when porder is in (1, -1, inf, -inf)\n '
reduce_all = (True if ((axis is None) or (axis == [])) else False)
axis = (axis if ((axis != None) and (axis != [])) else [0])
keepdim = False
if paddle.in_dynamic_mode():
abs_out = _C_ops.abs(input)
sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
if ((porder == 1) or (porder == np.inf)):
return _C_ops.reduce_max(sum_out, 'dim', [(- 1)], 'keepdim', keepdim, 'reduce_all', reduce_all)
if ((porder == (- 1)) or (porder == (- np.inf))):
return _C_ops.reduce_min(sum_out, 'dim', [(- 1)], 'keepdim', keepdim, 'reduce_all', reduce_all)
block = LayerHelper('norm', **locals())
abs_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
sum_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='abs', inputs={'X': input}, outputs={'Out': abs_out})
block.append_op(type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
if ((porder == 1) or (porder == np.inf)):
block.append_op(type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'dim': [(- 1)], 'keep_dim': keepdim, 'reduce_all': reduce_all})
if ((porder == (- 1)) or (porder == (- np.inf))):
block.append_op(type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'dim': [(- 1)], 'keep_dim': keepdim, 'reduce_all': reduce_all})
return out | 1,976,357,735,964,301,800 | NOTE:
Calculate the matrix norm of a square matrix or batches of square matrices,
when porder is in (1, -1, inf, -inf) | python/paddle/tensor/linalg.py | mat_norm | DevilCarp/Paddle | python | def mat_norm(input, porder=1.0, axis=None):
'\n NOTE:\n Calculate the matrix norm of a square matrix or batches of square matrices,\n when porder is in (1, -1, inf, -inf)\n '
reduce_all = (True if ((axis is None) or (axis == [])) else False)
axis = (axis if ((axis != None) and (axis != [])) else [0])
keepdim = False
if paddle.in_dynamic_mode():
abs_out = _C_ops.abs(input)
sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
if ((porder == 1) or (porder == np.inf)):
return _C_ops.reduce_max(sum_out, 'dim', [(- 1)], 'keepdim', keepdim, 'reduce_all', reduce_all)
if ((porder == (- 1)) or (porder == (- np.inf))):
return _C_ops.reduce_min(sum_out, 'dim', [(- 1)], 'keepdim', keepdim, 'reduce_all', reduce_all)
block = LayerHelper('norm', **locals())
abs_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
sum_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='abs', inputs={'X': input}, outputs={'Out': abs_out})
block.append_op(type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
if ((porder == 1) or (porder == np.inf)):
block.append_op(type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'dim': [(- 1)], 'keep_dim': keepdim, 'reduce_all': reduce_all})
if ((porder == (- 1)) or (porder == (- np.inf))):
block.append_op(type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'dim': [(- 1)], 'keep_dim': keepdim, 'reduce_all': reduce_all})
return out |
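mat_norm sums absolute values over `axis` and then takes a max (or min): with the default axis=[0] and porder=1 this yields the induced 1-norm (max absolute column sum), while the infinity norm needs the row axis. A NumPy illustration of the reductions involved:

import numpy as np

a = np.array([[1., -7.], [2., 3.]])
print(np.abs(a).sum(axis=0).max())  # 10.0 == np.linalg.norm(a, 1), max column sum
print(np.abs(a).sum(axis=1).max())  # 8.0 == np.linalg.norm(a, np.inf), max row sum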
def fro_norm(input, porder=2, axis=[(- 1)]):
'\n NOTE:\n Calculate the Frobenius norm of a square matrix or batches of square matrices.\n '
reduce_all = (True if ((axis is None) or (axis == [])) else False)
keepdim = False
if paddle.in_dynamic_mode():
pow_out = _C_ops.pow(input, 'factor', porder)
sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
return _C_ops.pow(sum_out_2, 'factor', float((1.0 / porder)))
block = LayerHelper('norm', **locals())
pow_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
sum_out_1 = block.create_variable_for_type_inference(dtype=block.input_dtype())
sum_out_2 = block.create_variable_for_type_inference(dtype=block.input_dtype())
out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder})
block.append_op(type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
block.append_op(type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
block.append_op(type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float((1.0 / porder))})
return out | -7,539,193,297,382,894,000 | NOTE:
Calculate the Frobenius norm of a square matrix or batches of square matrices. | python/paddle/tensor/linalg.py | fro_norm | DevilCarp/Paddle | python | def fro_norm(input, porder=2, axis=[(- 1)]):
'\n NOTE:\n Calculate the Frobenius norm of a square matrix or batches of square matrices.\n '
reduce_all = (True if ((axis is None) or (axis == [])) else False)
keepdim = False
if paddle.in_dynamic_mode():
pow_out = _C_ops.pow(input, 'factor', porder)
sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
return _C_ops.pow(sum_out_2, 'factor', float((1.0 / porder)))
block = LayerHelper('norm', **locals())
pow_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
sum_out_1 = block.create_variable_for_type_inference(dtype=block.input_dtype())
sum_out_2 = block.create_variable_for_type_inference(dtype=block.input_dtype())
out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder})
block.append_op(type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
block.append_op(type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
block.append_op(type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float((1.0 / porder))})
return out |
def svd_norm(input, porder, axis=[(- 1)]):
'\n NOTE:\n Calculate the matrix norm, which is related to singular values, of a matrix\n or batches of matrices, including nuclear norm, 2-norm and (-2)-norm.\n '
reduce_all = (True if ((axis is None) or (axis == [])) else False)
keepdim = False
(u, s, vh) = svd(input, full_matrices=False)
if paddle.in_dynamic_mode():
if (porder == 'nuc'):
return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
if (porder == 2):
return _C_ops.elementwise_div(max_out, min_out, 'axis', -1, 'use_mkldnn', False)
if (porder == (- 2)):
return _C_ops.elementwise_div(min_out, max_out, 'axis', -1, 'use_mkldnn', False)
block = LayerHelper('norm', **locals())
out = block.create_variable_for_type_inference(dtype=block.input_dtype())
if (porder == 'nuc'):
block.append_op(type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
return out
max_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
min_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
block.append_op(type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
if (porder == 2):
block.append_op(type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'axis': -1, 'use_mkldnn': False})
return out
if (porder == (- 2)):
block.append_op(type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'axis': -1, 'use_mkldnn': False})
return out | -4,169,968,877,713,200,600 | NOTE:
Calculate the matrix norm, which is related to singular values, of a matrix
or batches of matrices, including nuclear norm, 2-norm and (-2)-norm. | python/paddle/tensor/linalg.py | svd_norm | DevilCarp/Paddle | python | def svd_norm(input, porder, axis=[(- 1)]):
'\n NOTE:\n Calculate the matrix norm, which is related to singular values, of a matrix\n or batches of matrices, including nuclear norm, 2-norm and (-2)-norm.\n '
reduce_all = (True if ((axis is None) or (axis == [])) else False)
keepdim = False
(u, s, vh) = svd(input, full_matrices=False)
if paddle.in_dynamic_mode():
if (porder == 'nuc'):
return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
if (porder == 2):
return _C_ops.elementwise_div(max_out, min_out, 'axis', -1, 'use_mkldnn', False)
if (porder == (- 2)):
return _C_ops.elementwise_div(min_out, max_out, 'axis', -1, 'use_mkldnn', False)
block = LayerHelper('norm', **locals())
out = block.create_variable_for_type_inference(dtype=block.input_dtype())
if (porder == 'nuc'):
block.append_op(type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
return out
max_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
min_out = block.create_variable_for_type_inference(dtype=block.input_dtype())
block.append_op(type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
block.append_op(type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all})
if (porder == 2):
block.append_op(type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'axis': -1, 'use_mkldnn': False})
return out
if (porder == (- 2)):
block.append_op(type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'axis': -1, 'use_mkldnn': False})
return out |
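svd_norm works from the singular values: 'nuc' sums them (the nuclear norm), while the porder == 2 and porder == -2 branches return the ratio of the extreme singular values, i.e. the 2-norm condition number and its reciprocal. A NumPy sketch of the same quantities:

import numpy as np

a = np.array([[3., 0.], [0., 1.]])
s = np.linalg.svd(a, compute_uv=False)
print(s.sum())            # 4.0, the nuclear norm; cf. np.linalg.norm(a, 'nuc')
print(s.max() / s.min())  # 3.0, what the porder == 2 branch returns
print(s.min() / s.max())  # what the porder == -2 branch returns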
def testV1ScaleIOPersistentVolumeSource(self):
'\n Test V1ScaleIOPersistentVolumeSource\n '
pass | 5,658,198,493,830,950,000 | Test V1ScaleIOPersistentVolumeSource | kubernetes/test/test_v1_scale_io_persistent_volume_source.py | testV1ScaleIOPersistentVolumeSource | MiaoRachelYu/python | python | def testV1ScaleIOPersistentVolumeSource(self):
'\n \n '
pass |
def compute_positivity(dico):
" This computes the positivity score of each statement. \n Takes a dictionary with each statement as liste item and the corresponding interlocutor's name in names item \n \n "
dico_score = defaultdict((lambda : list()))
for (name, liste) in dico.items():
neg_score = 0
pos_score = 0
for ele in liste:
if (ele in negative_word_list):
neg_score += 1
elif (ele in positive_word_list):
pos_score += 1
else:
pass
if ((neg_score < 30) or (pos_score < 30)):
pass
else:
score = ((pos_score - neg_score) / (pos_score + neg_score))
dico_score[name] = score
return dico_score | 1,236,163,244,579,036,700 | This computes the positivity score of each speaker's statements.
Takes a dictionary mapping each interlocutor's name to the list of words in their statements.
" This computes the positivity score of each statement. \n Takes a dictionary with each statement as liste item and the corresponding interlocutor's name in names item \n \n "
dico_score = defaultdict((lambda : list()))
for (name, liste) in dico.items():
neg_score = 0
pos_score = 0
for ele in liste:
if (ele in negative_word_list):
neg_score += 1
elif (ele in positive_word_list):
pos_score += 1
else:
pass
if ((neg_score < 30) or (pos_score < 30)):
pass
else:
score = ((pos_score - neg_score) / (pos_score + neg_score))
dico_score[name] = score
return dico_score |
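A hedged usage sketch for compute_positivity: positive_word_list and negative_word_list are module-level globals not shown in the record, so stand-ins are supplied here; note that speakers with fewer than 30 hits of either polarity are silently skipped.

# Hypothetical stand-ins for the module-level word lists the function reads.
positive_word_list = {'good', 'strong', 'gain'}
negative_word_list = {'bad', 'weak', 'loss'}

tokens = (['good'] * 40) + (['bad'] * 40) + (['gain'] * 20)
print(compute_positivity({'Chair': tokens}))  # {'Chair': 0.2}, i.e. (60 - 40) / (60 + 40)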
def __init__(self, code):
'code: int - index of macro to run'
self.code = code | -4,599,207,852,549,858,300 | code: int - index of macro to run | homevision_netio_controller/controller.py | __init__ | jackoson/homevision-netio-controller | python | def __init__(self, code):
self.code = code |
def __init__(self, command):
'command: string - command to send'
self.command = command | -6,435,207,489,040,474,000 | command: string - command to send | homevision_netio_controller/controller.py | __init__ | jackoson/homevision-netio-controller | python | def __init__(self, command):
self.command = command |
def __init__(self, ip_address, port, auth, on_off_appliance_codes={}, actions={}, process_actions={}, var_queries={}, flag_queries={}, flag_return_values={True: ['True', 'On', 'Yes', 'Occupied', 'Set', '1'], False: ['False', 'Off', 'No', 'Vacant', 'Clear', '0']}, on_off_commands=None):
'\n Args:\n ip_address: string\n port: int\n auth: string\n - key for authenticating with netio\n on_off_appliance_codes: dict[string] => int - codes to be fed to \'on_off_commands\' for each appliance\n actions: dict[string] => Macro/Command/(_, _, ...) - named actions to be completed\n process_actions: dict[string] => {"START": X, "STOP": X} where X is Macro/Command/(_, _, ...) - named processes to be started and stopped\n var_queries: dict[string] => int - mapping of names to variable indexes\n flag_queries: dict[string] => int - mapping of names to flag indexes\n flag_return_values: {True: [string], False: [string]} - synonyms for true and false that are returned by netio \'read flag command\'. (ignore if you haven\'t set them up)\n on_off_commands: {"ON": (int) => Macro/Command/(_, _, ...), "OFF": (int) => Macro/Command} - how to handle on and off commands\n '
self.ip_address = ip_address
self.port = port
self.auth = auth
self.on_off_appliance_codes = on_off_appliance_codes
self.actions = actions
self.process_actions = process_actions
self.var_queries = var_queries
self.flag_queries = flag_queries
self.flag_return_values = flag_return_values
self.on_off_commands = on_off_commands | -5,714,372,172,230,657,000 | Args:
ip_address: string
port: int
auth: string
- key for authenticating with netio
on_off_appliance_codes: dict[string] => int - codes to be fed to 'on_off_commands' for each appliance
actions: dict[string] => Macro/Command/(_, _, ...) - named actions to be completed
process_actions: dict[string] => {"START": X, "STOP": X} where X is Macro/Command/(_, _, ...) - named processes to be started and stopped
var_queries: dict[string] => int - mapping of names to variable indexes
flag_queries: dict[string] => int - mapping of names to flag indexes
flag_return_values: {True: [string], False: [string]} - synonyms for true and false that are returned by netio 'read flag command'. (ignore if you haven't set them up)
on_off_commands: {"ON": (int) => Macro/Command/(_, _, ...), "OFF": (int) => Macro/Command} - how to handle on and off commands | homevision_netio_controller/controller.py | __init__ | jackoson/homevision-netio-controller | python | def __init__(self, ip_address, port, auth, on_off_appliance_codes={}, actions={}, process_actions={}, var_queries={}, flag_queries={}, flag_return_values={True: ['True', 'On', 'Yes', 'Occupied', 'Set', '1'], False: ['False', 'Off', 'No', 'Vacant', 'Clear', '0']}, on_off_commands=None):
'\n Args:\n ip_address: string\n port: int\n auth: string\n - key for authenticating with netio\n on_off_appliance_codes: dict[string] => int - codes to be fed to \'on_off_commands\' for each appliance\n actions: dict[string] => Macro/Command/(_, _, ...) - named actions to be completed\n process_actions: dict[string] => {"START": X, "STOP": X} where X is Macro/Command/(_, _, ...) - named processes to be started and stopped\n var_queries: dict[string] => int - mapping of names to variable indexes\n flag_queries: dict[string] => int - mapping of names to flag indexes\n flag_return_values: {True: [string], False: [string]} - synonyms for true and false that are returned by netio \'read flag command\'. (ignore if you haven\'t set them up)\n on_off_commands: {"ON": (int) => Macro/Command/(_, _, ...), "OFF": (int) => Macro/Command} - how to handle on and off commands\n '
self.ip_address = ip_address
self.port = port
self.auth = auth
self.on_off_appliance_codes = on_off_appliance_codes
self.actions = actions
self.process_actions = process_actions
self.var_queries = var_queries
self.flag_queries = flag_queries
self.flag_return_values = flag_return_values
self.on_off_commands = on_off_commands |
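A construction sketch for this controller, assuming the enclosing class is named Controller (the record shows only its __init__); every value below is hypothetical, and the Macro wrapper comes from the earlier record.

controller = Controller(
    ip_address='192.168.1.50',           # illustrative address
    port=6789,                           # illustrative port
    auth='secret-key',                   # illustrative netio key
    on_off_appliance_codes={'lamp': 3},
    actions={'ALL OFF': Macro(12)},
    flag_queries={'heating': 4},
)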
def on_off_command(self, details):
'Send an on or off command to an appliance\n \n Sends the specified command to the homevision through netio interface to control the specified appliance.\n \n Args:\n details: {"appliance": string, "state": string} \n '
if ('appliance' not in details):
raise Exception('appliance not specified')
elif ('state' not in details):
raise Exception('state not specified')
if (details['appliance'] not in self.on_off_appliance_codes.keys()):
raise Exception(('appliance not supported. Must be one of: ' + ','.join(self.on_off_appliance_codes.keys())))
appliance_code = self.on_off_appliance_codes[details['appliance']]
if (details['state'] == 'ON'):
self._switch_on(appliance_code)
elif (details['state'] == 'OFF'):
self._switch_off(appliance_code)
else:
raise Exception('state not supported. Must be either "ON" or "OFF".') | -79,934,147,174,809,070 | Send an on or off command to an appliance
Sends the specified command to the homevision through netio interface to control the specified appliance.
Args:
details: {"appliance": string, "state": string} | homevision_netio_controller/controller.py | on_off_command | jackoson/homevision-netio-controller | python | def on_off_command(self, details):
'Send an on or off command to an appliance\n \n Sends the specified command to the homevision through netio interface to control the specified appliance.\n \n Args:\n details: {"appliance": string, "state": string} \n '
if ('appliance' not in details):
raise Exception('appliance not specified')
elif ('state' not in details):
raise Exception('state not specified')
if (details['appliance'] not in self.on_off_appliance_codes.keys()):
raise Exception(('appliance not supported. Must be one of: ' + ','.join(self.on_off_appliance_codes.keys())))
appliance_code = self.on_off_appliance_codes[details['appliance']]
if (details['state'] == 'ON'):
self._switch_on(appliance_code)
elif (details['state'] == 'OFF'):
self._switch_off(appliance_code)
else:
raise Exception('state not supported. Must be either "ON" or "OFF".') |
def action_command(self, details):
'Send an action command\n \n Sends the specified command to the homevision through netio interface.\n \n Args:\n details: {"command": string} \n '
if ('command' not in details):
raise Exception('Command not specified')
if (details['command'] not in self.actions.keys()):
raise Exception(('Command not supported. Must be one of: ' + ','.join(self.actions.keys())))
self._handle_action(self.actions[details['command']]) | -232,058,001,206,219,100 | Send an action command
Sends the specified command to the homevision through netio interface.
Args:
details: {"command": string} | homevision_netio_controller/controller.py | action_command | jackoson/homevision-netio-controller | python | def action_command(self, details):
'Send an action command\n \n Sends the specified command to the homevision through netio interface.\n \n Args:\n details: {"command": string} \n '
if ('command' not in details):
raise Exception('Command not specified')
if (details['command'] not in self.actions.keys()):
raise Exception(('Command not supported. Must be one of: ' + ','.join(self.actions.keys())))
self._handle_action(self.actions[details['command']]) |
def start_stop_command(self, details):
'Starts or stops a process\n \n Sends the specified command to the homevision through netio interface to control the specified process.\n \n Args:\n details: {"action": string, "process": string} \n '
if ('action' not in details):
raise Exception('action not specified')
elif ('process' not in details):
raise Exception('process not specified')
if (details['process'] not in self.process_actions.keys()):
raise Exception(('process not supported. Must be one of: ' + ','.join(self.process_actions.keys())))
if (details['action'] == 'START'):
self._handle_action(self.process_actions[details['process']]['START'])
elif (details['action'] == 'STOP'):
self._handle_action(self.process_actions[details['process']]['STOP'])
else:
raise Exception('action not supported. Must be either "START" or "STOP".') | -596,609,483,046,554,400 | Starts or stops a process
Sends the specified command to the homevision through netio interface to control the specified process.
Args:
details: {"action": string, "process": string} | homevision_netio_controller/controller.py | start_stop_command | jackoson/homevision-netio-controller | python | def start_stop_command(self, details):
'Starts or stops a process\n \n Sends the specified command to the homevision through netio interface to control the specified process.\n \n Args:\n details: {"action": string, "process": string} \n '
if ('action' not in details):
raise Exception('action not specified')
elif ('process' not in details):
raise Exception('process not specified')
if (details['process'] not in self.process_actions.keys()):
raise Exception(('process not supported. Must be one of: ' + ','.join(self.process_actions.keys())))
if (details['action'] == 'START'):
self._handle_action(self.process_actions[details['process']]['START'])
elif (details['action'] == 'STOP'):
self._handle_action(self.process_actions[details['process']]['STOP'])
else:
raise Exception('action not supported. Must be either "START" or "STOP".') |
def var_query(self, details):
'Returns the answer to a query on variable\n \n Returns the answer to a query on the specified variable using netio\n \n Args:\n details: {"query": string} \n '
if ('query' not in details):
raise Exception('query not specified')
if (details['query'] not in self.var_queries.keys()):
raise Exception(('query not supported. Must be one of: ' + ','.join(self.var_queries.keys())))
code = self.var_queries[details['query']]
if (type(code) == int):
val = self._get_var(code)
elif (type(code) == tuple):
val = [self._get_var(c) for c in code]
else:
raise Exception('Internal Exception: variable code is not valid')
return val | -7,702,968,477,016,100,000 | Returns the answer to a query on variable
Returns the answer to a query on the specified variable using netio
Args:
details: {"query": string} | homevision_netio_controller/controller.py | var_query | jackoson/homevision-netio-controller | python | def var_query(self, details):
'Returns the answer to a query on variable\n \n Returns the answer to a query on the specified variable using netio\n \n Args:\n details: {"query": string} \n '
if ('query' not in details):
raise Exception('query not specified')
if (details['query'] not in self.var_queries.keys()):
raise Exception(('query not supported. Must be one of: ' + ','.join(self.var_queries.keys())))
code = self.var_queries[details['query']]
if (type(code) == int):
val = self._get_var(code)
elif (type(code) == tuple):
val = [self._get_var(c) for c in code]
else:
raise Exception('Internal Exception: variable code is not valid')
return val |
def flag_query(self, details):
'Returns the answer to a query on flag\n \n Returns the answer to a query on the specified variable using netio\n \n Args:\n details: {"query": string} \n '
if ('query' not in details):
raise Exception('query not specified')
if (details['query'] not in self.flag_queries.keys()):
raise Exception(('query not supported. Must be one of: ' + ','.join(self.flag_queries.keys())))
val = self._get_flag(self.flag_queries[details['query']])
return ('yes' if val else 'no') | -8,740,578,671,466,509,000 | Returns the answer to a query on flag
Returns the answer to a query on the specified variable using netio
Args:
details: {"query": string} | homevision_netio_controller/controller.py | flag_query | jackoson/homevision-netio-controller | python | def flag_query(self, details):
'Returns the answer to a query on flag\n \n Returns the answer to a query on the specified variable using netio\n \n Args:\n details: {"query": string} \n '
if ('query' not in details):
raise Exception('query not specified')
if (details['query'] not in self.flag_queries.keys()):
raise Exception(('query not supported. Must be one of: ' + ','.join(self.flag_queries.keys())))
val = self._get_flag(self.flag_queries[details['query']])
return ('yes' if val else 'no') |
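With the hypothetical controller sketched earlier, the dict-based methods documented in the preceding records are invoked like this; appliance, command, and query names must match the mappings given to the constructor.

controller.on_off_command({'appliance': 'lamp', 'state': 'ON'})
controller.action_command({'command': 'ALL OFF'})
print(controller.flag_query({'query': 'heating'}))  # 'yes' or 'no'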
def I():
'Identity operator.'
return np.identity(2) | 4,501,568,749,302,991,000 | Identity operator. | quantum.py | I | duboviy/misc | python | def I():
return np.identity(2) |
def X():
'X-rotation, negation operator.'
return np.identity(2)[..., ::(- 1)] | -164,720,657,164,185,280 | X-rotation, negation operator. | quantum.py | X | duboviy/misc | python | def X():
return np.identity(2)[..., ::(- 1)] |
def H():
'Hadamard operator, superposition.'
return (np.array([[1, 1], [1, (- 1)]]) / np.sqrt(2)) | -3,200,611,757,409,645,600 | Hadamard operator, superposition. | quantum.py | H | duboviy/misc | python | def H():
return (np.array([[1, 1], [1, (- 1)]]) / np.sqrt(2)) |
def SWAP():
'Swap 2 qubits'
m = np.identity(4)
m[[1, 2]] = m[[2, 1]]
return m | -3,730,938,159,623,653,400 | Swap 2 qubits | quantum.py | SWAP | duboviy/misc | python | def SWAP():
m = np.identity(4)
m[[1, 2]] = m[[2, 1]]
return m |
def CX():
'Controlled negation.'
m = np.identity(4)
m[[3, 2]] = m[[2, 3]]
return m | -5,465,632,063,229,900,000 | Controlled negation. | quantum.py | CX | duboviy/misc | python | def CX():
m = np.identity(4)
m[[3, 2]] = m[[2, 3]]
return m |
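Since these helpers just return NumPy matrices, they compose with ordinary matrix products and Kronecker products. A quick sketch (nothing assumed beyond NumPy) that prepares a Bell state from the definitions above:

import numpy as np

I = np.identity(2)
H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
CX = np.identity(4)
CX[[3, 2]] = CX[[2, 3]]  # swap the |10> and |11> rows, as in CX() above

state = np.array([1.0, 0.0, 0.0, 0.0])   # |00>
bell = CX @ np.kron(H, I) @ state        # Hadamard on qubit 0, then CNOT
print(bell)  # [0.707.., 0, 0, 0.707..] == (|00> + |11>)/sqrt(2)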
@classmethod
def schema(cls) -> dict:
"we're overriding the schema classmethod to enable some post-processing"
schema = super().schema()
schema = cls.change_format_to_oneOf(schema)
return cls.resolve_refs(schema) | 2,795,126,804,326,684,700 | we're overriding the schema classmethod to enable some post-processing | airbyte-integrations/connectors/source-tiktok-marketing/source_tiktok_marketing/spec.py | schema | 99designs/airbyte | python | @classmethod
def schema(cls) -> dict:
schema = super().schema()
schema = cls.change_format_to_oneOf(schema)
return cls.resolve_refs(schema) |
@javaConstructorOverload(java_imports['Long'], (make_sig(['long'], 'void'), (metaLong,)), (make_sig([java_imports['String']], 'void'), (str,)))
def __init__(self, *args, **kwargs):
'\n Instantiates a new Long\n\n Signatures:\n\n Long(long value)\n Long(String s)\n\n Arguments:\n\n Long(long value)\n value -- The long to wrap in the object\n Long (String s)\n s -- The string representing the long\n '
super(Long, self).__init__(*args, generic=(Long,), **kwargs) | 9,194,312,380,020,609,000 | Instantiates a new Long
Signatures:
Long(long value)
Long(String s)
Arguments:
Long(long value)
value -- The long to wrap in the object
Long (String s)
s -- The string representing the long | TASSELpy/java/lang/Long.py | __init__ | er432/TASSELpy | python | @javaConstructorOverload(java_imports['Long'], (make_sig(['long'], 'void'), (metaLong,)), (make_sig([java_imports['String']], 'void'), (str,)))
def __init__(self, *args, **kwargs):
'\n Instantiates a new Long\n\n Signatures:\n\n Long(long value)\n Long(String s)\n\n Arguments:\n\n Long(long value)\n value -- The long to wrap in the object\n Long (String s)\n s -- The string representing the long\n '
super(Long, self).__init__(*args, generic=(Long,), **kwargs) |
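The javaConstructorOverload decorator above dispatches on the runtime types of the arguments. A rough, self-contained sketch of that idea; the decorator name and mechanics here are simplified assumptions, not the TASSELpy implementation:

import functools

def overload_by_type(*sigs):
    # sigs: (types_tuple, tag) pairs; the first tuple whose types match wins.
    def deco(fn):
        @functools.wraps(fn)
        def wrapper(self, *args):
            for types, tag in sigs:
                if len(args) == len(types) and all(isinstance(a, t) for a, t in zip(args, types)):
                    return fn(self, *args, matched=tag)
            raise TypeError('no matching overload for %r' % (args,))
        return wrapper
    return deco

class Long:
    @overload_by_type(((int,), 'Long(long value)'), ((str,), 'Long(String s)'))
    def __init__(self, value, matched=None):
        self.value = int(value)  # both signatures normalise to a Python int
        print('dispatched via', matched)

Long(42)    # dispatched via Long(long value)
Long('42')  # dispatched via Long(String s)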
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities) -> None:
'Set up discovered lights.'
devs = []
for dev in hass.data[AQUALINK_DOMAIN][DOMAIN]:
devs.append(HassAqualinkLight(dev))
async_add_entities(devs, True) | 4,148,767,531,453,222,000 | Set up discovered lights. | homeassistant/components/iaqualink/light.py | async_setup_entry | 0xFEEDC0DE64/homeassistant-core | python | async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities) -> None:
devs = []
for dev in hass.data[AQUALINK_DOMAIN][DOMAIN]:
devs.append(HassAqualinkLight(dev))
async_add_entities(devs, True) |
@property
def name(self) -> str:
'Return the name of the light.'
return self.dev.label | 331,499,236,811,538,800 | Return the name of the light. | homeassistant/components/iaqualink/light.py | name | 0xFEEDC0DE64/homeassistant-core | python | @property
def name(self) -> str:
return self.dev.label |
@property
def is_on(self) -> bool:
'Return whether the light is on or off.'
return self.dev.is_on | -7,804,622,775,240,501,000 | Return whether the light is on or off. | homeassistant/components/iaqualink/light.py | is_on | 0xFEEDC0DE64/homeassistant-core | python | @property
def is_on(self) -> bool:
return self.dev.is_on |
@refresh_system
async def async_turn_on(self, **kwargs) -> None:
'Turn on the light.\n\n This handles brightness and light effects for lights that do support\n them.\n '
brightness = kwargs.get(ATTR_BRIGHTNESS)
effect = kwargs.get(ATTR_EFFECT)
if effect:
effect = AqualinkLightEffect[effect].value
(await self.dev.set_effect(effect))
elif brightness:
pct = (int(round(((brightness * 4.0) / 255))) * 25)
(await self.dev.set_brightness(pct))
else:
(await self.dev.turn_on()) | -9,093,624,857,957,879,000 | Turn on the light.
This handles brightness and light effects for lights that do support
them. | homeassistant/components/iaqualink/light.py | async_turn_on | 0xFEEDC0DE64/homeassistant-core | python | @refresh_system
async def async_turn_on(self, **kwargs) -> None:
'Turn on the light.\n\n This handles brightness and light effects for lights that do support\n them.\n '
brightness = kwargs.get(ATTR_BRIGHTNESS)
effect = kwargs.get(ATTR_EFFECT)
if effect:
effect = AqualinkLightEffect[effect].value
(await self.dev.set_effect(effect))
elif brightness:
pct = (int(round(((brightness * 4.0) / 255))) * 25)
(await self.dev.set_brightness(pct))
else:
(await self.dev.turn_on()) |
@refresh_system
async def async_turn_off(self, **kwargs) -> None:
'Turn off the light.'
(await self.dev.turn_off()) | -5,875,403,621,456,464,000 | Turn off the light. | homeassistant/components/iaqualink/light.py | async_turn_off | 0xFEEDC0DE64/homeassistant-core | python | @refresh_system
async def async_turn_off(self, **kwargs) -> None:
(await self.dev.turn_off()) |
@property
def brightness(self) -> int:
'Return current brightness of the light.\n\n The scale needs converting between 0-100 and 0-255.\n '
return ((self.dev.brightness * 255) / 100) | -8,752,403,519,390,625,000 | Return current brightness of the light.
The scale needs converting between 0-100 and 0-255. | homeassistant/components/iaqualink/light.py | brightness | 0xFEEDC0DE64/homeassistant-core | python | @property
def brightness(self) -> int:
'Return current brightness of the light.\n\n The scale needs converting between 0-100 and 0-255.\n '
return ((self.dev.brightness * 255) / 100) |
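The two conversions above imply that brightness is quantised to 25% steps on the device side. A small round-trip sketch in plain Python, mirroring the arithmetic in async_turn_on and the brightness property:

def to_pct(brightness):            # Home Assistant 0-255 -> device 0/25/50/75/100
    return int(round(brightness * 4.0 / 255)) * 25

def to_ha(pct):                    # device percent -> Home Assistant 0-255
    return pct * 255 / 100

for b in (0, 64, 128, 192, 255):
    print(b, '->', to_pct(b), '->', round(to_ha(to_pct(b))))
# Note the quantisation: e.g. 192 maps to 75% and comes back as 191.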
@property
def effect(self) -> str:
'Return the current light effect if supported.'
return AqualinkLightEffect(self.dev.effect).name | 4,174,574,449,923,972,600 | Return the current light effect if supported. | homeassistant/components/iaqualink/light.py | effect | 0xFEEDC0DE64/homeassistant-core | python | @property
def effect(self) -> str:
return AqualinkLightEffect(self.dev.effect).name |
@property
def effect_list(self) -> list:
'Return supported light effects.'
return list(AqualinkLightEffect.__members__) | 9,045,013,550,262,554,000 | Return supported light effects. | homeassistant/components/iaqualink/light.py | effect_list | 0xFEEDC0DE64/homeassistant-core | python | @property
def effect_list(self) -> list:
return list(AqualinkLightEffect.__members__) |
@property
def supported_features(self) -> int:
'Return the list of features supported by the light.'
if self.dev.is_dimmer:
return SUPPORT_BRIGHTNESS
if self.dev.is_color:
return SUPPORT_EFFECT
return 0 | 2,749,345,628,372,254,700 | Return the list of features supported by the light. | homeassistant/components/iaqualink/light.py | supported_features | 0xFEEDC0DE64/homeassistant-core | python | @property
def supported_features(self) -> int:
if self.dev.is_dimmer:
return SUPPORT_BRIGHTNESS
if self.dev.is_color:
return SUPPORT_EFFECT
return 0 |
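The returned value is a bit mask that callers test with bitwise AND. A tiny sketch; the numeric values follow older Home Assistant releases (SUPPORT_BRIGHTNESS = 1, SUPPORT_EFFECT = 4) and should be treated as illustrative assumptions:

SUPPORT_BRIGHTNESS, SUPPORT_EFFECT = 1, 4  # assumed legacy HA flag values

features = SUPPORT_BRIGHTNESS             # e.g. what a dimmer reports
if features & SUPPORT_BRIGHTNESS:
    print('light is dimmable')
if not features & SUPPORT_EFFECT:
    print('no light effects')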
def is_language(self, s, expected_lang):
' Check if the language of the segment cannot be reliably identified\n as another language. If a language other than the expected one is\n detected, return False '
expected_lang = expected_lang.lower()
if self.valid_languages:
assert (expected_lang in self.valid_languages)
if self.use_cld2:
(reliable, _text_bytes, details) = cld2.detect(s.encode('utf-8'), isPlainText=True, useFullLangTables=True, bestEffort=True)
if reliable:
for (_lang, langcode, confidence, score) in details:
if ((langcode == expected_lang) and (confidence >= 10)):
return True
return False
else:
return True
else:
(lang, confidence) = langid.classify(s.lower())
if ((lang != expected_lang) and (confidence > 0.9)):
return False
else:
return True | 5,011,547,454,327,538,000 | Check if the language of the segment cannot be reliably identified
as another language. If a language other than the expected one is
detected, return False | baseline/filter_hunalign_bitext.py | is_language | christianbuck/CorpusMining | python | def is_language(self, s, expected_lang):
' Check if the language of the segment cannot be reliably identified\n as another language. If a language other than the expected one is\n detected, return False '
expected_lang = expected_lang.lower()
if self.valid_languages:
assert (expected_lang in self.valid_languages)
if self.use_cld2:
(reliable, _text_bytes, details) = cld2.detect(s.encode('utf-8'), isPlainText=True, useFullLangTables=True, bestEffort=True)
if reliable:
for (_lang, langcode, confidence, score) in details:
if ((langcode == expected_lang) and (confidence >= 10)):
return True
return False
else:
return True
else:
(lang, confidence) = langid.classify(s.lower())
if ((lang != expected_lang) and (confidence > 0.9)):
return False
else:
return True |
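A cut-down sketch of the langid fallback path above, with the fix applied (the segment itself must be classified, not an unrelated name). One caveat, stated as an assumption: langid's module-level classify returns an unnormalised score by default, so the 0.9 threshold only behaves like a probability if normalisation is enabled on the identifier.

import langid

def looks_like_other_language(text, expected_lang, threshold=0.9):
    # Returns True when the detector confidently picks a different language.
    lang, confidence = langid.classify(text.lower())
    return lang != expected_lang and confidence > threshold

print(looks_like_other_language('Guten Morgen, wie geht es dir?', 'en'))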
@abstractmethod
def find_files(self, directory: Union[(str, Path)], extensions: Optional[Union[(Sequence, str)]]=None, keywords: Optional[Union[(list, str)]]=None, hemisphere: Optional[str]=None, stimulation: Optional[str]=None, medication: Optional[str]=None, exclude: Optional[Union[(str, list)]]=None, verbose: bool=False) -> None:
'Find files in directory with optional\n keywords and extensions.' | 3,752,076,389,859,590,700 | Find files in directory with optional
keywords and extensions. | src/pte/filetools/filefinder_abc.py | find_files | richardkoehler/pte | python | @abstractmethod
def find_files(self, directory: Union[(str, Path)], extensions: Optional[Union[(Sequence, str)]]=None, keywords: Optional[Union[(list, str)]]=None, hemisphere: Optional[str]=None, stimulation: Optional[str]=None, medication: Optional[str]=None, exclude: Optional[Union[(str, list)]]=None, verbose: bool=False) -> None:
'Find files in directory with optional\n keywords and extensions.' |
@abstractmethod
def filter_files(self, keywords: Optional[Union[(str, list)]]=None, hemisphere: Optional[str]=None, stimulation: Optional[str]=None, medication: Optional[str]=None, exclude: Optional[Union[(str, list)]]=None, verbose: bool=False) -> None:
'Filter list of filepaths for given parameters.' | -1,411,756,026,739,772,400 | Filter list of filepaths for given parameters. | src/pte/filetools/filefinder_abc.py | filter_files | richardkoehler/pte | python | @abstractmethod
def filter_files(self, keywords: Optional[Union[(str, list)]]=None, hemisphere: Optional[str]=None, stimulation: Optional[str]=None, medication: Optional[str]=None, exclude: Optional[Union[(str, list)]]=None, verbose: bool=False) -> None:
|
def _find_files(self, directory: Union[(Path, str)], extensions: Optional[Union[(list, str)]]=None) -> None:
'Find files in directory with optional extensions.\n\n Args:\n directory (string)\n keywords (list): e.g. ["SelfpacedRota", "ButtonPress"] (optional)\n extensions (list): e.g. [".json" or "tsv"] (optional)\n verbose (bool): verbosity level (optional, default=True)\n '
files = []
for (root, _, fnames) in os.walk(directory):
fnames = [os.path.join(root, file) for file in fnames]
fnames = self._keyword_search(fnames, extensions)
if fnames:
files.extend(fnames)
self.files = files | 56,373,943,236,067,064 | Find files in directory with optional extensions.
Args:
directory (string)
keywords (list): e.g. ["SelfpacedRota", "ButtonPress"] (optional)
extensions (list): e.g. [".json" or "tsv"] (optional)
verbose (bool): verbosity level (optional, default=True) | src/pte/filetools/filefinder_abc.py | _find_files | richardkoehler/pte | python | def _find_files(self, directory: Union[(Path, str)], extensions: Optional[Union[(list, str)]]=None) -> None:
'Find files in directory with optional extensions.\n\n Args:\n directory (string)\n keywords (list): e.g. ["SelfpacedRota", "ButtonPress"] (optional)\n extensions (list): e.g. [".json" or "tsv"] (optional)\n verbose (bool): verbosity level (optional, default=True)\n '
files = []
for (root, _, fnames) in os.walk(directory):
fnames = [os.path.join(root, file) for file in fnames]
fnames = self._keyword_search(fnames, extensions)
if fnames:
files.extend(fnames)
self.files = files |
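Both _find_files and _filter_files lean on a _keyword_search helper that is not shown here. A hypothetical stand-in (the real implementation may match any keyword instead of all, or use regexes):

def _keyword_search(files, keywords):
    # Hypothetical: keep only paths containing every given keyword.
    if not keywords:
        return list(files)
    if isinstance(keywords, str):
        keywords = [keywords]
    return [f for f in files if all(k in f for k in keywords)]

print(_keyword_search(['sub-01_MedOn_ieeg.vhdr', 'sub-02_MedOff_ieeg.vhdr'], ['MedOn']))
# ['sub-01_MedOn_ieeg.vhdr']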
def _filter_files(self, keywords: Optional[Union[(str, list[str])]]=None, hemisphere: Optional[str]=None, stimulation: Optional[str]=None, medication: Optional[str]=None, exclude: Optional[Union[(str, list[str])]]=None) -> None:
'Filter filepaths for given parameters.'
filtered_files = self.files
if exclude:
if (not isinstance(exclude, list)):
exclude = [exclude]
filtered_files = [file for file in filtered_files if (not any(((item in file) for item in exclude)))]
if keywords:
if (not isinstance(keywords, list)):
keywords = [keywords]
filtered_files = self._keyword_search(filtered_files, keywords)
if stimulation:
if (stimulation.lower() in 'stimon'):
stim = 'StimOn'
elif (stimulation.lower() in 'stimoff'):
stim = 'StimOff'
else:
raise ValueError('Keyword for stimulation not valid.')
filtered_files = self._keyword_search(filtered_files, [stim])
if medication:
if (medication.lower() in 'medon'):
med = 'MedOn'
elif (medication.lower() in 'medoff'):
med = 'MedOff'
else:
raise ValueError('Keyword for medication not valid.')
filtered_files = self._keyword_search(filtered_files, [med])
if hemisphere:
matching_files = []
for file in filtered_files:
subject = mne_bids.get_entities_from_fname(file)['subject']
if ((subject not in self.hemispheres) or (self.hemispheres[subject] is None)):
raise HemisphereNotSpecifiedError(subject, self.hemispheres)
hem = (self.hemispheres[subject] + '_')
if ((hemisphere.lower() in 'ipsilateral') and (hem in file)):
matching_files.append(file)
if ((hemisphere.lower() in 'contralateral') and (hem not in file)):
matching_files.append(file)
filtered_files = matching_files
self.files = filtered_files | 7,211,439,597,194,100,000 | Filter filepaths for given parameters. | src/pte/filetools/filefinder_abc.py | _filter_files | richardkoehler/pte | python | def _filter_files(self, keywords: Optional[Union[(str, list[str])]]=None, hemisphere: Optional[str]=None, stimulation: Optional[str]=None, medication: Optional[str]=None, exclude: Optional[Union[(str, list[str])]]=None) -> None:
filtered_files = self.files
if exclude:
if (not isinstance(exclude, list)):
exclude = [exclude]
filtered_files = [file for file in filtered_files if (not any(((item in file) for item in exclude)))]
if keywords:
if (not isinstance(keywords, list)):
keywords = [keywords]
filtered_files = self._keyword_search(filtered_files, keywords)
if stimulation:
if (stimulation.lower() in 'stimon'):
stim = 'StimOn'
elif (stimulation.lower() in 'stimoff'):
stim = 'StimOff'
else:
raise ValueError('Keyword for stimulation not valid.')
filtered_files = self._keyword_search(filtered_files, [stim])
if medication:
if (medication.lower() in 'medon'):
med = 'MedOn'
elif (medication.lower() in 'medoff'):
med = 'MedOff'
else:
raise ValueError('Keyword for medication not valid.')
filtered_files = self._keyword_search(filtered_files, [med])
if hemisphere:
matching_files = []
for file in filtered_files:
subject = mne_bids.get_entities_from_fname(file)['subject']
if ((subject not in self.hemispheres) or (self.hemispheres[subject] is None)):
raise HemisphereNotSpecifiedError(subject, self.hemispheres)
hem = (self.hemispheres[subject] + '_')
if ((hemisphere.lower() in 'ipsilateral') and (hem in file)):
matching_files.append(file)
if ((hemisphere.lower() in 'contralateral') and (hem not in file)):
matching_files.append(file)
filtered_files = matching_files
self.files = filtered_files |
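The hemisphere branch above boils down to a substring test: a per-subject marker like 'R_' must occur in the filename (ipsilateral) or be absent from it (contralateral). A stripped-down sketch with the BIDS parsing replaced by a plain split; the marker format, filename, and subject mapping are assumptions:

hemispheres = {'01': 'R'}                       # assumed subject -> hemisphere map
fname = 'sub-01_MedOff_StimOff_R_ieeg.vhdr'     # made-up filename
subject = fname.split('_')[0].replace('sub-', '')
hem = hemispheres[subject] + '_'
print('ipsilateral' if hem in fname else 'contralateral')  # -> ipsilateral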
def irfft2(a, s=None, axes=((- 2), (- 1)), norm=None):
'\n Compute the 2-dimensional inverse FFT of a real array.\n\n Parameters\n ----------\n a : array_like\n The input tensor\n s : sequence of ints, optional\n Shape of the inverse FFT.\n axes : sequence of ints, optional\n The axes over which to compute the inverse fft.\n Default is the last two axes.\n norm : {None, "ortho"}, optional\n Normalization mode (see `mt.fft`). Default is None.\n\n Returns\n -------\n out : Tensor\n The result of the inverse real 2-D FFT.\n\n See Also\n --------\n irfftn : Compute the inverse of the N-dimensional FFT of real input.\n\n Notes\n -----\n This is really `irfftn` with different defaults.\n For more details see `irfftn`.\n\n '
if (len(axes) != 2):
raise ValueError('axes length should be 2')
a = astensor(a)
axes = validate_fftn(a, s=s, axes=axes, norm=norm)
op = TensorIRFFT2(shape=s, axes=axes, norm=norm, dtype=np.dtype(np.float_))
return op(a) | 31,699,221,590,624,984 | Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input tensor
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
norm : {None, "ortho"}, optional
Normalization mode (see `mt.fft`). Default is None.
Returns
-------
out : Tensor
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`. | mars/tensor/fft/irfft2.py | irfft2 | JeffroMF/mars | python | def irfft2(a, s=None, axes=((- 2), (- 1)), norm=None):
'\n Compute the 2-dimensional inverse FFT of a real array.\n\n Parameters\n ----------\n a : array_like\n The input tensor\n s : sequence of ints, optional\n Shape of the inverse FFT.\n axes : sequence of ints, optional\n The axes over which to compute the inverse fft.\n Default is the last two axes.\n norm : {None, "ortho"}, optional\n Normalization mode (see `mt.fft`). Default is None.\n\n Returns\n -------\n out : Tensor\n The result of the inverse real 2-D FFT.\n\n See Also\n --------\n irfftn : Compute the inverse of the N-dimensional FFT of real input.\n\n Notes\n -----\n This is really `irfftn` with different defaults.\n For more details see `irfftn`.\n\n '
if (len(axes) != 2):
raise ValueError('axes length should be 2')
a = astensor(a)
axes = validate_fftn(a, s=s, axes=axes, norm=norm)
op = TensorIRFFT2(shape=s, axes=axes, norm=norm, dtype=np.dtype(np.float_))
return op(a) |
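Since this mirrors numpy.fft semantics, the usual round-trip property holds; a quick check in plain NumPy (pass s=a.shape so odd last dimensions are recovered exactly):

import numpy as np

a = np.random.rand(4, 6)
back = np.fft.irfft2(np.fft.rfft2(a), s=a.shape)
print(np.allclose(a, back))  # True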
def __init__(self, conf, router_conf, db, agent):
'Create a new Router'
self.conf = conf
self.router_conf = router_conf
self.db = db
self.agent = agent | 7,695,343,446,000,534,000 | Create a new Router | autopush/router/webpush.py | __init__ | Acidburn0zzz/autopush | python | def __init__(self, conf, router_conf, db, agent):
self.conf = conf
self.router_conf = router_conf
self.db = db
self.agent = agent |
def register(self, uaid, router_data, app_id, *args, **kwargs):
'No additional routing data' | -4,153,625,044,012,655,000 | No additional routing data | autopush/router/webpush.py | register | Acidburn0zzz/autopush | python | def register(self, uaid, router_data, app_id, *args, **kwargs):
|
def amend_endpoint_response(self, response, router_data):
'Stubbed out for this router' | 4,586,840,260,404,607,500 | Stubbed out for this router | autopush/router/webpush.py | amend_endpoint_response | Acidburn0zzz/autopush | python | def amend_endpoint_response(self, response, router_data):
|
@inlineCallbacks
def route_notification(self, notification, uaid_data):
"Route a notification to an internal node, and store it if the node\n can't deliver immediately or is no longer a valid node\n "
node_id = uaid_data.get('node_id')
uaid = uaid_data['uaid']
router = self.db.router
if node_id:
result = None
try:
result = (yield self._send_notification(uaid, node_id, notification))
except (ConnectError, ConnectionClosed, ResponseFailed, CancelledError, PotentialDataLoss) as exc:
self.metrics.increment('updates.client.host_gone')
(yield deferToThread(router.clear_node, uaid_data).addErrback(self._eat_db_err))
if isinstance(exc, ConnectionRefusedError):
self.log.debug('Could not route message: {exc}', exc=exc)
if (result and (result.code == 200)):
returnValue(self.delivered_response(notification))
try:
(yield self._save_notification(uaid_data, notification))
except ClientError as e:
log_exception = (e.response['Error']['Code'] != 'ProvisionedThroughputExceededException')
raise RouterException('Error saving to database', status_code=503, response_body='Retry Request', log_exception=log_exception, errno=201)
try:
uaid_data = (yield deferToThread(router.get_uaid, uaid))
except ClientError:
returnValue(self.stored_response(notification))
except ItemNotFound:
self.metrics.increment('updates.client.deleted')
raise RouterException('User was deleted', status_code=410, response_body='Invalid UAID', log_exception=False, errno=105)
node_id = uaid_data.get('node_id')
if (not node_id):
returnValue(self.stored_response(notification))
try:
result = (yield self._send_notification_check(uaid, node_id))
except (ConnectError, ConnectionClosed, ResponseFailed) as exc:
self.metrics.increment('updates.client.host_gone')
if isinstance(exc, ConnectionRefusedError):
self.log.debug('Could not route message: {exc}', exc=exc)
(yield deferToThread(router.clear_node, uaid_data).addErrback(self._eat_db_err))
returnValue(self.stored_response(notification))
if (result.code == 200):
returnValue(self.delivered_response(notification))
else:
ret_val = self.stored_response(notification)
returnValue(ret_val) | 3,801,602,032,211,172,400 | Route a notification to an internal node, and store it if the node
can't deliver immediately or is no longer a valid node | autopush/router/webpush.py | route_notification | Acidburn0zzz/autopush | python | @inlineCallbacks
def route_notification(self, notification, uaid_data):
"Route a notification to an internal node, and store it if the node\n can't deliver immediately or is no longer a valid node\n "
node_id = uaid_data.get('node_id')
uaid = uaid_data['uaid']
router = self.db.router
if node_id:
result = None
try:
result = (yield self._send_notification(uaid, node_id, notification))
except (ConnectError, ConnectionClosed, ResponseFailed, CancelledError, PotentialDataLoss) as exc:
self.metrics.increment('updates.client.host_gone')
(yield deferToThread(router.clear_node, uaid_data).addErrback(self._eat_db_err))
if isinstance(exc, ConnectionRefusedError):
self.log.debug('Could not route message: {exc}', exc=exc)
if (result and (result.code == 200)):
returnValue(self.delivered_response(notification))
try:
(yield self._save_notification(uaid_data, notification))
except ClientError as e:
log_exception = (e.response['Error']['Code'] != 'ProvisionedThroughputExceededException')
raise RouterException('Error saving to database', status_code=503, response_body='Retry Request', log_exception=log_exception, errno=201)
try:
uaid_data = (yield deferToThread(router.get_uaid, uaid))
except ClientError:
returnValue(self.stored_response(notification))
except ItemNotFound:
self.metrics.increment('updates.client.deleted')
raise RouterException('User was deleted', status_code=410, response_body='Invalid UAID', log_exception=False, errno=105)
node_id = uaid_data.get('node_id')
if (not node_id):
returnValue(self.stored_response(notification))
try:
result = (yield self._send_notification_check(uaid, node_id))
except (ConnectError, ConnectionClosed, ResponseFailed) as exc:
self.metrics.increment('updates.client.host_gone')
if isinstance(exc, ConnectionRefusedError):
self.log.debug('Could not route message: {exc}', exc=exc)
(yield deferToThread(router.clear_node, uaid_data).addErrback(self._eat_db_err))
returnValue(self.stored_response(notification))
if (result.code == 200):
returnValue(self.delivered_response(notification))
else:
ret_val = self.stored_response(notification)
returnValue(ret_val) |
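Stripped of the Twisted plumbing, the decision tree above reduces to: try direct delivery; on failure store the message; then re-read the user and poke the node once more. A toy model of just that control flow, not the real API:

def route(delivered_first_try, node_id_after_save, delivered_on_recheck):
    if delivered_first_try:
        return 'delivered'
    # store_message(...) happens here in the real code
    if not node_id_after_save:
        return 'stored'
    return 'delivered' if delivered_on_recheck else 'stored'

print(route(False, 'node-1', True))   # stored first, then delivered on recheck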
def _send_notification(self, uaid, node_id, notification):
'Send a notification to a specific node_id\n\n This version of the overridden method includes the necessary crypto\n headers for the notification.\n\n :type notification: autopush.utils.WebPushNotification\n\n '
payload = notification.serialize()
payload['timestamp'] = int(time.time())
url = ((node_id + '/push/') + uaid)
request = self.agent.request('PUT', url.encode('utf8'), bodyProducer=FileBodyProducer(StringIO(json.dumps(payload))))
request.addCallback(IgnoreBody.ignore)
return request | 4,112,651,449,687,784,400 | Send a notification to a specific node_id
This version of the overridden method includes the necessary crypto
headers for the notification.
:type notification: autopush.utils.WebPushNotification | autopush/router/webpush.py | _send_notification | Acidburn0zzz/autopush | python | def _send_notification(self, uaid, node_id, notification):
'Send a notification to a specific node_id\n\n This version of the overridden method includes the necessary crypto\n headers for the notification.\n\n :type notification: autopush.utils.WebPushNotification\n\n '
payload = notification.serialize()
payload['timestamp'] = int(time.time())
url = ((node_id + '/push/') + uaid)
request = self.agent.request('PUT', url.encode('utf8'), bodyProducer=FileBodyProducer(StringIO(json.dumps(payload))))
request.addCallback(IgnoreBody.ignore)
return request |
def _send_notification_check(self, uaid, node_id):
'Send a command to the node to check for notifications'
url = ((node_id + '/notif/') + uaid)
return self.agent.request('PUT', url.encode('utf8')).addCallback(IgnoreBody.ignore) | 4,989,087,466,341,468,000 | Send a command to the node to check for notifications | autopush/router/webpush.py | _send_notification_check | Acidburn0zzz/autopush | python | def _send_notification_check(self, uaid, node_id):
url = ((node_id + '/notif/') + uaid)
return self.agent.request('PUT', url.encode('utf8')).addCallback(IgnoreBody.ignore) |
def _save_notification(self, uaid_data, notification):
'Saves a notification, returns a deferred.\n\n This version of the overridden method saves each individual message\n to the message table along with relevant request headers if\n available.\n\n :type uaid_data: dict\n\n '
month_table = uaid_data['current_month']
if (notification.ttl is None):
raise RouterException('Missing TTL Header', response_body=('Missing TTL Header, see: %s' % TTL_URL), status_code=400, errno=111, log_exception=False)
if (notification.ttl == 0):
location = ('%s/m/%s' % (self.conf.endpoint_url, notification.version))
raise RouterException('Finished Routing', status_code=201, log_exception=False, headers={'TTL': str(notification.ttl), 'Location': location}, logged_status=204)
return deferToThread(self.db.message_table(month_table).store_message, notification=notification) | -1,176,066,011,258,479,600 | Saves a notification, returns a deferred.
This version of the overridden method saves each individual message
to the message table along with relevant request headers if
available.
:type uaid_data: dict | autopush/router/webpush.py | _save_notification | Acidburn0zzz/autopush | python | def _save_notification(self, uaid_data, notification):
'Saves a notification, returns a deferred.\n\n This version of the overridden method saves each individual message\n to the message table along with relevant request headers if\n available.\n\n :type uaid_data: dict\n\n '
month_table = uaid_data['current_month']
if (notification.ttl is None):
raise RouterException('Missing TTL Header', response_body=('Missing TTL Header, see: %s' % TTL_URL), status_code=400, errno=111, log_exception=False)
if (notification.ttl == 0):
location = ('%s/m/%s' % (self.conf.endpoint_url, notification.version))
raise RouterException('Finished Routing', status_code=201, log_exception=False, headers={'TTL': str(notification.ttl), 'Location': location}, logged_status=204)
return deferToThread(self.db.message_table(month_table).store_message, notification=notification) |
def _eat_db_err(self, fail):
'errBack for ignoring provisioned throughput errors'
fail.trap(ClientError) | -5,169,902,337,626,011,000 | errBack for ignoring provisioned throughput errors | autopush/router/webpush.py | _eat_db_err | Acidburn0zzz/autopush | python | def _eat_db_err(self, fail):
fail.trap(ClientError) |
def save_bloguser_extra_profile(backend, user, response, *args, **kwargs):
'\n see more:\n http://python-social-auth.readthedocs.io/en/latest/use_cases.html#retrieve-google-friends\n http://python-social-auth.readthedocs.io/en/latest/pipeline.html\n :param backend:\n :param user:\n :param response:\n :param args:\n :param kwargs:\n :return:\n '
if (backend.name == 'github'):
image_url = response.get('avatar_url')
image_file = get_image_from_url(image_url)
if (image_file is not None):
avatar_name = ('avatar' + uuid4().hex[:16])
if (user.image == 'bloguser/avatar.png'):
user.image.save(avatar_name, image_file)
user.save() | 8,284,591,256,816,238,000 | see more:
http://python-social-auth.readthedocs.io/en/latest/use_cases.html#retrieve-google-friends
http://python-social-auth.readthedocs.io/en/latest/pipeline.html
:param backend:
:param user:
:param response:
:param args:
:param kwargs:
:return: | apps/bloguser/pipline.py | save_bloguser_extra_profile | Jennei/MyBlog | python | def save_bloguser_extra_profile(backend, user, response, *args, **kwargs):
'\n see more:\n http://python-social-auth.readthedocs.io/en/latest/use_cases.html#retrieve-google-friends\n http://python-social-auth.readthedocs.io/en/latest/pipeline.html\n :param backend:\n :param user:\n :param response:\n :param args:\n :param kwargs:\n :return:\n '
if (backend.name == 'github'):
image_url = response.get('avatar_url')
image_file = get_image_from_url(image_url)
if (image_file is not None):
avatar_name = ('avatar' + uuid4().hex[:16])
if (user.image == 'bloguser/avatar.png'):
user.image.save(avatar_name, image_file)
user.save() |
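get_image_from_url is referenced above but not shown. A plausible implementation, offered purely as an assumption about what it does (fetch bytes, wrap them for Django's storage API, swallow network errors):

import requests
from django.core.files.base import ContentFile

def get_image_from_url(url, timeout=5):
    # Hypothetical helper: returns a ContentFile usable by ImageField.save(),
    # or None if the download fails.
    try:
        resp = requests.get(url, timeout=timeout)
        resp.raise_for_status()
        return ContentFile(resp.content)
    except requests.RequestException:
        return None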
def extractMichilunWordpressCom(item):
"\n\tParser for 'michilun.wordpress.com'\n\t"
bad = ['Recommendations and Reviews']
if any([(tmp in item['tags']) for tmp in bad]):
return None
(vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
if ((not (chp or vol)) or ('preview' in item['title'].lower())):
return None
tagmap = [('Side Projects - Scheme of the Official Descendant', 'Scheme of the Official Descendant', 'translated'), ('Song in the Peach Blossoms', 'Song in the Peach Blossoms', 'translated'), ('Onrain (Online - The Novel)', 'Onrain (Online - The Novel)', 'translated'), ('At the End of the Wish', 'At the End of the Wish', 'translated'), ('Bringing Calamity to the Nation', 'Bringing Calamity to the Nation', 'translated'), ("Side Projects - The Flame's Daughter", "The Flame's Daughter", 'translated'), ('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
for (tagname, name, tl_type) in tagmap:
if (tagname in item['tags']):
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | 2,736,643,888,499,868,000 | Parser for 'michilun.wordpress.com' | WebMirror/management/rss_parser_funcs/feed_parse_extractMichilunWordpressCom.py | extractMichilunWordpressCom | fake-name/ReadableWebProxy | python | def extractMichilunWordpressCom(item):
"\n\t\n\t"
bad = ['Recommendations and Reviews']
if any([(tmp in item['tags']) for tmp in bad]):
return None
(vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
if ((not (chp or vol)) or ('preview' in item['title'].lower())):
return None
tagmap = [('Side Projects - Scheme of the Official Descendant', 'Scheme of the Official Descendant', 'translated'), ('Song in the Peach Blossoms', 'Song in the Peach Blossoms', 'translated'), ('Onrain (Online - The Novel)', 'Onrain (Online - The Novel)', 'translated'), ('At the End of the Wish', 'At the End of the Wish', 'translated'), ('Bringing Calamity to the Nation', 'Bringing Calamity to the Nation', 'translated'), ("Side Projects - The Flame's Daughter", "The Flame's Daughter", 'translated'), ('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
for (tagname, name, tl_type) in tagmap:
if (tagname in item['tags']):
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
def _recursive_apply(self, block):
'\n This function is "applied" to every child in the block. This function in turn\n registers the forward hook on each module, which helps log the input and output tensors\n of that module.\n '
if (block in self.registered_blocks):
self.logger.warning(f'The hook is already registered to block {block.name}')
return
block.register_forward_hook(self.forward_hook)
self.registered_blocks.add(block) | 8,230,639,869,853,194,000 | This function is "applied" to every child in the block. This function in turn
registers the forward hook on each module, which helps log the input and output tensors
of that module. | smdebug/mxnet/hook.py | _recursive_apply | arjkesh/sagemaker-debugger | python | def _recursive_apply(self, block):
'\n This function is "applied" to every child in the block. This function in turn\n registers the forward hook on each module, which helps log the input and output tensors\n of that module.\n '
if (block in self.registered_blocks):
self.logger.warning(f'The hook is already registered to block {block.name}')
return
block.register_forward_hook(self.forward_hook)
self.registered_blocks.add(block) |
@error_handling_agent.catch_smdebug_errors()
def register_block(self, block):
'\n This function registers the forward hook. If the user wants to register the hook\n for every child in the given block, then the function calls the "apply" API for\n registration of the hook.\n The hook is registered recursively if the user has specified collections beyond\n the default ones, viz. gradients, weights, and biases\n '
if (not isinstance(block, mx.gluon.Block)):
self.logger.error(f'The given block type {block.__class__.__name__} is unsupported.')
return
if (block in self.registered_blocks):
self.logger.warning(f'The hook is already registered to block {block.name}')
return
if isinstance(block, mx.gluon.loss.Loss):
self.logger.info(f'Registering hook for block {block.name}')
block.register_forward_hook(self.forward_hook)
self.registered_blocks.add(block)
return
else:
self.model = block
is_recursive = self._is_recursive_needed()
block.register_forward_pre_hook(self.forward_pre_hook)
if (is_recursive is True):
block.apply(self._recursive_apply)
else:
block.register_forward_hook(self.forward_hook)
self.registered_blocks.add(block) | 3,938,190,178,072,743,400 | This function registers the forward hook. If the user wants to register the hook
for every child in the given block, then the function calls the "apply" API for
registration of the hook.
The hook is registered recursively if the user has specified collections beyond
the default ones, viz. gradients, weights, and biases | smdebug/mxnet/hook.py | register_block | arjkesh/sagemaker-debugger | python | @error_handling_agent.catch_smdebug_errors()
def register_block(self, block):
'\n This function registers the forward hook. If the user wants to register the hook\n for every child in the given block, then the function calls the "apply" API for\n registration of the hook.\n The hook is registered recursively if the user has specified collections beyond\n the default ones, viz. gradients, weights, and biases\n '
if (not isinstance(block, mx.gluon.Block)):
self.logger.error(f'The given block type {block.__class__.__name__} is unsupported.')
return
if (block in self.registered_blocks):
self.logger.warning(f'The hook is already registered to block {block.name}')
return
if isinstance(block, mx.gluon.loss.Loss):
self.logger.info(f'Registering hook for block {block.name}')
block.register_forward_hook(self.forward_hook)
self.registered_blocks.add(block)
return
else:
self.model = block
is_recursive = self._is_recursive_needed()
block.register_forward_pre_hook(self.forward_pre_hook)
if (is_recursive is True):
block.apply(self._recursive_apply)
else:
block.register_forward_hook(self.forward_hook)
self.registered_blocks.add(block) |
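Both methods above build on Gluon's plain hook API: register_forward_hook calls hook(block, inputs, outputs) after every forward pass. A minimal standalone example:

import mxnet as mx

def log_shapes(block, inputs, outputs):
    # Logs the shapes flowing through the block, much like the hook above.
    print(block.name, [i.shape for i in inputs], outputs.shape)

net = mx.gluon.nn.Dense(3)
net.initialize()
net.register_forward_hook(log_shapes)
net(mx.nd.ones((2, 5)))  # prints something like: dense0 [(2, 5)] (2, 3)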
def get_virtual_machine_scale_set(expand: Optional[str]=None, resource_group_name: Optional[str]=None, vm_scale_set_name: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetVirtualMachineScaleSetResult:
"\n Describes a Virtual Machine Scale Set.\n API Version: 2021-03-01.\n\n\n :param str expand: The expand expression to apply on the operation. 'UserData' retrieves the UserData property of the VM scale set that was provided by the user during the VM scale set Create/Update operation\n :param str resource_group_name: The name of the resource group.\n :param str vm_scale_set_name: The name of the VM scale set.\n "
__args__ = dict()
__args__['expand'] = expand
__args__['resourceGroupName'] = resource_group_name
__args__['vmScaleSetName'] = vm_scale_set_name
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:compute:getVirtualMachineScaleSet', __args__, opts=opts, typ=GetVirtualMachineScaleSetResult).value
return AwaitableGetVirtualMachineScaleSetResult(additional_capabilities=__ret__.additional_capabilities, automatic_repairs_policy=__ret__.automatic_repairs_policy, do_not_run_extensions_on_overprovisioned_vms=__ret__.do_not_run_extensions_on_overprovisioned_vms, extended_location=__ret__.extended_location, host_group=__ret__.host_group, id=__ret__.id, identity=__ret__.identity, location=__ret__.location, name=__ret__.name, orchestration_mode=__ret__.orchestration_mode, overprovision=__ret__.overprovision, plan=__ret__.plan, platform_fault_domain_count=__ret__.platform_fault_domain_count, provisioning_state=__ret__.provisioning_state, proximity_placement_group=__ret__.proximity_placement_group, scale_in_policy=__ret__.scale_in_policy, single_placement_group=__ret__.single_placement_group, sku=__ret__.sku, tags=__ret__.tags, type=__ret__.type, unique_id=__ret__.unique_id, upgrade_policy=__ret__.upgrade_policy, virtual_machine_profile=__ret__.virtual_machine_profile, zone_balance=__ret__.zone_balance, zones=__ret__.zones) | -7,936,196,944,535,669,000 | Describes a Virtual Machine Scale Set.
API Version: 2021-03-01.
:param str expand: The expand expression to apply on the operation. 'UserData' retrieves the UserData property of the VM scale set that was provided by the user during the VM scale set Create/Update operation
:param str resource_group_name: The name of the resource group.
:param str vm_scale_set_name: The name of the VM scale set. | sdk/python/pulumi_azure_native/compute/get_virtual_machine_scale_set.py | get_virtual_machine_scale_set | polivbr/pulumi-azure-native | python | def get_virtual_machine_scale_set(expand: Optional[str]=None, resource_group_name: Optional[str]=None, vm_scale_set_name: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetVirtualMachineScaleSetResult:
"\n Describes a Virtual Machine Scale Set.\n API Version: 2021-03-01.\n\n\n :param str expand: The expand expression to apply on the operation. 'UserData' retrieves the UserData property of the VM scale set that was provided by the user during the VM scale set Create/Update operation\n :param str resource_group_name: The name of the resource group.\n :param str vm_scale_set_name: The name of the VM scale set.\n "
__args__ = dict()
__args__['expand'] = expand
__args__['resourceGroupName'] = resource_group_name
__args__['vmScaleSetName'] = vm_scale_set_name
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:compute:getVirtualMachineScaleSet', __args__, opts=opts, typ=GetVirtualMachineScaleSetResult).value
return AwaitableGetVirtualMachineScaleSetResult(additional_capabilities=__ret__.additional_capabilities, automatic_repairs_policy=__ret__.automatic_repairs_policy, do_not_run_extensions_on_overprovisioned_vms=__ret__.do_not_run_extensions_on_overprovisioned_vms, extended_location=__ret__.extended_location, host_group=__ret__.host_group, id=__ret__.id, identity=__ret__.identity, location=__ret__.location, name=__ret__.name, orchestration_mode=__ret__.orchestration_mode, overprovision=__ret__.overprovision, plan=__ret__.plan, platform_fault_domain_count=__ret__.platform_fault_domain_count, provisioning_state=__ret__.provisioning_state, proximity_placement_group=__ret__.proximity_placement_group, scale_in_policy=__ret__.scale_in_policy, single_placement_group=__ret__.single_placement_group, sku=__ret__.sku, tags=__ret__.tags, type=__ret__.type, unique_id=__ret__.unique_id, upgrade_policy=__ret__.upgrade_policy, virtual_machine_profile=__ret__.virtual_machine_profile, zone_balance=__ret__.zone_balance, zones=__ret__.zones) |
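A usage sketch inside a Pulumi program; the resource group and scale-set names are placeholders for an existing deployment:

import pulumi
import pulumi_azure_native.compute as compute

vmss = compute.get_virtual_machine_scale_set(
    resource_group_name='my-rg',         # placeholder
    vm_scale_set_name='my-vmss',         # placeholder
    expand='UserData')                   # optional, per the docstring above
pulumi.export('vmss_sku', vmss.sku.name if vmss.sku else None)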
@property
@pulumi.getter(name='additionalCapabilities')
def additional_capabilities(self) -> Optional['outputs.AdditionalCapabilitiesResponse']:
'\n Specifies additional capabilities enabled or disabled on the Virtual Machines in the Virtual Machine Scale Set. For instance: whether the Virtual Machines have the capability to support attaching managed data disks with UltraSSD_LRS storage account type.\n '
return pulumi.get(self, 'additional_capabilities') | -4,984,097,992,721,295,000 | Specifies additional capabilities enabled or disabled on the Virtual Machines in the Virtual Machine Scale Set. For instance: whether the Virtual Machines have the capability to support attaching managed data disks with UltraSSD_LRS storage account type. | sdk/python/pulumi_azure_native/compute/get_virtual_machine_scale_set.py | additional_capabilities | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='additionalCapabilities')
def additional_capabilities(self) -> Optional['outputs.AdditionalCapabilitiesResponse']:
'\n \n '
return pulumi.get(self, 'additional_capabilities') |
@property
@pulumi.getter(name='automaticRepairsPolicy')
def automatic_repairs_policy(self) -> Optional['outputs.AutomaticRepairsPolicyResponse']:
'\n Policy for automatic repairs.\n '
return pulumi.get(self, 'automatic_repairs_policy') | -5,255,793,026,184,793,000 | Policy for automatic repairs. | sdk/python/pulumi_azure_native/compute/get_virtual_machine_scale_set.py | automatic_repairs_policy | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='automaticRepairsPolicy')
def automatic_repairs_policy(self) -> Optional['outputs.AutomaticRepairsPolicyResponse']:
'\n \n '
return pulumi.get(self, 'automatic_repairs_policy') |