Dataset schema (per-column type and observed range; string ranges are character lengths, repository_stars ranges are values):

    column                   type      min    max
    body_hash                string    64     64
    body                     string    23     109k
    docstring                string    1      57k
    path                     string    4      198
    name                     string    1      115
    repository_name          string    7      111
    repository_stars         float64   0      191k
    lang                     string    1 distinct value (python)
    body_without_docstring   string    14     108k
    unified                  string    45     133k
body_hash: dfbb32565e802695cdc054c8e06f66ec55244968d2a96d74d139710fb5223ea3
body:

def simulate_highest_outlier(self, s_all, lpost, t0, max_post=True, seed=None):
    """
    Simulate :math:`n` power spectra from a model and then find the highest
    data/model outlier in each.

    The data/model outlier is defined as

    .. math::

        \max{(T_R = 2(\mathrm{data}/\mathrm{model}))} .

    Parameters
    ----------
    s_all : numpy.ndarray
        A list of parameter values derived either from an approximation of the
        likelihood surface, or from an MCMC run. Has dimensions ``(n, ndim)``,
        where ``n`` is the number of simulated power spectra to generate, and
        ``ndim`` the number of model parameters.

    lpost : instance of a :class:`Posterior` subclass
        An instance of class :class:`Posterior` or one of its subclasses
        that defines the function to be minimized (either in ``loglikelihood``
        or ``logposterior``).

    t0 : iterable
        List or array containing the starting parameters. Its length
        must match ``lpost.model.npar``.

    max_post : bool, optional, default ``True``
        If ``True``, do MAP fits on the power spectrum to find the highest
        data/model outlier; otherwise, do a maximum likelihood fit. If ``True``,
        the simulated power spectra will be generated from an MCMC run;
        otherwise the method will employ the approximated covariance matrix
        for the parameters derived from the likelihood surface to generate
        samples from that likelihood function.

    seed : int, optional, default ``None``
        An optional number to seed the random number generator with, for
        reproducibility of the results obtained with this method.

    Returns
    -------
    max_y_all : numpy.ndarray
        An array of maximum outliers for each simulated power spectrum.
    """
    nsim = s_all.shape[0]
    max_y_all = np.zeros(nsim)
    rng = np.random.RandomState(seed)
    for i, s in enumerate(s_all):
        # Simulate a power spectrum from the model with parameter set s.
        sim_ps = self._generate_data(lpost, s, rng=rng)
        if not max_post:
            sim_lpost = PSDLogLikelihood(sim_ps.freq, sim_ps.power,
                                         model=lpost.model, m=sim_ps.m)
        else:
            sim_lpost = PSDPosterior(sim_ps.freq, sim_ps.power,
                                     lpost.model, m=sim_ps.m)
            sim_lpost.logprior = lpost.logprior
        parest_sim = PSDParEst(sim_ps, max_post=max_post)
        try:
            # Refit the simulated spectrum and record its highest outlier.
            res = parest_sim.fit(sim_lpost, t0, neg=True)
            max_y_all[i], maxfreq, maxind = self._compute_highest_outlier(
                sim_lpost, res, nmax=1)
        except RuntimeError:
            logging.warning("Fitting unsuccessful! Skipping this simulation!")
            continue
    return np.hstack(max_y_all)
path: stingray/modeling/parameterestimation.py
name: simulate_highest_outlier
repository_name: nimeshvashistha/stingray
repository_stars: 133
lang: python
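For orientation, a minimal driver sketch (not from the source repository): it assumes an existing ``PSDParEst`` instance ``parest``, a posterior ``lpost``, a best-fit result ``res`` whose ``p_opt`` attribute holds the best-fit parameters (an assumption about the results object), and an observed highest outlier ``obs_max_y``. The diagonal covariance is a placeholder, not a real estimate of the likelihood surface.

import numpy as np

t0 = res.p_opt                            # assumed attribute: best-fit parameter array
cov = np.diag(0.01 * np.abs(t0) + 1e-6)   # placeholder covariance, for illustration only

# One parameter set per simulated power spectrum: shape (n, ndim).
s_all = np.random.multivariate_normal(t0, cov, size=1000)

max_y_all = parest.simulate_highest_outlier(s_all, lpost, t0,
                                            max_post=False, seed=42)

# Posterior predictive p-value of the observed highest data/model outlier.
pval = np.mean(max_y_all >= obs_max_y)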
body_hash: 8c162a907f11f84f8db0bcafa20588617686ba885a15dfb4e9f7f02760531d8d
body:

def _compute_highest_outlier(self, lpost, res, nmax=1):
    """
    Auxiliary method calculating the highest outlier statistic in
    a power spectrum.

    The maximum data/model outlier is defined as

    .. math::

        \max{(T_R = 2(\mathrm{data}/\mathrm{model}))}

    Parameters
    ----------
    lpost : instance of a :class:`Posterior` subclass
        An instance of class :class:`Posterior` or one of its subclasses
        that defines the function to be minimized (either in ``loglikelihood``
        or ``logposterior``).

    res : :class:`OptimizationResults` object
        An object containing useful summaries of the fitting procedure.
        For details, see the documentation of :class:`OptimizationResults`.

    nmax : int, optional, default ``1``
        The number of maxima to extract from the power spectrum. By default,
        only the highest data/model outlier is extracted; larger values extract
        the ``nmax`` highest outliers, useful when looking for multiple signals
        in a power spectrum.

    Returns
    -------
    max_y : {float | numpy.ndarray}
        The ``nmax`` highest data/model outliers

    max_x : {float | numpy.ndarray}
        The frequencies corresponding to the outliers in ``max_y``

    max_ind : {int | numpy.ndarray}
        The indices corresponding to the outliers in ``max_y``
    """
    residuals = 2.0 * lpost.y / res.mfit
    ratio_sort = copy.copy(residuals)
    ratio_sort.sort()
    # The nmax largest data/model ratios.
    max_y = ratio_sort[-nmax:]
    max_x = np.zeros(max_y.shape[0])
    max_ind = np.zeros(max_y.shape[0])
    for i, my in enumerate(max_y):
        max_x[i], max_ind[i] = self._find_outlier(lpost.x, residuals, my)
    return max_y, max_x, max_ind
path: stingray/modeling/parameterestimation.py
name: _compute_highest_outlier
repository_name: nimeshvashistha/stingray
repository_stars: 133
lang: python
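A brief calibration note (standard periodogram statistics, not stated in the source): when the powers :math:`P_j` scatter around the model :math:`S(f_j)` with the usual averaged-periodogram distribution,

.. math::

    \frac{2 P_j}{S(f_j)} \sim \chi^2_{2m}, \qquad T_R = \max_j \frac{2 P_j}{S(f_j)} ,

so the observed highest outlier should be compared against the simulated distribution of :math:`T_R` produced by ``simulate_highest_outlier`` rather than against a single :math:`\chi^2` tail, because taking the maximum over frequencies shifts the null distribution.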
body_hash: 83ec51142c5a1cb0a2c6c24e5a97b956f19b380dbfe933b54c0df5f46aa0a37b
body:

@staticmethod
def _find_outlier(xdata, ratio, max_y):
    """
    Small auxiliary method that finds the index where an array has
    its maximum, and the corresponding value in ``xdata``.

    Parameters
    ----------
    xdata : numpy.ndarray
        A list of independent variables

    ratio : numpy.ndarray
        A list of dependent variables corresponding to ``xdata``

    max_y : float
        The maximum value of ``ratio``

    Returns
    -------
    max_x : float
        The value in ``xdata`` corresponding to the entry in ``ratio`` where
        ``ratio == max_y``

    max_ind : int
        The index of the entry in ``ratio`` where ``ratio == max_y``
    """
    max_ind = np.where(ratio == max_y)[0][0]
    max_x = xdata[max_ind]
    return max_x, max_ind
path: stingray/modeling/parameterestimation.py
name: _find_outlier
repository_name: nimeshvashistha/stingray
repository_stars: 133
lang: python
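A tiny self-contained check of the lookup semantics (plain numpy, invented values): exact floating-point equality is required, and ties resolve to the first occurrence.

import numpy as np

ratio = np.array([0.5, 2.3, 1.1, 2.3])
xdata = np.array([1.0, 2.0, 3.0, 4.0])

max_ind = np.where(ratio == ratio.max())[0][0]  # first index of the maximum -> 1
max_x = xdata[max_ind]                          # -> 2.0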
body_hash: eb3ffc947aecf8cb6b71db818a4b0e7f55049c6ffc5385c4e95afe0b8dcce6fc
body:

def plotfits(self, res1, res2=None, save_plot=False, namestr='test', log=False):
    """
    Plotting method that allows plotting either one or two best-fit models
    together with the data.

    Plots a power spectrum with the best-fit model, as well as the data/model
    residuals for each model.

    Parameters
    ----------
    res1 : :class:`OptimizationResults` object
        Output of a successful fitting procedure

    res2 : :class:`OptimizationResults` object, optional, default ``None``
        Optional output of a second successful fitting procedure, e.g. with a
        competing model

    save_plot : bool, optional, default ``False``
        If ``True``, the resulting figure will be saved to a file

    namestr : str, optional, default ``test``
        If ``save_plot`` is ``True``, this string defines the path and file
        name for the output plot

    log : bool, optional, default ``False``
        If ``True``, plot the axes logarithmically.
    """
    if not can_plot:
        logging.info("No matplotlib imported. Can't plot!")
        return

    f = plt.figure(figsize=(12, 10))
    plt.subplots_adjust(hspace=0.0, wspace=0.4)

    # Top panel: the periodogram and the best-fit model(s).
    s1 = plt.subplot2grid((4, 1), (0, 0), rowspan=2)

    if log:
        logx = np.log10(self.ps.freq)
        logy = np.log10(self.ps.power)
        logpar1 = np.log10(res1.mfit)
        p1, = s1.plot(logx, logy, color='black', drawstyle='steps-mid')
        p2, = s1.plot(logx, logpar1, color='blue', lw=2)
        s1.set_xlim([np.min(logx), np.max(logx)])
        s1.set_ylim([np.min(logy) - 1.0, np.max(logy) + 1])
        if self.ps.norm == 'leahy':
            s1.set_ylabel('log(Leahy-Normalized Power)', fontsize=18)
        elif self.ps.norm == 'rms':
            s1.set_ylabel('log(RMS-Normalized Power)', fontsize=18)
        else:
            s1.set_ylabel('log(Power)', fontsize=18)
    else:
        p1, = s1.plot(self.ps.freq, self.ps.power, color='black',
                      drawstyle='steps-mid')
        p2, = s1.plot(self.ps.freq, res1.mfit, color='blue', lw=2)
        s1.set_xscale('log')
        s1.set_yscale('log')
        s1.set_xlim([np.min(self.ps.freq), np.max(self.ps.freq)])
        s1.set_ylim([np.min(self.ps.freq) / 10.0,
                     np.max(self.ps.power) * 10.0])
        if self.ps.norm == 'leahy':
            s1.set_ylabel('Leahy-Normalized Power', fontsize=18)
        elif self.ps.norm == 'rms':
            s1.set_ylabel('RMS-Normalized Power', fontsize=18)
        else:
            s1.set_ylabel('Power', fontsize=18)

    if res2 is not None:
        if log:
            logpar2 = np.log10(res2.mfit)
            p3, = s1.plot(logx, logpar2, color='red', lw=2)
        else:
            p3, = s1.plot(self.ps.freq, res2.mfit, color='red', lw=2)
        s1.legend([p1, p2, p3], ['data', 'model 1 fit', 'model 2 fit'])
    else:
        s1.legend([p1, p2], ['data', 'model fit'])

    s1.set_title('Periodogram and fits for data set ' + namestr, fontsize=18)

    # Middle panel: data/model residuals for the first model.
    s2 = plt.subplot2grid((4, 1), (2, 0), rowspan=1)
    pldif = self.ps.power / res1.mfit
    s2.set_ylabel('Residuals, \n first model', fontsize=18)

    if log:
        s2.plot(logx, pldif, color='black', drawstyle='steps-mid')
        s2.plot(logx, np.ones(self.ps.freq.shape[0]), color='blue', lw=2)
        s2.set_xlim([np.min(logx), np.max(logx)])
        s2.set_ylim([np.min(pldif), np.max(pldif)])
    else:
        s2.plot(self.ps.freq, pldif, color='black', drawstyle='steps-mid')
        s2.plot(self.ps.freq, np.ones_like(self.ps.power), color='blue', lw=2)
        s2.set_xscale('log')
        s2.set_yscale('log')
        s2.set_xlim([np.min(self.ps.freq), np.max(self.ps.freq)])
        s2.set_ylim([np.min(pldif), np.max(pldif)])

    if res2 is not None:
        # Bottom panel: data/model residuals for the second model.
        bpldif = self.ps.power / res2.mfit
        s3 = plt.subplot2grid((4, 1), (3, 0), rowspan=1)
        if log:
            s3.plot(logx, bpldif, color='black', drawstyle='steps-mid')
            s3.plot(logx, np.ones(len(self.ps.freq)), color='red', lw=2)
            s3.axis([np.min(logx), np.max(logx),
                     np.min(bpldif), np.max(bpldif)])
            s3.set_xlabel('log(Frequency) [Hz]', fontsize=18)
        else:
            s3.plot(self.ps.freq, bpldif, color='black', drawstyle='steps-mid')
            s3.plot(self.ps.freq, np.ones(len(self.ps.freq)), color='red', lw=2)
            s3.set_xscale('log')
            s3.set_yscale('log')
            s3.set_xlim([np.min(self.ps.freq), np.max(self.ps.freq)])
            s3.set_ylim([np.min(bpldif), np.max(bpldif)])
            s3.set_xlabel('Frequency [Hz]', fontsize=18)
        s3.set_ylabel('Residuals, \n second model', fontsize=18)
    elif log:
        s2.set_xlabel('log(Frequency) [Hz]', fontsize=18)
    else:
        s2.set_xlabel('Frequency [Hz]', fontsize=18)

    ax = plt.gca()
    for label in ax.get_xticklabels() + ax.get_yticklabels():
        label.set_fontsize(14)
    plt.setp(s1.get_xticklabels(), visible=False)

    if save_plot:
        plt.savefig(namestr + '_ps_fit.png', format='png')
    return
path: stingray/modeling/parameterestimation.py
name: plotfits
repository_name: nimeshvashistha/stingray
repository_stars: 133
lang: python
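A hypothetical call for context, assuming ``parest`` is a ``PSDParEst`` instance built from the observed power spectrum and ``res1``/``res2`` are ``OptimizationResults`` from two competing model fits:

parest.plotfits(res1, res2=res2, save_plot=True, namestr="obs1", log=False)
# With save_plot=True this writes "obs1_ps_fit.png" to the working directory.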
body_hash: 538d41151e7e3ba2606711975241ad9162c75ca573e054dee7e31c37bafffd88
body:

def _cosine_similarity(sentence_1, sentence_2):
    """
    Calculates and returns cosine similarity using raw count tf-idf
    of two sentence objects.
    """
    numerator = 0
    denominator_1 = 0
    denominator_2 = 0
    for i in sentence_1.tf_idf.keys():
        numerator += sentence_1.tf_idf.get(i) * sentence_2.tf_idf.get(i, 0.0)
        denominator_1 += sentence_1.tf_idf.get(i) ** 2
    for i in sentence_2.tf_idf.values():
        denominator_2 += i * i
    denominator = math.sqrt(denominator_1 * denominator_2)
    if denominator != 0:
        return numerator / denominator
    else:
        return denominator
path: src/content_selection.py
name: _cosine_similarity
repository_name: aventon1/text-summarizer
repository_stars: 0
lang: python
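A self-contained sketch of the same computation on plain dicts (toy numbers, no Sentence objects), useful for sanity-checking the sparse-vector arithmetic:

import math

def cosine(u, v):
    # u, v: sparse tf-idf vectors as {term: weight} dicts.
    num = sum(w * v.get(t, 0.0) for t, w in u.items())
    den = (math.sqrt(sum(w * w for w in u.values()))
           * math.sqrt(sum(w * w for w in v.values())))
    return num / den if den else 0.0

print(cosine({'cat': 1.2, 'sat': 0.4}, {'cat': 0.9, 'mat': 0.7}))  # ~0.75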
body_hash: 5d1460f6daed088b5c5a426ee4a902de8fb0eb3b469d16a03e48babdd6027ef2
body:

def _build_sim_matrix(sent_list, intersent_threshold, intersent_formula, mle_lambda, k, topic):
    """
    Builds and returns a 2D numpy matrix of inter-sentential similarity.
    """
    num_sent = len(sent_list)
    sim_matrix = np.zeros((num_sent, num_sent))
    for i in range(num_sent):
        sim_vals = []
        for j in range(i, num_sent):
            if intersent_formula == 'norm':
                sim = _calc_norm_gen_prob(sent_list[i], sent_list[j], mle_lambda, topic)
                sim_vals.append((i, j, sim))
            elif i != j:
                sim = _cosine_similarity(sent_list[i], sent_list[j])
                # Only keep edges above the similarity threshold.
                if sim >= intersent_threshold:
                    sim_matrix[i][j] = sim
                    sim_matrix[j][i] = sim
            else:
                sim_matrix[i][i] = 1.0
        if intersent_formula == 'norm':
            # Connect each sentence only to its k strongest neighbours.
            sorted_sim_vals = sorted(sim_vals, key=itemgetter(2), reverse=True)[:k]
            for row, col, sim in sorted_sim_vals:
                sim_matrix[row][col] = sim
                sim_matrix[col][row] = sim
    # Normalize each row so the matrix is row-stochastic.
    row_sums = sim_matrix.sum(axis=1, keepdims=True)
    sim_matrix = sim_matrix / row_sums
    return sim_matrix
path: src/content_selection.py
name: _build_sim_matrix
repository_name: aventon1/text-summarizer
repository_stars: 0
lang: python
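The final division makes each row of the thresholded similarity graph sum to 1, turning the adjacency matrix into a row-stochastic transition matrix. A toy illustration:

import numpy as np

A = np.array([[1.0, 0.8, 0.0],
              [0.8, 1.0, 0.3],
              [0.0, 0.3, 1.0]])
P = A / A.sum(axis=1, keepdims=True)
print(P.sum(axis=1))  # [1. 1. 1.]: every row is now a probability distribution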
body_hash: 300c28843b14038664679bbb9378455b0a15a8a88af6ecdc813c9a14cf9818f0
body:

def _build_bias_vec(sent_list, topic_sent, include_narrative, bias_formula, mle_lambda, topic):
    """
    Builds and returns a 1D numpy vector of the similarity between each sentence
    and the topic title. Additionally adds the similarity of the narrative if
    ``include_narrative`` is set.
    """
    narrative = topic.narrative
    if narrative is None:
        narrative = topic.category
    num_sent = len(sent_list)
    bias_vec = np.zeros(num_sent)
    for i in range(num_sent):
        if bias_formula == 'rel':
            bias_vec[i] = _calc_relevance(sent_list[i], topic_sent, topic)
            if include_narrative:
                bias_vec[i] += _calc_relevance(sent_list[i], narrative, topic)
        elif bias_formula == 'gen':
            bias_vec[i] = _calc_gen_prob(topic_sent, sent_list[i], mle_lambda, topic)
            if include_narrative:
                bias_vec[i] += _calc_gen_prob(narrative, sent_list[i], mle_lambda, topic)
        else:
            bias_vec[i] = _cosine_similarity(sent_list[i], topic_sent)
            if include_narrative:
                bias_vec[i] += _cosine_similarity(sent_list[i], narrative)
    # Normalize to a probability distribution, guarding against an all-zero vector.
    bias_sum = np.sum(bias_vec)
    if bias_sum == 0:
        bias_sum = 1
    bias_vec = bias_vec / bias_sum
    return bias_vec
path: src/content_selection.py
name: _build_bias_vec
repository_name: aventon1/text-summarizer
repository_stars: 0
lang: python
body_hash: 726fcaff19246c779050bf409be9cc4ed9adbdaf30032ec725f3f3ef4a34fde0
body:

def _calc_relevance(sent, topic_sent, topic):
    """Calculates relevance for two sentences."""
    rel_sum = 0
    for i in topic_sent.raw_counts.keys():
        rel_sum += (math.log(sent.raw_counts.get(i, 0.0) + 1)
                    * math.log(topic_sent.raw_counts.get(i) + 1)
                    * topic.idf.get(i))
    return rel_sum
path: src/content_selection.py
name: _calc_relevance
repository_name: aventon1/text-summarizer
repository_stars: 0
lang: python
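In symbols, the loop computes the query-relevance weighting familiar from topic-sensitive LexRank, with tf taken from raw counts and idf from the topic cluster:

.. math::

    \mathrm{rel}(s \mid q) = \sum_{w \in q} \log(\mathrm{tf}_{w,s} + 1) \, \log(\mathrm{tf}_{w,q} + 1) \, \mathrm{idf}_w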
body_hash: 3ce365cd8adc5423e1f0b5c533bca8304150b7ae0216bf27a9e225611d93ae59
body:

def _calc_smoothed_mle(word, sent, mle_lambda, topic):
    """
    Calculates the smoothed MLE (Maximum Likelihood Estimate) for a word
    in a sentence, smoothing with the tf values from the entire topic cluster.
    Returns the smoothed MLE value.
    """
    return ((1 - mle_lambda) * sent.tf_norm_values.get(word, 0)
            + mle_lambda * topic.tf_norm_values[word])
path: src/content_selection.py
name: _calc_smoothed_mle
repository_name: aventon1/text-summarizer
repository_stars: 0
lang: python
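This is Jelinek-Mercer interpolation of the sentence language model with the topic-cluster model; writing ``tf_norm`` for normalized term frequency as in the code:

.. math::

    p(w \mid s) = (1 - \lambda) \, \mathrm{tf\_norm}(w, s) + \lambda \, \mathrm{tf\_norm}(w, C)

Since :math:`\lambda > 0`, any word seen anywhere in the topic cluster gets a nonzero estimate, which keeps the generation products below from collapsing to zero.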
body_hash: 213ab628b22277859c80a8044d95ac0e0f7a29ba26615492d1adc20a503148b0
body:

def _calc_gen_prob(sent_1, sent_2, mle_lambda, topic):
    """
    Calculates and returns the generative probability of sent_1 given sent_2.
    """
    gen_prod = 1
    for word in sent_1.raw_counts:
        gen_prod *= _calc_smoothed_mle(word, sent_2, mle_lambda, topic) ** sent_1.raw_counts[word]
    return gen_prod
path: src/content_selection.py
name: _calc_gen_prob
repository_name: aventon1/text-summarizer
repository_stars: 0
lang: python
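The product implements the generative probability of one sentence under another sentence's smoothed unigram model:

.. math::

    p(s_1 \mid s_2) = \prod_{w \in s_1} p(w \mid s_2)^{\mathrm{tf}_{w, s_1}}

For long sentences this product can underflow toward zero in double precision; summing log-probabilities is the usual remedy, and the length normalization below also reduces the effect.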
body_hash: 6f461c779df92fcbb14da4aa0cbb67eb072cebd2812e6ee17346d0d16cbaa788
body:

def _calc_norm_gen_prob(sent_1, sent_2, mle_lambda, topic):
    """
    Calculates and returns the length-normalized generative probability
    of sent_1 given sent_2.
    """
    sent_1_len = sum(sent_1.raw_counts.values())
    return _calc_gen_prob(sent_1, sent_2, mle_lambda, topic) ** (1.0 / sent_1_len)
path: src/content_selection.py
name: _calc_norm_gen_prob
repository_name: aventon1/text-summarizer
repository_stars: 0
lang: python
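With :math:`|s_1|` the total token count of ``sent_1``, this is the geometric mean of the per-token probabilities,

.. math::

    p_{\mathrm{norm}}(s_1 \mid s_2) = p(s_1 \mid s_2)^{1 / |s_1|} ,

which makes sentences of different lengths comparable when weighting inter-sentential links.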
body_hash: 8d5963394c300f8a235cdc853a01aa8ee234f0321101b7d0a448dfe4ac9cce47
body:

def _build_markov_matrix(sim_matrix, bias_vec, d):
    """
    Builds and returns the Markov matrix (the matrix to multiply by in the
    power method) using the Biased LexRank formula with cosine similarity.
    """
    markov_matrix = d * bias_vec + (1 - d) * sim_matrix
    return markov_matrix
path: src/content_selection.py
name: _build_markov_matrix
repository_name: aventon1/text-summarizer
repository_stars: 0
lang: python
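Because ``bias_vec`` broadcasts across the rows of ``sim_matrix``, the entry built here is :math:`M_{ij} = d\, b_j + (1 - d)\, S_{ij}`. Since the score vector :math:`\mathbf{p}` sums to one, the stationary point of the power method (which iterates with :math:`M^{\top}`) satisfies the Biased LexRank equation

.. math::

    \mathbf{p} = d\, \mathbf{b} + (1 - d)\, S^{\top} \mathbf{p} .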
body_hash: 91dfaaf7aaf45cbe9bad2f859bd1f444a8a9a381dc7e96c5bc81defd74c6be7c
body:

def _power_method(markov_matrix, epsilon):
    """
    Uses the power method to find the LexRank values of each sentence
    and returns a 1D vector of the final LexRank values after convergence.
    """
    num_sent = len(markov_matrix)
    transition_matrix = markov_matrix.T
    # Start from a uniform distribution over sentences.
    prob_vec = np.ones(num_sent) / num_sent
    matrix_diff = 1.0
    while matrix_diff > epsilon:
        updated_prob_vec = np.dot(transition_matrix, prob_vec)
        matrix_diff = np.linalg.norm(np.subtract(updated_prob_vec, prob_vec))
        prob_vec = updated_prob_vec
    return prob_vec
path: src/content_selection.py
name: _power_method
repository_name: aventon1/text-summarizer
repository_stars: 0
lang: python
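A self-contained toy run (invented 3-sentence matrices) showing the iteration converging to a stationary score vector; the tolerance here is tighter than the module's default ``epsilon=0.1``:

import numpy as np

S = np.array([[0.6, 0.4, 0.0],     # row-stochastic similarity matrix
              [0.3, 0.4, 0.3],
              [0.0, 0.5, 0.5]])
b = np.array([0.5, 0.3, 0.2])      # normalized topic-bias vector
d = 0.7
M = d * b + (1 - d) * S            # M[i, j] = d*b[j] + (1-d)*S[i, j]

p = np.ones(3) / 3
diff = 1.0
while diff > 1e-8:
    p_new = M.T @ p
    diff = np.linalg.norm(p_new - p)
    p = p_new
print(p)  # approx. [0.46, 0.34, 0.20]; the highest-bias sentence ranks first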
body_hash: a80375e348afb5e65ce3484c79d88b982b1a42ca6eeb93c71496c741f6516eaf
body:

def _select_sentences(sorted_sentences, summary_threshold):
    """
    Takes a list of sentences sorted by LexRank value (descending)
    and greedily selects sentences to add to the summary based on LexRank
    value, excluding sentences with cosine similarity >= summary_threshold
    (default = 0.5) to any sentence already in the summary.
    Returns a list of selected sentences.
    """
    max_summary_size = 100
    added_sents = []
    summary_size = 0
    for sent_index in range(len(sorted_sentences)):
        if sent_index == 0:
            # Always take the top-ranked sentence.
            added_sents.append(sorted_sentences[sent_index])
            summary_size += sorted_sentences[sent_index].sent_len
        elif summary_size + sorted_sentences[sent_index].sent_len <= max_summary_size:
            # Reject candidates too similar to anything already selected.
            cos_sim = [_cosine_similarity(sorted_sentences[sent_index], added_sent)
                       for added_sent in added_sents]
            similar = any(cos_similarity >= summary_threshold for cos_similarity in cos_sim)
            if not similar:
                added_sents.append(sorted_sentences[sent_index])
                summary_size += sorted_sentences[sent_index].sent_len
    return added_sents
path: src/content_selection.py
name: _select_sentences
repository_name: aventon1/text-summarizer
repository_stars: 0
lang: python
body_hash: cb778c36d162df755a08e90987107e06cb4e0d197948df7ca6fa9421bf19834f
body:

def select_content(topics_list, d=0.7, intersent_threshold=0.0, summary_threshold=0.5,
                   epsilon=0.1, mle_lambda=0.6, k=20, min_sent_len=5,
                   include_narrative=False, bias_formula='cos', intersent_formula='cos'):
    """
    For each topic, creates summaries of <= 100 words (full sentences only)
    using a Biased LexRank similarity graph algorithm
    with tf-idf cosine similarity and a bias for the query topic.

    Args:
        topics_list: a list of Topic objects (which include Documents and Sentences)
        d: damping factor, the weight given to the topic bias in the Markov matrix
        intersent_threshold: minimum similarity required for inclusion in the
            similarity matrix
        summary_threshold: maximum allowed similarity between sentences in the summary
        epsilon: minimum difference between probabilities between rounds of the
            power method
        mle_lambda: the weight given to the topic MLE over the sentence MLE
        k: maximum number of inter-sentential similarity nodes to connect when
            using normalized generation probability
        min_sent_len: minimum number of words in a sentence for it to be used
            in the summary
        include_narrative: True if the narrative (in addition to the title)
            should be in the bias
        bias_formula: formula for sentence-topic similarity weighting -
            'cos' (cosine similarity), 'rel' (relevance), or 'gen'
            (generation probability)
        intersent_formula: formula for inter-sentential similarity weighting -
            'cos' (cosine similarity) or 'norm' (normalized generation probability)

    Returns:
        topics_list: the modified topics_list from the input, with a list of
            selected sentences in the topic.summary field of each topic.
    """
    for topic in topics_list:
        topic_title = topic.title
        # Pool all sentences in the topic cluster that meet the length cutoff.
        total_sentences = [sent for doc in topic.document_list
                           for sent in doc.sentence_list
                           if sent.sent_len >= min_sent_len]
        sim_matrix = _build_sim_matrix(total_sentences, intersent_threshold,
                                       intersent_formula, mle_lambda, k, topic)
        bias_vec = _build_bias_vec(total_sentences, topic_title, include_narrative,
                                   bias_formula, mle_lambda, topic)
        markov_matrix = _build_markov_matrix(sim_matrix, bias_vec, d)
        lex_rank_vec = _power_method(markov_matrix, epsilon)
        for i in range(len(total_sentences)):
            total_sentences[i].score = lex_rank_vec[i]
        sorted_sentences = sorted(total_sentences, reverse=True)
        topic.summary = _select_sentences(sorted_sentences, summary_threshold)
    return topics_list
For each topic, creates summaries of <= 100 words (full sentences only) using a Biased LexRank similarity graph algorithm with tf-idf cosine similarity and a bias for query topic. Args: topic_list: a list of Topic objects (which include Documents and Sentences) d: damping factor, amount to prioritize topic bias in Markov Matrix intersent_threshold: minimum amount of similarity required to include in Similarity Matrix summary_threshold: maximum amount of similarity between sentences in summary epsilon: minimum amount of difference between probabilities between rounds of power method mle_lambda: amount to prioritize topic MLE over sentence MLE k: maximum number of intersentential similarity nodes to connect when doing normalized generation probability min_sent_len: minimum number of words in a sentence to be used in the summary include_narrative: True if the narrative (in addition to title) should be in the bias bias_formula: which formula to use for sentence-topic similarity weighting - cos (cosine similarity), rel (relevance), or gen (generation probability) intersent_formula: which formula to use for inter-sentential similarity weighting - cos (cosine similarity) or norm (normalized generation probability) Returns: topic_list: the modified topic_list from the input, with a list of selected sentences in the topic.summary fields of each topic.
src/content_selection.py
select_content
aventon1/text-summarizer
0
python
def select_content(topics_list, d=0.7, intersent_threshold=0.0, summary_threshold=0.5, epsilon=0.1, mle_lambda=0.6, k=20, min_sent_len=5, include_narrative=False, bias_formula='cos', intersent_formula='cos'): '\n    For each topic, creates summaries of <= 100 words (full sentences only) \n    using a Biased LexRank similarity graph algorithm\n    with tf-idf cosine similarity and a bias for query topic.\n\n    Args:\n        topics_list: a list of Topic objects (which include Documents and Sentences)\n        d: damping factor, amount to prioritize topic bias in Markov Matrix\n        intersent_threshold: minimum amount of similarity required to include in Similarity Matrix\n        summary_threshold: maximum amount of similarity between sentences in summary\n        epsilon: minimum amount of difference between probabilities between rounds of power method\n        mle_lambda: amount to prioritize topic MLE over sentence MLE \n        k: maximum number of intersentential similarity nodes to connect when doing normalized generation probability\n        min_sent_len: minimum number of words in a sentence to be used in the summary\n        include_narrative: True if the narrative (in addition to title) should be in the bias\n        bias_formula: which formula to use for sentence-topic similarity weighting - cos (cosine similarity), rel (relevance), or gen (generation probability)\n        intersent_formula: which formula to use for inter-sentential similarity weighting - cos (cosine similarity) or norm (normalized generation probability)\n\n    Returns:\n        topics_list: the modified topics_list from the input, with a list of selected sentences\n        in the topic.summary fields of each topic.\n\n    ' topic_summaries = {} for topic in topics_list: topic_id = topic.topic_id topic_title = topic.title topic_docs_list = topic.document_list total_sentences = [sent for doc in topic.document_list for sent in doc.sentence_list if (sent.sent_len >= min_sent_len)] sim_matrix = _build_sim_matrix(total_sentences, intersent_threshold, intersent_formula, mle_lambda, k, topic) bias_vec = _build_bias_vec(total_sentences, topic_title, include_narrative, bias_formula, mle_lambda, topic) markov_matrix = _build_markov_matrix(sim_matrix, bias_vec, d) lex_rank_vec = _power_method(markov_matrix, epsilon) for i in range(len(total_sentences)): total_sentences[i].score = lex_rank_vec[i] sorted_sentences = sorted(total_sentences, reverse=True) topic.summary = _select_sentences(sorted_sentences, summary_threshold) return topics_list
def select_content(topics_list, d=0.7, intersent_threshold=0.0, summary_threshold=0.5, epsilon=0.1, mle_lambda=0.6, k=20, min_sent_len=5, include_narrative=False, bias_formula='cos', intersent_formula='cos'): '\n    For each topic, creates summaries of <= 100 words (full sentences only) \n    using a Biased LexRank similarity graph algorithm\n    with tf-idf cosine similarity and a bias for query topic.\n\n    Args:\n        topics_list: a list of Topic objects (which include Documents and Sentences)\n        d: damping factor, amount to prioritize topic bias in Markov Matrix\n        intersent_threshold: minimum amount of similarity required to include in Similarity Matrix\n        summary_threshold: maximum amount of similarity between sentences in summary\n        epsilon: minimum amount of difference between probabilities between rounds of power method\n        mle_lambda: amount to prioritize topic MLE over sentence MLE \n        k: maximum number of intersentential similarity nodes to connect when doing normalized generation probability\n        min_sent_len: minimum number of words in a sentence to be used in the summary\n        include_narrative: True if the narrative (in addition to title) should be in the bias\n        bias_formula: which formula to use for sentence-topic similarity weighting - cos (cosine similarity), rel (relevance), or gen (generation probability)\n        intersent_formula: which formula to use for inter-sentential similarity weighting - cos (cosine similarity) or norm (normalized generation probability)\n\n    Returns:\n        topics_list: the modified topics_list from the input, with a list of selected sentences\n        in the topic.summary fields of each topic.\n\n    ' topic_summaries = {} for topic in topics_list: topic_id = topic.topic_id topic_title = topic.title topic_docs_list = topic.document_list total_sentences = [sent for doc in topic.document_list for sent in doc.sentence_list if (sent.sent_len >= min_sent_len)] sim_matrix = _build_sim_matrix(total_sentences, intersent_threshold, intersent_formula, mle_lambda, k, topic) bias_vec = _build_bias_vec(total_sentences, topic_title, include_narrative, bias_formula, mle_lambda, topic) markov_matrix = _build_markov_matrix(sim_matrix, bias_vec, d) lex_rank_vec = _power_method(markov_matrix, epsilon) for i in range(len(total_sentences)): total_sentences[i].score = lex_rank_vec[i] sorted_sentences = sorted(total_sentences, reverse=True) topic.summary = _select_sentences(sorted_sentences, summary_threshold) return topics_list
def select_content(topics_list, d=0.7, intersent_threshold=0.0, summary_threshold=0.5, epsilon=0.1, mle_lambda=0.6, k=20, min_sent_len=5, include_narrative=False, bias_formula='cos', intersent_formula='cos'): '\n    For each topic, creates summaries of <= 100 words (full sentences only) \n    using a Biased LexRank similarity graph algorithm\n    with tf-idf cosine similarity and a bias for query topic.\n\n    Args:\n        topics_list: a list of Topic objects (which include Documents and Sentences)\n        d: damping factor, amount to prioritize topic bias in Markov Matrix\n        intersent_threshold: minimum amount of similarity required to include in Similarity Matrix\n        summary_threshold: maximum amount of similarity between sentences in summary\n        epsilon: minimum amount of difference between probabilities between rounds of power method\n        mle_lambda: amount to prioritize topic MLE over sentence MLE \n        k: maximum number of intersentential similarity nodes to connect when doing normalized generation probability\n        min_sent_len: minimum number of words in a sentence to be used in the summary\n        include_narrative: True if the narrative (in addition to title) should be in the bias\n        bias_formula: which formula to use for sentence-topic similarity weighting - cos (cosine similarity), rel (relevance), or gen (generation probability)\n        intersent_formula: which formula to use for inter-sentential similarity weighting - cos (cosine similarity) or norm (normalized generation probability)\n\n    Returns:\n        topics_list: the modified topics_list from the input, with a list of selected sentences\n        in the topic.summary fields of each topic.\n\n    ' topic_summaries = {} for topic in topics_list: topic_id = topic.topic_id topic_title = topic.title topic_docs_list = topic.document_list total_sentences = [sent for doc in topic.document_list for sent in doc.sentence_list if (sent.sent_len >= min_sent_len)] sim_matrix = _build_sim_matrix(total_sentences, intersent_threshold, intersent_formula, mle_lambda, k, topic) bias_vec = _build_bias_vec(total_sentences, topic_title, include_narrative, bias_formula, mle_lambda, topic) markov_matrix = _build_markov_matrix(sim_matrix, bias_vec, d) lex_rank_vec = _power_method(markov_matrix, epsilon) for i in range(len(total_sentences)): total_sentences[i].score = lex_rank_vec[i] sorted_sentences = sorted(total_sentences, reverse=True) topic.summary = _select_sentences(sorted_sentences, summary_threshold) return topics_list<|docstring|>For each topic, creates summaries of <= 100 words (full sentences only) using a Biased LexRank similarity graph algorithm with tf-idf cosine similarity and a bias for query topic. 
Args: topics_list: a list of Topic objects (which include Documents and Sentences) d: damping factor, amount to prioritize topic bias in Markov Matrix intersent_threshold: minimum amount of similarity required to include in Similarity Matrix summary_threshold: maximum amount of similarity between sentences in summary epsilon: minimum amount of difference between probabilities between rounds of power method mle_lambda: amount to prioritize topic MLE over sentence MLE k: maximum number of intersentential similarity nodes to connect when doing normalized generation probability min_sent_len: minimum number of words in a sentence to be used in the summary include_narrative: True if the narrative (in addition to title) should be in the bias bias_formula: which formula to use for sentence-topic similarity weighting - cos (cosine similarity), rel (relevance), or gen (generation probability) intersent_formula: which formula to use for inter-sentential similarity weighting - cos (cosine similarity) or norm (normalized generation probability) Returns: topics_list: the modified topics_list from the input, with a list of selected sentences in the topic.summary fields of each topic.<|endoftext|>
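The select_content record above calls a _power_method helper that is referenced but not included in this dump. Below is a minimal sketch of how such a biased-LexRank power iteration is commonly written; the function name matches the call site, but the matrix orientation and the convergence test are assumptions, not the repository's actual implementation:

import numpy as np

def _power_method(markov_matrix, epsilon):
    # Start from the uniform distribution over sentences.
    n = markov_matrix.shape[0]
    p = np.ones(n) / n
    while True:
        # One Markov step; whether to use M.T @ p or M @ p depends on
        # how _build_markov_matrix normalized its rows/columns (assumption).
        p_next = markov_matrix.T @ p
        # Stop once successive distributions differ by less than epsilon.
        if np.linalg.norm(p_next - p) < epsilon:
            return p_next
        p = p_next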
1e0966f935ded95592f0d0d13deb349fcd0e5e0dd317eb6c93c69a308b936529
def deprecated(func): 'This is a decorator which can be used to mark functions\n as deprecated. It will result in a warning being emitted\n when the function is used.' @functools.wraps(func) def new_func(*args, **kwargs): warnings.simplefilter('always', DeprecationWarning) warnings.warn('Call to deprecated function {}.'.format(func.__name__), category=DeprecationWarning, stacklevel=2) warnings.simplefilter('default', DeprecationWarning) return func(*args, **kwargs) return new_func
This is a decorator which can be used to mark functions as deprecated. It will result in a warning being emitted when the function is used.
valorant/utils/helper.py
deprecated
vnpnh/Pyvalo
0
python
def deprecated(func): 'This is a decorator which can be used to mark functions\n as deprecated. It will result in a warning being emitted\n when the function is used.' @functools.wraps(func) def new_func(*args, **kwargs): warnings.simplefilter('always', DeprecationWarning) warnings.warn('Call to deprecated function {}.'.format(func.__name__), category=DeprecationWarning, stacklevel=2) warnings.simplefilter('default', DeprecationWarning) return func(*args, **kwargs) return new_func
def deprecated(func): 'This is a decorator which can be used to mark functions\n as deprecated. It will result in a warning being emitted\n when the function is used.' @functools.wraps(func) def new_func(*args, **kwargs): warnings.simplefilter('always', DeprecationWarning) warnings.warn('Call to deprecated function {}.'.format(func.__name__), category=DeprecationWarning, stacklevel=2) warnings.simplefilter('default', DeprecationWarning) return func(*args, **kwargs) return new_func<|docstring|>This is a decorator which can be used to mark functions as deprecated. It will result in a warning being emitted when the function is used.<|endoftext|>
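With the deprecated decorator from the record above in scope, usage looks like this; the old_api function is purely illustrative:

@deprecated
def old_api():
    return 42

old_api()  # emits "Call to deprecated function old_api." as a DeprecationWarning,
           # attributed to the caller thanks to stacklevel=2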
8552c9d0b1f65cb24eeedf59925dc56354a28c678c5428fdaa9fcdaf8f0ae034
@responses.activate def test_1000(self, gmn_client_v1_v2): 'MNCore.getCapabilities(): Returns a valid Node Registry document.' with d1_gmn.tests.gmn_mock.disable_auth(): node = gmn_client_v1_v2.getCapabilities() assert isinstance(node, gmn_client_v1_v2.pyxb_binding.Node)
MNCore.getCapabilities(): Returns a valid Node Registry document.
gmn/src/d1_gmn/tests/test_get_capabilities.py
test_1000
DataONEorg/d1_python
15
python
@responses.activate def test_1000(self, gmn_client_v1_v2): with d1_gmn.tests.gmn_mock.disable_auth(): node = gmn_client_v1_v2.getCapabilities() assert isinstance(node, gmn_client_v1_v2.pyxb_binding.Node)
@responses.activate def test_1000(self, gmn_client_v1_v2): with d1_gmn.tests.gmn_mock.disable_auth(): node = gmn_client_v1_v2.getCapabilities() assert isinstance(node, gmn_client_v1_v2.pyxb_binding.Node)<|docstring|>MNCore.getCapabilities(): Returns a valid Node Registry document.<|endoftext|>
7f589a8e9487eae09b7f02a1b7031d86f1296d3057b8114cdeb889e83e52b5cd
def get_dataset(identifier): 'Returns the correct CrepeDataset based on Id in `{ag,amazon,dbpedia,sogou,yahoo,yelp,yelp-polarity}`' if (identifier == 'ag'): dataset = AgNews elif (identifier == 'amazon-polarity'): dataset = AmazonReviewPolarity elif (identifier == 'amazon'): dataset = AmazonReview elif (identifier == 'dbpedia'): dataset = DbPedia elif (identifier == 'sogou'): dataset = SogouNews elif (identifier == 'yahoo'): dataset = YahooAnswers elif (identifier == 'yelp'): dataset = YelpReview elif (identifier == 'yelp-polarity'): dataset = YelpReviewPolarity else: raise ValueError('Unknown dataset identifier: {}'.format(identifier)) return dataset
Returns the correct CrepeDataset based on Id in `{ag,amazon,dbpedia,sogou,yahoo,yelp,yelp-polarity}`
evaluate/load/dataset.py
get_dataset
YannDubs/Hash-Embeddings
159
python
def get_dataset(identifier): if (identifier == 'ag'): dataset = AgNews elif (identifier == 'amazon-polarity'): dataset = AmazonReviewPolarity elif (identifier == 'amazon'): dataset = AmazonReview elif (identifier == 'dbpedia'): dataset = DbPedia elif (identifier == 'sogou'): dataset = SogouNews elif (identifier == 'yahoo'): dataset = YahooAnswers elif (identifier == 'yelp'): dataset = YelpReview elif (identifier == 'yelp-polarity'): dataset = YelpReviewPolarity else: raise ValueError('Unknown dataset identifier: {}'.format(identifier)) return dataset
def get_dataset(identifier): if (identifier == 'ag'): dataset = AgNews elif (identifier == 'amazon-polarity'): dataset = AmazonReviewPolarity elif (identifier == 'amazon'): dataset = AmazonReview elif (identifier == 'dbpedia'): dataset = DbPedia elif (identifier == 'sogou'): dataset = SogouNews elif (identifier == 'yahoo'): dataset = YahooAnswers elif (identifier == 'yelp'): dataset = YelpReview elif (identifier == 'yelp-polarity'): dataset = YelpReviewPolarity else: raise ValueError('Unknown dataset identifier: {}'.format(identifier)) return dataset<|docstring|>Returns the correct CrepeDataset based on Id in `{ag,amazon,dbpedia,sogou,yahoo,yelp,yelp-polarity}`<|endoftext|>
28545d89b7c0ccd19730b1e2c3bc6b7affee242b20ade6e44122f2ff206ff319
def get_path(identifier): 'Returns the correct root directory to dataset based on Id in `{ag,amazon,dbpedia,sogou,yahoo,yelp,yelp-polarity}`' if (identifier == 'ag'): path = '../../data/ag_news_csv' elif (identifier == 'amazon-polarity'): path = '../../data/amazon_review_polarity_csv' elif (identifier == 'amazon'): path = '../../data/amazon_review_full_csv' elif (identifier == 'dbpedia'): path = '../../data/dbpedia_csv' elif (identifier == 'sogou'): path = '../../data/sogou_news_csv' elif (identifier == 'yahoo'): path = '../../data/yahoo_answers_csv' elif (identifier == 'yelp'): path = '../../data/yelp_review_full_csv' elif (identifier == 'yelp-polarity'): path = '../../data/yelp_review_polarity_csv' else: raise ValueError('Unknown dataset identifier: {}'.format(identifier)) return os.path.abspath(os.path.join(os.path.dirname(__file__), path))
Returns the correct root directory to dataset based on Id in `{ag,amazon,dbpedia,sogou,yahoo,yelp,yelp-polarity}`
evaluate/load/dataset.py
get_path
YannDubs/Hash-Embeddings
159
python
def get_path(identifier): if (identifier == 'ag'): path = '../../data/ag_news_csv' elif (identifier == 'amazon-polarity'): path = '../../data/amazon_review_polarity_csv' elif (identifier == 'amazon'): path = '../../data/amazon_review_full_csv' elif (identifier == 'dbpedia'): path = '../../data/dbpedia_csv' elif (identifier == 'sogou'): path = '../../data/sogou_news_csv' elif (identifier == 'yahoo'): path = '../../data/yahoo_answers_csv' elif (identifier == 'yelp'): path = '../../data/yelp_review_full_csv' elif (identifier == 'yelp-polarity'): path = '../../data/yelp_review_polarity_csv' else: raise ValueError('Unknown dataset identifier: {}'.format(identifier)) return os.path.abspath(os.path.join(os.path.dirname(__file__), path))
def get_path(identifier): if (identifier == 'ag'): path = '../../data/ag_news_csv' elif (identifier == 'amazon-polarity'): path = '../../data/amazon_review_polarity_csv' elif (identifier == 'amazon'): path = '../../data/amazon_review_full_csv' elif (identifier == 'dbpedia'): path = '../../data/dbpedia_csv' elif (identifier == 'sogou'): path = '../../data/sogou_news_csv' elif (identifier == 'yahoo'): path = '../../data/yahoo_answers_csv' elif (identifier == 'yelp'): path = '../../data/yelp_review_full_csv' elif (identifier == 'yelp-polarity'): path = '../../data/yelp_review_polarity_csv' else: raise ValueError('Unknown dataset identifier: {}'.format(identifier)) return os.path.abspath(os.path.join(os.path.dirname(__file__), path))<|docstring|>Returns the correct root directory to dataset based on Id in `{ag,amazon,dbpedia,sogou,yahoo,yelp,yelp-polarity}`<|endoftext|>
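A quick sketch combining the two lookup helpers above; the identifier is illustrative, and the commented-out constructor call is an assumption since the CrepeDataset signature is not shown in these records:

identifier = 'ag'
dataset_cls = get_dataset(identifier)  # -> AgNews
root = get_path(identifier)            # -> absolute path ending in data/ag_news_csv
# train_set = dataset_cls(root=root, train=True)  # constructor args are assumptions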
59d0ffbf432b12b6aff0426ae575bcddee3276ede9a6c0992664f8b8e4127c4a
def get_skin(self, request=None): 'Get skin matching this content'
Get skin matching this content
src/pyams_layer/interfaces.py
get_skin
Py-AMS/pyams-layer
0
python
def get_skin(self, request=None):
def get_skin(self, request=None): <|docstring|>Get skin matching this content<|endoftext|>
190d9d3641cfaa44cfa4ba5ce3e70f73c7cdcb54035776d6dc22704623b2dc4a
@pytest.mark.parametrize('numpy_dtype', [np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64]) def test_torch_tensorable_types(numpy_dtype): "Make sure that we 'sanitize' only integer types that can not be made into torch tensors natively" value = np.zeros((2, 2), dtype=numpy_dtype) dict_to_sanitize = {'value': value} _sanitize_pytorch_types(dict_to_sanitize) torchable = False try: torch.Tensor(value) torchable = True except TypeError: pass tensor = torch.as_tensor(dict_to_sanitize['value']) tensor_and_back = tensor.numpy() if (tensor_and_back.dtype != value.dtype): assert (tensor_and_back.dtype.itemsize > value.dtype.itemsize) assert (not torchable), '_sanitize_pytorch_types modified value of type {}, but it was possible to create a Tensor directly from a value with that type'.format(numpy_dtype)
Make sure that we 'sanitize' only integer types that can not be made into torch tensors natively
petastorm/tests/test_pytorch_dataloader.py
test_torch_tensorable_types
dongpohezui/petastorm
0
python
@pytest.mark.parametrize('numpy_dtype', [np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64]) def test_torch_tensorable_types(numpy_dtype): value = np.zeros((2, 2), dtype=numpy_dtype) dict_to_sanitize = {'value': value} _sanitize_pytorch_types(dict_to_sanitize) torchable = False try: torch.Tensor(value) torchable = True except TypeError: pass tensor = torch.as_tensor(dict_to_sanitize['value']) tensor_and_back = tensor.numpy() if (tensor_and_back.dtype != value.dtype): assert (tensor_and_back.dtype.itemsize > value.dtype.itemsize) assert (not torchable), '_sanitize_pytorch_types modified value of type {}, but it was possible to create a Tensor directly from a value with that type'.format(numpy_dtype)
@pytest.mark.parametrize('numpy_dtype', [np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64]) def test_torch_tensorable_types(numpy_dtype): value = np.zeros((2, 2), dtype=numpy_dtype) dict_to_sanitize = {'value': value} _sanitize_pytorch_types(dict_to_sanitize) torchable = False try: torch.Tensor(value) torchable = True except TypeError: pass tensor = torch.as_tensor(dict_to_sanitize['value']) tensor_and_back = tensor.numpy() if (tensor_and_back.dtype != value.dtype): assert (tensor_and_back.dtype.itemsize > value.dtype.itemsize) assert (not torchable), '_sanitize_pytorch_types modified value of type {}, but it was possible to create a Tensor directly from a value with that type'.format(numpy_dtype)<|docstring|>Make sure that we 'sanitize' only integer types that can not be made into torch tensors natively<|endoftext|>
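The test above hinges on some NumPy integer dtypes not being accepted by torch.Tensor directly; this small probe uses only the calls the test itself makes (results vary by torch version, so treat the output as indicative):

import numpy as np
import torch

for dt in (np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64):
    try:
        torch.Tensor(np.zeros((2, 2), dtype=dt))
        print(dt.__name__, 'tensorable natively')
    except TypeError:
        print(dt.__name__, 'needs sanitizing first')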
4e7aecb36a9df3e69a27231b1d72358ba7cc431ab23442e15428f6b48f8b14a4
@pytest.mark.parametrize('shuffling_queue_capacity', [0, 3, 11, 1000]) @pytest.mark.parametrize('data_loader_type', ALL_DATA_LOADERS) def test_with_batch_reader(scalar_dataset, shuffling_queue_capacity, data_loader_type): 'See if we are getting correct batch sizes when using DataLoader with make_batch_reader' pytorch_compatible_fields = [k for (k, v) in scalar_dataset.data[0].items() if (not isinstance(v, (np.datetime64, np.unicode_)))] with data_loader_type(make_batch_reader(scalar_dataset.url, schema_fields=pytorch_compatible_fields), batch_size=3, shuffling_queue_capacity=shuffling_queue_capacity) as loader: batches = list(loader) assert (len(scalar_dataset.data) == sum((batch['id'].shape[0] for batch in batches))) if (pa.__version__ != '0.15.0'): assert (len(scalar_dataset.data) == sum((batch['int_fixed_size_list'].shape[0] for batch in batches))) assert (batches[0]['int_fixed_size_list'].shape[1] == len(scalar_dataset.data[0]['int_fixed_size_list']))
See if we are getting correct batch sizes when using DataLoader with make_batch_reader
petastorm/tests/test_pytorch_dataloader.py
test_with_batch_reader
dongpohezui/petastorm
0
python
@pytest.mark.parametrize('shuffling_queue_capacity', [0, 3, 11, 1000]) @pytest.mark.parametrize('data_loader_type', ALL_DATA_LOADERS) def test_with_batch_reader(scalar_dataset, shuffling_queue_capacity, data_loader_type): pytorch_compatible_fields = [k for (k, v) in scalar_dataset.data[0].items() if (not isinstance(v, (np.datetime64, np.unicode_)))] with data_loader_type(make_batch_reader(scalar_dataset.url, schema_fields=pytorch_compatible_fields), batch_size=3, shuffling_queue_capacity=shuffling_queue_capacity) as loader: batches = list(loader) assert (len(scalar_dataset.data) == sum((batch['id'].shape[0] for batch in batches))) if (pa.__version__ != '0.15.0'): assert (len(scalar_dataset.data) == sum((batch['int_fixed_size_list'].shape[0] for batch in batches))) assert (batches[0]['int_fixed_size_list'].shape[1] == len(scalar_dataset.data[0]['int_fixed_size_list']))
@pytest.mark.parametrize('shuffling_queue_capacity', [0, 3, 11, 1000]) @pytest.mark.parametrize('data_loader_type', ALL_DATA_LOADERS) def test_with_batch_reader(scalar_dataset, shuffling_queue_capacity, data_loader_type): pytorch_compatible_fields = [k for (k, v) in scalar_dataset.data[0].items() if (not isinstance(v, (np.datetime64, np.unicode_)))] with data_loader_type(make_batch_reader(scalar_dataset.url, schema_fields=pytorch_compatible_fields), batch_size=3, shuffling_queue_capacity=shuffling_queue_capacity) as loader: batches = list(loader) assert (len(scalar_dataset.data) == sum((batch['id'].shape[0] for batch in batches))) if (pa.__version__ != '0.15.0'): assert (len(scalar_dataset.data) == sum((batch['int_fixed_size_list'].shape[0] for batch in batches))) assert (batches[0]['int_fixed_size_list'].shape[1] == len(scalar_dataset.data[0]['int_fixed_size_list']))<|docstring|>See if we are getting correct batch sizes when using DataLoader with make_batch_reader<|endoftext|>
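A minimal sketch of the petastorm pattern the test exercises, using the same make_batch_reader and PyTorch DataLoader entry points; the dataset URL is a placeholder and the 'id' field name is taken from the test above:

from petastorm import make_batch_reader
from petastorm.pytorch import DataLoader

with DataLoader(make_batch_reader('file:///tmp/scalar_dataset'),  # URL illustrative
                batch_size=3) as loader:
    for batch in loader:
        print(batch['id'].shape)  # one dict of column tensors per iteration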
db9eaf20cc4e18550506bdd3da2e0099b1d1eae7eb10da1b40363299e7b010b1
def validate(self, data): 'validate data before we save ' confirmed_password = data.get('confirmed_password') try: validate_password(data['password']) except ValidationError as e: raise serializers.ValidationError({'password': str(e).replace('[', '').replace(']', '')}) if (not self.do_passwords_match(data['password'], confirmed_password)): raise serializers.ValidationError({'passwords': 'Passwords do not match'}) return data
validate data before we save
authentication/serializers.py
validate
kelvinndmo/scholarship_system
0
python
def validate(self, data): ' ' confirmed_password = data.get('confirmed_password') try: validate_password(data['password']) except ValidationError as e: raise serializers.ValidationError({'password': str(e).replace('[', '').replace(']', '')}) if (not self.do_passwords_match(data['password'], confirmed_password)): raise serializers.ValidationError({'passwords': 'Passwords do not match'}) return data
def validate(self, data): ' ' confirmed_password = data.get('confirmed_password') try: validate_password(data['password']) except ValidationError as e: raise serializers.ValidationError({'password': str(e).replace('[', '').replace(']', '')}) if (not self.do_passwords_match(data['password'], confirmed_password)): raise serializers.ValidationError({'passwords': 'Passwords do not match'}) return data<|docstring|>validate data before we save<|endoftext|>
6cc80fc473ed497c4f81c8412ddc4fb9c36e71b88523a1d3977752d6af7faf8f
def create(self, validated_data): 'Create a user.' del validated_data['confirmed_password'] return User.objects.create_user(**validated_data)
Create a user.
authentication/serializers.py
create
kelvinndmo/scholarship_system
0
python
def create(self, validated_data): del validated_data['confirmed_password'] return User.objects.create_user(**validated_data)
def create(self, validated_data): del validated_data['confirmed_password'] return User.objects.create_user(**validated_data)<|docstring|>Create a user.<|endoftext|>
91e4dfa418d86950e4f252bd6820f22b07ddaf8fb5a27b044c7fe618b480e092
def do_passwords_match(self, password1, password2): 'Check if passwords match.' return (password1 == password2)
Check if passwords match.
authentication/serializers.py
do_passwords_match
kelvinndmo/scholarship_system
0
python
def do_passwords_match(self, password1, password2): return (password1 == password2)
def do_passwords_match(self, password1, password2): return (password1 == password2)<|docstring|>Check if passwords match.<|endoftext|>
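Taken together, the three serializer methods above implement a standard DRF registration flow. A hedged sketch of the class they would sit in follows; the class name and field declarations are assumptions, not the repository's actual code:

from rest_framework import serializers

class RegistrationSerializer(serializers.Serializer):  # name/fields assumed
    password = serializers.CharField(write_only=True)
    confirmed_password = serializers.CharField(write_only=True)
    # validate() strips the '[' / ']' that Django's password validators wrap
    # around their messages, then checks the two password fields match;
    # create() drops confirmed_password before delegating to create_user().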
676a33cfadfbdc179635c6432b0978d9b2821b4e9d6bdf0761b2cb90f59ff01d
def main(): 'Reads in raw log files/pcap and writes entries to an sqlite database' parser = argparse.ArgumentParser() parser.add_argument('-e', '--endpoint', dest='host', type=str, required=False, help='endpoint, aka host ("client", "server", etc.)') parser.add_argument('-c', '--cpu-file', dest='cpu_file', type=str, required=False, default=None, help='(uncompressed) cpu log file') parser.add_argument('-d', '--disk-file', dest='disk_file', type=str, required=False, default=None, help='(uncompressed) disk log file') parser.add_argument('-r', '--ram-file', dest='ram_file', type=str, required=False, default=None, help='(uncompressed) ram log file') parser.add_argument('-n', '--network-file', dest='network_file', type=str, required=False, default=None, help='pcap file') parser.add_argument('-m', '--network-map', dest='network_map', type=str, required=False, default=None, help='file containing map from IP address to host ("client")') parser.add_argument('-o', '--output-file', dest='sqlite_file', type=str, required=True, help='sqlite3 file to write to') options = parser.parse_args() con = create_perf_db(options.sqlite_file) if (options.cpu_file != None): reader = CpuLogReader(handle=open(options.cpu_file), host=options.host) reader.write_to_database(con) if (options.disk_file != None): reader = DiskLogReader(handle=open(options.disk_file), host=options.host) reader.write_to_database(con) if (options.ram_file != None): reader = RamLogReader(handle=open(options.ram_file), host=options.host) reader.write_to_database(con) if (options.network_file != None): if (options.network_map == None): sys.stderr.write('Must include network map if parsing pcap files') else: mappings = get_mappings_from_handle(open(options.network_map)) reader = pcap_to_log(options.network_file, mappings) reader.write_to_database(con)
Reads in raw log files/pcap and writes entries to an sqlite database
spar_python/perf_monitoring/perf_logs_to_db.py
main
nathanawmk/SPARTA
37
python
def main(): parser = argparse.ArgumentParser() parser.add_argument('-e', '--endpoint', dest='host', type=str, required=False, help='endpoint, aka host ("client", "server", etc.)') parser.add_argument('-c', '--cpu-file', dest='cpu_file', type=str, required=False, default=None, help='(uncompressed) cpu log file') parser.add_argument('-d', '--disk-file', dest='disk_file', type=str, required=False, default=None, help='(uncompressed) disk log file') parser.add_argument('-r', '--ram-file', dest='ram_file', type=str, required=False, default=None, help='(uncompressed) ram log file') parser.add_argument('-n', '--network-file', dest='network_file', type=str, required=False, default=None, help='pcap file') parser.add_argument('-m', '--network-map', dest='network_map', type=str, required=False, default=None, help='file containing map from IP address to host ("client")') parser.add_argument('-o', '--output-file', dest='sqlite_file', type=str, required=True, help='sqlite3 file to write to') options = parser.parse_args() con = create_perf_db(options.sqlite_file) if (options.cpu_file != None): reader = CpuLogReader(handle=open(options.cpu_file), host=options.host) reader.write_to_database(con) if (options.disk_file != None): reader = DiskLogReader(handle=open(options.disk_file), host=options.host) reader.write_to_database(con) if (options.ram_file != None): reader = RamLogReader(handle=open(options.ram_file), host=options.host) reader.write_to_database(con) if (options.network_file != None): if (options.network_map == None): sys.stderr.write('Must include network map if parsing pcap files') else: mappings = get_mappings_from_handle(open(options.network_map)) reader = pcap_to_log(options.network_file, mappings) reader.write_to_database(con)
def main(): parser = argparse.ArgumentParser() parser.add_argument('-e', '--endpoint', dest='host', type=str, required=False, help='endpoint, aka host ("client", "server", etc.)') parser.add_argument('-c', '--cpu-file', dest='cpu_file', type=str, required=False, default=None, help='(uncompressed) cpu log file') parser.add_argument('-d', '--disk-file', dest='disk_file', type=str, required=False, default=None, help='(uncompressed) disk log file') parser.add_argument('-r', '--ram-file', dest='ram_file', type=str, required=False, default=None, help='(uncompressed) ram log file') parser.add_argument('-n', '--network-file', dest='network_file', type=str, required=False, default=None, help='pcap file') parser.add_argument('-m', '--network-map', dest='network_map', type=str, required=False, default=None, help='file containing map from IP address to host ("client")') parser.add_argument('-o', '--output-file', dest='sqlite_file', type=str, required=True, help='sqlite3 file to write to') options = parser.parse_args() con = create_perf_db(options.sqlite_file) if (options.cpu_file != None): reader = CpuLogReader(handle=open(options.cpu_file), host=options.host) reader.write_to_database(con) if (options.disk_file != None): reader = DiskLogReader(handle=open(options.disk_file), host=options.host) reader.write_to_database(con) if (options.ram_file != None): reader = RamLogReader(handle=open(options.ram_file), host=options.host) reader.write_to_database(con) if (options.network_file != None): if (options.network_map == None): sys.stderr.write('Must include network map if parsing pcap files') else: mappings = get_mappings_from_handle(open(options.network_map)) reader = pcap_to_log(options.network_file, mappings) reader.write_to_database(con)<|docstring|>Reads in raw log files/pcap and writes entries to an sqlite database<|endoftext|>
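A representative invocation of the script above, shown as a comment; the file paths are illustrative, while the flags come straight from the argparse setup:

# python spar_python/perf_monitoring/perf_logs_to_db.py -e client \
#     -c cpu.log -d disk.log -r ram.log \
#     -n capture.pcap -m ip_to_host.map -o perf.db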
bf4981d8e131d914796245212a2de77c81c4e5821875e3b2b17f67e8b18fe1d5
def on(self): '\n Turns the device on.\n ' self._write(True)
Turns the device on.
gpiozero/output_devices.py
on
lurch/python-gpiozero
0
python
def on(self): '\n \n ' self._write(True)
def on(self): '\n \n ' self._write(True)<|docstring|>Turns the device on.<|endoftext|>
e4b761944f334631b322a25ec017e7c0e4ce3626a022ed6703e6e027e43efbf3
def off(self): '\n Turns the device off.\n ' self._write(False)
Turns the device off.
gpiozero/output_devices.py
off
lurch/python-gpiozero
0
python
def off(self): '\n \n ' self._write(False)
def off(self): '\n \n ' self._write(False)<|docstring|>Turns the device off.<|endoftext|>
9bea51fee3d5e19ab8069a9c87f54d9b719cfe411e07ea3becf42624221528fa
def toggle(self): "\n Reverse the state of the device. If it's on, turn it off; if it's off,\n turn it on.\n " with self._lock: if self.is_active: self.off() else: self.on()
Reverse the state of the device. If it's on, turn it off; if it's off, turn it on.
gpiozero/output_devices.py
toggle
lurch/python-gpiozero
0
python
def toggle(self): "\n Reverse the state of the device. If it's on, turn it off; if it's off,\n turn it on.\n " with self._lock: if self.is_active: self.off() else: self.on()
def toggle(self): "\n Reverse the state of the device. If it's on, turn it off; if it's off,\n turn it on.\n " with self._lock: if self.is_active: self.off() else: self.on()<|docstring|>Reverse the state of the device. If it's on, turn it off; if it's off, turn it on.<|endoftext|>
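The three methods above form the core of gpiozero's on/off API; a typical session looks like this (the BCM pin number is illustrative):

from gpiozero import LED

led = LED(17)  # pin 17 illustrative
led.on()
led.toggle()   # was on, now off
led.off()      # already off; stays off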
9afa24e9c959fef1db0e5e3ddc79897fac6294398ef2a3e413bf507e753dc1f5
@property def value(self): '\n Returns ``True`` if the device is currently active and ``False``\n otherwise. Setting this property changes the state of the device.\n ' return super(OutputDevice, self).value
Returns ``True`` if the device is currently active and ``False`` otherwise. Setting this property changes the state of the device.
gpiozero/output_devices.py
value
lurch/python-gpiozero
0
python
@property def value(self): '\n Returns ``True`` if the device is currently active and ``False``\n otherwise. Setting this property changes the state of the device.\n ' return super(OutputDevice, self).value
@property def value(self): '\n Returns ``True`` if the device is currently active and ``False``\n otherwise. Setting this property changes the state of the device.\n ' return super(OutputDevice, self).value<|docstring|>Returns ``True`` if the device is currently active and ``False`` otherwise. Setting this property changes the state of the device.<|endoftext|>
4a2f1c0a9f6c47f1b4cae396b4d73204ad2b1854f517aa15d170c84996961a16
@property def active_high(self): "\n When ``True``, the :attr:`value` property is ``True`` when the device's\n :attr:`pin` is high. When ``False`` the :attr:`value` property is\n ``True`` when the device's pin is low (i.e. the value is inverted).\n\n This property can be set after construction; be warned that changing it\n will invert :attr:`value` (i.e. changing this property doesn't change\n the device's pin state - it just changes how that state is\n interpreted).\n " return self._active_state
When ``True``, the :attr:`value` property is ``True`` when the device's :attr:`pin` is high. When ``False`` the :attr:`value` property is ``True`` when the device's pin is low (i.e. the value is inverted). This property can be set after construction; be warned that changing it will invert :attr:`value` (i.e. changing this property doesn't change the device's pin state - it just changes how that state is interpreted).
gpiozero/output_devices.py
active_high
lurch/python-gpiozero
0
python
@property def active_high(self): "\n When ``True``, the :attr:`value` property is ``True`` when the device's\n :attr:`pin` is high. When ``False`` the :attr:`value` property is\n ``True`` when the device's pin is low (i.e. the value is inverted).\n\n This property can be set after construction; be warned that changing it\n will invert :attr:`value` (i.e. changing this property doesn't change\n the device's pin state - it just changes how that state is\n interpreted).\n " return self._active_state
@property def active_high(self): "\n When ``True``, the :attr:`value` property is ``True`` when the device's\n :attr:`pin` is high. When ``False`` the :attr:`value` property is\n ``True`` when the device's pin is low (i.e. the value is inverted).\n\n This property can be set after construction; be warned that changing it\n will invert :attr:`value` (i.e. changing this property doesn't change\n the device's pin state - it just changes how that state is\n interpreted).\n " return self._active_state<|docstring|>When ``True``, the :attr:`value` property is ``True`` when the device's :attr:`pin` is high. When ``False`` the :attr:`value` property is ``True`` when the device's pin is low (i.e. the value is inverted). This property can be set after construction; be warned that changing it will invert :attr:`value` (i.e. changing this property doesn't change the device's pin state - it just changes how that state is interpreted).<|endoftext|>
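For active-low wiring, the property above is normally fixed at construction time; a short sketch (pin illustrative):

from gpiozero import LED

led = LED(17, active_high=False)  # a LOW pin now reads as value == True
led.on()                          # drives the pin low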
f7e6118514b711ae28fc2d2206dbd32164251ed49f7b1f5f3e2b367132c2b601
def blink(self, on_time=1, off_time=1, n=None, background=True): '\n Make the device turn on and off repeatedly.\n\n :param float on_time:\n Number of seconds on. Defaults to 1 second.\n\n :param float off_time:\n Number of seconds off. Defaults to 1 second.\n\n :param int n:\n Number of times to blink; ``None`` (the default) means forever.\n\n :param bool background:\n If ``True`` (the default), start a background thread to continue\n blinking and return immediately. If ``False``, only return when the\n blink is finished (warning: the default value of *n* will result in\n this method never returning).\n ' self._stop_blink() self._blink_thread = GPIOThread(target=self._blink_device, args=(on_time, off_time, n)) self._blink_thread.start() if (not background): self._blink_thread.join() self._blink_thread = None
Make the device turn on and off repeatedly. :param float on_time: Number of seconds on. Defaults to 1 second. :param float off_time: Number of seconds off. Defaults to 1 second. :param int n: Number of times to blink; ``None`` (the default) means forever. :param bool background: If ``True`` (the default), start a background thread to continue blinking and return immediately. If ``False``, only return when the blink is finished (warning: the default value of *n* will result in this method never returning).
gpiozero/output_devices.py
blink
lurch/python-gpiozero
0
python
def blink(self, on_time=1, off_time=1, n=None, background=True): '\n Make the device turn on and off repeatedly.\n\n :param float on_time:\n Number of seconds on. Defaults to 1 second.\n\n :param float off_time:\n Number of seconds off. Defaults to 1 second.\n\n :param int n:\n Number of times to blink; ``None`` (the default) means forever.\n\n :param bool background:\n If ``True`` (the default), start a background thread to continue\n blinking and return immediately. If ``False``, only return when the\n blink is finished (warning: the default value of *n* will result in\n this method never returning).\n ' self._stop_blink() self._blink_thread = GPIOThread(target=self._blink_device, args=(on_time, off_time, n)) self._blink_thread.start() if (not background): self._blink_thread.join() self._blink_thread = None
def blink(self, on_time=1, off_time=1, n=None, background=True): '\n Make the device turn on and off repeatedly.\n\n :param float on_time:\n Number of seconds on. Defaults to 1 second.\n\n :param float off_time:\n Number of seconds off. Defaults to 1 second.\n\n :param int n:\n Number of times to blink; ``None`` (the default) means forever.\n\n :param bool background:\n If ``True`` (the default), start a background thread to continue\n blinking and return immediately. If ``False``, only return when the\n blink is finished (warning: the default value of *n* will result in\n this method never returning).\n ' self._stop_blink() self._blink_thread = GPIOThread(target=self._blink_device, args=(on_time, off_time, n)) self._blink_thread.start() if (not background): self._blink_thread.join() self._blink_thread = None<|docstring|>Make the device turn on and off repeatedly. :param float on_time: Number of seconds on. Defaults to 1 second. :param float off_time: Number of seconds off. Defaults to 1 second. :param int n: Number of times to blink; ``None`` (the default) means forever. :param bool background: If ``True`` (the default), start a background thread to continue blinking and return immediately. If ``False``, only return when the blink is finished (warning: the default value of *n* will result in this method never returning).<|endoftext|>
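Foreground versus background blinking, per the docstring above (pin and timings illustrative):

from gpiozero import LED

led = LED(17)
led.blink(on_time=0.2, off_time=0.2, n=10, background=False)  # blocks ~4 seconds
led.blink()  # 1s on / 1s off, forever, in a background thread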
9e313deae59eb8d3be9625a17e4ebab539a4402fbed737ba1ebad953bbf48a7a
@property def value(self): '\n The duty cycle of the PWM device. 0.0 is off, 1.0 is fully on. Values\n in between may be specified for varying levels of power in the device.\n ' return self._read()
The duty cycle of the PWM device. 0.0 is off, 1.0 is fully on. Values in between may be specified for varying levels of power in the device.
gpiozero/output_devices.py
value
lurch/python-gpiozero
0
python
@property def value(self): '\n The duty cycle of the PWM device. 0.0 is off, 1.0 is fully on. Values\n in between may be specified for varying levels of power in the device.\n ' return self._read()
@property def value(self): '\n The duty cycle of the PWM device. 0.0 is off, 1.0 is fully on. Values\n in between may be specified for varying levels of power in the device.\n ' return self._read()<|docstring|>The duty cycle of the PWM device. 0.0 is off, 1.0 is fully on. Values in between may be specified for varying levels of power in the device.<|endoftext|>
0213df2fb3ae0466bb4bfa469a2c9de398929661bc259ccc179e1061ebc843de
def toggle(self): '\n Toggle the state of the device. If the device is currently off\n (:attr:`value` is 0.0), this changes it to "fully" on (:attr:`value` is\n 1.0). If the device has a duty cycle (:attr:`value`) of 0.1, this will\n toggle it to 0.9, and so on.\n ' self._stop_blink() self.value = (1 - self.value)
Toggle the state of the device. If the device is currently off (:attr:`value` is 0.0), this changes it to "fully" on (:attr:`value` is 1.0). If the device has a duty cycle (:attr:`value`) of 0.1, this will toggle it to 0.9, and so on.
gpiozero/output_devices.py
toggle
lurch/python-gpiozero
0
python
def toggle(self): '\n Toggle the state of the device. If the device is currently off\n (:attr:`value` is 0.0), this changes it to "fully" on (:attr:`value` is\n 1.0). If the device has a duty cycle (:attr:`value`) of 0.1, this will\n toggle it to 0.9, and so on.\n ' self._stop_blink() self.value = (1 - self.value)
def toggle(self): '\n Toggle the state of the device. If the device is currently off\n (:attr:`value` is 0.0), this changes it to "fully" on (:attr:`value` is\n 1.0). If the device has a duty cycle (:attr:`value`) of 0.1, this will\n toggle it to 0.9, and so on.\n ' self._stop_blink() self.value = (1 - self.value)<|docstring|>Toggle the state of the device. If the device is currently off (:attr:`value` is 0.0), this changes it to "fully" on (:attr:`value` is 1.0). If the device has a duty cycle (:attr:`value`) of 0.1, this will toggle it to 0.9, and so on.<|endoftext|>
75bee28abce166cc7fe1be0cb6538da52876946303b72b518afdc4be1300ce07
@property def is_active(self): '\n Returns ``True`` if the device is currently active (:attr:`value` is\n non-zero) and ``False`` otherwise.\n ' return (self.value != 0)
Returns ``True`` if the device is currently active (:attr:`value` is non-zero) and ``False`` otherwise.
gpiozero/output_devices.py
is_active
lurch/python-gpiozero
0
python
@property def is_active(self): '\n Returns ``True`` if the device is currently active (:attr:`value` is\n non-zero) and ``False`` otherwise.\n ' return (self.value != 0)
@property def is_active(self): '\n Returns ``True`` if the device is currently active (:attr:`value` is\n non-zero) and ``False`` otherwise.\n ' return (self.value != 0)<|docstring|>Returns ``True`` if the device is currently active (:attr:`value` is non-zero) and ``False`` otherwise.<|endoftext|>
0c040a050b5d90233877468dd078cd893a08953b3414185a391f29c27aa166e1
@property def frequency(self): '\n The frequency of the pulses used with the PWM device, in Hz. The\n default is 100Hz.\n ' return self.pin.frequency
The frequency of the pulses used with the PWM device, in Hz. The default is 100Hz.
gpiozero/output_devices.py
frequency
lurch/python-gpiozero
0
python
@property def frequency(self): '\n The frequency of the pulses used with the PWM device, in Hz. The\n default is 100Hz.\n ' return self.pin.frequency
@property def frequency(self): '\n The frequency of the pulses used with the PWM device, in Hz. The\n default is 100Hz.\n ' return self.pin.frequency<|docstring|>The frequency of the pulses used with the PWM device, in Hz. The default is 100Hz.<|endoftext|>
96d7c5134b6bf485ec2bdc61f47ae3c2dfe749a6af4514a2a1308c2bb02f1146
def blink(self, on_time=1, off_time=1, fade_in_time=0, fade_out_time=0, n=None, background=True): '\n Make the device turn on and off repeatedly.\n\n :param float on_time:\n Number of seconds on. Defaults to 1 second.\n\n :param float off_time:\n Number of seconds off. Defaults to 1 second.\n\n :param float fade_in_time:\n Number of seconds to spend fading in. Defaults to 0.\n\n :param float fade_out_time:\n Number of seconds to spend fading out. Defaults to 0.\n\n :param int n:\n Number of times to blink; ``None`` (the default) means forever.\n\n :param bool background:\n If ``True`` (the default), start a background thread to continue\n blinking and return immediately. If ``False``, only return when the\n blink is finished (warning: the default value of *n* will result in\n this method never returning).\n ' self._stop_blink() self._blink_thread = GPIOThread(target=self._blink_device, args=(on_time, off_time, fade_in_time, fade_out_time, n)) self._blink_thread.start() if (not background): self._blink_thread.join() self._blink_thread = None
Make the device turn on and off repeatedly. :param float on_time: Number of seconds on. Defaults to 1 second. :param float off_time: Number of seconds off. Defaults to 1 second. :param float fade_in_time: Number of seconds to spend fading in. Defaults to 0. :param float fade_out_time: Number of seconds to spend fading out. Defaults to 0. :param int n: Number of times to blink; ``None`` (the default) means forever. :param bool background: If ``True`` (the default), start a background thread to continue blinking and return immediately. If ``False``, only return when the blink is finished (warning: the default value of *n* will result in this method never returning).
gpiozero/output_devices.py
blink
lurch/python-gpiozero
0
python
def blink(self, on_time=1, off_time=1, fade_in_time=0, fade_out_time=0, n=None, background=True): '\n Make the device turn on and off repeatedly.\n\n :param float on_time:\n Number of seconds on. Defaults to 1 second.\n\n :param float off_time:\n Number of seconds off. Defaults to 1 second.\n\n :param float fade_in_time:\n Number of seconds to spend fading in. Defaults to 0.\n\n :param float fade_out_time:\n Number of seconds to spend fading out. Defaults to 0.\n\n :param int n:\n Number of times to blink; ``None`` (the default) means forever.\n\n :param bool background:\n If ``True`` (the default), start a background thread to continue\n blinking and return immediately. If ``False``, only return when the\n blink is finished (warning: the default value of *n* will result in\n this method never returning).\n ' self._stop_blink() self._blink_thread = GPIOThread(target=self._blink_device, args=(on_time, off_time, fade_in_time, fade_out_time, n)) self._blink_thread.start() if (not background): self._blink_thread.join() self._blink_thread = None
def blink(self, on_time=1, off_time=1, fade_in_time=0, fade_out_time=0, n=None, background=True): '\n Make the device turn on and off repeatedly.\n\n :param float on_time:\n Number of seconds on. Defaults to 1 second.\n\n :param float off_time:\n Number of seconds off. Defaults to 1 second.\n\n :param float fade_in_time:\n Number of seconds to spend fading in. Defaults to 0.\n\n :param float fade_out_time:\n Number of seconds to spend fading out. Defaults to 0.\n\n :param int n:\n Number of times to blink; ``None`` (the default) means forever.\n\n :param bool background:\n If ``True`` (the default), start a background thread to continue\n blinking and return immediately. If ``False``, only return when the\n blink is finished (warning: the default value of *n* will result in\n this method never returning).\n ' self._stop_blink() self._blink_thread = GPIOThread(target=self._blink_device, args=(on_time, off_time, fade_in_time, fade_out_time, n)) self._blink_thread.start() if (not background): self._blink_thread.join() self._blink_thread = None<|docstring|>Make the device turn on and off repeatedly. :param float on_time: Number of seconds on. Defaults to 1 second. :param float off_time: Number of seconds off. Defaults to 1 second. :param float fade_in_time: Number of seconds to spend fading in. Defaults to 0. :param float fade_out_time: Number of seconds to spend fading out. Defaults to 0. :param int n: Number of times to blink; ``None`` (the default) means forever. :param bool background: If ``True`` (the default), start a background thread to continue blinking and return immediately. If ``False``, only return when the blink is finished (warning: the default value of *n* will result in this method never returning).<|endoftext|>
cdd1186bcf75b03c499ccde542e196cb3e01ac46d07de638a295b9aab485d5dc
def pulse(self, fade_in_time=1, fade_out_time=1, n=None, background=True): '\n Make the device fade in and out repeatedly.\n\n :param float fade_in_time:\n Number of seconds to spend fading in. Defaults to 1.\n\n :param float fade_out_time:\n Number of seconds to spend fading out. Defaults to 1.\n\n :param int n:\n Number of times to pulse; ``None`` (the default) means forever.\n\n :param bool background:\n If ``True`` (the default), start a background thread to continue\n pulsing and return immediately. If ``False``, only return when the\n pulse is finished (warning: the default value of *n* will result in\n this method never returning).\n ' on_time = off_time = 0 self.blink(on_time, off_time, fade_in_time, fade_out_time, n, background)
Make the device fade in and out repeatedly. :param float fade_in_time: Number of seconds to spend fading in. Defaults to 1. :param float fade_out_time: Number of seconds to spend fading out. Defaults to 1. :param int n: Number of times to pulse; ``None`` (the default) means forever. :param bool background: If ``True`` (the default), start a background thread to continue pulsing and return immediately. If ``False``, only return when the pulse is finished (warning: the default value of *n* will result in this method never returning).
gpiozero/output_devices.py
pulse
lurch/python-gpiozero
0
python
def pulse(self, fade_in_time=1, fade_out_time=1, n=None, background=True): '\n Make the device fade in and out repeatedly.\n\n :param float fade_in_time:\n Number of seconds to spend fading in. Defaults to 1.\n\n :param float fade_out_time:\n Number of seconds to spend fading out. Defaults to 1.\n\n :param int n:\n Number of times to pulse; ``None`` (the default) means forever.\n\n :param bool background:\n If ``True`` (the default), start a background thread to continue\n pulsing and return immediately. If ``False``, only return when the\n pulse is finished (warning: the default value of *n* will result in\n this method never returning).\n ' on_time = off_time = 0 self.blink(on_time, off_time, fade_in_time, fade_out_time, n, background)
def pulse(self, fade_in_time=1, fade_out_time=1, n=None, background=True): '\n Make the device fade in and out repeatedly.\n\n :param float fade_in_time:\n Number of seconds to spend fading in. Defaults to 1.\n\n :param float fade_out_time:\n Number of seconds to spend fading out. Defaults to 1.\n\n :param int n:\n Number of times to pulse; ``None`` (the default) means forever.\n\n :param bool background:\n If ``True`` (the default), start a background thread to continue\n pulsing and return immediately. If ``False``, only return when the\n pulse is finished (warning: the default value of *n* will result in\n this method never returning).\n ' on_time = off_time = 0 self.blink(on_time, off_time, fade_in_time, fade_out_time, n, background)<|docstring|>Make the device fade in and out repeatedly. :param float fade_in_time: Number of seconds to spend fading in. Defaults to 1. :param float fade_out_time: Number of seconds to spend fading out. Defaults to 1. :param int n: Number of times to pulse; ``None`` (the default) means forever. :param bool background: If ``True`` (the default), start a background thread to continue pulsing and return immediately. If ``False``, only return when the pulse is finished (warning: the default value of *n* will result in this method never returning).<|endoftext|>
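Since pulse() is just blink() with zero on and off time, the PWM device fades continuously; a usage sketch (pin and timings illustrative):

from gpiozero import PWMLED

led = PWMLED(17)
led.pulse(fade_in_time=2, fade_out_time=2)  # endless background fade loop
led.blink(on_time=1, off_time=1, fade_in_time=0.5, fade_out_time=0.5,
          n=3, background=False)            # three soft-edged blinks, blocking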
8a233cd1fa58d83b282e33776c734ef5d6d44d844fd6289a066cffdd7d2bb7bb
@property def value(self): '\n Represents the color of the LED as an RGB 3-tuple of ``(red, green,\n blue)`` where each value is between 0 and 1 if ``pwm`` was ``True``\n when the class was constructed (and only 0 or 1 if not).\n\n For example, purple would be ``(1, 0, 1)`` and yellow would be ``(1, 1,\n 0)``, while orange would be ``(1, 0.5, 0)``.\n ' return (self.red, self.green, self.blue)
Represents the color of the LED as an RGB 3-tuple of ``(red, green, blue)`` where each value is between 0 and 1 if ``pwm`` was ``True`` when the class was constructed (and only 0 or 1 if not). For example, purple would be ``(1, 0, 1)`` and yellow would be ``(1, 1, 0)``, while orange would be ``(1, 0.5, 0)``.
gpiozero/output_devices.py
value
lurch/python-gpiozero
0
python
@property def value(self): '\n Represents the color of the LED as an RGB 3-tuple of ``(red, green,\n blue)`` where each value is between 0 and 1 if ``pwm`` was ``True``\n when the class was constructed (and only 0 or 1 if not).\n\n For example, purple would be ``(1, 0, 1)`` and yellow would be ``(1, 1,\n 0)``, while orange would be ``(1, 0.5, 0)``.\n ' return (self.red, self.green, self.blue)
@property def value(self): '\n Represents the color of the LED as an RGB 3-tuple of ``(red, green,\n blue)`` where each value is between 0 and 1 if ``pwm`` was ``True``\n when the class was constructed (and only 0 or 1 if not).\n\n For example, purple would be ``(1, 0, 1)`` and yellow would be ``(1, 1,\n 0)``, while orange would be ``(1, 0.5, 0)``.\n ' return (self.red, self.green, self.blue)<|docstring|>Represents the color of the LED as an RGB 3-tuple of ``(red, green, blue)`` where each value is between 0 and 1 if ``pwm`` was ``True`` when the class was constructed (and only 0 or 1 if not). For example, purple would be ``(1, 0, 1)`` and yellow would be ``(1, 1, 0)``, while orange would be ``(1, 0.5, 0)``.<|endoftext|>
16b607cc81b48b65315aab218ea506402c95c039692689a64f1c5c016bcf5c38
@property def is_active(self): '\n Returns ``True`` if the LED is currently active (not black) and\n ``False`` otherwise.\n ' return (self.value != (0, 0, 0))
Returns ``True`` if the LED is currently active (not black) and ``False`` otherwise.
gpiozero/output_devices.py
is_active
lurch/python-gpiozero
0
python
@property def is_active(self): '\n Returns ``True`` if the LED is currently active (not black) and\n ``False`` otherwise.\n ' return (self.value != (0, 0, 0))
@property def is_active(self): '\n Returns ``True`` if the LED is currently active (not black) and\n ``False`` otherwise.\n ' return (self.value != (0, 0, 0))<|docstring|>Returns ``True`` if the LED is currently active (not black) and ``False`` otherwise.<|endoftext|>
8fea2b74b05ae68b379f2dfa3a4d73f2ef48657516834ac0a1c7bf1bf1928ab0
def on(self): '\n        Turn the LED on. This is equivalent to setting the LED color to white\n        ``(1, 1, 1)``.\n        ' self.value = (1, 1, 1)
Turn the LED on. This is equivalent to setting the LED color to white ``(1, 1, 1)``.
gpiozero/output_devices.py
on
lurch/python-gpiozero
0
python
def on(self): '\n        Turn the LED on. This is equivalent to setting the LED color to white\n        ``(1, 1, 1)``.\n        ' self.value = (1, 1, 1)
def on(self): '\n        Turn the LED on. This is equivalent to setting the LED color to white\n        ``(1, 1, 1)``.\n        ' self.value = (1, 1, 1)<|docstring|>Turn the LED on. This is equivalent to setting the LED color to white ``(1, 1, 1)``.<|endoftext|>
5e0ba0b608f06959baf11927f7877da59c859ae1d40d6c8c0f22c511a1cbf354
def off(self): '\n Turn the LED off. This is equivalent to setting the LED color to black\n ``(0, 0, 0)``.\n ' self.value = (0, 0, 0)
Turn the LED off. This is equivalent to setting the LED color to black ``(0, 0, 0)``.
gpiozero/output_devices.py
off
lurch/python-gpiozero
0
python
def off(self): '\n Turn the LED off. This is equivalent to setting the LED color to black\n ``(0, 0, 0)``.\n ' self.value = (0, 0, 0)
def off(self): '\n Turn the LED off. This is equivalent to setting the LED color to black\n ``(0, 0, 0)``.\n ' self.value = (0, 0, 0)<|docstring|>Turn the LED off. This is equivalent to setting the LED color to black ``(0, 0, 0)``.<|endoftext|>
923566da9952616fc66bb8fbc7262f7a49cfe1317bc63a2e4d3cbb4a554c5cd3
def toggle(self): '\n Toggle the state of the device. If the device is currently off\n (:attr:`value` is ``(0, 0, 0)``), this changes it to "fully" on\n (:attr:`value` is ``(1, 1, 1)``). If the device has a specific color,\n this method inverts the color.\n ' (r, g, b) = self.value self.value = ((1 - r), (1 - g), (1 - b))
Toggle the state of the device. If the device is currently off (:attr:`value` is ``(0, 0, 0)``), this changes it to "fully" on (:attr:`value` is ``(1, 1, 1)``). If the device has a specific color, this method inverts the color.
gpiozero/output_devices.py
toggle
lurch/python-gpiozero
0
python
def toggle(self): '\n Toggle the state of the device. If the device is currently off\n (:attr:`value` is ``(0, 0, 0)``), this changes it to "fully" on\n (:attr:`value` is ``(1, 1, 1)``). If the device has a specific color,\n this method inverts the color.\n ' (r, g, b) = self.value self.value = ((1 - r), (1 - g), (1 - b))
def toggle(self): '\n Toggle the state of the device. If the device is currently off\n (:attr:`value` is ``(0, 0, 0)``), this changes it to "fully" on\n (:attr:`value` is ``(1, 1, 1)``). If the device has a specific color,\n this method inverts the color.\n ' (r, g, b) = self.value self.value = ((1 - r), (1 - g), (1 - b))<|docstring|>Toggle the state of the device. If the device is currently off (:attr:`value` is ``(0, 0, 0)``), this changes it to "fully" on (:attr:`value` is ``(1, 1, 1)``). If the device has a specific color, this method inverts the color.<|endoftext|>
ca25742d1cc67cc3042b3ac92f721a4b05d06caa6f080ebb78bafb1ba4e1d78d
def blink(self, on_time=1, off_time=1, fade_in_time=0, fade_out_time=0, on_color=(1, 1, 1), off_color=(0, 0, 0), n=None, background=True): '\n Make the device turn on and off repeatedly.\n\n :param float on_time:\n Number of seconds on. Defaults to 1 second.\n\n :param float off_time:\n Number of seconds off. Defaults to 1 second.\n\n :param float fade_in_time:\n Number of seconds to spend fading in. Defaults to 0. Must be 0 if\n ``pwm`` was ``False`` when the class was constructed\n (:exc:`ValueError` will be raised if not).\n\n :param float fade_out_time:\n Number of seconds to spend fading out. Defaults to 0. Must be 0 if\n ``pwm`` was ``False`` when the class was constructed\n (:exc:`ValueError` will be raised if not).\n\n :param tuple on_color:\n The color to use when the LED is "on". Defaults to white.\n\n :param tuple off_color:\n The color to use when the LED is "off". Defaults to black.\n\n :param int n:\n Number of times to blink; ``None`` (the default) means forever.\n\n :param bool background:\n If ``True`` (the default), start a background thread to continue\n blinking and return immediately. If ``False``, only return when the\n blink is finished (warning: the default value of *n* will result in\n this method never returning).\n ' if isinstance(self._leds[0], LED): if fade_in_time: raise ValueError('fade_in_time must be 0 with non-PWM RGBLEDs') if fade_out_time: raise ValueError('fade_out_time must be 0 with non-PWM RGBLEDs') self._stop_blink() self._blink_thread = GPIOThread(target=self._blink_device, args=(on_time, off_time, fade_in_time, fade_out_time, on_color, off_color, n)) self._blink_thread.start() if (not background): self._blink_thread.join() self._blink_thread = None
Make the device turn on and off repeatedly. :param float on_time: Number of seconds on. Defaults to 1 second. :param float off_time: Number of seconds off. Defaults to 1 second. :param float fade_in_time: Number of seconds to spend fading in. Defaults to 0. Must be 0 if ``pwm`` was ``False`` when the class was constructed (:exc:`ValueError` will be raised if not). :param float fade_out_time: Number of seconds to spend fading out. Defaults to 0. Must be 0 if ``pwm`` was ``False`` when the class was constructed (:exc:`ValueError` will be raised if not). :param tuple on_color: The color to use when the LED is "on". Defaults to white. :param tuple off_color: The color to use when the LED is "off". Defaults to black. :param int n: Number of times to blink; ``None`` (the default) means forever. :param bool background: If ``True`` (the default), start a background thread to continue blinking and return immediately. If ``False``, only return when the blink is finished (warning: the default value of *n* will result in this method never returning).
gpiozero/output_devices.py
blink
lurch/python-gpiozero
0
python
def blink(self, on_time=1, off_time=1, fade_in_time=0, fade_out_time=0, on_color=(1, 1, 1), off_color=(0, 0, 0), n=None, background=True): '\n Make the device turn on and off repeatedly.\n\n :param float on_time:\n Number of seconds on. Defaults to 1 second.\n\n :param float off_time:\n Number of seconds off. Defaults to 1 second.\n\n :param float fade_in_time:\n Number of seconds to spend fading in. Defaults to 0. Must be 0 if\n ``pwm`` was ``False`` when the class was constructed\n (:exc:`ValueError` will be raised if not).\n\n :param float fade_out_time:\n Number of seconds to spend fading out. Defaults to 0. Must be 0 if\n ``pwm`` was ``False`` when the class was constructed\n (:exc:`ValueError` will be raised if not).\n\n :param tuple on_color:\n The color to use when the LED is "on". Defaults to white.\n\n :param tuple off_color:\n The color to use when the LED is "off". Defaults to black.\n\n :param int n:\n Number of times to blink; ``None`` (the default) means forever.\n\n :param bool background:\n If ``True`` (the default), start a background thread to continue\n blinking and return immediately. If ``False``, only return when the\n blink is finished (warning: the default value of *n* will result in\n this method never returning).\n ' if isinstance(self._leds[0], LED): if fade_in_time: raise ValueError('fade_in_time must be 0 with non-PWM RGBLEDs') if fade_out_time: raise ValueError('fade_out_time must be 0 with non-PWM RGBLEDs') self._stop_blink() self._blink_thread = GPIOThread(target=self._blink_device, args=(on_time, off_time, fade_in_time, fade_out_time, on_color, off_color, n)) self._blink_thread.start() if (not background): self._blink_thread.join() self._blink_thread = None
def blink(self, on_time=1, off_time=1, fade_in_time=0, fade_out_time=0, on_color=(1, 1, 1), off_color=(0, 0, 0), n=None, background=True): '\n Make the device turn on and off repeatedly.\n\n :param float on_time:\n Number of seconds on. Defaults to 1 second.\n\n :param float off_time:\n Number of seconds off. Defaults to 1 second.\n\n :param float fade_in_time:\n Number of seconds to spend fading in. Defaults to 0. Must be 0 if\n ``pwm`` was ``False`` when the class was constructed\n (:exc:`ValueError` will be raised if not).\n\n :param float fade_out_time:\n Number of seconds to spend fading out. Defaults to 0. Must be 0 if\n ``pwm`` was ``False`` when the class was constructed\n (:exc:`ValueError` will be raised if not).\n\n :param tuple on_color:\n The color to use when the LED is "on". Defaults to white.\n\n :param tuple off_color:\n The color to use when the LED is "off". Defaults to black.\n\n :param int n:\n Number of times to blink; ``None`` (the default) means forever.\n\n :param bool background:\n If ``True`` (the default), start a background thread to continue\n blinking and return immediately. If ``False``, only return when the\n blink is finished (warning: the default value of *n* will result in\n this method never returning).\n ' if isinstance(self._leds[0], LED): if fade_in_time: raise ValueError('fade_in_time must be 0 with non-PWM RGBLEDs') if fade_out_time: raise ValueError('fade_out_time must be 0 with non-PWM RGBLEDs') self._stop_blink() self._blink_thread = GPIOThread(target=self._blink_device, args=(on_time, off_time, fade_in_time, fade_out_time, on_color, off_color, n)) self._blink_thread.start() if (not background): self._blink_thread.join() self._blink_thread = None<|docstring|>Make the device turn on and off repeatedly. :param float on_time: Number of seconds on. Defaults to 1 second. :param float off_time: Number of seconds off. Defaults to 1 second. :param float fade_in_time: Number of seconds to spend fading in. Defaults to 0. Must be 0 if ``pwm`` was ``False`` when the class was constructed (:exc:`ValueError` will be raised if not). :param float fade_out_time: Number of seconds to spend fading out. Defaults to 0. Must be 0 if ``pwm`` was ``False`` when the class was constructed (:exc:`ValueError` will be raised if not). :param tuple on_color: The color to use when the LED is "on". Defaults to white. :param tuple off_color: The color to use when the LED is "off". Defaults to black. :param int n: Number of times to blink; ``None`` (the default) means forever. :param bool background: If ``True`` (the default), start a background thread to continue blinking and return immediately. If ``False``, only return when the blink is finished (warning: the default value of *n* will result in this method never returning).<|endoftext|>
db93cced240802ef8b77fdcd0fe6a4392aa787ac711161a838d0172240d777c3
def pulse(self, fade_in_time=1, fade_out_time=1, on_color=(1, 1, 1), off_color=(0, 0, 0), n=None, background=True): '\n Make the device fade in and out repeatedly.\n\n :param float fade_in_time:\n Number of seconds to spend fading in. Defaults to 1.\n\n :param float fade_out_time:\n Number of seconds to spend fading out. Defaults to 1.\n\n :param tuple on_color:\n The color to use when the LED is "on". Defaults to white.\n\n :param tuple off_color:\n The color to use when the LED is "off". Defaults to black.\n\n :param int n:\n Number of times to pulse; ``None`` (the default) means forever.\n\n :param bool background:\n If ``True`` (the default), start a background thread to continue\n pulsing and return immediately. If ``False``, only return when the\n pulse is finished (warning: the default value of *n* will result in\n this method never returning).\n ' on_time = off_time = 0 self.blink(on_time, off_time, fade_in_time, fade_out_time, on_color, off_color, n, background)
Make the device fade in and out repeatedly. :param float fade_in_time: Number of seconds to spend fading in. Defaults to 1. :param float fade_out_time: Number of seconds to spend fading out. Defaults to 1. :param tuple on_color: The color to use when the LED is "on". Defaults to white. :param tuple off_color: The color to use when the LED is "off". Defaults to black. :param int n: Number of times to pulse; ``None`` (the default) means forever. :param bool background: If ``True`` (the default), start a background thread to continue pulsing and return immediately. If ``False``, only return when the pulse is finished (warning: the default value of *n* will result in this method never returning).
gpiozero/output_devices.py
pulse
lurch/python-gpiozero
0
python
def pulse(self, fade_in_time=1, fade_out_time=1, on_color=(1, 1, 1), off_color=(0, 0, 0), n=None, background=True): '\n Make the device fade in and out repeatedly.\n\n :param float fade_in_time:\n Number of seconds to spend fading in. Defaults to 1.\n\n :param float fade_out_time:\n Number of seconds to spend fading out. Defaults to 1.\n\n :param tuple on_color:\n The color to use when the LED is "on". Defaults to white.\n\n :param tuple off_color:\n The color to use when the LED is "off". Defaults to black.\n\n :param int n:\n Number of times to pulse; ``None`` (the default) means forever.\n\n :param bool background:\n If ``True`` (the default), start a background thread to continue\n pulsing and return immediately. If ``False``, only return when the\n pulse is finished (warning: the default value of *n* will result in\n this method never returning).\n ' on_time = off_time = 0 self.blink(on_time, off_time, fade_in_time, fade_out_time, on_color, off_color, n, background)
def pulse(self, fade_in_time=1, fade_out_time=1, on_color=(1, 1, 1), off_color=(0, 0, 0), n=None, background=True): '\n Make the device fade in and out repeatedly.\n\n :param float fade_in_time:\n Number of seconds to spend fading in. Defaults to 1.\n\n :param float fade_out_time:\n Number of seconds to spend fading out. Defaults to 1.\n\n :param tuple on_color:\n The color to use when the LED is "on". Defaults to white.\n\n :param tuple off_color:\n The color to use when the LED is "off". Defaults to black.\n\n :param int n:\n Number of times to pulse; ``None`` (the default) means forever.\n\n :param bool background:\n If ``True`` (the default), start a background thread to continue\n pulsing and return immediately. If ``False``, only return when the\n pulse is finished (warning: the default value of *n* will result in\n this method never returning).\n ' on_time = off_time = 0 self.blink(on_time, off_time, fade_in_time, fade_out_time, on_color, off_color, n, background)<|docstring|>Make the device fade in and out repeatedly. :param float fade_in_time: Number of seconds to spend fading in. Defaults to 1. :param float fade_out_time: Number of seconds to spend fading out. Defaults to 1. :param tuple on_color: The color to use when the LED is "on". Defaults to white. :param tuple off_color: The color to use when the LED is "off". Defaults to black. :param int n: Number of times to pulse; ``None`` (the default) means forever. :param bool background: If ``True`` (the default), start a background thread to continue pulsing and return immediately. If ``False``, only return when the pulse is finished (warning: the default value of *n* will result in this method never returning).<|endoftext|>
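A minimal usage sketch tying together the off/toggle/blink/pulse records above; the pin numbers and timings are illustrative assumptions, not values from the source repository:

    from gpiozero import RGBLED

    led = RGBLED(red=9, green=10, blue=11)  # PWM-capable channels by default
    led.value = (1, 0, 0)                   # full red
    led.toggle()                            # inverts each channel -> (0, 1, 1), cyan
    # Blocks until the three green blinks finish (background=False):
    led.blink(on_time=0.5, off_time=0.5, on_color=(0, 1, 0), n=3, background=False)
    # pulse() is blink() with zero on/off time and non-zero fade times:
    led.pulse(fade_in_time=1, fade_out_time=1, n=2, background=False)
    led.off()                               # same as led.value = (0, 0, 0)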
4c5a2b604c96344f7690346cdf699b51a8b2ca144be24dc616e0ac1b810c6431
@property def value(self): '\n Represents the speed of the motor as a floating point value between -1\n (full speed backward) and 1 (full speed forward), with 0 representing\n stopped.\n ' return (self.forward_device.value - self.backward_device.value)
Represents the speed of the motor as a floating point value between -1 (full speed backward) and 1 (full speed forward), with 0 representing stopped.
gpiozero/output_devices.py
value
lurch/python-gpiozero
0
python
@property def value(self): '\n Represents the speed of the motor as a floating point value between -1\n (full speed backward) and 1 (full speed forward), with 0 representing\n stopped.\n ' return (self.forward_device.value - self.backward_device.value)
@property def value(self): '\n Represents the speed of the motor as a floating point value between -1\n (full speed backward) and 1 (full speed forward), with 0 representing\n stopped.\n ' return (self.forward_device.value - self.backward_device.value)<|docstring|>Represents the speed of the motor as a floating point value between -1 (full speed backward) and 1 (full speed forward), with 0 representing stopped.<|endoftext|>
9c3c522d7fe9009b86cdffa7fb58d5ea6caf9a1444a76311a7c908164827d2a8
@property def is_active(self): '\n Returns ``True`` if the motor is currently running and ``False``\n otherwise.\n ' return (self.value != 0)
Returns ``True`` if the motor is currently running and ``False`` otherwise.
gpiozero/output_devices.py
is_active
lurch/python-gpiozero
0
python
@property def is_active(self): '\n Returns ``True`` if the motor is currently running and ``False``\n otherwise.\n ' return (self.value != 0)
@property def is_active(self): '\n Returns ``True`` if the motor is currently running and ``False``\n otherwise.\n ' return (self.value != 0)<|docstring|>Returns ``True`` if the motor is currently running and ``False`` otherwise.<|endoftext|>
4d6deef7e2b0183c8da46c3a35021b7316b4c6121563c863e291163454319503
def forward(self, speed=1): '\n Drive the motor forwards.\n\n :param float speed:\n The speed at which the motor should turn. Can be any value between\n 0 (stopped) and the default 1 (maximum speed) if ``pwm`` was\n ``True`` when the class was constructed (and only 0 or 1 if not).\n ' if (not (0 <= speed <= 1)): raise ValueError('forward speed must be between 0 and 1') if isinstance(self.forward_device, DigitalOutputDevice): if (speed not in (0, 1)): raise ValueError('forward speed must be 0 or 1 with non-PWM Motors') self.backward_device.off() self.forward_device.value = speed
Drive the motor forwards. :param float speed: The speed at which the motor should turn. Can be any value between 0 (stopped) and the default 1 (maximum speed) if ``pwm`` was ``True`` when the class was constructed (and only 0 or 1 if not).
gpiozero/output_devices.py
forward
lurch/python-gpiozero
0
python
def forward(self, speed=1): '\n Drive the motor forwards.\n\n :param float speed:\n The speed at which the motor should turn. Can be any value between\n 0 (stopped) and the default 1 (maximum speed) if ``pwm`` was\n ``True`` when the class was constructed (and only 0 or 1 if not).\n ' if (not (0 <= speed <= 1)): raise ValueError('forward speed must be between 0 and 1') if isinstance(self.forward_device, DigitalOutputDevice): if (speed not in (0, 1)): raise ValueError('forward speed must be 0 or 1 with non-PWM Motors') self.backward_device.off() self.forward_device.value = speed
def forward(self, speed=1): '\n Drive the motor forwards.\n\n :param float speed:\n The speed at which the motor should turn. Can be any value between\n 0 (stopped) and the default 1 (maximum speed) if ``pwm`` was\n ``True`` when the class was constructed (and only 0 or 1 if not).\n ' if (not (0 <= speed <= 1)): raise ValueError('forward speed must be between 0 and 1') if isinstance(self.forward_device, DigitalOutputDevice): if (speed not in (0, 1)): raise ValueError('forward speed must be 0 or 1 with non-PWM Motors') self.backward_device.off() self.forward_device.value = speed<|docstring|>Drive the motor forwards. :param float speed: The speed at which the motor should turn. Can be any value between 0 (stopped) and the default 1 (maximum speed) if ``pwm`` was ``True`` when the class was constructed (and only 0 or 1 if not).<|endoftext|>
1605a5ddb5bd6a4c809686d017a97226c67f0d4678acb4c394cfa0750f72da1d
def backward(self, speed=1): '\n Drive the motor backwards.\n\n :param float speed:\n The speed at which the motor should turn. Can be any value between\n 0 (stopped) and the default 1 (maximum speed) if ``pwm`` was\n ``True`` when the class was constructed (and only 0 or 1 if not).\n ' if (not (0 <= speed <= 1)): raise ValueError('backward speed must be between 0 and 1') if isinstance(self.backward_device, DigitalOutputDevice): if (speed not in (0, 1)): raise ValueError('backward speed must be 0 or 1 with non-PWM Motors') self.forward_device.off() self.backward_device.value = speed
Drive the motor backwards. :param float speed: The speed at which the motor should turn. Can be any value between 0 (stopped) and the default 1 (maximum speed) if ``pwm`` was ``True`` when the class was constructed (and only 0 or 1 if not).
gpiozero/output_devices.py
backward
lurch/python-gpiozero
0
python
def backward(self, speed=1): '\n Drive the motor backwards.\n\n :param float speed:\n The speed at which the motor should turn. Can be any value between\n 0 (stopped) and the default 1 (maximum speed) if ``pwm`` was\n ``True`` when the class was constructed (and only 0 or 1 if not).\n ' if (not (0 <= speed <= 1)): raise ValueError('backward speed must be between 0 and 1') if isinstance(self.backward_device, DigitalOutputDevice): if (speed not in (0, 1)): raise ValueError('backward speed must be 0 or 1 with non-PWM Motors') self.forward_device.off() self.backward_device.value = speed
def backward(self, speed=1): '\n Drive the motor backwards.\n\n :param float speed:\n The speed at which the motor should turn. Can be any value between\n 0 (stopped) and the default 1 (maximum speed) if ``pwm`` was\n ``True`` when the class was constructed (and only 0 or 1 if not).\n ' if (not (0 <= speed <= 1)): raise ValueError('backward speed must be between 0 and 1') if isinstance(self.backward_device, DigitalOutputDevice): if (speed not in (0, 1)): raise ValueError('backward speed must be 0 or 1 with non-PWM Motors') self.forward_device.off() self.backward_device.value = speed<|docstring|>Drive the motor backwards. :param float speed: The speed at which the motor should turn. Can be any value between 0 (stopped) and the default 1 (maximum speed) if ``pwm`` was ``True`` when the class was constructed (and only 0 or 1 if not).<|endoftext|>
2139cfd07c83439c71e7c6649e92daf722ae03e631b81e483d6c7a0f4061e438
def reverse(self): "\n Reverse the current direction of the motor. If the motor is currently\n idle this does nothing. Otherwise, the motor's direction will be\n reversed at the current speed.\n " self.value = (- self.value)
Reverse the current direction of the motor. If the motor is currently idle this does nothing. Otherwise, the motor's direction will be reversed at the current speed.
gpiozero/output_devices.py
reverse
lurch/python-gpiozero
0
python
def reverse(self): "\n Reverse the current direction of the motor. If the motor is currently\n idle this does nothing. Otherwise, the motor's direction will be\n reversed at the current speed.\n " self.value = (- self.value)
def reverse(self): "\n Reverse the current direction of the motor. If the motor is currently\n idle this does nothing. Otherwise, the motor's direction will be\n reversed at the current speed.\n " self.value = (- self.value)<|docstring|>Reverse the current direction of the motor. If the motor is currently idle this does nothing. Otherwise, the motor's direction will be reversed at the current speed.<|endoftext|>
35d639d8b86f1328c7e86df48f9f9a2831bc8cc095e4bf3fa495fbb90e9beb23
def stop(self): '\n Stop the motor.\n ' self.forward_device.off() self.backward_device.off()
Stop the motor.
gpiozero/output_devices.py
stop
lurch/python-gpiozero
0
python
def stop(self): '\n \n ' self.forward_device.off() self.backward_device.off()
def stop(self): '\n \n ' self.forward_device.off() self.backward_device.off()<|docstring|>Stop the motor.<|endoftext|>
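The five Motor records above compose into the following hedged sketch; the pins and speeds are assumed for illustration:

    from gpiozero import Motor

    motor = Motor(forward=4, backward=14)  # pwm=True by default
    motor.forward(0.5)        # value == +0.5
    motor.reverse()           # value == -0.5: same speed, opposite direction
    print(motor.is_active)    # True while value != 0
    motor.stop()              # both drive devices off, value == 0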
7575dd5cb669b4029dd7d93f02784a3eb9812d3a83cc0df23fb0b8ead5cce6af
@property def frame_width(self): '\n The time between control pulses, measured in seconds.\n ' return self._frame_width
The time between control pulses, measured in seconds.
gpiozero/output_devices.py
frame_width
lurch/python-gpiozero
0
python
@property def frame_width(self): '\n \n ' return self._frame_width
@property def frame_width(self): '\n \n ' return self._frame_width<|docstring|>The time between control pulses, measured in seconds.<|endoftext|>
146081473f58777ffd536f10d4a5948d98210eb41abaed75237d68f45977d6e0
@property def min_pulse_width(self): "\n The control pulse width corresponding to the servo's minimum position,\n measured in seconds.\n " return (self._min_dc * self.frame_width)
The control pulse width corresponding to the servo's minimum position, measured in seconds.
gpiozero/output_devices.py
min_pulse_width
lurch/python-gpiozero
0
python
@property def min_pulse_width(self): "\n The control pulse width corresponding to the servo's minimum position,\n measured in seconds.\n " return (self._min_dc * self.frame_width)
@property def min_pulse_width(self): "\n The control pulse width corresponding to the servo's minimum position,\n measured in seconds.\n " return (self._min_dc * self.frame_width)<|docstring|>The control pulse width corresponding to the servo's minimum position, measured in seconds.<|endoftext|>
a94f00be4208aa4e6c16ed88af80f2257af4550169c6dd09012f292c5a640a74
@property def max_pulse_width(self): "\n The control pulse width corresponding to the servo's maximum position,\n measured in seconds.\n " return ((self._dc_range * self.frame_width) + self.min_pulse_width)
The control pulse width corresponding to the servo's maximum position, measured in seconds.
gpiozero/output_devices.py
max_pulse_width
lurch/python-gpiozero
0
python
@property def max_pulse_width(self): "\n The control pulse width corresponding to the servo's maximum position,\n measured in seconds.\n " return ((self._dc_range * self.frame_width) + self.min_pulse_width)
@property def max_pulse_width(self): "\n The control pulse width corresponding to the servo's maximum position,\n measured in seconds.\n " return ((self._dc_range * self.frame_width) + self.min_pulse_width)<|docstring|>The control pulse width corresponding to the servo's maximum position, measured in seconds.<|endoftext|>
0d945e4237d730290113d6831988ee32592b502691e575e114345b0059a5d877
@property def pulse_width(self): '\n Returns the current pulse width controlling the servo.\n ' if (self.pwm_device.pin.frequency is None): return None else: return (self.pwm_device.pin.state * self.frame_width)
Returns the current pulse width controlling the servo.
gpiozero/output_devices.py
pulse_width
lurch/python-gpiozero
0
python
@property def pulse_width(self): '\n \n ' if (self.pwm_device.pin.frequency is None): return None else: return (self.pwm_device.pin.state * self.frame_width)
@property def pulse_width(self): '\n \n ' if (self.pwm_device.pin.frequency is None): return None else: return (self.pwm_device.pin.state * self.frame_width)<|docstring|>Returns the current pulse width controlling the servo.<|endoftext|>
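The three pulse-width properties above are linear in frame_width. A quick numeric check with typical hobby-servo figures (assumed, not taken from the records):

    frame_width = 20e-3      # 50 Hz control frame
    min_pulse_width = 1e-3   # typical servo minimum
    max_pulse_width = 2e-3   # typical servo maximum

    _min_dc = min_pulse_width / frame_width                        # 0.05
    _dc_range = (max_pulse_width - min_pulse_width) / frame_width  # 0.05

    # Inverting the property definitions recovers the inputs:
    assert abs(_min_dc * frame_width - min_pulse_width) < 1e-12
    assert abs(_dc_range * frame_width + min_pulse_width - max_pulse_width) < 1e-12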
90474062eaaab6393055369a0083c4fec2d97f951acb850f4b0cae301dcf2a8c
def min(self): '\n Set the servo to its minimum position.\n ' self.value = (- 1)
Set the servo to its minimum position.
gpiozero/output_devices.py
min
lurch/python-gpiozero
0
python
def min(self): '\n \n ' self.value = (- 1)
def min(self): '\n \n ' self.value = (- 1)<|docstring|>Set the servo to its minimum position.<|endoftext|>
266b0345e71b99fb05e599b80ef09a39b7e5918bf99dc68a0c4cc3eb4b0233ec
def mid(self): '\n Set the servo to its mid-point position.\n ' self.value = 0
Set the servo to its mid-point position.
gpiozero/output_devices.py
mid
lurch/python-gpiozero
0
python
def mid(self): '\n \n ' self.value = 0
def mid(self): '\n \n ' self.value = 0<|docstring|>Set the servo to its mid-point position.<|endoftext|>
95db6bb633030cc35777a616333515b83ae9599f148feef39ed694069e73fd0b
def max(self): '\n Set the servo to its maximum position.\n ' self.value = 1
Set the servo to its maximum position.
gpiozero/output_devices.py
max
lurch/python-gpiozero
0
python
def max(self): '\n \n ' self.value = 1
def max(self): '\n \n ' self.value = 1<|docstring|>Set the servo to its maximum position.<|endoftext|>
471e4322d224bdf3d917ee015c85657aee9f0de601010ebeabe5632952d80430
def detach(self): '\n Temporarily disable control of the servo. This is equivalent to\n setting :attr:`value` to ``None``.\n ' self.value = None
Temporarily disable control of the servo. This is equivalent to setting :attr:`value` to ``None``.
gpiozero/output_devices.py
detach
lurch/python-gpiozero
0
python
def detach(self): '\n Temporarily disable control of the servo. This is equivalent to\n setting :attr:`value` to ``None``.\n ' self.value = None
def detach(self): '\n Temporarily disable control of the servo. This is equivalent to\n setting :attr:`value` to ``None``.\n ' self.value = None<|docstring|>Temporarily disable control of the servo. This is equivalent to setting :attr:`value` to ``None``.<|endoftext|>
b59fa5873924e2ac1a7166a58de9d78c0cfbff6781d2563cd221154aa486943d
@property def value(self): '\n Represents the position of the servo as a value between -1 (the minimum\n position) and +1 (the maximum position). This can also be the special\n value ``None`` indicating that the servo is currently "uncontrolled",\n i.e. that no control signal is being sent. Typically this means the\n servo\'s position remains unchanged, but that it can be moved by hand.\n ' result = self._get_value() if (result is None): return result else: return round(result, 14)
Represents the position of the servo as a value between -1 (the minimum position) and +1 (the maximum position). This can also be the special value ``None`` indicating that the servo is currently "uncontrolled", i.e. that no control signal is being sent. Typically this means the servo's position remains unchanged, but that it can be moved by hand.
gpiozero/output_devices.py
value
lurch/python-gpiozero
0
python
@property def value(self): '\n Represents the position of the servo as a value between -1 (the minimum\n position) and +1 (the maximum position). This can also be the special\n value ``None`` indicating that the servo is currently "uncontrolled",\n i.e. that no control signal is being sent. Typically this means the\n servo\'s position remains unchanged, but that it can be moved by hand.\n ' result = self._get_value() if (result is None): return result else: return round(result, 14)
@property def value(self): '\n Represents the position of the servo as a value between -1 (the minimum\n position) and +1 (the maximum position). This can also be the special\n value ``None`` indicating that the servo is currently "uncontrolled",\n i.e. that no control signal is being sent. Typically this means the\n servo\'s position remains unchanged, but that it can be moved by hand.\n ' result = self._get_value() if (result is None): return result else: return round(result, 14)<|docstring|>Represents the position of the servo as a value between -1 (the minimum position) and +1 (the maximum position). This can also be the special value ``None`` indicating that the servo is currently "uncontrolled", i.e. that no control signal is being sent. Typically this means the servo's position remains unchanged, but that it can be moved by hand.<|endoftext|>
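A short Servo positioning sketch covering the min/mid/max/detach/value records above; the pin number is an assumption:

    from gpiozero import Servo

    servo = Servo(17)
    servo.min()      # value == -1, minimum position
    servo.mid()      # value ==  0, mid-point
    servo.max()      # value == +1, maximum position
    servo.detach()   # value is None: no control pulses, servo can be moved by hand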
c656e676edb000b41109a7f17f9e2a9b1ca63fe59af28e8f15b658656bf0bab1
@property def min_angle(self): '\n The minimum angle that the servo will rotate to when :meth:`min` is\n called.\n ' return self._min_angle
The minimum angle that the servo will rotate to when :meth:`min` is called.
gpiozero/output_devices.py
min_angle
lurch/python-gpiozero
0
python
@property def min_angle(self): '\n The minimum angle that the servo will rotate to when :meth:`min` is\n called.\n ' return self._min_angle
@property def min_angle(self): '\n The minimum angle that the servo will rotate to when :meth:`min` is\n called.\n ' return self._min_angle<|docstring|>The minimum angle that the servo will rotate to when :meth:`min` is called.<|endoftext|>
a4c3e605b60bed82e1d6d2b38fcbd7d64482d63699bbf5dce19737cb01b5ccde
@property def max_angle(self): '\n The maximum angle that the servo will rotate to when :meth:`max` is\n called.\n ' return (self._min_angle + self._angular_range)
The maximum angle that the servo will rotate to when :meth:`max` is called.
gpiozero/output_devices.py
max_angle
lurch/python-gpiozero
0
python
@property def max_angle(self): '\n The maximum angle that the servo will rotate to when :meth:`max` is\n called.\n ' return (self._min_angle + self._angular_range)
@property def max_angle(self): '\n The maximum angle that the servo will rotate to when :meth:`max` is\n called.\n ' return (self._min_angle + self._angular_range)<|docstring|>The maximum angle that the servo will rotate to when :meth:`max` is called.<|endoftext|>
4387e04ddfca88ab10bd1d6f83eb6c5f18b1c5623e34e925311bcc4a86755a1a
@property def angle(self): '\n The position of the servo as an angle measured in degrees. This will\n only be accurate if *min_angle* and *max_angle* have been set\n appropriately in the constructor.\n\n This can also be the special value ``None`` indicating that the servo\n is currently "uncontrolled", i.e. that no control signal is being sent.\n Typically this means the servo\'s position remains unchanged, but that\n it can be moved by hand.\n ' result = self._get_value() if (result is None): return None else: return round(((self._angular_range * ((result - self._min_value) / self._value_range)) + self._min_angle), 12)
The position of the servo as an angle measured in degrees. This will only be accurate if *min_angle* and *max_angle* have been set appropriately in the constructor. This can also be the special value ``None`` indicating that the servo is currently "uncontrolled", i.e. that no control signal is being sent. Typically this means the servo's position remains unchanged, but that it can be moved by hand.
gpiozero/output_devices.py
angle
lurch/python-gpiozero
0
python
@property def angle(self): '\n The position of the servo as an angle measured in degrees. This will\n only be accurate if *min_angle* and *max_angle* have been set\n appropriately in the constructor.\n\n This can also be the special value ``None`` indicating that the servo\n is currently "uncontrolled", i.e. that no control signal is being sent.\n Typically this means the servo\'s position remains unchanged, but that\n it can be moved by hand.\n ' result = self._get_value() if (result is None): return None else: return round(((self._angular_range * ((result - self._min_value) / self._value_range)) + self._min_angle), 12)
@property def angle(self): '\n The position of the servo as an angle measured in degrees. This will\n only be accurate if *min_angle* and *max_angle* have been set\n appropriately in the constructor.\n\n This can also be the special value ``None`` indicating that the servo\n is currently "uncontrolled", i.e. that no control signal is being sent.\n Typically this means the servo\'s position remains unchanged, but that\n it can be moved by hand.\n ' result = self._get_value() if (result is None): return None else: return round(((self._angular_range * ((result - self._min_value) / self._value_range)) + self._min_angle), 12)<|docstring|>The position of the servo as an angle measured in degrees. This will only be accurate if *min_angle* and *max_angle* have been set appropriately in the constructor. This can also be the special value ``None`` indicating that the servo is currently "uncontrolled", i.e. that no control signal is being sent. Typically this means the servo's position remains unchanged, but that it can be moved by hand.<|endoftext|>
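The angle property is a linear map from value onto [min_angle, max_angle]. A standalone sketch of that map with assumed AngularServo-style bounds (the private attributes are reconstructed here for illustration only):

    min_angle, max_angle = -90, 90
    min_value, value_range = -1, 2         # value spans [-1, +1]
    angular_range = max_angle - min_angle  # 180 degrees

    def value_to_angle(value):
        # Same linear map as the angle property above.
        return angular_range * (value - min_value) / value_range + min_angle

    assert value_to_angle(-1) == -90
    assert value_to_angle(0) == 0
    assert value_to_angle(1) == 90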
0c6bbe42f4a49d1979a3eaf66b50bf19eec214b7f88c5931c57b650fa665b0e7
@pytest.fixture(scope='function') def app(request): 'Provide instance for basic Flask app.' app = flask.Flask(__name__) app.config['TESTING'] = True app.config['DEBUG'] = True return app
Provide instance for basic Flask app.
tests/test_flask_pushjack.py
app
bugzzbunny007/flask-pushjack
74
python
@pytest.fixture(scope='function') def app(request): app = flask.Flask(__name__) app.config['TESTING'] = True app.config['DEBUG'] = True return app
@pytest.fixture(scope='function') def app(request): app = flask.Flask(__name__) app.config['TESTING'] = True app.config['DEBUG'] = True return app<|docstring|>Provide instance for basic Flask app.<|endoftext|>
b5429ef3eaa9eda0bbf4c8858a6ee6ce9a783187a60706228444beb09b321fc4
def setUp(self): 'Initializes the environment for each test.' self._db = sqlite3.connect(':memory:') self.add_data_into_db(self._db, random.randint(100, 10000))
Initializes the environment for each test.
pycast/tests/timeseriesdatabasetest.py
setUp
yuvaraja2303/pycast
76
python
def setUp(self): self._db = sqlite3.connect(':memory:') self.add_data_into_db(self._db, random.randint(100, 10000))
def setUp(self): self._db = sqlite3.connect(':memory:') self.add_data_into_db(self._db, random.randint(100, 10000))<|docstring|>Initializes the environment for each test.<|endoftext|>
23569cbd3b83cb9ce15ff35766dfb17329df3c30e8b46f9bf9c8e7a906e9ab1c
def tearDown(self): 'This function gets called after each test function.' self._db.close() del self._db
This function gets called after each test function.
pycast/tests/timeseriesdatabasetest.py
tearDown
yuvaraja2303/pycast
76
python
def tearDown(self): self._db.close() del self._db
def tearDown(self): self._db.close() del self._db<|docstring|>This function gets called after each test function.<|endoftext|>
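The setUp/tearDown pair above is the standard in-memory-SQLite fixture pattern; a stripped-down standalone sketch:

    import sqlite3
    import unittest

    class InMemoryDbTest(unittest.TestCase):
        def setUp(self):
            # Fresh private database per test; nothing touches disk.
            self._db = sqlite3.connect(':memory:')

        def tearDown(self):
            self._db.close()
            del self._db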
a111f570ced226d3e731c5f32d96fae6ab6a24f80187469ec60624ed385b92f9
def add_data_into_db(self, database, numberOfTuples): 'Inserts a numberOfTuples tuples into the given database.\n\n This automatically creates a table called TestTable with the following schema:\n timestamp REAL\n value REAL\n junk_one REAL\n junk_two TEXT\n\n The time stamps will be inserted as an ordered sequence.\n\n @param database dbapi2.connection Instance for the used database.\n @param numberOfTuples Number of tuples that have to be created.\n ' cur = database.cursor() cur.execute('\n CREATE TABLE TestTable(\n timestamp REAL,\n value REAL,\n junk_one REAL,\n junk_two TEXT\n )\n ') database.commit() timestamp = 0 junk_two = ['test'] tuples = [] append = tuples.append for item in xrange(numberOfTuples): timestamp += random.random() value = (random.random() * 1000) junkOne = random.random() junkTwo = random.choice(junk_two) append([timestamp, value, junkOne, junkTwo]) cur.executemany('INSERT INTO TestTable VALUES (?,?,?,?)', tuples) database.commit()
Inserts a numberOfTuples tuples into the given database. This automatically creates a table called TestTable with the following schema: timestamp REAL value REAL junk_one REAL junk_two TEXT The time stamps will be inserted as an ordered sequence. @param database dbapi2.connection Instance for the used database. @param numberOfTuples Number of tuples that have to be created.
pycast/tests/timeseriesdatabasetest.py
add_data_into_db
yuvaraja2303/pycast
76
python
def add_data_into_db(self, database, numberOfTuples): 'Inserts a numberOfTuples tuples into the given database.\n\n This automatically creates a table called TestTable with the following schema:\n timestamp REAL\n value REAL\n junk_one REAL\n junk_two TEXT\n\n The time stamps will be inserted as an ordered sequence.\n\n @param database dbapi2.connection Instance for the used database.\n @param numberOfTuples Number of tuples that have to be created.\n ' cur = database.cursor() cur.execute('\n CREATE TABLE TestTable(\n timestamp REAL,\n value REAL,\n junk_one REAL,\n junk_two TEXT\n )\n ') database.commit() timestamp = 0 junk_two = ['test'] tuples = [] append = tuples.append for item in xrange(numberOfTuples): timestamp += random.random() value = (random.random() * 1000) junkOne = random.random() junkTwo = random.choice(junk_two) append([timestamp, value, junkOne, junkTwo]) cur.executemany('INSERT INTO TestTable VALUES (?,?,?,?)', tuples) database.commit()
def add_data_into_db(self, database, numberOfTuples): 'Inserts a numberOfTuples tuples into the given database.\n\n This automatically creates a table called TestTable with the following schema:\n timestamp REAL\n value REAL\n junk_one REAL\n junk_two TEXT\n\n The time stamps will be inserted as an ordered sequence.\n\n @param database dbapi2.connection Instance for the used database.\n @param numberOfTuples Number of tuples that have to be created.\n ' cur = database.cursor() cur.execute('\n CREATE TABLE TestTable(\n timestamp REAL,\n value REAL,\n junk_one REAL,\n junk_two TEXT\n )\n ') database.commit() timestamp = 0 junk_two = ['test'] tuples = [] append = tuples.append for item in xrange(numberOfTuples): timestamp += random.random() value = (random.random() * 1000) junkOne = random.random() junkTwo = random.choice(junk_two) append([timestamp, value, junkOne, junkTwo]) cur.executemany('INSERT INTO TestTable VALUES (?,?,?,?)', tuples) database.commit()<|docstring|>Inserts a numberOfTuples tuples into the given database. This automatically creates a table called TestTable with the following schema: timestamp REAL value REAL junk_one REAL junk_two TEXT The time stamps will be inserted as an ordered sequence. @param database dbapi2.connection Instance for the used database. @param numberOfTuples Number of tuples that have to be created.<|endoftext|>
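Note that add_data_into_db uses xrange, so it targets Python 2. A hedged Python 3 equivalent of the same bulk insert (the helper name is mine):

    import random
    import sqlite3

    def add_rows(db, n):
        cur = db.cursor()
        cur.execute('CREATE TABLE IF NOT EXISTS TestTable('
                    'timestamp REAL, value REAL, junk_one REAL, junk_two TEXT)')
        timestamp, rows = 0.0, []
        for _ in range(n):                # range() replaces Py2 xrange()
            timestamp += random.random()  # keeps timestamps strictly ordered
            rows.append((timestamp, random.random() * 1000, random.random(), 'test'))
        cur.executemany('INSERT INTO TestTable VALUES (?,?,?,?)', rows)
        db.commit()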
21bd296b40f8564e88c63f57920f32da66d51fa07cd3fa96101901f5a3c68a93
def select_to_many_attributes_test(self): 'SELECT timestamp, value, junk, FROM TestTable\n\n This function tests if statements like\n\n SELECT timestamp, value, junk, ... FROM\n\n can be used to initialize a TimeSeries instance. TimeSeries should therefore only\n take the first two attributes for data initialization, regardless of their names.\n ' cur = self._db.cursor().execute('SELECT COUNT(*) from TestTable') nbrOfTuples = cur.fetchall()[0][0] cur = self._db.cursor().execute('SELECT timestamp, value, junk_one, junk_two FROM TestTable') ts = TimeSeries() ts.initialize_from_sql_cursor(cur) assert (len(ts) == nbrOfTuples)
SELECT timestamp, value, junk, FROM TestTable This function tests if statements like SELECT timestamp, value, junk, ... FROM can be used to initialize a TimeSeries instance. TimeSeries should therefore only take the first two attributes for data initialization, regardless of their names.
pycast/tests/timeseriesdatabasetest.py
select_to_many_attributes_test
yuvaraja2303/pycast
76
python
def select_to_many_attributes_test(self): 'SELECT timestamp, value, junk, FROM TestTable\n\n This function tests if statements like\n\n SELECT timestamp, value, junk, ... FROM\n\n can be used to initialize a TimeSeries instance. TimeSeries should therefore only\n take the first two attributes for data initialization, regardless of their names.\n ' cur = self._db.cursor().execute('SELECT COUNT(*) from TestTable') nbrOfTuples = cur.fetchall()[0][0] cur = self._db.cursor().execute('SELECT timestamp, value, junk_one, junk_two FROM TestTable') ts = TimeSeries() ts.initialize_from_sql_cursor(cur) assert (len(ts) == nbrOfTuples)
def select_to_many_attributes_test(self): 'SELECT timestamp, value, junk, FROM TestTable\n\n This function tests if statements like\n\n SELECT timestamp, value, junk, ... FROM\n\n can be used to initialize a TimeSeries instance. TimeSeries should therefore only\n take the first two attributes for data initialization, regardless of their names.\n ' cur = self._db.cursor().execute('SELECT COUNT(*) from TestTable') nbrOfTuples = cur.fetchall()[0][0] cur = self._db.cursor().execute('SELECT timestamp, value, junk_one, junk_two FROM TestTable') ts = TimeSeries() ts.initialize_from_sql_cursor(cur) assert (len(ts) == nbrOfTuples)<|docstring|>SELECT timestamp, value, junk, FROM TestTable This function tests if statements like SELECT timestamp, value, junk, ... FROM can be used to initialize a TimeSeries instance. TimeSeries should therefore only take the first two attributes for data initialization, regardless of their names.<|endoftext|>
ff71eb748af5f24afa08d5e80f2c5c468ad614bcbeb977bf15bace08c3f12983
def select_star_test(self): 'SELECT * FROM TestTable\n\n This function tests if statements like\n\n SELECT * FROM\n\n can be used to initialize a TimeSeries instance. TimeSeries should therefore only\n take the first two attributes for data initialization, regardless of their names.\n ' cur = self._db.cursor().execute('SELECT COUNT(*) from TestTable') nbrOfTuples = cur.fetchall()[0][0] cur = self._db.cursor().execute('SELECT * FROM TestTable') ts = TimeSeries() ts.initialize_from_sql_cursor(cur) assert (len(ts) == nbrOfTuples)
SELECT * FROM TestTable This function tests if statements like SELECT * FROM can be used to initialize a TimeSeries instance. TimeSeries should therefore only take the first two attributes for data initialization, regardless of their names.
pycast/tests/timeseriesdatabasetest.py
select_star_test
yuvaraja2303/pycast
76
python
def select_star_test(self): 'SELECT * FROM TestTable\n\n This function tests if statements like\n\n SELECT * FROM\n\n can be used to initialize a TimeSeries instance. TimeSeries should therefore only\n take the first two attributes for data initialization, regardless of their names.\n ' cur = self._db.cursor().execute('SELECT COUNT(*) from TestTable') nbrOfTuples = cur.fetchall()[0][0] cur = self._db.cursor().execute('SELECT * FROM TestTable') ts = TimeSeries() ts.initialize_from_sql_cursor(cur) assert (len(ts) == nbrOfTuples)
def select_star_test(self): 'SELECT * FROM TestTable\n\n This function tests if statements like\n\n SELECT * FROM\n\n can be used to initialize a TimeSeries instance. TimeSeries should therefore only\n take the first two attributes for data initialization, regardless of their names.\n ' cur = self._db.cursor().execute('SELECT COUNT(*) from TestTable') nbrOfTuples = cur.fetchall()[0][0] cur = self._db.cursor().execute('SELECT * FROM TestTable') ts = TimeSeries() ts.initialize_from_sql_cursor(cur) assert (len(ts) == nbrOfTuples)<|docstring|>SELECT * FROM TestTable This function tests if statements like SELECT * FROM can be used to initialize a TimeSeries instance. TimeSeries should therefore only take the first two attributes for data initialization, regardless of their names.<|endoftext|>
f3495d775cb2234835e23aa5a32b34442ac7704679cc33562067e044095aa902
def multidimensionaltimeseries_test(self): 'Test the initialization of the MultiDimensionalTimeSeries.' cur = self._db.cursor().execute('SELECT COUNT(*) from TestTable') nbrOfTuples = cur.fetchall()[0][0] cur = self._db.cursor().execute('SELECT timestamp, value, junk_one FROM TestTable') ts = MultiDimensionalTimeSeries(dimensions=2) ts.initialize_from_sql_cursor(cur) assert (len(ts) == nbrOfTuples)
Test the initialization of the MultiDimensionalTimeSeries.
pycast/tests/timeseriesdatabasetest.py
multidimensionaltimeseries_test
yuvaraja2303/pycast
76
python
def multidimensionaltimeseries_test(self): cur = self._db.cursor().execute('SELECT COUNT(*) from TestTable') nbrOfTuples = cur.fetchall()[0][0] cur = self._db.cursor().execute('SELECT timestamp, value, junk_one FROM TestTable') ts = MultiDimensionalTimeSeries(dimensions=2) ts.initialize_from_sql_cursor(cur) assert (len(ts) == nbrOfTuples)
def multidimensionaltimeseries_test(self): cur = self._db.cursor().execute('SELECT COUNT(*) from TestTable') nbrOfTuples = cur.fetchall()[0][0] cur = self._db.cursor().execute('SELECT timestamp, value, junk_one FROM TestTable') ts = MultiDimensionalTimeSeries(dimensions=2) ts.initialize_from_sql_cursor(cur) assert (len(ts) == nbrOfTuples)<|docstring|>Test the initialization of the MultiDimensionalTimeSeries.<|endoftext|>
e58da5917852c20f48e7cf2af100da82b9d501caf0ec2622cb11e1eeeb618ade
def check_for_consistency_test(self): 'Tests if database initialization and manual initialization create equal TimeSeries instances.' cur = self._db.cursor().execute('SELECT COUNT(*) from TestTable') nbrOfTuples = cur.fetchall()[0][0] sqlstmt = 'SELECT timestamp, value FROM TestTable ORDER BY timestamp ASC' tsManual = TimeSeries() data = self._db.cursor().execute(sqlstmt).fetchall() for entry in data: tsManual.add_entry(str(entry[0]), entry[1]) tsAuto = TimeSeries() tsAuto.initialize_from_sql_cursor(self._db.cursor().execute(sqlstmt)) assert (nbrOfTuples == len(tsManual)) assert (nbrOfTuples == len(tsAuto)) assert (len(tsManual) == len(tsAuto)) assert (tsManual == tsAuto)
Tests if database initialization and manual initialization create equal TimeSeries instances.
pycast/tests/timeseriesdatabasetest.py
check_for_consistency_test
yuvaraja2303/pycast
76
python
def check_for_consistency_test(self): cur = self._db.cursor().execute('SELECT COUNT(*) from TestTable') nbrOfTuples = cur.fetchall()[0][0] sqlstmt = 'SELECT timestamp, value FROM TestTable ORDER BY timestamp ASC' tsManual = TimeSeries() data = self._db.cursor().execute(sqlstmt).fetchall() for entry in data: tsManual.add_entry(str(entry[0]), entry[1]) tsAuto = TimeSeries() tsAuto.initialize_from_sql_cursor(self._db.cursor().execute(sqlstmt)) assert (nbrOfTuples == len(tsManual)) assert (nbrOfTuples == len(tsAuto)) assert (len(tsManual) == len(tsAuto)) assert (tsManual == tsAuto)
def check_for_consistency_test(self): cur = self._db.cursor().execute('SELECT COUNT(*) from TestTable') nbrOfTuples = cur.fetchall()[0][0] sqlstmt = 'SELECT timestamp, value FROM TestTable ORDER BY timestamp ASC' tsManual = TimeSeries() data = self._db.cursor().execute(sqlstmt).fetchall() for entry in data: tsManual.add_entry(str(entry[0]), entry[1]) tsAuto = TimeSeries() tsAuto.initialize_from_sql_cursor(self._db.cursor().execute(sqlstmt)) assert (nbrOfTuples == len(tsManual)) assert (nbrOfTuples == len(tsAuto)) assert (len(tsManual) == len(tsAuto)) assert (tsManual == tsAuto)<|docstring|>Tests if database initialization and manual initialization create equal TimeSeries instances.<|endoftext|>
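The four tests above all exercise the same entry point; a condensed sketch of the pattern they verify (the import path is an assumption about pycast's layout):

    import sqlite3
    from pycast.common.timeseries import TimeSeries

    db = sqlite3.connect(':memory:')
    # ... populate TestTable as in add_data_into_db ...
    cur = db.cursor().execute(
        'SELECT timestamp, value, junk_one, junk_two FROM TestTable')
    ts = TimeSeries()
    ts.initialize_from_sql_cursor(cur)  # only the first two columns are used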
afa97195bd608ef2d6d9b11ac18c30bce46f20a900a8770e9138364cdb34a6ee
def hoverStiff(self, x_c, y_c, z_c, yaw_c, goal_r): '\n Hovers the drone to an accurate global setpoint\n Drone will stay at setpoint until other function is called\n Stiff refers to optimization for global positional accuracy\n\n Parameters\n ----------\n x_c, y_c, z_c, yaw_c = reference setpoints\n goal_r = bounding radius for when drone is "close enough" to commanded setpoint\n ' print('Start hover controller') pose = self.getPose(self.cf_name) altitude_ctrl_phys = AltitudeControllerPhys() xy_ctrl_phys = XYControllerPhys() yaw_ctrl_phys = YawControllerPhys() print('after class declarations') while (not rospy.is_shutdown()): pose_prev = pose pose = self.pose quat = [pose.transform.rotation.x, pose.transform.rotation.y, pose.transform.rotation.z, pose.transform.rotation.w] x = pose.transform.translation.x y = pose.transform.translation.y z = pose.transform.translation.z if math.isnan(pose.transform.translation.x): pose = pose_prev quat = [pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w] R = Rotation.from_quat(quat) x_global = R.apply([1, 0, 0]) yaw = np.arctan2(np.cross([1, 0, 0], x_global)[2], np.dot(x_global, [1, 0, 0])) self.msg.linear.z = altitude_ctrl_phys.update(z_c, z) (self.msg.linear.x, self.msg.linear.y) = xy_ctrl_phys.update(x_c, x, y_c, y, yaw) self.msg.angular.z = yaw_ctrl_phys.update(yaw_c, yaw) offset = 0.05 if (((x > (x_c - goal_r)) and (x < (x_c + goal_r))) and ((y > (y_c - goal_r)) and (y < (y_c + goal_r))) and ((z > ((z_c - goal_r) - offset)) and (z < ((z_c + goal_r) + offset)))): print('Found the hover setpoint!') self.pub.publish(self.msg) self.rate.sleep()
Hovers the drone to an accurate global setpoint Drone will stay at setpoint until other function is called Stiff refers to optimization for global positional accuracy Parameters ---------- x_c, y_c, z_c, yaw_c = reference setpoints goal_r = bounding radius for when drone is "close enough" to commanded setpoint
crazyflie_demo/scripts/hover_stiff.py
hoverStiff
CooperControlsLab/crazyflie_cooper_ros
3
python
def hoverStiff(self, x_c, y_c, z_c, yaw_c, goal_r): '\n Hovers the drone to an accurate global setpoint\n Drone will stay at setpoint until other function is called\n Stiff refers to optimization for global positional accuracy\n\n Parameters\n ----------\n x_c, y_c, z_c, yaw_c = reference setpoints\n goal_r = bounding radius for when drone is "close enough" to commanded setpoint\n ' print('Start hover controller') pose = self.getPose(self.cf_name) altitude_ctrl_phys = AltitudeControllerPhys() xy_ctrl_phys = XYControllerPhys() yaw_ctrl_phys = YawControllerPhys() print('after class declarations') while (not rospy.is_shutdown()): pose_prev = pose pose = self.pose quat = [pose.transform.rotation.x, pose.transform.rotation.y, pose.transform.rotation.z, pose.transform.rotation.w] x = pose.transform.translation.x y = pose.transform.translation.y z = pose.transform.translation.z if math.isnan(pose.transform.translation.x): pose = pose_prev quat = [pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w] R = Rotation.from_quat(quat) x_global = R.apply([1, 0, 0]) yaw = np.arctan2(np.cross([1, 0, 0], x_global)[2], np.dot(x_global, [1, 0, 0])) self.msg.linear.z = altitude_ctrl_phys.update(z_c, z) (self.msg.linear.x, self.msg.linear.y) = xy_ctrl_phys.update(x_c, x, y_c, y, yaw) self.msg.angular.z = yaw_ctrl_phys.update(yaw_c, yaw) offset = 0.05 if (((x > (x_c - goal_r)) and (x < (x_c + goal_r))) and ((y > (y_c - goal_r)) and (y < (y_c + goal_r))) and ((z > ((z_c - goal_r) - offset)) and (z < ((z_c + goal_r) + offset)))): print('Found the hover setpoint!') self.pub.publish(self.msg) self.rate.sleep()
def hoverStiff(self, x_c, y_c, z_c, yaw_c, goal_r): '\n Hovers the drone to an accurate global setpoint\n Drone will stay at setpoint until other function is called\n Stiff refers to optimization for global positional accuracy\n\n Parameters\n ----------\n x_c, y_c, z_c, yaw_c = reference setpoints\n goal_r = bounding radius for when drone is "close enough" to commanded setpoint\n ' print('Start hover controller') pose = self.getPose(self.cf_name) altitude_ctrl_phys = AltitudeControllerPhys() xy_ctrl_phys = XYControllerPhys() yaw_ctrl_phys = YawControllerPhys() print('after class declarations') while (not rospy.is_shutdown()): pose_prev = pose pose = self.pose quat = [pose.transform.rotation.x, pose.transform.rotation.y, pose.transform.rotation.z, pose.transform.rotation.w] x = pose.transform.translation.x y = pose.transform.translation.y z = pose.transform.translation.z if math.isnan(pose.transform.translation.x): pose = pose_prev quat = [pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w] R = Rotation.from_quat(quat) x_global = R.apply([1, 0, 0]) yaw = np.arctan2(np.cross([1, 0, 0], x_global)[2], np.dot(x_global, [1, 0, 0])) self.msg.linear.z = altitude_ctrl_phys.update(z_c, z) (self.msg.linear.x, self.msg.linear.y) = xy_ctrl_phys.update(x_c, x, y_c, y, yaw) self.msg.angular.z = yaw_ctrl_phys.update(yaw_c, yaw) offset = 0.05 if (((x > (x_c - goal_r)) and (x < (x_c + goal_r))) and ((y > (y_c - goal_r)) and (y < (y_c + goal_r))) and ((z > ((z_c - goal_r) - offset)) and (z < ((z_c + goal_r) + offset)))): print('Found the hover setpoint!') self.pub.publish(self.msg) self.rate.sleep()<|docstring|>Hovers the drone to an accurate global setpoint Drone will stay at setpoint until other function is called Stiff refers to optimization for global positional accuracy Parameters ---------- x_c, y_c, z_c, yaw_c = reference setpoints goal_r = bounding radius for when drone is "close enough" to commanded setpoint<|endoftext|>
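Two observations on hoverStiff: the loop guards against NaN pose dropouts by falling back to pose_prev, but the fallback branch then reads pose.orientation rather than pose.transform.rotation, which looks inconsistent with the rest of the method; and the yaw extraction can be isolated as below (a sketch of the same computation, not the controller itself):

    import numpy as np
    from scipy.spatial.transform import Rotation

    def yaw_from_quat(quat):
        # quat is [x, y, z, w]; rotate the body x-axis into the world
        # frame and measure its heading, exactly as the loop above does.
        x_global = Rotation.from_quat(quat).apply([1, 0, 0])
        return np.arctan2(np.cross([1, 0, 0], x_global)[2],
                          np.dot(x_global, [1, 0, 0]))

    assert abs(yaw_from_quat([0, 0, 0, 1])) < 1e-12  # identity -> zero yaw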
7efc9c123630dccf5ba20be7dd68f8232ee30e8a47c56f9bb665e583963613db
def test_001_simple(self): " Very simple functionality testing:\n - static equalizer\n - init channel state with all ones\n - transmit all ones\n - make sure we rx all ones\n - Tag check: put in frame length tag and one other random tag,\n make sure they're propagated\n " fft_len = 8 equalizer = digital.ofdm_equalizer_static(fft_len) n_syms = 3 tx_data = (((1,) * fft_len) * n_syms) chan_tag = gr.tag_t() chan_tag.offset = 0 chan_tag.key = pmt.string_to_symbol('ofdm_sync_chan_taps') chan_tag.value = pmt.init_c32vector(fft_len, ((1,) * fft_len)) random_tag = gr.tag_t() random_tag.offset = 1 random_tag.key = pmt.string_to_symbol('foo') random_tag.value = pmt.from_long(42) src = blocks.vector_source_c(tx_data, False, fft_len, (chan_tag, random_tag)) eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), 0, self.tsb_key) sink = blocks.tsb_vector_sink_c(fft_len, tsb_key=self.tsb_key) self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, n_syms, self.tsb_key), eq, sink) self.tb.run() self.assertEqual(tx_data, sink.data()[0]) tag_dict = dict() for tag in sink.tags(): ptag = gr.tag_to_python(tag) tag_dict[ptag.key] = ptag.value expected_dict = {'foo': 42} self.assertEqual(tag_dict, expected_dict)
Very simple functionality testing: - static equalizer - init channel state with all ones - transmit all ones - make sure we rx all ones - Tag check: put in frame length tag and one other random tag, make sure they're propagated
gnuradio-3.7.13.4/gr-digital/python/digital/qa_ofdm_frame_equalizer_vcvc.py
test_001_simple
v1259397/cosmic-gnuradio
1
python
def test_001_simple(self): " Very simple functionality testing:\n - static equalizer\n - init channel state with all ones\n - transmit all ones\n - make sure we rx all ones\n - Tag check: put in frame length tag and one other random tag,\n make sure they're propagated\n " fft_len = 8 equalizer = digital.ofdm_equalizer_static(fft_len) n_syms = 3 tx_data = (((1,) * fft_len) * n_syms) chan_tag = gr.tag_t() chan_tag.offset = 0 chan_tag.key = pmt.string_to_symbol('ofdm_sync_chan_taps') chan_tag.value = pmt.init_c32vector(fft_len, ((1,) * fft_len)) random_tag = gr.tag_t() random_tag.offset = 1 random_tag.key = pmt.string_to_symbol('foo') random_tag.value = pmt.from_long(42) src = blocks.vector_source_c(tx_data, False, fft_len, (chan_tag, random_tag)) eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), 0, self.tsb_key) sink = blocks.tsb_vector_sink_c(fft_len, tsb_key=self.tsb_key) self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, n_syms, self.tsb_key), eq, sink) self.tb.run() self.assertEqual(tx_data, sink.data()[0]) tag_dict = dict() for tag in sink.tags(): ptag = gr.tag_to_python(tag) tag_dict[ptag.key] = ptag.value expected_dict = {'foo': 42} self.assertEqual(tag_dict, expected_dict)
def test_001_simple(self): " Very simple functionality testing:\n - static equalizer\n - init channel state with all ones\n - transmit all ones\n - make sure we rx all ones\n - Tag check: put in frame length tag and one other random tag,\n make sure they're propagated\n " fft_len = 8 equalizer = digital.ofdm_equalizer_static(fft_len) n_syms = 3 tx_data = (((1,) * fft_len) * n_syms) chan_tag = gr.tag_t() chan_tag.offset = 0 chan_tag.key = pmt.string_to_symbol('ofdm_sync_chan_taps') chan_tag.value = pmt.init_c32vector(fft_len, ((1,) * fft_len)) random_tag = gr.tag_t() random_tag.offset = 1 random_tag.key = pmt.string_to_symbol('foo') random_tag.value = pmt.from_long(42) src = blocks.vector_source_c(tx_data, False, fft_len, (chan_tag, random_tag)) eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), 0, self.tsb_key) sink = blocks.tsb_vector_sink_c(fft_len, tsb_key=self.tsb_key) self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, n_syms, self.tsb_key), eq, sink) self.tb.run() self.assertEqual(tx_data, sink.data()[0]) tag_dict = dict() for tag in sink.tags(): ptag = gr.tag_to_python(tag) tag_dict[ptag.key] = ptag.value expected_dict = {'foo': 42} self.assertEqual(tag_dict, expected_dict)<|docstring|>Very simple functionality testing: - static equalizer - init channel state with all ones - transmit all ones - make sure we rx all ones - Tag check: put in frame length tag and one other random tag, make sure they're propagated<|endoftext|>
59496e9d50d4b1713da19342169d43e029fd186be6b994eaa0d601bf4db74ebb
def test_001b_simple_skip_nothing(self): '\n Same as before, but put a skip-header in there\n ' fft_len = 8 equalizer = digital.ofdm_equalizer_static(fft_len, symbols_skipped=1) n_syms = 3 tx_data = (((1,) * fft_len) * n_syms) chan_tag = gr.tag_t() chan_tag.offset = 0 chan_tag.key = pmt.string_to_symbol('ofdm_sync_chan_taps') chan_tag.value = pmt.init_c32vector(fft_len, ((1,) * fft_len)) src = blocks.vector_source_c(tx_data, False, fft_len, (chan_tag,)) eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), 0, self.tsb_key) sink = blocks.tsb_vector_sink_c(fft_len, tsb_key=self.tsb_key) self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, n_syms, self.tsb_key), eq, sink) self.tb.run() self.assertEqual(tx_data, sink.data()[0])
Same as before, but put a skip-header in there
gnuradio-3.7.13.4/gr-digital/python/digital/qa_ofdm_frame_equalizer_vcvc.py
test_001b_simple_skip_nothing
v1259397/cosmic-gnuradio
1
python
def test_001b_simple_skip_nothing(self): '\n \n ' fft_len = 8 equalizer = digital.ofdm_equalizer_static(fft_len, symbols_skipped=1) n_syms = 3 tx_data = (((1,) * fft_len) * n_syms) chan_tag = gr.tag_t() chan_tag.offset = 0 chan_tag.key = pmt.string_to_symbol('ofdm_sync_chan_taps') chan_tag.value = pmt.init_c32vector(fft_len, ((1,) * fft_len)) src = blocks.vector_source_c(tx_data, False, fft_len, (chan_tag,)) eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), 0, self.tsb_key) sink = blocks.tsb_vector_sink_c(fft_len, tsb_key=self.tsb_key) self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, n_syms, self.tsb_key), eq, sink) self.tb.run() self.assertEqual(tx_data, sink.data()[0])
def test_001b_simple_skip_nothing(self): '\n \n ' fft_len = 8 equalizer = digital.ofdm_equalizer_static(fft_len, symbols_skipped=1) n_syms = 3 tx_data = (((1,) * fft_len) * n_syms) chan_tag = gr.tag_t() chan_tag.offset = 0 chan_tag.key = pmt.string_to_symbol('ofdm_sync_chan_taps') chan_tag.value = pmt.init_c32vector(fft_len, ((1,) * fft_len)) src = blocks.vector_source_c(tx_data, False, fft_len, (chan_tag,)) eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), 0, self.tsb_key) sink = blocks.tsb_vector_sink_c(fft_len, tsb_key=self.tsb_key) self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, n_syms, self.tsb_key), eq, sink) self.tb.run() self.assertEqual(tx_data, sink.data()[0])<|docstring|>Same as before, but put a skip-header in there<|endoftext|>
c7287557c4f0089e10465f4b48c2e60c63e3509b122b67eb906e3252acaca86b
def test_001c_carrier_offset_no_cp(self): '\n Same as before, but put a carrier offset in there\n ' fft_len = 8 cp_len = 0 n_syms = 1 carr_offset = 1 occupied_carriers = (((- 2), (- 1), 1, 2),) tx_data = (0, 0, 0, (- 1j), (- 1j), 0, (- 1j), (- 1j)) rx_expected = ((0, 0, 1, 1, 0, 1, 1, 0) * n_syms) equalizer = digital.ofdm_equalizer_static(fft_len, occupied_carriers) chan_tag = gr.tag_t() chan_tag.offset = 0 chan_tag.key = pmt.string_to_symbol('ofdm_sync_chan_taps') chan_tag.value = pmt.init_c32vector(fft_len, (0, 0, (- 1j), (- 1j), 0, (- 1j), (- 1j), 0)) offset_tag = gr.tag_t() offset_tag.offset = 0 offset_tag.key = pmt.string_to_symbol('ofdm_sync_carr_offset') offset_tag.value = pmt.from_long(carr_offset) src = blocks.vector_source_c(tx_data, False, fft_len, (chan_tag, offset_tag)) eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), cp_len, self.tsb_key) sink = blocks.tsb_vector_sink_c(fft_len, tsb_key=self.tsb_key) self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, n_syms, self.tsb_key), eq, sink) self.tb.run() self.assertComplexTuplesAlmostEqual(rx_expected, sink.data()[0], places=4)
Same as before, but put a carrier offset in there
gnuradio-3.7.13.4/gr-digital/python/digital/qa_ofdm_frame_equalizer_vcvc.py
test_001c_carrier_offset_no_cp
v1259397/cosmic-gnuradio
1
python
def test_001c_carrier_offset_no_cp(self): '\n \n ' fft_len = 8 cp_len = 0 n_syms = 1 carr_offset = 1 occupied_carriers = (((- 2), (- 1), 1, 2),) tx_data = (0, 0, 0, (- 1j), (- 1j), 0, (- 1j), (- 1j)) rx_expected = ((0, 0, 1, 1, 0, 1, 1, 0) * n_syms) equalizer = digital.ofdm_equalizer_static(fft_len, occupied_carriers) chan_tag = gr.tag_t() chan_tag.offset = 0 chan_tag.key = pmt.string_to_symbol('ofdm_sync_chan_taps') chan_tag.value = pmt.init_c32vector(fft_len, (0, 0, (- 1j), (- 1j), 0, (- 1j), (- 1j), 0)) offset_tag = gr.tag_t() offset_tag.offset = 0 offset_tag.key = pmt.string_to_symbol('ofdm_sync_carr_offset') offset_tag.value = pmt.from_long(carr_offset) src = blocks.vector_source_c(tx_data, False, fft_len, (chan_tag, offset_tag)) eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), cp_len, self.tsb_key) sink = blocks.tsb_vector_sink_c(fft_len, tsb_key=self.tsb_key) self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, n_syms, self.tsb_key), eq, sink) self.tb.run() self.assertComplexTuplesAlmostEqual(rx_expected, sink.data()[0], places=4)
def test_001c_carrier_offset_no_cp(self): '\n \n ' fft_len = 8 cp_len = 0 n_syms = 1 carr_offset = 1 occupied_carriers = (((- 2), (- 1), 1, 2),) tx_data = (0, 0, 0, (- 1j), (- 1j), 0, (- 1j), (- 1j)) rx_expected = ((0, 0, 1, 1, 0, 1, 1, 0) * n_syms) equalizer = digital.ofdm_equalizer_static(fft_len, occupied_carriers) chan_tag = gr.tag_t() chan_tag.offset = 0 chan_tag.key = pmt.string_to_symbol('ofdm_sync_chan_taps') chan_tag.value = pmt.init_c32vector(fft_len, (0, 0, (- 1j), (- 1j), 0, (- 1j), (- 1j), 0)) offset_tag = gr.tag_t() offset_tag.offset = 0 offset_tag.key = pmt.string_to_symbol('ofdm_sync_carr_offset') offset_tag.value = pmt.from_long(carr_offset) src = blocks.vector_source_c(tx_data, False, fft_len, (chan_tag, offset_tag)) eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), cp_len, self.tsb_key) sink = blocks.tsb_vector_sink_c(fft_len, tsb_key=self.tsb_key) self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, n_syms, self.tsb_key), eq, sink) self.tb.run() self.assertComplexTuplesAlmostEqual(rx_expected, sink.data()[0], places=4)<|docstring|>Same as before, but put a carrier offset in there<|endoftext|>
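A back-of-the-envelope check of the expectation in the record above: with a carrier offset of +1, the received FFT bins are the transmitted bins shifted by one, so compensation amounts to shifting back and dividing by the channel taps. A plain-numpy sketch (illustrative only; the real block also handles phase rotation, which this ignores):

import numpy as np

taps = np.array([0, 0, -1j, -1j, 0, -1j, -1j, 0])   # chan_tag value from the test
rx = np.array([0, 0, 0, -1j, -1j, 0, -1j, -1j])     # tx_data as fed to the equalizer
shifted = np.roll(rx, -1)                           # undo the +1 carrier offset
out = np.where(taps != 0, shifted / np.where(taps == 0, 1, taps), 0)
print(out.real)                                     # [0. 0. 1. 1. 0. 1. 1. 0.] == rx_expected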
2c41c85e45cabe0f3f20781fafe225212ec714b0321fa98bcee977ece8c9f185
def test_001c_carrier_offset_cp(self): '\n Same as before, but put a carrier offset in there and a CP\n ' fft_len = 8 cp_len = 2 n_syms = 3 occupied_carriers = (((- 2), (- 1), 1, 2),) carr_offset = (- 1) tx_data = (0, (- 1j), (- 1j), 0, (- 1j), (- 1j), 0, 0, 0, (- 1), (- 1), 0, (- 1), (- 1), 0, 0, 0, 1j, 1j, 0, 1j, 1j, 0, 0) rx_expected = ((0, 0, 1, 1, 0, 1, 1, 0) * n_syms) equalizer = digital.ofdm_equalizer_static(fft_len, occupied_carriers) chan_tag = gr.tag_t() chan_tag.offset = 0 chan_tag.key = pmt.string_to_symbol('ofdm_sync_chan_taps') chan_tag.value = pmt.init_c32vector(fft_len, (0, 0, 1, 1, 0, 1, 1, 0)) offset_tag = gr.tag_t() offset_tag.offset = 0 offset_tag.key = pmt.string_to_symbol('ofdm_sync_carr_offset') offset_tag.value = pmt.from_long(carr_offset) src = blocks.vector_source_c(tx_data, False, fft_len, (chan_tag, offset_tag)) eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), cp_len, self.tsb_key) sink = blocks.tsb_vector_sink_c(fft_len, tsb_key=self.tsb_key) self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, n_syms, self.tsb_key), eq, sink) self.tb.run() self.assertComplexTuplesAlmostEqual(rx_expected, sink.data()[0], places=4)
Same as before, but put a carrier offset in there and a CP
gnuradio-3.7.13.4/gr-digital/python/digital/qa_ofdm_frame_equalizer_vcvc.py
test_001c_carrier_offset_cp
v1259397/cosmic-gnuradio
1
python
def test_001c_carrier_offset_cp(self): '\n \n ' fft_len = 8 cp_len = 2 n_syms = 3 occupied_carriers = (((- 2), (- 1), 1, 2),) carr_offset = (- 1) tx_data = (0, (- 1j), (- 1j), 0, (- 1j), (- 1j), 0, 0, 0, (- 1), (- 1), 0, (- 1), (- 1), 0, 0, 0, 1j, 1j, 0, 1j, 1j, 0, 0) rx_expected = ((0, 0, 1, 1, 0, 1, 1, 0) * n_syms) equalizer = digital.ofdm_equalizer_static(fft_len, occupied_carriers) chan_tag = gr.tag_t() chan_tag.offset = 0 chan_tag.key = pmt.string_to_symbol('ofdm_sync_chan_taps') chan_tag.value = pmt.init_c32vector(fft_len, (0, 0, 1, 1, 0, 1, 1, 0)) offset_tag = gr.tag_t() offset_tag.offset = 0 offset_tag.key = pmt.string_to_symbol('ofdm_sync_carr_offset') offset_tag.value = pmt.from_long(carr_offset) src = blocks.vector_source_c(tx_data, False, fft_len, (chan_tag, offset_tag)) eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), cp_len, self.tsb_key) sink = blocks.tsb_vector_sink_c(fft_len, tsb_key=self.tsb_key) self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, n_syms, self.tsb_key), eq, sink) self.tb.run() self.assertComplexTuplesAlmostEqual(rx_expected, sink.data()[0], places=4)
def test_001c_carrier_offset_cp(self): '\n \n ' fft_len = 8 cp_len = 2 n_syms = 3 occupied_carriers = (((- 2), (- 1), 1, 2),) carr_offset = (- 1) tx_data = (0, (- 1j), (- 1j), 0, (- 1j), (- 1j), 0, 0, 0, (- 1), (- 1), 0, (- 1), (- 1), 0, 0, 0, 1j, 1j, 0, 1j, 1j, 0, 0) rx_expected = ((0, 0, 1, 1, 0, 1, 1, 0) * n_syms) equalizer = digital.ofdm_equalizer_static(fft_len, occupied_carriers) chan_tag = gr.tag_t() chan_tag.offset = 0 chan_tag.key = pmt.string_to_symbol('ofdm_sync_chan_taps') chan_tag.value = pmt.init_c32vector(fft_len, (0, 0, 1, 1, 0, 1, 1, 0)) offset_tag = gr.tag_t() offset_tag.offset = 0 offset_tag.key = pmt.string_to_symbol('ofdm_sync_carr_offset') offset_tag.value = pmt.from_long(carr_offset) src = blocks.vector_source_c(tx_data, False, fft_len, (chan_tag, offset_tag)) eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), cp_len, self.tsb_key) sink = blocks.tsb_vector_sink_c(fft_len, tsb_key=self.tsb_key) self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, n_syms, self.tsb_key), eq, sink) self.tb.run() self.assertComplexTuplesAlmostEqual(rx_expected, sink.data()[0], places=4)<|docstring|>Same as before, but put a carrier offset in there and a CP<|endoftext|>
0c788000d54012b7e5e1acca19f029e605decfc1c40ce0a7260ae1f398768487
def test_002_static(self): '\n - Add a simple channel\n - Make symbols QPSK\n ' fft_len = 8 tx_data = [(- 1), (- 1), 1, 2, (- 1), 3, 0, (- 1), (- 1), (- 1), 0, 2, (- 1), 2, 0, (- 1), (- 1), (- 1), 3, 0, (- 1), 1, 0, (- 1), (- 1), (- 1), 1, 1, (- 1), 0, 2, (- 1)] cnst = digital.constellation_qpsk() tx_signal = [(cnst.map_to_points_v(x)[0] if (x != (- 1)) else 0) for x in tx_data] occupied_carriers = ((1, 2, 6, 7),) pilot_carriers = ((), (), (1, 2, 6, 7), ()) pilot_symbols = ([], [], [cnst.map_to_points_v(x)[0] for x in (1, 0, 3, 0)], []) equalizer = digital.ofdm_equalizer_static(fft_len, occupied_carriers, pilot_carriers, pilot_symbols) channel = [0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1j, 1j, 0, 1j, 1j, 0, 0, 0, 1j, 1j, 0, 1j, 1j, 0] for idx in range(fft_len, (2 * fft_len)): channel[idx] = (channel[(idx - fft_len)] * numpy.exp((((1j * 0.1) * numpy.pi) * (numpy.random.rand() - 0.5)))) chan_tag = gr.tag_t() chan_tag.offset = 0 chan_tag.key = pmt.string_to_symbol('ofdm_sync_chan_taps') chan_tag.value = pmt.init_c32vector(fft_len, channel[:fft_len]) src = blocks.vector_source_c(numpy.multiply(tx_signal, channel), False, fft_len, (chan_tag,)) sink = blocks.tsb_vector_sink_c(vlen=fft_len, tsb_key=self.tsb_key) eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), 0, self.tsb_key, True) self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, (len(tx_data) / fft_len), self.tsb_key), eq, sink) self.tb.run() rx_data = [(cnst.decision_maker_v((x,)) if (x != 0) else (- 1)) for x in sink.data()[0]] self.assertEqual(tx_data, rx_data) tag_dict = dict() for tag in sink.tags(): ptag = gr.tag_to_python(tag) tag_dict[ptag.key] = ptag.value if (ptag.key == 'ofdm_sync_chan_taps'): tag_dict[ptag.key] = list(pmt.c32vector_elements(tag.value)) else: tag_dict[ptag.key] = pmt.to_python(tag.value) expected_dict = {'ofdm_sync_chan_taps': channel[(- fft_len):]} self.assertEqual(tag_dict, expected_dict)
- Add a simple channel - Make symbols QPSK
gnuradio-3.7.13.4/gr-digital/python/digital/qa_ofdm_frame_equalizer_vcvc.py
test_002_static
v1259397/cosmic-gnuradio
1
python
def test_002_static(self): '\n \n ' fft_len = 8 tx_data = [(- 1), (- 1), 1, 2, (- 1), 3, 0, (- 1), (- 1), (- 1), 0, 2, (- 1), 2, 0, (- 1), (- 1), (- 1), 3, 0, (- 1), 1, 0, (- 1), (- 1), (- 1), 1, 1, (- 1), 0, 2, (- 1)] cnst = digital.constellation_qpsk() tx_signal = [(cnst.map_to_points_v(x)[0] if (x != (- 1)) else 0) for x in tx_data] occupied_carriers = ((1, 2, 6, 7),) pilot_carriers = ((), (), (1, 2, 6, 7), ()) pilot_symbols = ([], [], [cnst.map_to_points_v(x)[0] for x in (1, 0, 3, 0)], []) equalizer = digital.ofdm_equalizer_static(fft_len, occupied_carriers, pilot_carriers, pilot_symbols) channel = [0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1j, 1j, 0, 1j, 1j, 0, 0, 0, 1j, 1j, 0, 1j, 1j, 0] for idx in range(fft_len, (2 * fft_len)): channel[idx] = (channel[(idx - fft_len)] * numpy.exp((((1j * 0.1) * numpy.pi) * (numpy.random.rand() - 0.5)))) chan_tag = gr.tag_t() chan_tag.offset = 0 chan_tag.key = pmt.string_to_symbol('ofdm_sync_chan_taps') chan_tag.value = pmt.init_c32vector(fft_len, channel[:fft_len]) src = blocks.vector_source_c(numpy.multiply(tx_signal, channel), False, fft_len, (chan_tag,)) sink = blocks.tsb_vector_sink_c(vlen=fft_len, tsb_key=self.tsb_key) eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), 0, self.tsb_key, True) self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, (len(tx_data) / fft_len), self.tsb_key), eq, sink) self.tb.run() rx_data = [(cnst.decision_maker_v((x,)) if (x != 0) else (- 1)) for x in sink.data()[0]] self.assertEqual(tx_data, rx_data) tag_dict = dict() for tag in sink.tags(): ptag = gr.tag_to_python(tag) tag_dict[ptag.key] = ptag.value if (ptag.key == 'ofdm_sync_chan_taps'): tag_dict[ptag.key] = list(pmt.c32vector_elements(tag.value)) else: tag_dict[ptag.key] = pmt.to_python(tag.value) expected_dict = {'ofdm_sync_chan_taps': channel[(- fft_len):]} self.assertEqual(tag_dict, expected_dict)
def test_002_static(self): '\n \n ' fft_len = 8 tx_data = [(- 1), (- 1), 1, 2, (- 1), 3, 0, (- 1), (- 1), (- 1), 0, 2, (- 1), 2, 0, (- 1), (- 1), (- 1), 3, 0, (- 1), 1, 0, (- 1), (- 1), (- 1), 1, 1, (- 1), 0, 2, (- 1)] cnst = digital.constellation_qpsk() tx_signal = [(cnst.map_to_points_v(x)[0] if (x != (- 1)) else 0) for x in tx_data] occupied_carriers = ((1, 2, 6, 7),) pilot_carriers = ((), (), (1, 2, 6, 7), ()) pilot_symbols = ([], [], [cnst.map_to_points_v(x)[0] for x in (1, 0, 3, 0)], []) equalizer = digital.ofdm_equalizer_static(fft_len, occupied_carriers, pilot_carriers, pilot_symbols) channel = [0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1j, 1j, 0, 1j, 1j, 0, 0, 0, 1j, 1j, 0, 1j, 1j, 0] for idx in range(fft_len, (2 * fft_len)): channel[idx] = (channel[(idx - fft_len)] * numpy.exp((((1j * 0.1) * numpy.pi) * (numpy.random.rand() - 0.5)))) chan_tag = gr.tag_t() chan_tag.offset = 0 chan_tag.key = pmt.string_to_symbol('ofdm_sync_chan_taps') chan_tag.value = pmt.init_c32vector(fft_len, channel[:fft_len]) src = blocks.vector_source_c(numpy.multiply(tx_signal, channel), False, fft_len, (chan_tag,)) sink = blocks.tsb_vector_sink_c(vlen=fft_len, tsb_key=self.tsb_key) eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), 0, self.tsb_key, True) self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, (len(tx_data) / fft_len), self.tsb_key), eq, sink) self.tb.run() rx_data = [(cnst.decision_maker_v((x,)) if (x != 0) else (- 1)) for x in sink.data()[0]] self.assertEqual(tx_data, rx_data) tag_dict = dict() for tag in sink.tags(): ptag = gr.tag_to_python(tag) tag_dict[ptag.key] = ptag.value if (ptag.key == 'ofdm_sync_chan_taps'): tag_dict[ptag.key] = list(pmt.c32vector_elements(tag.value)) else: tag_dict[ptag.key] = pmt.to_python(tag.value) expected_dict = {'ofdm_sync_chan_taps': channel[(- fft_len):]} self.assertEqual(tag_dict, expected_dict)<|docstring|>- Add a simple channel - Make symbols QPSK<|endoftext|>
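The random channel above rotates each tap of the second OFDM symbol by a small random phase while leaving its magnitude untouched; the rotation line in isolation (illustrative only):

import numpy

tap = 1 + 0j
drift = numpy.exp(1j * 0.1 * numpy.pi * (numpy.random.rand() - 0.5))
rotated = tap * drift
print(abs(rotated))   # always 1.0: only the phase moves, by at most +/- 0.05*pi radians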
16e916e895b2530944a3104401cc471e1b566f722ab700a23d1fee135f66512d
def test_002_static_wo_tags(self): ' Same as before, but the input stream has no tag.\n We specify the frame size in the constructor.\n We also specify a tag key, so the output stream *should* have\n a TSB tag.\n ' fft_len = 8 n_syms = 4 tx_data = [(- 1), (- 1), 1, 2, (- 1), 3, 0, (- 1), (- 1), (- 1), 0, 2, (- 1), 2, 0, (- 1), (- 1), (- 1), 3, 0, (- 1), 1, 0, (- 1), (- 1), (- 1), 1, 1, (- 1), 0, 2, (- 1)] cnst = digital.constellation_qpsk() tx_signal = [(cnst.map_to_points_v(x)[0] if (x != (- 1)) else 0) for x in tx_data] occupied_carriers = ((1, 2, 6, 7),) pilot_carriers = ((), (), (1, 2, 6, 7), ()) pilot_symbols = ([], [], [cnst.map_to_points_v(x)[0] for x in (1, 0, 3, 0)], []) equalizer = digital.ofdm_equalizer_static(fft_len, occupied_carriers, pilot_carriers, pilot_symbols) channel = [0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1j, 1j, 0, 1j, 1j, 0, 0, 0, 1j, 1j, 0, 1j, 1j, 0] for idx in range(fft_len, (2 * fft_len)): channel[idx] = (channel[(idx - fft_len)] * numpy.exp((((1j * 0.1) * numpy.pi) * (numpy.random.rand() - 0.5)))) idx2 = (idx + (2 * fft_len)) channel[idx2] = (channel[idx2] * numpy.exp((((1j * 0) * numpy.pi) * (numpy.random.rand() - 0.5)))) src = blocks.vector_source_c(numpy.multiply(tx_signal, channel), False, fft_len) eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), 0, self.tsb_key, False, n_syms) sink = blocks.tsb_vector_sink_c(vlen=fft_len, tsb_key=self.tsb_key) self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, (len(tx_data) / fft_len), self.tsb_key), eq, sink) self.tb.run() rx_data = [(cnst.decision_maker_v((x,)) if (x != 0) else (- 1)) for x in sink.data()[0]] self.assertEqual(tx_data, rx_data) packets = sink.data() self.assertEqual(len(packets), 1) self.assertEqual(len(packets[0]), len(tx_data))
Same as before, but the input stream has no tag. We specify the frame size in the constructor. We also specify a tag key, so the output stream *should* have a TSB tag.
gnuradio-3.7.13.4/gr-digital/python/digital/qa_ofdm_frame_equalizer_vcvc.py
test_002_static_wo_tags
v1259397/cosmic-gnuradio
1
python
def test_002_static_wo_tags(self): ' \n ' fft_len = 8 n_syms = 4 tx_data = [(- 1), (- 1), 1, 2, (- 1), 3, 0, (- 1), (- 1), (- 1), 0, 2, (- 1), 2, 0, (- 1), (- 1), (- 1), 3, 0, (- 1), 1, 0, (- 1), (- 1), (- 1), 1, 1, (- 1), 0, 2, (- 1)] cnst = digital.constellation_qpsk() tx_signal = [(cnst.map_to_points_v(x)[0] if (x != (- 1)) else 0) for x in tx_data] occupied_carriers = ((1, 2, 6, 7),) pilot_carriers = ((), (), (1, 2, 6, 7), ()) pilot_symbols = ([], [], [cnst.map_to_points_v(x)[0] for x in (1, 0, 3, 0)], []) equalizer = digital.ofdm_equalizer_static(fft_len, occupied_carriers, pilot_carriers, pilot_symbols) channel = [0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1j, 1j, 0, 1j, 1j, 0, 0, 0, 1j, 1j, 0, 1j, 1j, 0] for idx in range(fft_len, (2 * fft_len)): channel[idx] = (channel[(idx - fft_len)] * numpy.exp((((1j * 0.1) * numpy.pi) * (numpy.random.rand() - 0.5)))) idx2 = (idx + (2 * fft_len)) channel[idx2] = (channel[idx2] * numpy.exp((((1j * 0) * numpy.pi) * (numpy.random.rand() - 0.5)))) src = blocks.vector_source_c(numpy.multiply(tx_signal, channel), False, fft_len) eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), 0, self.tsb_key, False, n_syms) sink = blocks.tsb_vector_sink_c(vlen=fft_len, tsb_key=self.tsb_key) self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, (len(tx_data) / fft_len), self.tsb_key), eq, sink) self.tb.run() rx_data = [(cnst.decision_maker_v((x,)) if (x != 0) else (- 1)) for x in sink.data()[0]] self.assertEqual(tx_data, rx_data) packets = sink.data() self.assertEqual(len(packets), 1) self.assertEqual(len(packets[0]), len(tx_data))
def test_002_static_wo_tags(self): ' \n ' fft_len = 8 n_syms = 4 tx_data = [(- 1), (- 1), 1, 2, (- 1), 3, 0, (- 1), (- 1), (- 1), 0, 2, (- 1), 2, 0, (- 1), (- 1), (- 1), 3, 0, (- 1), 1, 0, (- 1), (- 1), (- 1), 1, 1, (- 1), 0, 2, (- 1)] cnst = digital.constellation_qpsk() tx_signal = [(cnst.map_to_points_v(x)[0] if (x != (- 1)) else 0) for x in tx_data] occupied_carriers = ((1, 2, 6, 7),) pilot_carriers = ((), (), (1, 2, 6, 7), ()) pilot_symbols = ([], [], [cnst.map_to_points_v(x)[0] for x in (1, 0, 3, 0)], []) equalizer = digital.ofdm_equalizer_static(fft_len, occupied_carriers, pilot_carriers, pilot_symbols) channel = [0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1j, 1j, 0, 1j, 1j, 0, 0, 0, 1j, 1j, 0, 1j, 1j, 0] for idx in range(fft_len, (2 * fft_len)): channel[idx] = (channel[(idx - fft_len)] * numpy.exp((((1j * 0.1) * numpy.pi) * (numpy.random.rand() - 0.5)))) idx2 = (idx + (2 * fft_len)) channel[idx2] = (channel[idx2] * numpy.exp((((1j * 0) * numpy.pi) * (numpy.random.rand() - 0.5)))) src = blocks.vector_source_c(numpy.multiply(tx_signal, channel), False, fft_len) eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), 0, self.tsb_key, False, n_syms) sink = blocks.tsb_vector_sink_c(vlen=fft_len, tsb_key=self.tsb_key) self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, (len(tx_data) / fft_len), self.tsb_key), eq, sink) self.tb.run() rx_data = [(cnst.decision_maker_v((x,)) if (x != 0) else (- 1)) for x in sink.data()[0]] self.assertEqual(tx_data, rx_data) packets = sink.data() self.assertEqual(len(packets), 1) self.assertEqual(len(packets[0]), len(tx_data))<|docstring|>Same as before, but the input stream has no tag. We specify the frame size in the constructor. We also specify a tag key, so the output stream *should* have a TSB tag.<|endoftext|>
8d35363dd73f32a84cf53d4457ee773800c9ab99c380fb15a46a72af10deb1b0
def test_002_simpledfe(self): ' Use the simple DFE equalizer. ' fft_len = 8 tx_data = [(- 1), (- 1), 1, 2, (- 1), 3, 0, (- 1), (- 1), (- 1), 0, 2, (- 1), 2, 0, (- 1), (- 1), (- 1), 3, 0, (- 1), 1, 0, (- 1), (- 1), (- 1), 1, 1, (- 1), 0, 2, (- 1)] cnst = digital.constellation_qpsk() tx_signal = [(cnst.map_to_points_v(x)[0] if (x != (- 1)) else 0) for x in tx_data] occupied_carriers = ((1, 2, 6, 7),) pilot_carriers = ((), (), (1, 2, 6, 7), ()) pilot_symbols = ([], [], [cnst.map_to_points_v(x)[0] for x in (1, 0, 3, 0)], []) equalizer = digital.ofdm_equalizer_simpledfe(fft_len, cnst.base(), occupied_carriers, pilot_carriers, pilot_symbols, 0, 0.01) channel = [0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1j, 1j, 0, 1j, 1j, 0, 0, 0, 1j, 1j, 0, 1j, 1j, 0] for idx in range(fft_len, (2 * fft_len)): channel[idx] = (channel[(idx - fft_len)] * numpy.exp((((1j * 0.1) * numpy.pi) * (numpy.random.rand() - 0.5)))) idx2 = (idx + (2 * fft_len)) channel[idx2] = (channel[idx2] * numpy.exp((((1j * 0) * numpy.pi) * (numpy.random.rand() - 0.5)))) chan_tag = gr.tag_t() chan_tag.offset = 0 chan_tag.key = pmt.string_to_symbol('ofdm_sync_chan_taps') chan_tag.value = pmt.init_c32vector(fft_len, channel[:fft_len]) src = blocks.vector_source_c(numpy.multiply(tx_signal, channel), False, fft_len, (chan_tag,)) eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), 0, self.tsb_key, True) sink = blocks.tsb_vector_sink_c(fft_len, tsb_key=self.tsb_key) self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, (len(tx_data) / fft_len), self.tsb_key), eq, sink) self.tb.run() rx_data = [(cnst.decision_maker_v((x,)) if (x != 0) else (- 1)) for x in sink.data()[0]] self.assertEqual(tx_data, rx_data) self.assertEqual(len(sink.tags()), 1) tag = sink.tags()[0] self.assertEqual(pmt.symbol_to_string(tag.key), 'ofdm_sync_chan_taps') self.assertComplexTuplesAlmostEqual(list(pmt.c32vector_elements(tag.value)), channel[(- fft_len):], places=1)
Use the simple DFE equalizer.
gnuradio-3.7.13.4/gr-digital/python/digital/qa_ofdm_frame_equalizer_vcvc.py
test_002_simpledfe
v1259397/cosmic-gnuradio
1
python
def test_002_simpledfe(self): ' ' fft_len = 8 tx_data = [(- 1), (- 1), 1, 2, (- 1), 3, 0, (- 1), (- 1), (- 1), 0, 2, (- 1), 2, 0, (- 1), (- 1), (- 1), 3, 0, (- 1), 1, 0, (- 1), (- 1), (- 1), 1, 1, (- 1), 0, 2, (- 1)] cnst = digital.constellation_qpsk() tx_signal = [(cnst.map_to_points_v(x)[0] if (x != (- 1)) else 0) for x in tx_data] occupied_carriers = ((1, 2, 6, 7),) pilot_carriers = ((), (), (1, 2, 6, 7), ()) pilot_symbols = ([], [], [cnst.map_to_points_v(x)[0] for x in (1, 0, 3, 0)], []) equalizer = digital.ofdm_equalizer_simpledfe(fft_len, cnst.base(), occupied_carriers, pilot_carriers, pilot_symbols, 0, 0.01) channel = [0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1j, 1j, 0, 1j, 1j, 0, 0, 0, 1j, 1j, 0, 1j, 1j, 0] for idx in range(fft_len, (2 * fft_len)): channel[idx] = (channel[(idx - fft_len)] * numpy.exp((((1j * 0.1) * numpy.pi) * (numpy.random.rand() - 0.5)))) idx2 = (idx + (2 * fft_len)) channel[idx2] = (channel[idx2] * numpy.exp((((1j * 0) * numpy.pi) * (numpy.random.rand() - 0.5)))) chan_tag = gr.tag_t() chan_tag.offset = 0 chan_tag.key = pmt.string_to_symbol('ofdm_sync_chan_taps') chan_tag.value = pmt.init_c32vector(fft_len, channel[:fft_len]) src = blocks.vector_source_c(numpy.multiply(tx_signal, channel), False, fft_len, (chan_tag,)) eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), 0, self.tsb_key, True) sink = blocks.tsb_vector_sink_c(fft_len, tsb_key=self.tsb_key) self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, (len(tx_data) / fft_len), self.tsb_key), eq, sink) self.tb.run() rx_data = [(cnst.decision_maker_v((x,)) if (x != 0) else (- 1)) for x in sink.data()[0]] self.assertEqual(tx_data, rx_data) self.assertEqual(len(sink.tags()), 1) tag = sink.tags()[0] self.assertEqual(pmt.symbol_to_string(tag.key), 'ofdm_sync_chan_taps') self.assertComplexTuplesAlmostEqual(list(pmt.c32vector_elements(tag.value)), channel[(- fft_len):], places=1)
def test_002_simpledfe(self): ' ' fft_len = 8 tx_data = [(- 1), (- 1), 1, 2, (- 1), 3, 0, (- 1), (- 1), (- 1), 0, 2, (- 1), 2, 0, (- 1), (- 1), (- 1), 3, 0, (- 1), 1, 0, (- 1), (- 1), (- 1), 1, 1, (- 1), 0, 2, (- 1)] cnst = digital.constellation_qpsk() tx_signal = [(cnst.map_to_points_v(x)[0] if (x != (- 1)) else 0) for x in tx_data] occupied_carriers = ((1, 2, 6, 7),) pilot_carriers = ((), (), (1, 2, 6, 7), ()) pilot_symbols = ([], [], [cnst.map_to_points_v(x)[0] for x in (1, 0, 3, 0)], []) equalizer = digital.ofdm_equalizer_simpledfe(fft_len, cnst.base(), occupied_carriers, pilot_carriers, pilot_symbols, 0, 0.01) channel = [0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1j, 1j, 0, 1j, 1j, 0, 0, 0, 1j, 1j, 0, 1j, 1j, 0] for idx in range(fft_len, (2 * fft_len)): channel[idx] = (channel[(idx - fft_len)] * numpy.exp((((1j * 0.1) * numpy.pi) * (numpy.random.rand() - 0.5)))) idx2 = (idx + (2 * fft_len)) channel[idx2] = (channel[idx2] * numpy.exp((((1j * 0) * numpy.pi) * (numpy.random.rand() - 0.5)))) chan_tag = gr.tag_t() chan_tag.offset = 0 chan_tag.key = pmt.string_to_symbol('ofdm_sync_chan_taps') chan_tag.value = pmt.init_c32vector(fft_len, channel[:fft_len]) src = blocks.vector_source_c(numpy.multiply(tx_signal, channel), False, fft_len, (chan_tag,)) eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), 0, self.tsb_key, True) sink = blocks.tsb_vector_sink_c(fft_len, tsb_key=self.tsb_key) self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, (len(tx_data) / fft_len), self.tsb_key), eq, sink) self.tb.run() rx_data = [(cnst.decision_maker_v((x,)) if (x != 0) else (- 1)) for x in sink.data()[0]] self.assertEqual(tx_data, rx_data) self.assertEqual(len(sink.tags()), 1) tag = sink.tags()[0] self.assertEqual(pmt.symbol_to_string(tag.key), 'ofdm_sync_chan_taps') self.assertComplexTuplesAlmostEqual(list(pmt.c32vector_elements(tag.value)), channel[(- fft_len):], places=1)<|docstring|>Use the simple DFE equalizer.<|endoftext|>
77ed5cb7ec9cd5e8188f2a12d256d44d065dc58b379bbcf79127d8331514fe93
def main(): 'Run all of the tests when run as a module with -m.' suite = tests.get_suite() runner = unittest.TextTestRunner() runner.run(suite)
Run all of the tests when run as a module with -m.
mdx_picture/tests/__main__.py
main
speechkey/mdx_picture
0
python
def main(): suite = tests.get_suite() runner = unittest.TextTestRunner() runner.run(suite)
def main(): suite = tests.get_suite() runner = unittest.TextTestRunner() runner.run(suite)<|docstring|>Run all of the tests when run as a module with -m.<|endoftext|>
e695571a2ac3d1b0383fe90ec6b4d92ff61e1752a9190c1099d334517ea527f4
def add_contact(self, segment_id, contact_id): '\n Add a contact to the segment\n\n :param segment_id: int Segment ID\n :param contact_id: int Contact ID\n :return: dict|str\n ' response = self._client.session.post('{url}/{segment_id}/contact/add/{contact_id}'.format(url=self.endpoint_url, segment_id=segment_id, contact_id=contact_id)) return self.process_response(response)
Add a contact to the segment :param segment_id: int Segment ID :param contact_id: int Contact ID :return: dict|str
mautic/segments.py
add_contact
resumov/python-mautic
18
python
def add_contact(self, segment_id, contact_id): '\n \n ' response = self._client.session.post('{url}/{segment_id}/contact/add/{contact_id}'.format(url=self.endpoint_url, segment_id=segment_id, contact_id=contact_id)) return self.process_response(response)
def add_contact(self, segment_id, contact_id): '\n \n ' response = self._client.session.post('{url}/{segment_id}/contact/add/{contact_id}'.format(url=self.endpoint_url, segment_id=segment_id, contact_id=contact_id)) return self.process_response(response)<|docstring|>Add a contact to the segment :param segment_id: int Segment ID :param contact_id: int Contact ID :return: dict|str<|endoftext|>
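A hypothetical call against the method above, assuming segments is an authenticated instance of the class this method belongs to (both IDs are placeholders):

response = segments.add_contact(segment_id=12, contact_id=34)
# issues POST {endpoint_url}/12/contact/add/34 and returns the processed response (dict|str)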
41781b31ce9ecc80f5326f6e6808907201031b7c5a3cfb0b5cab547a99d8f994
def train_preprocess(partition, file_list, max_len): '\n Return processed data (ndarray) and original file length (list)\n ' corpus = [] pkeys = partition.keys() for fn in file_list: if (fn not in pkeys): logging.critical((fn + ' does not exist in partition')) else: data = partition[fn]['whole_bytes'] corpus.append(data) len_list = None seq = pad_sequences(corpus, maxlen=max_len, truncating='post', padding='post') return (seq, len_list)
Return processed data (ndarray) and original file length (list)
src/utils/preprocess.py
train_preprocess
anandharaju/Echelon_TF2
0
python
def train_preprocess(partition, file_list, max_len): '\n \n ' corpus = [] pkeys = partition.keys() for fn in file_list: if (fn not in pkeys): logging.critical((fn + ' does not exist in partition')) else: data = partition[fn]['whole_bytes'] corpus.append(data) len_list = None seq = pad_sequences(corpus, maxlen=max_len, truncating='post', padding='post') return (seq, len_list)
def train_preprocess(partition, file_list, max_len): '\n \n ' corpus = [] pkeys = partition.keys() for fn in file_list: if (fn not in pkeys): logging.critical((fn + ' does not exist in partition')) else: data = partition[fn]['whole_bytes'] corpus.append(data) len_list = None seq = pad_sequences(corpus, maxlen=max_len, truncating='post', padding='post') return (seq, len_list)<|docstring|>Return processed data (ndarray) and original file length (list)<|endoftext|>
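What the pad_sequences call above does to a ragged corpus, sketched with a toy input (any recent Keras/tf.keras should behave the same way):

from tensorflow.keras.preprocessing.sequence import pad_sequences

corpus = [[1, 2, 3, 4, 5], [6, 7]]
print(pad_sequences(corpus, maxlen=4, truncating='post', padding='post'))
# [[1 2 3 4]     <- truncating='post' drops trailing bytes beyond maxlen
#  [6 7 0 0]]    <- padding='post' zero-fills short sequences at the end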
827ffa743ea17e895b1e0e21c79308b264b5906589541618dd4741fdb7b9e180
def train_preprocess_by_section(spartition, file_list, max_len, sections): '\n Return processed data (ndarray) and original section length (list)\n ' if (sections is None): logging.critical('No sections supplied to process. Check if Q-criterion based selection completed successfully.') corpus = [] pkeys = spartition.keys() for fn in file_list: if (fn not in pkeys): logging.critical((fn + ' does not exist in partition')) else: fjson = spartition[fn] combined = np.zeros((fjson['whole_bytes_size'] if cnst.LINUX_ENV else (2 ** 20))) try: keys = fjson['section_info'].keys() for section in sections: if (section in keys): start = fjson['section_info'][section]['section_bounds']['start_offset'] data = fjson['section_info'][section]['section_data'] combined[start:(start + len(data))] = data corpus.append(combined) except Exception as e: logging.exception('Error in Module: preprocess/process_by_section.') seq = pad_sequences(corpus, maxlen=max_len, padding='post', truncating='post') return (seq, None)
Return processed data (ndarray) and original section length (list)
src/utils/preprocess.py
train_preprocess_by_section
anandharaju/Echelon_TF2
0
python
def train_preprocess_by_section(spartition, file_list, max_len, sections): '\n \n ' if (sections is None): logging.critical('No sections supplied to process. Check if Q-criterion based selection completed successfully.') corpus = [] pkeys = spartition.keys() for fn in file_list: if (fn not in pkeys): logging.critical((fn + ' does not exist in partition')) else: fjson = spartition[fn] combined = np.zeros((fjson['whole_bytes_size'] if cnst.LINUX_ENV else (2 ** 20))) try: keys = fjson['section_info'].keys() for section in sections: if (section in keys): start = fjson['section_info'][section]['section_bounds']['start_offset'] data = fjson['section_info'][section]['section_data'] combined[start:(start + len(data))] = data corpus.append(combined) except Exception as e: logging.exception('Error in Module: preprocess/process_by_section.') seq = pad_sequences(corpus, maxlen=max_len, padding='post', truncating='post') return (seq, None)
def train_preprocess_by_section(spartition, file_list, max_len, sections): '\n \n ' if (sections is None): logging.critical('No sections supplied to process. Check if Q-criterion based selection completed successfully.') corpus = [] pkeys = spartition.keys() for fn in file_list: if (fn not in pkeys): logging.critical((fn + ' does not exist in partition')) else: fjson = spartition[fn] combined = np.zeros((fjson['whole_bytes_size'] if cnst.LINUX_ENV else (2 ** 20))) try: keys = fjson['section_info'].keys() for section in sections: if (section in keys): start = fjson['section_info'][section]['section_bounds']['start_offset'] data = fjson['section_info'][section]['section_data'] combined[start:(start + len(data))] = data corpus.append(combined) except Exception as e: logging.exception('Error in Module: preprocess/process_by_section.') seq = pad_sequences(corpus, maxlen=max_len, padding='post', truncating='post') return (seq, None)<|docstring|>Return processed data (ndarray) and original section length (list)<|endoftext|>
275698f3c81a475e477d8b0e2a5e4e14c22fcbeb87c9d846eb153b5c074d677f
def preprocess(partition, file_list): '\n Return processed data (ndarray) and original file length (list)\n ' a1 = time.time() corpus = [partition[fn[:(- 4)]]['whole_bytes'] for fn in file_list] b1 = time.time() seq = pad_sequences(corpus, maxlen=cnst.MAX_FILE_SIZE_LIMIT, truncating='post', padding='post') c1 = time.time() return seq
Return processed data (ndarray) and original file length (list)
src/utils/preprocess.py
preprocess
anandharaju/Echelon_TF2
0
python
def preprocess(partition, file_list): '\n \n ' a1 = time.time() corpus = [partition[fn[:(- 4)]]['whole_bytes'] for fn in file_list] b1 = time.time() seq = pad_sequences(corpus, maxlen=cnst.MAX_FILE_SIZE_LIMIT, truncating='post', padding='post') c1 = time.time() return seq
def preprocess(partition, file_list): '\n \n ' a1 = time.time() corpus = [partition[fn[:(- 4)]]['whole_bytes'] for fn in file_list] b1 = time.time() seq = pad_sequences(corpus, maxlen=cnst.MAX_FILE_SIZE_LIMIT, truncating='post', padding='post') c1 = time.time() return seq<|docstring|>Return processed data (ndarray) and original file length (list)<|endoftext|>
b899e222924564ff95be7a4af326213c039cd140444e5ef989a87f0b5b1f9c38
def preprocess_by_section_by_samples(file_list, max_len, sections): '\n Return processed data (ndarray) and original section length (list)\n ' if (sections is None): logging.critical('No sections supplied to process. Check if Q-criterion based selection completed successfully.') corpus = [] for fn in file_list: if (not os.path.isfile((((cnst.PKL_SOURCE_PATH + 't2') + cnst.ESC) + fn))): print(fn, 'does not exist') else: try: with open((((cnst.PKL_SOURCE_PATH + 't2') + cnst.ESC) + fn), 'rb') as f: fjson = pickle.load(f) keys = fjson['section_info'].keys() combined = np.zeros(cnst.MAX_FILE_SIZE_LIMIT) for section in sections: if (section in keys): start = fjson['section_info'][section]['section_bounds']['start_offset'] data = fjson['section_info'][section]['section_data'] combined[start:(start + len(data))] = data corpus.append(combined) except Exception as e: logging.exception('Error in Module: preprocess/process_by_section.') corpus = [[byte for byte in doc] for doc in corpus] seq = pad_sequences(corpus, maxlen=max_len, padding='post', truncating='post') return seq
Return processed data (ndarray) and original section length (list)
src/utils/preprocess.py
preprocess_by_section_by_samples
anandharaju/Echelon_TF2
0
python
def preprocess_by_section_by_samples(file_list, max_len, sections): '\n \n ' if (sections is None): logging.critical('No sections supplied to process. Check if Q-criterion based selection completed successfully.') corpus = [] for fn in file_list: if (not os.path.isfile((((cnst.PKL_SOURCE_PATH + 't2') + cnst.ESC) + fn))): print(fn, 'does not exist') else: try: with open((((cnst.PKL_SOURCE_PATH + 't2') + cnst.ESC) + fn), 'rb') as f: fjson = pickle.load(f) keys = fjson['section_info'].keys() combined = np.zeros(cnst.MAX_FILE_SIZE_LIMIT) for section in sections: if (section in keys): start = fjson['section_info'][section]['section_bounds']['start_offset'] data = fjson['section_info'][section]['section_data'] combined[start:(start + len(data))] = data corpus.append(combined) except Exception as e: logging.exception('Error in Module: preprocess/process_by_section.') corpus = [[byte for byte in doc] for doc in corpus] seq = pad_sequences(corpus, maxlen=max_len, padding='post', truncating='post') return seq
def preprocess_by_section_by_samples(file_list, max_len, sections): '\n \n ' if (sections is None): logging.critical('No sections supplied to process. Check if Q-criterion based selection completed successfully.') corpus = [] for fn in file_list: if (not os.path.isfile((((cnst.PKL_SOURCE_PATH + 't2') + cnst.ESC) + fn))): print(fn, 'does not exist') else: try: with open((((cnst.PKL_SOURCE_PATH + 't2') + cnst.ESC) + fn), 'rb') as f: fjson = pickle.load(f) keys = fjson['section_info'].keys() combined = np.zeros(cnst.MAX_FILE_SIZE_LIMIT) for section in sections: if (section in keys): start = fjson['section_info'][section]['section_bounds']['start_offset'] data = fjson['section_info'][section]['section_data'] combined[start:(start + len(data))] = data corpus.append(combined) except Exception as e: logging.exception('Error in Module: preprocess/process_by_section.') corpus = [[byte for byte in doc] for doc in corpus] seq = pad_sequences(corpus, maxlen=max_len, padding='post', truncating='post') return seq<|docstring|>Return processed data (ndarray) and original section length (list)<|endoftext|>
6a8f0bf4fda9ef31891081e596b6dc287a31eeb2da211e89908c5372959f3d0f
def preprocess_by_section(spartition, file_list, sections): '\n Return processed data (ndarray) and original section length (list)\n ' corpus = [] for fn in file_list: fjson = spartition[fn[:(- 4)]] combined = np.zeros(cnst.MAX_FILE_SIZE_LIMIT) keys = fjson['section_info'].keys() for section in sections: if (section in keys): start = fjson['section_info'][section]['section_bounds']['start_offset'] data = fjson['section_info'][section]['section_data'] combined[start:(start + len(data))] = data corpus.append(combined) '\n # Concatenation logic - non-uniform section ends are padded to meet nearest conv. window multiple\n combined = []\n try:\n keys = fjson["section_info"].keys()\n for section in sections:\n if section in keys:\n # print(np.shape(combined))\n # start = fjson["section_info"][section]["section_bounds"]["start_offset"]\n # end = fjson["section_info"][section]["section_bounds"]["end_offset"] + 1\n data = fjson["section_info"][section]["section_data"]\n # print(np.shape(data), np.shape(combined), np.shape(len(data)))\n combined = np.concatenate((combined, data, np.zeros(cnst.CONV_WINDOW_SIZE - (len(data) % cnst.CONV_WINDOW_SIZE))), axis=None)\n if len(data) % cnst.CONV_WINDOW_SIZE > 0:\n combined = np.concatenate((combined, data, np.zeros(cnst.CONV_WINDOW_SIZE - (len(data) % cnst.CONV_WINDOW_SIZE))), axis=None)\n else:\n combined = np.concatenate((combined, data), axis=None)\n if len(combined) > max_len:\n logging.debug("[CAUTION: LOSS_OF_DATA] Combined sections exceeded max sample length by " + str(\n len(combined) - max_len) + " bytes. #Sections:"+str(len(sections)))\n corpus.append(combined)\n ' seq = pad_sequences(corpus, maxlen=cnst.MAX_FILE_SIZE_LIMIT, padding='post', truncating='post') return seq
Return processed data (ndarray) and original section length (list)
src/utils/preprocess.py
preprocess_by_section
anandharaju/Echelon_TF2
0
python
def preprocess_by_section(spartition, file_list, sections): '\n \n ' corpus = [] for fn in file_list: fjson = spartition[fn[:(- 4)]] combined = np.zeros(cnst.MAX_FILE_SIZE_LIMIT) keys = fjson['section_info'].keys() for section in sections: if (section in keys): start = fjson['section_info'][section]['section_bounds']['start_offset'] data = fjson['section_info'][section]['section_data'] combined[start:(start + len(data))] = data corpus.append(combined) '\n # Concatenation logic - non-uniform section ends are padded to meet nearest conv. window multiple\n combined = []\n try:\n keys = fjson["section_info"].keys()\n for section in sections:\n if section in keys:\n # print(np.shape(combined))\n # start = fjson["section_info"][section]["section_bounds"]["start_offset"]\n # end = fjson["section_info"][section]["section_bounds"]["end_offset"] + 1\n data = fjson["section_info"][section]["section_data"]\n # print(np.shape(data), np.shape(combined), np.shape(len(data)))\n combined = np.concatenate((combined, data, np.zeros(cnst.CONV_WINDOW_SIZE - (len(data) % cnst.CONV_WINDOW_SIZE))), axis=None)\n if len(data) % cnst.CONV_WINDOW_SIZE > 0:\n combined = np.concatenate((combined, data, np.zeros(cnst.CONV_WINDOW_SIZE - (len(data) % cnst.CONV_WINDOW_SIZE))), axis=None)\n else:\n combined = np.concatenate((combined, data), axis=None)\n if len(combined) > max_len:\n logging.debug("[CAUTION: LOSS_OF_DATA] Combined sections exceeded max sample length by " + str(\n len(combined) - max_len) + " bytes. #Sections:"+str(len(sections)))\n corpus.append(combined)\n ' seq = pad_sequences(corpus, maxlen=cnst.MAX_FILE_SIZE_LIMIT, padding='post', truncating='post') return seq
def preprocess_by_section(spartition, file_list, sections): '\n \n ' corpus = [] for fn in file_list: fjson = spartition[fn[:(- 4)]] combined = np.zeros(cnst.MAX_FILE_SIZE_LIMIT) keys = fjson['section_info'].keys() for section in sections: if (section in keys): start = fjson['section_info'][section]['section_bounds']['start_offset'] data = fjson['section_info'][section]['section_data'] combined[start:(start + len(data))] = data corpus.append(combined) '\n # Concatenation logic - non-uniform section ends are padded to meet nearest conv. window multiple\n combined = []\n try:\n keys = fjson["section_info"].keys()\n for section in sections:\n if section in keys:\n # print(np.shape(combined))\n # start = fjson["section_info"][section]["section_bounds"]["start_offset"]\n # end = fjson["section_info"][section]["section_bounds"]["end_offset"] + 1\n data = fjson["section_info"][section]["section_data"]\n # print(np.shape(data), np.shape(combined), np.shape(len(data)))\n combined = np.concatenate((combined, data, np.zeros(cnst.CONV_WINDOW_SIZE - (len(data) % cnst.CONV_WINDOW_SIZE))), axis=None)\n if len(data) % cnst.CONV_WINDOW_SIZE > 0:\n combined = np.concatenate((combined, data, np.zeros(cnst.CONV_WINDOW_SIZE - (len(data) % cnst.CONV_WINDOW_SIZE))), axis=None)\n else:\n combined = np.concatenate((combined, data), axis=None)\n if len(combined) > max_len:\n logging.debug("[CAUTION: LOSS_OF_DATA] Combined sections exceeded max sample length by " + str(\n len(combined) - max_len) + " bytes. #Sections:"+str(len(sections)))\n corpus.append(combined)\n ' seq = pad_sequences(corpus, maxlen=cnst.MAX_FILE_SIZE_LIMIT, padding='post', truncating='post') return seq<|docstring|>Return processed data (ndarray) and original section length (list)<|endoftext|>
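The overlay pattern shared by the *_by_section functions above, in isolation: each selected section is copied into a zero-filled buffer at its original start offset, so the byte positions between sections stay zero (buffer size and offsets here are made up):

import numpy as np

combined = np.zeros(16)
start, section_data = 4, [7, 7, 7]                  # one section's start_offset and section_data
combined[start:(start + len(section_data))] = section_data
print(combined.nonzero()[0])                        # [4 5 6] -- only the section's own offsets are filled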
105cb08ebfdbb575bef4211671567307609e675995ebb597ce72116f6c680e4a
def __init__(self, pattern, topleft): u'Constructor.\n ' super(_Operatable, self).__init__(pattern) (self._left, self._top) = topleft
Constructor.
Source/pieces/falling.py
__init__
LucXyMan/starseeker
0
python
def __init__(self, pattern, topleft): u'\n ' super(_Operatable, self).__init__(pattern) (self._left, self._top) = topleft
def __init__(self, pattern, topleft): u'\n ' super(_Operatable, self).__init__(pattern) (self._left, self._top) = topleft<|docstring|>Constructor.<|endoftext|>
c4be8b43fec8790768adb2fe5af148d5fdd6304f8d66f64bffacc13692345e16
@property def left(self): u'Get the left position.\n ' return self._left
Get the left position.
Source/pieces/falling.py
left
LucXyMan/starseeker
0
python
@property def left(self): u'\n ' return self._left
@property def left(self): u'\n ' return self._left<|docstring|>Get the left position.<|endoftext|>
42966b22b6894f87ef552852f6f33ebba7831dd6a410232bbe47d8614431283c
@left.setter def left(self, value): u'Set the left position.\n ' self.clear() self._left = value self._create()
Set the left position.
Source/pieces/falling.py
left
LucXyMan/starseeker
0
python
@left.setter def left(self, value): u'\n ' self.clear() self._left = value self._create()
@left.setter def left(self, value): u'\n ' self.clear() self._left = value self._create()<|docstring|>Set the left position.<|endoftext|>
4291841eacca199d93dc3a124400297650b232ac972624937eb3e364947485c8
@property def top(self): u'Get the top position.\n ' return self._top
Get the top position.
Source/pieces/falling.py
top
LucXyMan/starseeker
0
python
@property def top(self): u'\n ' return self._top
@property def top(self): u'\n ' return self._top<|docstring|>Get the top position.<|endoftext|>
84ee7c721a91eed2f45e7b6bacdc8fae49d0401f5b9aa456f4be7cfac19923b6
@top.setter def top(self, value): u'Set the top position.\n ' self.clear() self._top = value self._create()
Set the top position.
Source/pieces/falling.py
top
LucXyMan/starseeker
0
python
@top.setter def top(self, value): u'\n ' self.clear() self._top = value self._create()
@top.setter def top(self, value): u'\n ' self.clear() self._top = value self._create()<|docstring|>Set the top position.<|endoftext|>
af2d594c199d507fd550f4e83fa4b252b4a5b63f32426b2afb75357b2a77fce7
def __init__(self, pattern, pos=(0, 0), is_virtual=False): u'Constructor.\n ' super(Falling, self).__init__(pattern, pos) self.__is_rested = False self.__rotated = 0 self.__is_commanded = False self.__is_t_spin = False self.__is_virtual = is_virtual self._create()
Constructor.
Source/pieces/falling.py
__init__
LucXyMan/starseeker
0
python
def __init__(self, pattern, pos=(0, 0), is_virtual=False): u'\n ' super(Falling, self).__init__(pattern, pos) self.__is_rested = False self.__rotated = 0 self.__is_commanded = False self.__is_t_spin = False self.__is_virtual = is_virtual self._create()
def __init__(self, pattern, pos=(0, 0), is_virtual=False): u'\n ' super(Falling, self).__init__(pattern, pos) self.__is_rested = False self.__rotated = 0 self.__is_commanded = False self.__is_t_spin = False self.__is_virtual = is_virtual self._create()<|docstring|>Constructor.<|endoftext|>
4514d02500d0a6340afba837f984f726c6c10d90d74d588e615426eb7c81288c
def _create(self): u'Create the blocks.\n ' for block in self._pattern.get_blocks((self.left, self.top), self.__is_virtual): block.piece = self self._blocks.append(block)
Create the blocks.
Source/pieces/falling.py
_create
LucXyMan/starseeker
0
python
def _create(self): u'\n ' for block in self._pattern.get_blocks((self.left, self.top), self.__is_virtual): block.piece = self self._blocks.append(block)
def _create(self): u'\n ' for block in self._pattern.get_blocks((self.left, self.top), self.__is_virtual): block.piece = self self._blocks.append(block)<|docstring|>Create the blocks.<|endoftext|>
d132679a0d6e8547991a78af23928a79eff987ce15e96c88acc4e633ad9891e1
def remove(self, block): u'Remove a block.\n ' if (block in self._blocks[:]): self._blocks.remove(block)
Remove a block.
Source/pieces/falling.py
remove
LucXyMan/starseeker
0
python
def remove(self, block): u'\n ' if (block in self._blocks[:]): self._blocks.remove(block)
def remove(self, block): u'\n ' if (block in self._blocks[:]): self._blocks.remove(block)<|docstring|>Remove a block.<|endoftext|>
387a4dbbcf86a68f486b93048caac060d6486ed91d7329479c73ff27c8e3fe58
def clear(self): u'Clear all blocks.\n ' self._blocks = []
Clear all blocks.
Source/pieces/falling.py
clear
LucXyMan/starseeker
0
python
def clear(self): u'\n ' self._blocks = []
def clear(self): u'\n ' self._blocks = []<|docstring|>Clear all blocks.<|endoftext|>
24c56527fe9ec5aa71d609fc15ee88118bc947da5e0aaf03c6dd9b4d2a547eed
def stamp(self, field): u'Stamp the piece onto the field.\n ' if (not self.__is_rested): field.add(*self._pattern.get_blocks((self.left, self.top), self.__is_virtual)) self.clear()
Stamp the piece onto the field.
Source/pieces/falling.py
stamp
LucXyMan/starseeker
0
python
def stamp(self, field): u'\n ' if (not self.__is_rested): field.add(*self._pattern.get_blocks((self.left, self.top), self.__is_virtual)) self.clear()
def stamp(self, field): u'\n ' if (not self.__is_rested): field.add(*self._pattern.get_blocks((self.left, self.top), self.__is_virtual)) self.clear()<|docstring|>Stamp the piece onto the field.<|endoftext|>
d4426db95877125dbaf1d7cd752226e8071221203e281bcc6605b4458929d98f
def rest(self, field): u'Fix the piece in place.\n ' if (not self.__is_rested): self.stamp(field) self.__is_rested = True
Fix the piece in place.
Source/pieces/falling.py
rest
LucXyMan/starseeker
0
python
def rest(self, field): u'\n ' if (not self.__is_rested): self.stamp(field) self.__is_rested = True
def rest(self, field): u'\n ' if (not self.__is_rested): self.stamp(field) self.__is_rested = True<|docstring|>Fix the piece in place.<|endoftext|>
aec38290ace9c8a31128d8c069b5319150e35bac7aba3881e2634d9fb1cdd1e3
def skip(self): u'Clear the piece and fix it in place.\n ' self.clear() self.__is_rested = True
Clear the piece and fix it in place.
Source/pieces/falling.py
skip
LucXyMan/starseeker
0
python
def skip(self): u'\n ' self.clear() self.__is_rested = True
def skip(self): u'\n ' self.clear() self.__is_rested = True<|docstring|>Clear the piece and fix it in place.<|endoftext|>
5478d69c2f153c834d3cbb59db98080faab195b624c6d1c189717178d72e3775
def move(self, field, vector): u'Move the piece.\n ' if (not self.__is_rested): old_topleft = (self.left, self.top) self.clear() self._left += vector[0] self._top += vector[1] self._create() if self.is_collide(field): self.topleft = old_topleft return False else: self.__is_t_spin = False return True return False
Move the piece.
Source/pieces/falling.py
move
LucXyMan/starseeker
0
python
def move(self, field, vector): u'\n ' if (not self.__is_rested): old_topleft = (self.left, self.top) self.clear() self._left += vector[0] self._top += vector[1] self._create() if self.is_collide(field): self.topleft = old_topleft return False else: self.__is_t_spin = False return True return False
def move(self, field, vector): u'\n ' if (not self.__is_rested): old_topleft = (self.left, self.top) self.clear() self._left += vector[0] self._top += vector[1] self._create() if self.is_collide(field): self.topleft = old_topleft return False else: self.__is_t_spin = False return True return False<|docstring|>Move the piece.<|endoftext|>
4e78b2ef6f2574112d36a65bdcd3e18d214b0e2d56dd6a273dcf2f0a422aab52
def slide(self, field, is_right): u'Move left or right.\n ' self.__is_commanded = True return (self.move(field, _const.RIGHT) if is_right else self.move(field, _const.LEFT))
Move left or right.
Source/pieces/falling.py
slide
LucXyMan/starseeker
0
python
def slide(self, field, is_right): u'\n ' self.__is_commanded = True return (self.move(field, _const.RIGHT) if is_right else self.move(field, _const.LEFT))
def slide(self, field, is_right): u'\n ' self.__is_commanded = True return (self.move(field, _const.RIGHT) if is_right else self.move(field, _const.LEFT))<|docstring|>Move left or right.<|endoftext|>
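move() and slide() above follow a speculative-move pattern: mutate the position, rebuild the blocks, and roll back through the topleft setter (defined in a parent class not shown here) if the new layout collides. A stripped-down sketch of that control flow, assuming a piece object with the same methods (not the project's actual code):

def try_move(piece, field, dx, dy):
    old = (piece.left, piece.top)   # remember the current position
    piece.clear()                   # drop the old blocks
    piece._left += dx
    piece._top += dy
    piece._create()                 # rebuild blocks at the candidate position
    if piece.is_collide(field):
        piece.topleft = old         # setter restores the position and rebuilds
        return False
    return True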