| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 31-13.1k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q1500
|
column_start_to_end
|
train
|
def column_start_to_end(data, column, start_idx, end_idx):
"""Return a list of numeric data entries in the given column from the starting
index to the ending index. This can list can be compiled over one or more
DataFrames.
:param data: a list of DataFrames to extract data in one column from
:type data: Pandas.DataFrame list
:param column: a column index
:type column: int
:param start_idx: the index of the starting row
:type start_idx: int
:param start_idx: the index of the ending row
:type start_idx: int
:return: a list of data from the given column
:rtype: float list
"""
if len(data) == 1:
result = list(pd.to_numeric(data[0].iloc[start_idx:end_idx, column]))
else:
|
python
|
{
"resource": ""
}
|
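The `text` cell above is truncated at the `else:` branch. A minimal sketch of one plausible completion, assuming the multi-DataFrame case simply spans the column across consecutive files (the spanning logic is an assumption, not taken from the dataset):

import pandas as pd

def column_start_to_end(data, column, start_idx, end_idx):
    """Return numeric entries in `column` from start_idx to end_idx,
    compiled across one or more DataFrames."""
    if len(data) == 1:
        return list(pd.to_numeric(data[0].iloc[start_idx:end_idx, column]))
    # assumed: start in the first file, pass through the middle files whole,
    # and stop at end_idx in the last file
    result = list(pd.to_numeric(data[0].iloc[start_idx:, column]))
    for df in data[1:-1]:
        result += list(pd.to_numeric(df.iloc[:, column]))
    result += list(pd.to_numeric(data[-1].iloc[:end_idx, column]))
    return result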
q1501
|
get_data_by_state
|
train
|
def get_data_by_state(path, dates, state, column):
"""Reads a ProCoDA file and extracts the time and data column for each
iteration ofthe given state.
Note: column 0 is time, the first data column is column 1.
:param path: The path to the folder containing the ProCoDA data file(s), defaults to the current directory
:type path: string
:param dates: A single date or list of dates for which data was recorded, formatted "M-D-YYYY"
:type dates: string or string list
:param state: The state ID number for which data should be plotted
:type state: int
:param column: The integer index of the column that you want to extract OR the header of the column that you want to extract
:type column: int or string
:return: A list of lists of the time and data columns extracted for each iteration of the state. For example, if "data" is the output, data[i][:,0] gives the time column and data[i][:,1] gives the data column for the ith iteration of the given state and column. data[i][0] would give the first [time, data] pair.
:type: list of lists of lists
:Examples:
.. code-block:: python
data = get_data_by_state(path='/Users/.../ProCoDA Data/', dates=["6-19-2013", "6-20-2013"], state=1, column=28)
"""
data_agg = []
day = 0
first_day = True
overnight = False
extension = ".xls"
if path[-1] != '/':
path += '/'
if not isinstance(dates, list):
dates = [dates]
for d in dates:
state_file = path + "statelog " + d + extension
data_file = path + "datalog " + d + extension
states = pd.read_csv(state_file, delimiter='\t')
data = pd.read_csv(data_file, delimiter='\t')
states = np.array(states)
data = np.array(data)
# get the start and end times for the state
state_start_idx = states[:, 1] == state
state_start = states[state_start_idx, 0]
state_end_idx = np.append([False], state_start_idx[0:-1])
state_end = states[state_end_idx, 0]
if overnight:
state_start = np.insert(state_start, 0, 0)
state_end = np.insert(state_end, 0, states[0, 0])
if state_start_idx[-1]:
state_end = np.append(state_end, data[-1, 0])
# get the corresponding indices in the data array
data_start = []
data_end =
|
python
|
{
"resource": ""
}
|
q1502
|
column_of_time
|
train
|
def column_of_time(path, start, end=-1):
"""This function extracts the column of times from a ProCoDA data file.
:param path: The file path of the ProCoDA data file. If the file is in the working directory, then the file name is sufficient.
:type path: string
:param start: Index of first row of data to extract from the data file
:type start: int
:param end: Index of last row of data to extract from the data. Defaults to last row
:type end: int
:return: Experimental times starting at 0 day with units of days.
:rtype: numpy.array
:Examples:
.. code-block:: python
|
python
|
{
"resource": ""
}
|
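The body of `column_of_time` is cut off. A sketch under the assumptions stated in the surrounding docstrings: ProCoDA logs are tab-delimited, column 0 holds time in days, and the result is restarted at 0 (pint's `u.day` would be attached in a unit-aware version):

import numpy as np
import pandas as pd

def column_of_time(path, start, end=-1):
    """Extract the time column of a ProCoDA data file, restarted at 0 days."""
    df = pd.read_csv(path, delimiter='\t')            # tab-delimited log file
    if end == -1:
        end = len(df)
    times = np.array(pd.to_numeric(df.iloc[start:end, 0]))
    return times - times[0]                           # multiply by u.day for units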
q1503
|
column_of_data
|
train
|
def column_of_data(path, start, column, end="-1", units=""):
"""This function extracts a column of data from a ProCoDA data file.
Note: Column 0 is time. The first data column is column 1.
:param path: The file path of the ProCoDA data file. If the file is in the working directory, then the file name is sufficient.
:type path: string
:param start: Index of first row of data to extract from the data file
:type start: int
:param end: Index of last row of data to extract from the data. Defaults to last row
:type end: int, optional
:param column: Index of the column that you want to extract OR name of the column header that you want to extract
:type column: int or string
:param units: The units you want to apply to the data, e.g. 'mg/L'. Defaults to "" (dimensionless)
:type units: string, optional
:return: Experimental data with the units applied.
:rtype: numpy.array
:Examples:
|
python
|
{
"resource": ""
}
|
q1504
|
notes
|
train
|
def notes(path):
"""This function extracts any experimental notes from a ProCoDA data file.
:param path: The file path of the ProCoDA data file. If the file is in the working directory, then the file name is sufficient.
:type path: string
:return: The rows of the data file that contain text notes inserted during the experiment. Use this to identify the section of the data file that you want to extract.
:rtype: pandas.DataFrame
|
python
|
{
"resource": ""
}
|
q1505
|
write_calculations_to_csv
|
train
|
def write_calculations_to_csv(funcs, states, columns, path, headers, out_name,
metaids=[], extension=".xls"):
"""Writes each output of the given functions on the given states and data
columns to a new column in the specified output file.
Note: Column 0 is time. The first data column is column 1.
:param funcs: A function or list of functions which will be applied in order to the data. If only one function is given it is applied to all the states/columns
:type funcs: function or function list
:param states: The state ID numbers for which data should be extracted. List should be in order of calculation or if only one state is given then it will be used for all the calculations
:type states: string or string list
:param columns: The index of a column, the header of a column, a list of indexes, OR a list of headers of the column(s) that you want to apply calculations to
:type columns: int, string, int list, or string list
:param path: Path to your ProCoDA metafile (must be tab-delimited)
:type path: string
:param headers: List of the desired header for each calculation, in order
:type headers: string list
:param out_name: Desired name for the output file. Can include a relative path
:type out_name: string
:param metaids: A list of the experiment IDs you'd like to analyze from the metafile
:type metaids: string list, optional
:param extension: The file extension of the tab delimited file. Defaults to ".xls" if no argument is passed in
:type extension: string, optional
:requires: funcs, states, columns, and headers are all of the same length if they are lists. Some being lists and some single values are okay.
:return: out_name.csv
|
python
|
{
"resource": ""
}
|
q1506
|
OD
|
train
|
def OD(ND):
"""Return a pipe's outer diameter according to its nominal diameter.
The pipe schedule is not required here because all of the pipes of a
given nominal diameter have the same outer diameter.
Steps:
1. Find the index of the closest nominal diameter.
(Should this be changed to find the next largest ND?)
2. Take the values
|
python
|
{
"resource": ""
}
|
q1507
|
ID_sch40
|
train
|
def ID_sch40(ND):
"""Return the inner diameter for schedule 40 pipes.
The wall thickness for these pipes is in the pipedb.
Take the values of the array, subtract the ND, take the absolute
value, find the index of the minimum
|
python
|
{
"resource": ""
}
|
q1508
|
ND_all_available
|
train
|
def ND_all_available():
"""Return an array of available nominal diameters.
NDs available are those commonly used as based on the 'Used' column
in the pipedb.
"""
ND_all_available = []
for i in range(len(pipedb['NDinch'])):
if
|
python
|
{
"resource": ""
}
|
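The loop condition of `ND_all_available` is missing. A hedged completion; the 'Used' flag and 'NDinch' column names, and the inch units, are assumptions inferred from the docstring and the `pipedb` lookups elsewhere in this module:

import numpy as np

def ND_all_available():
    """Return an array of the nominal diameters flagged as commonly used."""
    nds = []
    for i in range(len(pipedb['NDinch'])):
        if pipedb['Used'][i] == 1:        # assumed flag column per the docstring
            nds.append(pipedb['NDinch'][i])
    return np.array(nds)                  # * u.inch if pint units are in use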
q1509
|
ID_SDR_all_available
|
train
|
def ID_SDR_all_available(SDR):
"""Return an array of inner diameters with a given SDR.
IDs available are those commonly used based on
|
python
|
{
"resource": ""
}
|
q1510
|
ND_SDR_available
|
train
|
def ND_SDR_available(ID, SDR):
""" Return an available ND given an ID and a schedule.
Takes the values of the array, compares to the ID, and finds the index
of the first value greater or equal.
"""
|
python
|
{
"resource": ""
}
|
q1511
|
flow_pipeline
|
train
|
def flow_pipeline(diameters, lengths, k_minors, target_headloss,
nu=con.WATER_NU, pipe_rough=mats.PVC_PIPE_ROUGH):
"""
This function takes a single pipeline with multiple sections, each potentially with different diameters,
lengths and minor loss coefficients and determines the flow rate for a given headloss.
:param diameters: list of diameters, where the i_th diameter corresponds to the i_th pipe section
:type diameters: numpy.ndarray
:param lengths: list of lengths, where the i_th length corresponds to the i_th pipe section
:type lengths: numpy.ndarray
:param k_minors: list of minor loss coefficients, where the i_th coefficient corresponds to the i_th pipe section
:type k_minors: numpy.ndarray
:param target_headloss: a single headloss describing the total headloss through the system
:type target_headloss: float
:param nu: The fluid dynamic viscosity of the fluid. Defaults to water at room temperature (1 * 10**-6 * m**2/s)
:type nu: float
:param pipe_rough: The pipe roughness. Defaults to PVC roughness.
:type pipe_rough: float
:return: the total flow through the system
:rtype: float
"""
# Ensure all the arguments except total headloss are the same length
#TODO
# Total number of pipe lengths
n = diameters.size
# Start with a flow rate guess based on the flow through a single pipe section
flow = pc.flow_pipe(diameters[0], target_headloss, lengths[0],
|
python
|
{
"resource": ""
}
|
q1512
|
LFOM.n_rows
|
train
|
def n_rows(self):
"""This equation states that the open area corresponding to one row
can be set equal to two orifices of diameter=row height. If there
are more than two orifices per row at the top of the LFOM then there
are more orifices than are convenient to drill and more than
necessary for good accuracy. Thus this relationship can be used to
increase the spacing between the rows and thus increase the diameter
of the orifices. This spacing function also sets the lower depth on
the high flow rate LFOM with no accurate flows below a depth equal
to the first row height.
But it might be
|
python
|
{
"resource": ""
}
|
q1513
|
LFOM.area_pipe_min
|
train
|
def area_pipe_min(self):
"""The minimum cross-sectional area of the LFOM pipe that assures
a safety factor."""
|
python
|
{
"resource": ""
}
|
q1514
|
LFOM.nom_diam_pipe
|
train
|
def nom_diam_pipe(self):
"""The nominal diameter of the LFOM pipe"""
ID
|
python
|
{
"resource": ""
}
|
q1515
|
LFOM.orifice_diameter
|
train
|
def orifice_diameter(self):
"""The actual orifice diameter. We don't let the diameter extend
beyond its row space. """
|
python
|
{
"resource": ""
}
|
q1516
|
LFOM.flow_ramp
|
train
|
def flow_ramp(self):
"""An equally spaced array representing flow at each row."""
|
python
|
{
"resource": ""
}
|
q1517
|
LFOM.n_orifices_per_row
|
train
|
def n_orifices_per_row(self):
"""Calculate number of orifices at each level given an orifice
diameter.
"""
# H is distance from the bottom of the next row of orifices to the
# center of the current row of orifices
H = self.b_rows - 0.5*self.orifice_diameter
flow_per_orifice = pc.flow_orifice_vert(self.orifice_diameter, H, con.VC_ORIFICE_RATIO)
n = np.zeros(self.n_rows)
for i in range(self.n_rows):
# calculate the ideal number of orifices at the current row without
# constraining to an integer
|
python
|
{
"resource": ""
}
|
q1518
|
LFOM.error_per_row
|
train
|
def error_per_row(self):
"""This function calculates the error of the design based on the
differences between the predicted flow rate
and the actual flow rate through the LFOM."""
FLOW_lfom_error = np.zeros(self.n_rows)
for i in range(self.n_rows):
actual_flow =
|
python
|
{
"resource": ""
}
|
q1519
|
get_drill_bits_d_imperial
|
train
|
def get_drill_bits_d_imperial():
"""Return array of possible drill diameters in imperial."""
step_32nd = np.arange(0.03125, 0.25, 0.03125)
step_8th = np.arange(0.25, 1.0, 0.125)
step_4th = np.arange(1.0, 2.0, 0.25)
maximum = [2.0]
|
python
|
{
"resource": ""
}
|
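Only the final combination of the step arrays is missing from `get_drill_bits_d_imperial`; a sketch of the likely return (whether pint units are attached is an assumption):

import numpy as np

def get_drill_bits_d_imperial():
    """Drill diameters in inches: 1/32" steps to 1/4", 1/8" steps to 1",
    1/4" steps to 2"."""
    step_32nd = np.arange(0.03125, 0.25, 0.03125)
    step_8th = np.arange(0.25, 1.0, 0.125)
    step_4th = np.arange(1.0, 2.0, 0.25)
    maximum = [2.0]
    return np.concatenate((step_32nd, step_8th, step_4th, maximum))  # * u.inch if units are in use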
q1520
|
get_drill_bits_d_metric
|
train
|
def get_drill_bits_d_metric():
"""Return array of possible drill diameters in metric."""
return np.concatenate((np.arange(1.0, 10.0, 0.1),
np.arange(10.0, 18.0, 0.5),
|
python
|
{
"resource": ""
}
|
q1521
|
Variable_C_Stock.C_stock
|
train
|
def C_stock(self):
"""Return the required concentration of material in the stock given a
reactor's desired system flow rate, system concentration, and
|
python
|
{
"resource": ""
}
|
q1522
|
Variable_Q_Stock.Q_stock
|
train
|
def Q_stock(self):
"""Return the required flow rate from the stock of material given
a reactor's desired system flow rate, system concentration, and
|
python
|
{
"resource": ""
}
|
q1523
|
Variable_Q_Stock.rpm
|
train
|
def rpm(self, vol_per_rev):
"""Return the pump speed required for the reactor's stock of material
given the volume of fluid output per revolution by the stock's pump.
:param vol_per_rev: Volume of fluid pumped per revolution (dependent on pump and tubing)
:type vol_per_rev: float
:return: Pump speed for the
|
python
|
{
"resource": ""
}
|
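`rpm` is cut off mid-docstring. A sketch, assuming `Q_stock` is the stock flow-rate method defined on the same class (see q1522) and that the project's pint registry defines `u.rev`:

def rpm(self, vol_per_rev):
    """Pump speed (rev/min) needed to deliver the stock flow rate."""
    # u.rev is assumed to be registered in the project's unit registry
    return (self.Q_stock() / vol_per_rev).to(u.rev / u.min)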
q1524
|
Flocculator.draw
|
train
|
def draw(self):
"""Draw the Onshape flocculator model based off of this object."""
from onshapepy import Part
CAD = Part(
'https://cad.onshape.com/documents/b4cfd328713460beeb3125ac/w/3928b5c91bb0a0be7858d99e/e/6f2eeada21e494cebb49515f'
)
CAD.params = {
'channel_L': self.channel_L,
|
python
|
{
"resource": ""
}
|
q1525
|
max_linear_flow
|
train
|
def max_linear_flow(Diam, HeadlossCDC, Ratio_Error, KMinor):
"""Return the maximum flow that will meet the linear requirement.
Maximum flow that can be put through a tube of a given diameter without
exceeding the allowable deviation from linear head
|
python
|
{
"resource": ""
}
|
q1526
|
_len_tube
|
train
|
def _len_tube(Flow, Diam, HeadLoss, conc_chem, temp, en_chem, KMinor):
"""Length of tube required to get desired head loss at maximum flow based on
the Hagen-Poiseuille equation."""
num1 = pc.gravity.magnitude * HeadLoss * np.pi * (Diam**4)
denom1 = 128 * viscosity_kinematic_chem(conc_chem, temp, en_chem)
|
python
|
{
"resource": ""
}
|
q1527
|
_length_cdc_tube_array
|
train
|
def _length_cdc_tube_array(FlowPlant, ConcDoseMax, ConcStock,
DiamTubeAvail, HeadlossCDC, temp, en_chem, KMinor):
"""Calculate the length of each diameter tube given the corresponding flow rate
and coagulant. Choose the tube that is shorter than the maximum length
|
python
|
{
"resource": ""
}
|
q1528
|
len_cdc_tube
|
train
|
def len_cdc_tube(FlowPlant, ConcDoseMax, ConcStock,
DiamTubeAvail, HeadlossCDC, LenCDCTubeMax, temp,
en_chem, KMinor):
"""The length of tubing may be longer than the max specified if the stock
concentration is too high to give a viable solution with the specified
length of tubing."""
index = i_cdc(FlowPlant, ConcDoseMax, ConcStock,
DiamTubeAvail, HeadlossCDC, LenCDCTubeMax, temp,
|
python
|
{
"resource": ""
}
|
q1529
|
dens_alum_nanocluster
|
train
|
def dens_alum_nanocluster(coag):
"""Return the density of the aluminum in the nanocluster.
This is useful for determining the volume of nanoclusters
given a concentration of aluminum.
"""
|
python
|
{
"resource": ""
}
|
q1530
|
dens_pacl_solution
|
train
|
def dens_pacl_solution(ConcAluminum, temp):
"""Return the density of the PACl solution.
From Stock Tank Mixing report Fall 2013:
https://confluence.cornell.edu/download/attachments/137953883/20131213_Research_Report.pdf
"""
|
python
|
{
"resource": ""
}
|
q1531
|
particle_number_concentration
|
train
|
def particle_number_concentration(ConcMat, material):
"""Return the number of particles in suspension.
:param ConcMat: Concentration of the material
:type ConcMat: float
:param material: The material in solution
|
python
|
{
"resource": ""
}
|
q1532
|
sep_dist_clay
|
train
|
def sep_dist_clay(ConcClay, material):
"""Return the separation distance between clay
|
python
|
{
"resource": ""
}
|
q1533
|
num_nanoclusters
|
train
|
def num_nanoclusters(ConcAluminum, coag):
"""Return the number of Aluminum nanoclusters."""
|
python
|
{
"resource": ""
}
|
q1534
|
frac_vol_floc_initial
|
train
|
def frac_vol_floc_initial(ConcAluminum, ConcClay, coag, material):
"""Return the volume fraction of flocs initially present, accounting for both suspended particles and coagulant precipitates.
:param ConcAluminum: Concentration of aluminum in solution
:type ConcAluminum: float
:param ConcClay: Concentration of particle in suspension
:type ConcClay: float
:param coag: Type of coagulant in solution
:type coag: float
:param material:
|
python
|
{
"resource": ""
}
|
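The return expression of `frac_vol_floc_initial` is truncated. A sketch; `conc_precipitate`, `coag.PrecipDensity`, and `material.Density` are assumed helper and attribute names consistent with the neighbouring floc_model functions:

def frac_vol_floc_initial(ConcAluminum, ConcClay, coag, material):
    """Initial floc volume fraction: coagulant-precipitate volume plus
    suspended-particle volume per unit volume of suspension."""
    return (conc_precipitate(ConcAluminum, coag) / coag.PrecipDensity
            + ConcClay / material.Density)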
q1535
|
num_coll_reqd
|
train
|
def num_coll_reqd(DIM_FRACTAL, material, DiamTarget):
"""Return the number of doubling collisions required.
Calculates the number of doubling collisions required to produce
a floc of
|
python
|
{
"resource": ""
}
|
q1536
|
sep_dist_floc
|
train
|
def sep_dist_floc(ConcAluminum, ConcClay, coag, material,
DIM_FRACTAL, DiamTarget):
"""Return separation distance as a function of floc size."""
return (material.Diameter
* (np.pi/(6
* frac_vol_floc_initial(ConcAluminum, ConcClay,
|
python
|
{
"resource": ""
}
|
q1537
|
frac_vol_floc
|
train
|
def frac_vol_floc(ConcAluminum, ConcClay, coag, DIM_FRACTAL,
material, DiamTarget):
"""Return the floc volume fraction."""
return (frac_vol_floc_initial(ConcAluminum,
|
python
|
{
"resource": ""
}
|
q1538
|
dens_floc_init
|
train
|
def dens_floc_init(ConcAluminum, ConcClay, coag, material):
"""Return the density of the initial floc.
Initial floc is made primarily of the primary colloid and nanoglobs.
"""
return (conc_floc(ConcAluminum, ConcClay,
|
python
|
{
"resource": ""
}
|
q1539
|
ratio_area_clay_total
|
train
|
def ratio_area_clay_total(ConcClay, material, DiamTube, RatioHeightDiameter):
"""Return the surface area of clay normalized by total surface area.
Total surface area is a combination of clay and reactor wall
surface areas. This function is used to estimate how much coagulant
actually goes to the clay.
:param ConcClay: Concentration of clay in suspension
:type ConcClay: float
:param material: Type of clay in suspension, e.g. floc_model.Clay
:type material: floc_model.Material
|
python
|
{
"resource": ""
}
|
q1540
|
gamma_coag
|
train
|
def gamma_coag(ConcClay, ConcAluminum, coag, material,
DiamTube, RatioHeightDiameter):
"""Return the coverage of clay with nanoglobs.
This function accounts for loss to the tube flocculator walls
and a poisson distribution on the clay given random hits by the
nanoglobs. The poisson distribution results in the coverage only
gradually approaching full coverage as coagulant dose increases.
:param ConcClay: Concentration of clay in suspension
:type ConcClay: float
:param ConcAluminum: Concentration of aluminum in solution
:type ConcAluminum: float
:param coag: Type of coagulant in solution, e.g. floc_model.PACl
:type coag: floc_model.Material
:param material: Type of clay in suspension, e.g. floc_model.Clay
:type material: floc_model.Material
:param DiamTube: Diameter of flocculator tube (assumes tube flocculator for calculation of reactor surface area)
:type DiamTube: float
:param RatioHeightDiameter: Dimensionless ratio of clay height to clay diameter
:type RatioHeightDiameter: float
:return: Fraction of
|
python
|
{
"resource": ""
}
|
q1541
|
gamma_humic_acid_to_coag
|
train
|
def gamma_humic_acid_to_coag(ConcAl, ConcNatOrgMat, NatOrgMat, coag):
"""Return the fraction of the coagulant that is coated with humic acid.
:param ConcAl: Concentration of aluminum in solution
:type ConcAl: float
:param ConcNatOrgMat: Concentration of natural organic matter in solution
:type ConcNatOrgMat: float
:param NatOrgMat: type of natural organic matter, e.g. floc_model.HumicAcid
:type NatOrgMat: floc_model.Material
:param coag: Type of coagulant in solution, e.g. floc_model.PACl
:type coag: floc_model.Material
|
python
|
{
"resource": ""
}
|
q1542
|
pacl_term
|
train
|
def pacl_term(DiamTube, ConcClay, ConcAl, ConcNatOrgMat, NatOrgMat,
coag, material, RatioHeightDiameter):
"""Return the fraction of the surface area that is covered with coagulant
that is not covered with humic acid.
:param DiamTube: Diameter of the dosing tube
:type Diamtube: float
:param ConcClay: Concentration of clay in solution
:type ConcClay: float
:param ConcAl: Concentration of aluminum in solution
:type ConcAl: float
:param ConcNatOrgMat: Concentration of natural organic matter in solution
:type ConcNatOrgMat: float
:param NatOrgMat: type of natural organic matter, e.g. floc_model.HumicAcid
:type NatOrgMat: floc_model.Material
:param coag: Type of coagulant in solution, e.g. floc_model.PACl
:type coag: floc_model.Material
:param material: Type of clay in suspension, e.g. floc_model.Clay
:type material: floc_model.Material
:param RatioHeightDiameter: Dimensionless ratio of clay height to
|
python
|
{
"resource": ""
}
|
q1543
|
dens_floc
|
train
|
def dens_floc(ConcAl, ConcClay, DIM_FRACTAL, DiamTarget, coag, material, Temp):
"""Calculate floc density as a function of size."""
WaterDensity = pc.density_water(Temp).magnitude
return ((dens_floc_init(ConcAl, ConcClay, coag, material).magnitude
|
python
|
{
"resource": ""
}
|
q1544
|
vel_term_floc
|
train
|
def vel_term_floc(ConcAl, ConcClay, coag, material, DIM_FRACTAL,
DiamTarget, Temp):
"""Calculate floc terminal velocity."""
WaterDensity = pc.density_water(Temp).magnitude
return (((pc.gravity.magnitude * material.Diameter**2)
/ (18 * PHI_FLOC * pc.viscosity_kinematic(Temp).magnitude)
)
|
python
|
{
"resource": ""
}
|
q1545
|
diam_floc_vel_term
|
train
|
def diam_floc_vel_term(ConcAl, ConcClay, coag, material,
DIM_FRACTAL, VelTerm, Temp):
"""Calculate floc diamter as a function of terminal velocity."""
WaterDensity = pc.density_water(Temp).magnitude
return (material.Diameter * (((18 * VelTerm * PHI_FLOC
* pc.viscosity_kinematic(Temp).magnitude
|
python
|
{
"resource": ""
}
|
q1546
|
time_col_laminar
|
train
|
def time_col_laminar(EnergyDis, Temp, ConcAl, ConcClay, coag, material,
DiamTarget, DiamTube, DIM_FRACTAL, RatioHeightDiameter):
"""Calculate single collision time for laminar flow mediated collisions.
Calculated as a function of floc size.
"""
return (((1/6) * ((6/np.pi)**(1/3))
* frac_vol_floc_initial(ConcAl, ConcClay, coag, material) ** (-2/3)
* (pc.viscosity_kinematic(Temp).magnitude /
|
python
|
{
"resource": ""
}
|
q1547
|
time_col_turbulent
|
train
|
def time_col_turbulent(EnergyDis, ConcAl, ConcClay, coag, material,
DiamTarget, DIM_FRACTAL):
"""Calculate single collision time for turbulent flow mediated collisions.
Calculated as a function of floc size.
"""
return((1/6) * (6/np.pi)**(1/9) * EnergyDis**(-1/3) * DiamTarget**(2/3)
|
python
|
{
"resource": ""
}
|
q1548
|
diam_kolmogorov
|
train
|
def diam_kolmogorov(EnergyDis, Temp, ConcAl, ConcClay, coag, material,
DIM_FRACTAL):
"""Return the size of the floc with separation distances equal to
the Kolmogorov length and the inner viscous length scale.
"""
return (material.Diameter
* ((eta_kolmogorov(EnergyDis, Temp).magnitude / material.Diameter)
|
python
|
{
"resource": ""
}
|
q1549
|
dean_number
|
train
|
def dean_number(PlantFlow, IDTube, RadiusCoil, Temp):
"""Return the Dean Number.
The Dean Number is a dimensionless parameter that is the unfortunate
combination of Reynolds and tube curvature. It would have been better
to keep the Reynolds number and
|
python
|
{
"resource": ""
}
|
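The body of `dean_number` is cut off. The standard definition, sketched with this module's `pc` physchem helpers (`pc.re_pipe` with a kinematic-viscosity argument is an assumption):

def dean_number(PlantFlow, IDTube, RadiusCoil, Temp):
    """Dean number: the Reynolds number scaled by the square root of the
    tube-to-coil curvature ratio."""
    re = pc.re_pipe(PlantFlow, IDTube, pc.viscosity_kinematic(Temp))
    return re * (IDTube / (2 * RadiusCoil)) ** 0.5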
q1550
|
g_coil
|
train
|
def g_coil(FlowPlant, IDTube, RadiusCoil, Temp):
"""We need a reference for this.
Karen's thesis likely has this equation and the reference.
"""
return (g_straight(FlowPlant, IDTube).magnitude
* (1 + 0.033 *
|
python
|
{
"resource": ""
}
|
q1551
|
g_time_res
|
train
|
def g_time_res(FlowPlant, IDTube, RadiusCoil, LengthTube, Temp):
"""G Residence Time calculated for a coiled tube flocculator."""
return (g_coil(FlowPlant, IDTube,
|
python
|
{
"resource": ""
}
|
q1552
|
Chemical.define_Precip
|
train
|
def define_Precip(self, diameter, density, molecweight, alumMPM):
"""Define a precipitate for the chemical.
:param diameter: Diameter of the precipitate in particulate form
:type diameter: float
:param density: Density of the material (mass/volume)
:type density: float
:param molecweight: Molecular weight of the material (mass/mole)
|
python
|
{
"resource": ""
}
|
q1553
|
Plant.ent_tank_a
|
train
|
def ent_tank_a(self):
"""Calculate the planview area of the entrance tank, given the volume of
the flocculator.
:returns: The planview area of the entrance tank.
:rtype: float * u.m ** 2
"""
# first guess planview area
a_new = 1 * u.m**2
a_ratio = 2 # set to >1+tolerance to start while loop
tolerance = 0.01
a_floc_pv = (
self.floc.vol /
(self.floc.downstream_H + (self.floc.HL / 2))
)
|
python
|
{
"resource": ""
}
|
q1554
|
n_lfom_rows
|
train
|
def n_lfom_rows(FLOW,HL_LFOM):
"""This equation states that the open area corresponding to one row can be
set equal to two orifices of diameter=row height. If there are more than
two orifices per row at the top of the LFOM then there are more orifices
than are convenient to drill and more than necessary for good accuracy.
Thus this relationship can be used to increase the spacing between the
rows and thus increase the diameter of the orifices. This spacing function
also sets the lower depth on the high flow rate LFOM with no accurate
flows below a depth equal to the first row height.
But it might be better to always set the number of rows to 10.
The challenge is to figure out a reasonable system of constraints that
reliably returns a valid solution.
"""
N_estimated
|
python
|
{
"resource": ""
}
|
q1555
|
flow_lfom_actual
|
train
|
def flow_lfom_actual(FLOW,HL_LFOM,drill_bits,Row_Index_Submerged,N_LFOM_Orifices):
"""Calculates the flow for a given number of submerged rows of orifices
harray is the distance from the water level to the center of the orifices
when the water is at the max level
"""
D_LFOM_Orifices=orifice_diameter(FLOW, HL_LFOM, drill_bits).magnitude
row_height=dist_center_lfom_rows(FLOW, HL_LFOM).magnitude
harray = (np.linspace(row_height, HL_LFOM, n_lfom_rows(FLOW, HL_LFOM))) - 0.5 * D_LFOM_Orifices
FLOW_new = 0
for i in
|
python
|
{
"resource": ""
}
|
q1556
|
round_sf
|
train
|
def round_sf(number, digits):
"""Returns inputted value rounded to number of significant figures desired.
:param number: Value to be rounded
:type number: float
:param digits: number of significant digits to be rounded to.
:type digits: int
"""
units = None
try:
num = number.magnitude
units = number.units
except AttributeError:
num = number
try:
if (units != None):
rounded_num = round(num, digits - int(floor(log10(abs(num)))) - 1) * units
|
python
|
{
"resource": ""
}
|
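The excerpt of `round_sf` stops inside the nested try blocks. A self-contained sketch of the same significant-figure rounding; the zero guard is an addition, since the excerpt does not show how 0 is handled:

from math import floor, log10

def round_sf(number, digits):
    """Round a value (optionally a pint quantity) to `digits` significant figures."""
    units = None
    try:
        num = number.magnitude
        units = number.units
    except AttributeError:
        num = number
    if num == 0:
        rounded = 0.0     # log10(0) is undefined; assumed behaviour
    else:
        rounded = round(num, digits - int(floor(log10(abs(num)))) - 1)
    return rounded * units if units is not None else rounded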
q1557
|
stepceil_with_units
|
train
|
def stepceil_with_units(param, step, unit):
"""This function returns the smallest multiple of 'step' greater than or
equal to 'param' and outputs the result in Pint units.
This function is unit-aware and works without requiring translation
so
|
python
|
{
"resource": ""
}
|
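A sketch of `stepceil_with_units`, assuming `param` is a pint quantity and `step` a plain number; taking the ceiling of the ratio in the target unit yields the smallest qualifying multiple:

import math

def stepceil_with_units(param, step, unit):
    """Smallest multiple of `step * unit` greater than or equal to `param`."""
    n = math.ceil(param.to(unit).magnitude / step)  # ratio as a plain float
    return n * step * unit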
q1558
|
list_handler
|
train
|
def list_handler(HandlerResult="nparray"):
"""Wraps a function to handle list inputs."""
def decorate(func):
def wrapper(*args, **kwargs):
"""Run through the wrapped function once for each array element.
:param HandlerResult: output type. Defaults to numpy arrays.
"""
sequences = []
enumsUnitCheck = enumerate(args)
argsList = list(args)
#This for loop identifies pint unit objects and strips them
#of their units.
for num, arg in enumsUnitCheck:
if type(arg) == type(1 * u.m):
argsList[num] = arg.to_base_units().magnitude
enumsUnitless = enumerate(argsList)
#This for loop identifies arguments that are sequences and
#adds their index location to the list 'sequences'.
for num, arg in enumsUnitless:
if isinstance(arg, (list, tuple, np.ndarray)):
sequences.append(num)
#If there are no sequences to iterate through, simply return
#the function.
if len(sequences) == 0:
result = func(*args, **kwargs)
else:
#iterant keeps track of how many times we've iterated and
#limiter stops the loop once we've iterated as many times
#as there are list elements. Without this check, a few
#erroneous runs will occur, appending the last couple values
#to the end of the list multiple times.
#
#We only care about the length of sequences[0] because this
#function is recursive, and sequences[0] is always the relevant
#sequences for any given run.
limiter = len(argsList[sequences[0]])
iterant = 0
result = []
for num in sequences:
for arg in argsList[num]:
if iterant >= limiter:
|
python
|
{
"resource": ""
}
|
q1559
|
check_range
|
train
|
def check_range(*args):
"""
Check whether passed parameters fall within approved ranges.
Does not return anything, but will raise an error if a parameter falls
outside of its defined range.
Input should be passed as an array of sequences, with each sequence
having three elements:
[0] is the value being checked,
[1] is the range parameter(s) within which the value should fall, and
[2] is the name of the parameter, for better error messages.
If [2] is not supplied, "Input" will be appended as a generic name.
Range requests that this function understands are listed in the
knownChecks sequence.
"""
knownChecks = ('>0', '>=0', '0-1', '<0', '<=0', 'int', 'boolean')
for arg in args:
#Converts arg to a mutable list
arg = [*arg]
if len(arg) == 1:
#arg[1] details what range the parameter should fall within; if
#len(arg) is 1 that means a validity was not specified and the
#parameter should not have been passed in its current form
raise TypeError("No range-validity parameter provided.")
elif len(arg) == 2:
#Appending 'Input" to the end allows us to give more descriptive
#error messages that do not fail if no description was supplied.
arg.append("Input")
#This ensures that all whitespace is removed before checking if the
#request is understood
arg[1] = "".join(arg[1].lower().split())
#This block checks that each range request is understood.
#If the request is a compound one, it must be separated into individual
#requests for validity comprehension
for i in arg[1].split(","):
if i not in knownChecks:
raise RuntimeError("Unknown parameter validation "
"request: {0}.".format(i))
if not isinstance(arg[0], (list, tuple, np.ndarray)):
arg[0] = [arg[0]]
for i in arg[0]:
if '>0' in arg[1] and i <= 0:
raise ValueError("{1} is {0} but must be greater than "
|
python
|
{
"resource": ""
}
|
q1560
|
Clusterer.update_clusterer
|
train
|
def update_clusterer(self, inst):
"""
Updates the clusterer with the instance.
:param inst: the Instance to update the clusterer with
:type inst: Instance
|
python
|
{
"resource": ""
}
|
q1561
|
Clusterer.update_finished
|
train
|
def update_finished(self):
"""
Signals the clusterer that updating with new data has finished.
"""
if self.is_updateable:
javabridge.call(self.jobject, "updateFinished", "()V")
|
python
|
{
"resource": ""
}
|
q1562
|
Clusterer.distribution_for_instance
|
train
|
def distribution_for_instance(self, inst):
"""
Performs a prediction, returning the cluster distribution.
:param inst: the Instance to get the cluster distribution for
:type inst: Instance
:return: the cluster distribution
:rtype: float[]
|
python
|
{
"resource": ""
}
|
q1563
|
ClusterEvaluation.cluster_assignments
|
train
|
def cluster_assignments(self):
"""
Return an array of cluster assignments corresponding to the most recent set of instances clustered.
:return: the cluster assignments
:rtype: ndarray
"""
array = javabridge.call(self.jobject, "getClusterAssignments", "()[D")
|
python
|
{
"resource": ""
}
|
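Only the unwrapping of the Java `double[]` is missing from `cluster_assignments`; javabridge's environment helper is the usual route (the None guard is an assumption):

if array is None:
    return None
return javabridge.get_env().get_double_array_elements(array)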
q1564
|
ClusterEvaluation.crossvalidate_model
|
train
|
def crossvalidate_model(cls, clusterer, data, num_folds, rnd):
"""
Cross-validates the clusterer and returns the loglikelihood.
:param clusterer: the clusterer instance to evaluate
:type clusterer: Clusterer
:param data: the data to evaluate on
:type data: Instances
:param num_folds: the number of folds
:type num_folds: int
:param rnd: the random number generator to use
:type rnd: Random
:return: the cross-validated loglikelihood
:rtype: float
"""
|
python
|
{
"resource": ""
}
|
q1565
|
read_all
|
train
|
def read_all(filename):
"""
Reads the serialized objects from disk. Caller must wrap objects in appropriate Python wrapper classes.
:param filename: the file with the serialized objects
:type filename: str
:return: the list of JB_Object instances
:rtype: list
"""
array = javabridge.static_call(
"Lweka/core/SerializationHelper;", "readAll",
|
python
|
{
"resource": ""
}
|
q1566
|
write
|
train
|
def write(filename, jobject):
"""
Serializes the object to disk. JavaObject instances get automatically unwrapped.
:param filename: the file to serialize the object to
:type filename: str
:param jobject: the object to serialize
:type jobject: JB_Object or JavaObject
"""
if isinstance(jobject, JavaObject):
jobject =
|
python
|
{
"resource": ""
}
|
q1567
|
Item.decrease_frequency
|
train
|
def decrease_frequency(self, frequency=None):
"""
Decreases the frequency.
:param frequency: the frequency to decrease by, 1 if None
:type frequency: int
"""
if frequency is None:
|
python
|
{
"resource": ""
}
|
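A sketch of the likely body of `decrease_frequency`, assuming the wrapped weka.associations.Item exposes `decreaseFrequency()` overloads with and without an int argument:

def decrease_frequency(self, frequency=None):
    """Decrease the item's frequency, by 1 if no amount is given."""
    if frequency is None:
        javabridge.call(self.jobject, "decreaseFrequency", "()V")
    else:
        javabridge.call(self.jobject, "decreaseFrequency", "(I)V", frequency)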
q1568
|
Item.increase_frequency
|
train
|
def increase_frequency(self, frequency=None):
"""
Increases the frequency.
:param frequency: the frequency to increase by, 1 if None
:type frequency: int
"""
if frequency is None:
|
python
|
{
"resource": ""
}
|
q1569
|
AssociationRule.consequence
|
train
|
def consequence(self):
"""
Get the consequence.
:return: the consequence, list of Item objects
:rtype: list
"""
items = javabridge.get_collection_wrapper(
|
python
|
{
"resource": ""
}
|
q1570
|
Associator.can_produce_rules
|
train
|
def can_produce_rules(self):
"""
Checks whether association rules can be generated.
:return: whether scheme implements AssociationRulesProducer interface and
association rules can be generated
:rtype: bool
"""
|
python
|
{
"resource": ""
}
|
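A sketch of `can_produce_rules`: presumably an instance-of test for the AssociationRulesProducer interface plus its `canProduceRules()` call (method name and JNI signature are assumptions):

def can_produce_rules(self):
    """True if the scheme implements AssociationRulesProducer and can produce rules."""
    if not javabridge.is_instance_of(
            self.jobject, "Lweka/associations/AssociationRulesProducer;"):
        return False
    return javabridge.call(self.jobject, "canProduceRules", "()Z")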
q1571
|
Associator.association_rules
|
train
|
def association_rules(self):
"""
Returns association rules that were generated. Only if implements AssociationRulesProducer.
:return: the association rules that were generated
:rtype: AssociationRules
"""
if not self.check_type(self.jobject, "weka.associations.AssociationRulesProducer"):
|
python
|
{
"resource": ""
}
|
q1572
|
Associator.rule_metric_names
|
train
|
def rule_metric_names(self):
"""
Returns the rule metric names of the association rules. Only if implements AssociationRulesProducer.
:return: the metric names
:rtype: list
"""
if not self.check_type(self.jobject, "weka.associations.AssociationRulesProducer"):
|
python
|
{
"resource": ""
}
|
q1573
|
loader_for_file
|
train
|
def loader_for_file(filename):
"""
Returns a Loader that can load the specified file, based on the file extension. None if failed to determine.
:param filename: the filename to get the loader for
:type filename: str
:return: the associated loader instance or None if none found
:rtype: Loader
"""
loader = javabridge.static_call(
"weka/core/converters/ConverterUtils",
|
python
|
{
"resource": ""
}
|
q1574
|
saver_for_file
|
train
|
def saver_for_file(filename):
"""
Returns a Saver that can save the specified file, based on the file extension. None if failed to determine.
:param filename: the filename to get the saver for
:type filename: str
:return: the associated saver instance or None if none found
:rtype: Saver
"""
saver = javabridge.static_call(
"weka/core/converters/ConverterUtils",
|
python
|
{
"resource": ""
}
|
q1575
|
save_any_file
|
train
|
def save_any_file(data, filename):
"""
Determines a Saver based on the file extension. Returns whether successfully saved.
:param filename: the name of the file to save
:type filename: str
:param data: the data to save
:type data: Instances
:return: whether successfully saved
:rtype: bool
|
python
|
{
"resource": ""
}
|
q1576
|
ndarray_to_instances
|
train
|
def ndarray_to_instances(array, relation, att_template="Att-#", att_list=None):
"""
Converts the numpy matrix into an Instances object and returns it.
:param array: the numpy ndarray to convert
:type array: numpy.ndarray
:param relation: the name of the dataset
:type relation: str
:param att_template: the prefix to use for the attribute names, "#" is the 1-based index,
"!" is the 0-based index, "@" the relation name
:type att_template: str
:param att_list: the list of attribute names to use
:type att_list: list
:return: the generated instances object
:rtype: Instances
"""
if len(numpy.shape(array)) != 2:
raise Exception("Number of array dimensions must be 2!")
rows, cols = numpy.shape(array)
# header
atts = []
if att_list is not None:
if len(att_list) != cols:
raise Exception(
"Number of columns and provided attribute names differ: " + str(cols) + " != " + str(len(att_list)))
for name in att_list:
|
python
|
{
"resource": ""
}
|
q1577
|
Loader.load_file
|
train
|
def load_file(self, dfile, incremental=False):
"""
Loads the specified file and returns the Instances object.
In case of incremental loading, only the structure.
:param dfile: the file to load
:type dfile: str
:param incremental: whether to load the dataset incrementally
:type incremental: bool
:return: the full dataset or the header (if incremental)
:rtype: Instances
:raises Exception: if the file does not exist
"""
self.enforce_type(self.jobject, "weka.core.converters.FileSourcedConverter")
self.incremental = incremental
if not javabridge.is_instance_of(dfile, "Ljava/io/File;"):
dfile = javabridge.make_instance(
"Ljava/io/File;", "(Ljava/lang/String;)V", javabridge.get_env().new_string_utf(str(dfile)))
javabridge.call(self.jobject, "reset", "()V")
# check whether file exists, otherwise
|
python
|
{
"resource": ""
}
|
q1578
|
Loader.load_url
|
train
|
def load_url(self, url, incremental=False):
"""
Loads the specified URL and returns the Instances object.
In case of incremental loading, only the structure.
:param url: the URL to load the data from
:type url: str
:param incremental: whether to load the dataset incrementally
:type incremental: bool
:return: the full dataset or the header (if incremental)
|
python
|
{
"resource": ""
}
|
q1579
|
TextDirectoryLoader.load
|
train
|
def load(self):
"""
Loads the text files from the specified directory and returns the Instances object.
In case of incremental loading, only the structure.
:return: the full dataset or the header (if incremental)
:rtype: Instances
|
python
|
{
"resource": ""
}
|
q1580
|
Saver.save_file
|
train
|
def save_file(self, data, dfile):
"""
Saves the Instances object in the specified file.
:param data: the data to save
:type data: Instances
:param dfile: the file to save the data to
:type dfile: str
"""
self.enforce_type(self.jobject, "weka.core.converters.FileSourcedConverter")
if not javabridge.is_instance_of(dfile, "Ljava/io/File;"):
dfile = javabridge.make_instance(
|
python
|
{
"resource": ""
}
|
q1581
|
string_array_to_list
|
train
|
def string_array_to_list(a):
"""
Turns the Java string array into Python unicode string list.
:param a: the string array to convert
:type a: JB_Object
:return: the string list
|
python
|
{
"resource": ""
}
|
q1582
|
string_list_to_array
|
train
|
def string_list_to_array(l):
"""
Turns a Python unicode string list into a Java String array.
:param l: the string list
:type l: list
:return: the Java string array
:rtype: JB_Object
"""
result = javabridge.get_env().make_object_array(len(l), javabridge.get_env().find_class("java/lang/String"))
|
python
|
{
"resource": ""
}
|
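The fill loop of `string_list_to_array` is cut off; the usual javabridge pattern, sketched:

env = javabridge.get_env()
result = env.make_object_array(len(l), env.find_class("java/lang/String"))
for i, s in enumerate(l):
    env.set_object_array_element(result, i, env.new_string_utf(s))
return result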
q1583
|
enumeration_to_list
|
train
|
def enumeration_to_list(enm):
"""
Turns the java.util.Enumeration into a list.
:param enm: the enumeration to convert
:type enm: JB_Object
|
python
|
{
"resource": ""
}
|
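A sketch of `enumeration_to_list` draining a java.util.Enumeration through javabridge (standard Java method signatures):

result = []
while javabridge.call(enm, "hasMoreElements", "()Z"):
    result.append(javabridge.call(enm, "nextElement", "()Ljava/lang/Object;"))
return result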
q1584
|
ASSearch.search
|
train
|
def search(self, evaluation, data):
"""
Performs the search and returns the indices of the selected attributes.
:param evaluation: the evaluation algorithm to use
:type evaluation: ASEvaluation
:param data: the data to use
:type data: Instances
:return: the selected attributes (0-based indices)
:rtype: ndarray
"""
|
python
|
{
"resource": ""
}
|
q1585
|
ASEvaluation.post_process
|
train
|
def post_process(self, indices):
"""
Post-processes the evaluator with the selected attribute indices.
:param indices: the attribute indices list to use
:type indices: ndarray
:return: the processed indices
:rtype: ndarray
"""
|
python
|
{
"resource": ""
}
|
q1586
|
AttributeSelection.selected_attributes
|
train
|
def selected_attributes(self):
"""
Returns the selected attributes from the last run.
:return: the Numpy array of 0-based indices
|
python
|
{
"resource": ""
}
|
q1587
|
AttributeSelection.reduce_dimensionality
|
train
|
def reduce_dimensionality(self, data):
"""
Reduces the dimensionality of the provided Instance or Instances object.
:param data: the data to process
:type data: Instances
:return: the reduced dataset
:rtype: Instances
"""
if type(data) is Instance:
return Instance(
javabridge.call(
self.jobject, "reduceDimensionality",
|
python
|
{
"resource": ""
}
|
q1588
|
generate_thresholdcurve_data
|
train
|
def generate_thresholdcurve_data(evaluation, class_index):
"""
Generates the threshold curve data from the evaluation object's predictions.
:param evaluation: the evaluation to obtain the predictions from
:type evaluation: Evaluation
:param class_index: the 0-based index of the class-label to create the plot for
:type class_index: int
:return: the generated threshold curve data
:rtype: Instances
"""
|
python
|
{
"resource": ""
}
|
q1589
|
get_thresholdcurve_data
|
train
|
def get_thresholdcurve_data(data, xname, yname):
"""
Retrieves the x and y columns from the data generated by the weka.classifiers.evaluation.ThresholdCurve
class.
:param data: the threshold curve data
:type data: Instances
:param xname: the name of the X column
:type xname: str
:param yname: the name of the Y column
:type yname: str
:return: tuple of x and y
|
python
|
{
"resource": ""
}
|
q1590
|
install_package
|
train
|
def install_package(pkge, version="Latest"):
"""
Installs the specified package.
:param pkge: the name of the repository package, a URL (http/https) or a zip file
:type pkge: str
:param version: in case of the repository packages, the version
:type version: str
:return: whether successfully installed
:rtype: bool
"""
establish_cache()
if pkge.startswith("http://") or pkge.startswith("https://"):
url = javabridge.make_instance(
"java/net/URL", "(Ljava/lang/String;)V", javabridge.get_env().new_string_utf(pkge))
return not javabridge.static_call(
"weka/core/WekaPackageManager", "installPackageFromURL",
"(Ljava/net/URL;[Ljava/io/PrintStream;)Ljava/lang/String;", url, []) is None
elif pkge.lower().endswith(".zip"):
return not javabridge.static_call(
|
python
|
{
"resource": ""
}
|
q1591
|
is_installed
|
train
|
def is_installed(name):
"""
Checks whether a package with the name is already installed.
:param name: the name of the package
:type
|
python
|
{
"resource": ""
}
|
q1592
|
Package.dependencies
|
train
|
def dependencies(self):
"""
Returns the dependencies of the package.
:return: the list of Dependency objects
:rtype: list of Dependency
"""
|
python
|
{
"resource": ""
}
|
q1593
|
PackageConstraint.check_constraint
|
train
|
def check_constraint(self, pkge=None, constr=None):
"""
Checks the constraints.
:param pkge: the package to check
:type pkge: Package
:param constr: the package constraint to check
:type constr: PackageConstraint
"""
if not pkge is None:
|
python
|
{
"resource": ""
}
|
q1594
|
InstanceQuery.custom_properties
|
train
|
def custom_properties(self, props):
"""
Sets the custom properties file to use.
:param props: the props file
:type props: str
"""
|
python
|
{
"resource": ""
}
|
q1595
|
Capabilities.owner
|
train
|
def owner(self):
"""
Returns the owner of these capabilities, if any.
:return: the owner, can be None
:rtype: JavaObject
"""
obj
|
python
|
{
"resource": ""
}
|
q1596
|
Capabilities.dependencies
|
train
|
def dependencies(self):
"""
Returns all the dependencies.
:return: the dependency list
:rtype: list
"""
result = []
iterator = javabridge.iterate_java(javabridge.call(self.jobject,
|
python
|
{
"resource": ""
}
|
q1597
|
Capabilities.for_instances
|
train
|
def for_instances(cls, data, multi=None):
"""
Returns a Capabilities object specific to this data. The minimum number of instances is not set, the check
for multi-instance data is optional.
:param data: the data to generate the capabilities for
:type data: Instances
:param multi: whether to check the structure, too
:type multi: bool
:return: the generated capabilities
:rtype: Capabilities
"""
if multi is None:
return Capabilities(javabridge.static_call(
|
python
|
{
"resource": ""
}
|
q1598
|
scatter_plot
|
train
|
def scatter_plot(data, index_x, index_y, percent=100.0, seed=1, size=50, title=None, outfile=None, wait=True):
"""
Plots two attributes against each other.
TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html
:param data: the dataset
:type data: Instances
:param index_x: the 0-based index of the attribute on the x axis
:type index_x: int
:param index_y: the 0-based index of the attribute on the y axis
:type index_y: int
:param percent: the percentage of the dataset to use for plotting
:type percent: float
:param seed: the seed value to use for subsampling
:type seed: int
:param size: the size of the circles in point
:type size: int
:param title: an optional title
:type title: str
:param outfile: the (optional) file to save the generated plot to. The extension determines the file format.
:type outfile: str
:param wait: whether to wait for the user to close the plot
:type wait: bool
"""
if not plot.matplotlib_available:
logger.error("Matplotlib is not installed, plotting unavailable!")
return
# create subsample
data = plot.create_subsample(data, percent=percent, seed=seed)
# collect data
x = []
y = []
if data.class_index == -1:
c = None
else:
c = []
for i in
|
python
|
{
"resource": ""
}
|
q1599
|
line_plot
|
train
|
def line_plot(data, atts=None, percent=100.0, seed=1, title=None, outfile=None, wait=True):
"""
Uses the internal format to plot the dataset, one line per instance.
:param data: the dataset
:type data: Instances
:param atts: the list of 0-based attribute indices of attributes to plot
:type atts: list
:param percent: the percentage of the dataset to use for plotting
:type percent: float
:param seed: the seed value to use for subsampling
:type seed: int
:param title: an optional title
:type title: str
:param outfile: the (optional) file to save the generated plot to. The extension determines the file format.
:type outfile: str
:param wait: whether to wait for the user to close the plot
:type wait: bool
"""
if not plot.matplotlib_available:
logger.error("Matplotlib is not installed, plotting unavailable!")
return
# create subsample
data = plot.create_subsample(data, percent=percent, seed=seed)
fig = plt.figure()
if atts is None:
x = []
for i in range(data.num_attributes):
x.append(i)
else:
x = atts
|
python
|
{
"resource": ""
}
|