Dataset columns:
_id: string (2-7 chars)
title: string (1-88 chars)
partition: string (3 classes)
text: string (75-19.8k chars)
language: string (1 class)
meta_information: dict
q1500
column_start_to_end
train
def column_start_to_end(data, column, start_idx, end_idx):
    """Return a list of numeric data entries in the given column from the
    starting index to the ending index. This list can be compiled over one
    or more DataFrames.

    :param data: a list of DataFrames to extract data in one column from
    :type data: Pandas.DataFrame list
    :param column: a column index
    :type column: int
    :param start_idx: the index of the starting row
    :type start_idx: int
    :param end_idx: the index of the ending row
    :type end_idx: int

    :return: a list of data from the given column
    :rtype: float list
    """
    if len(data) == 1:
        result = list(pd.to_numeric(data[0].iloc[start_idx:end_idx, column]))
    else:
        result = list(pd.to_numeric(data[0].iloc[start_idx:, column]))
        for i in range(1, len(data)-1):
            data[i].iloc[0, 0] = 0
            result += list(pd.to_numeric(data[i].iloc[:, column]) +
                           (i if column == 0 else 0))
        data[-1].iloc[0, 0] = 0
        result += list(pd.to_numeric(data[-1].iloc[:end_idx, column]) +
                       (len(data)-1 if column == 0 else 0))
    return result
python
{ "resource": "" }
q1501
get_data_by_state
train
def get_data_by_state(path, dates, state, column):
    """Reads a ProCoDA file and extracts the time and data column for each
    iteration of the given state.

    Note: column 0 is time. The first data column is column 1.

    :param path: The path to the folder containing the ProCoDA data file(s), defaults to the current directory
    :type path: string
    :param dates: A single date or list of dates for which data was recorded, formatted "M-D-YYYY"
    :type dates: string or string list
    :param state: The state ID number for which data should be plotted
    :type state: int
    :param column: The integer index of the column that you want to extract OR the header of the column that you want to extract
    :type column: int or string

    :return: A list of lists of the time and data columns extracted for each
        iteration of the state. For example, if "data" is the output,
        data[i][:,0] gives the time column and data[i][:,1] gives the data
        column for the ith iteration of the given state and column.
        data[i][0] would give the first [time, data] pair.
    :rtype: list of lists of lists

    :Examples:

    .. code-block:: python

        data = get_data_by_state(path='/Users/.../ProCoDA Data/', dates=["6-19-2013", "6-20-2013"], state=1, column=28)
    """
    data_agg = []
    day = 0
    first_day = True
    overnight = False
    extension = ".xls"
    if path[-1] != '/':
        path += '/'

    if not isinstance(dates, list):
        dates = [dates]

    for d in dates:
        state_file = path + "statelog " + d + extension
        data_file = path + "datalog " + d + extension

        states = pd.read_csv(state_file, delimiter='\t')
        data = pd.read_csv(data_file, delimiter='\t')

        states = np.array(states)
        data = np.array(data)

        # get the start and end times for the state
        state_start_idx = states[:, 1] == state
        state_start = states[state_start_idx, 0]
        state_end_idx = np.append([False], state_start_idx[0:-1])
        state_end = states[state_end_idx, 0]

        if overnight:
            state_start = np.insert(state_start, 0, 0)
            state_end = np.insert(state_end, 0, states[0, 0])

        if state_start_idx[-1]:
            # the last iteration runs to the end of the datalog;
            # np.append returns a new array, so capture the result
            state_end = np.append(state_end, data[-1, 0])

        # get the corresponding indices in the data array
        data_start = []
        data_end = []
        for i in range(np.size(state_start)):
            add_start = True
            for j in range(np.size(data[:, 0])):
                if (data[j, 0] > state_start[i]) and add_start:
                    data_start.append(j)
                    add_start = False
                if data[j, 0] > state_end[i]:
                    data_end.append(j-1)
                    break

        if first_day:
            start_time = data[0, 0]

        # extract data at those times
        for i in range(np.size(data_start)):
            t = data[data_start[i]:data_end[i], 0] + day - start_time
            if isinstance(column, int):
                c = data[data_start[i]:data_end[i], column]
            else:
                c = data[column][data_start[i]:data_end[i]]

            if overnight and i == 0:
                # merge the overnight continuation with the previous
                # day's last iteration
                data_agg[-1] = np.vstack((data_agg[-1], np.vstack((t, c)).T))
            else:
                data_agg.append(np.vstack((t, c)).T)

        day += 1
        if first_day:
            first_day = False
        if state_start_idx[-1]:
            overnight = True

    return data_agg
python
{ "resource": "" }
q1502
column_of_time
train
def column_of_time(path, start, end=-1):
    """This function extracts the column of times from a ProCoDA data file.

    :param path: The file path of the ProCoDA data file. If the file is in the working directory, then the file name is sufficient.
    :type path: string
    :param start: Index of first row of data to extract from the data file
    :type start: int
    :param end: Index of last row of data to extract from the data. Defaults to the last row
    :type end: int

    :return: Experimental times starting at 0, in units of days
    :rtype: numpy.array

    :Examples:

    .. code-block:: python

        time = column_of_time("Reactor_data.txt", 0)
    """
    df = pd.read_csv(path, delimiter='\t')
    start_time = pd.to_numeric(df.iloc[start, 0])*u.day
    day_times = pd.to_numeric(df.iloc[start:end, 0])
    time_data = np.subtract((np.array(day_times)*u.day), start_time)
    return time_data
python
{ "resource": "" }
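A minimal self-contained sketch of the extraction pattern used by column_of_time above, with an in-memory stand-in for the ProCoDA file and without Pint units; the day-fraction values and column layout are illustrative assumptions:

import io
import numpy as np
import pandas as pd

raw = "Day fraction\tSensor\n0.50\t1.0\n0.51\t1.1\n0.52\t1.2\n"  # hypothetical datalog
df = pd.read_csv(io.StringIO(raw), delimiter='\t')
day_times = pd.to_numeric(df.iloc[0:, 0])
elapsed = np.array(day_times) - day_times.iloc[0]  # times relative to the first row
print(elapsed)  # -> approximately [0. 0.01 0.02]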
q1503
column_of_data
train
def column_of_data(path, start, column, end=-1, units=""):
    """This function extracts a column of data from a ProCoDA data file.

    Note: Column 0 is time. The first data column is column 1.

    :param path: The file path of the ProCoDA data file. If the file is in the working directory, then the file name is sufficient.
    :type path: string
    :param start: Index of first row of data to extract from the data file
    :type start: int
    :param column: Index of the column that you want to extract OR name of the column header that you want to extract
    :type column: int or string
    :param end: Index of last row of data to extract from the data. Defaults to the last row
    :type end: int, optional
    :param units: The units you want to apply to the data, e.g. 'mg/L'. Defaults to "" (dimensionless)
    :type units: string, optional

    :return: Experimental data with the units applied
    :rtype: numpy.array

    :Examples:

    .. code-block:: python

        data = column_of_data("Reactor_data.txt", 0, 1, -1, "mg/L")
    """
    if not isinstance(start, int):
        start = int(start)
    if not isinstance(end, int):
        end = int(end)

    df = pd.read_csv(path, delimiter='\t')
    if isinstance(column, int):
        data = np.array(pd.to_numeric(df.iloc[start:end, column]))
    else:
        # select the column by its header
        data = np.array(pd.to_numeric(df[column][start:end]))
    if units != "":
        data = data * u(units)
    return data
python
{ "resource": "" }
q1504
notes
train
def notes(path):
    """This function extracts any experimental notes from a ProCoDA data file.

    :param path: The file path of the ProCoDA data file. If the file is in the working directory, then the file name is sufficient.
    :type path: string

    :return: The rows of the data file that contain text notes inserted during the experiment. Use this to identify the section of the data file that you want to extract.
    :rtype: pandas.DataFrame
    """
    df = pd.read_csv(path, delimiter='\t')
    # rows whose first column contains any letter are text notes
    text_row = df.iloc[0:-1, 0].str.contains('[a-zA-Z]')
    text_row_index = text_row.index[text_row].tolist()
    notes = df.loc[text_row_index]
    return notes
python
{ "resource": "" }
q1505
write_calculations_to_csv
train
def write_calculations_to_csv(funcs, states, columns, path, headers, out_name,
                              metaids=[], extension=".xls"):
    """Writes each output of the given functions on the given states and data
    columns to a new column in the specified output file.

    Note: Column 0 is time. The first data column is column 1.

    :param funcs: A function or list of functions which will be applied in order to the data. If only one function is given it is applied to all the states/columns
    :type funcs: function or function list
    :param states: The state ID numbers for which data should be extracted. List should be in order of calculation, or if only one state is given then it will be used for all the calculations
    :type states: string or string list
    :param columns: The index of a column, the header of a column, a list of indexes, OR a list of headers of the column(s) that you want to apply calculations to
    :type columns: int, string, int list, or string list
    :param path: Path to your ProCoDA metafile (must be tab-delimited)
    :type path: string
    :param headers: List of the desired header for each calculation, in order
    :type headers: string list
    :param out_name: Desired name for the output file. Can include a relative path
    :type out_name: string
    :param metaids: A list of the experiment IDs you'd like to analyze from the metafile
    :type metaids: string list, optional
    :param extension: The file extension of the tab-delimited file. Defaults to ".xls" if no argument is passed in
    :type extension: string, optional

    :requires: funcs, states, columns, and headers are all of the same length if they are lists. Some being lists and some single values are okay.

    :return: out_name.csv (CSV file) - A CSV file with each column being a new calculation and each row being a new experiment on which the calculations were performed
    :return: output (Pandas.DataFrame) - Pandas DataFrame holding the same data that was written to the output file
    """
    if not isinstance(funcs, list):
        funcs = [funcs] * len(headers)
    if not isinstance(states, list):
        states = [states] * len(headers)
    if not isinstance(columns, list):
        columns = [columns] * len(headers)

    data_agg = []
    for i in range(len(headers)):
        ids, data = read_state_with_metafile(funcs[i], states[i], columns[i],
                                             path, metaids, extension)
        data_agg = np.append(data_agg, [data])

    output = pd.DataFrame(data=np.vstack((ids, data_agg)).T,
                          columns=["ID"] + headers)
    output.to_csv(out_name, sep='\t')
    return output
python
{ "resource": "" }
q1506
OD
train
def OD(ND):
    """Return a pipe's outer diameter according to its nominal diameter.

    The pipe schedule is not required here because all of the pipes of a
    given nominal diameter have the same outer diameter.

    Steps:
    1. Find the index of the closest nominal diameter.
       (Should this be changed to find the next largest ND?)
    2. Take the values of the array, subtract the ND, take the absolute
       value, find the index of the minimum value.
    """
    index = (np.abs(np.array(pipedb['NDinch']) - (ND))).argmin()
    return pipedb.iloc[index, 1]
python
{ "resource": "" }
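The nearest-value lookup that OD (and ID_sch40 below) performs can be demonstrated with a hypothetical two-column stand-in for pipedb; the ND/OD values here are illustrative only:

import numpy as np

nd = np.array([0.5, 1.0, 1.5, 2.0])       # nominal diameters, inches (assumed)
od = np.array([0.84, 1.315, 1.9, 2.375])  # matching outer diameters, inches (assumed)
index = np.abs(nd - 1.2).argmin()         # index of the ND closest to 1.2 in
print(od[index])                          # -> 1.315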
q1507
ID_sch40
train
def ID_sch40(ND):
    """Return the inner diameter for schedule 40 pipes.

    The wall thickness for these pipes is in the pipedb.

    Take the values of the array, subtract the ND, take the absolute value,
    find the index of the minimum value.
    """
    myindex = (np.abs(np.array(pipedb['NDinch']) - (ND))).argmin()
    return (pipedb.iloc[myindex, 1] - 2*(pipedb.iloc[myindex, 5]))
python
{ "resource": "" }
q1508
ND_all_available
train
def ND_all_available(): """Return an array of available nominal diameters. NDs available are those commonly used as based on the 'Used' column in the pipedb. """ ND_all_available = [] for i in range(len(pipedb['NDinch'])): if pipedb.iloc[i, 4] == 1: ND_all_available.append((pipedb['NDinch'][i])) return ND_all_available * u.inch
python
{ "resource": "" }
q1509
ID_SDR_all_available
train
def ID_SDR_all_available(SDR): """Return an array of inner diameters with a given SDR. IDs available are those commonly used based on the 'Used' column in the pipedb. """ ID = [] ND = ND_all_available() for i in range(len(ND)): ID.append(ID_SDR(ND[i], SDR).magnitude) return ID * u.inch
python
{ "resource": "" }
q1510
ND_SDR_available
train
def ND_SDR_available(ID, SDR):
    """Return an available ND given an ID and a schedule.

    Takes the values of the array, compares to the ID, and finds the index
    of the first value greater than or equal.
    """
    ID_available = np.array(ID_SDR_all_available(SDR))
    for i in range(len(ID_available)):
        if ID_available[i] >= (ID.to(u.inch)).magnitude:
            return ND_all_available()[i]
python
{ "resource": "" }
q1511
flow_pipeline
train
def flow_pipeline(diameters, lengths, k_minors, target_headloss,
                  nu=con.WATER_NU, pipe_rough=mats.PVC_PIPE_ROUGH):
    """
    This function takes a single pipeline with multiple sections, each
    potentially with different diameters, lengths and minor loss
    coefficients, and determines the flow rate for a given headloss.

    :param diameters: list of diameters, where the i_th diameter corresponds to the i_th pipe section
    :type diameters: numpy.ndarray
    :param lengths: list of lengths, where the i_th length corresponds to the i_th pipe section
    :type lengths: numpy.ndarray
    :param k_minors: list of minor loss coefficients, where the i_th coefficient corresponds to the i_th pipe section
    :type k_minors: numpy.ndarray
    :param target_headloss: a single headloss describing the total headloss through the system
    :type target_headloss: float
    :param nu: The kinematic viscosity of the fluid. Defaults to water at room temperature (1 * 10**-6 * m**2/s)
    :type nu: float
    :param pipe_rough: The pipe roughness. Defaults to PVC roughness.
    :type pipe_rough: float

    :return: the total flow through the system
    :rtype: float
    """
    # Ensure all the arguments except total headloss are the same length
    # TODO

    # Total number of pipe sections
    n = diameters.size

    # Start with a flow rate guess based on the flow through a single pipe section
    flow = pc.flow_pipe(diameters[0], target_headloss, lengths[0], nu,
                        pipe_rough, k_minors[0])
    err = 1.0

    # Add all the pipe section headlosses together to test the error
    while abs(err) > 0.01:
        headloss = sum([pc.headloss(flow, diameters[i], lengths[i], nu,
                                    pipe_rough, k_minors[i]).to(u.m).magnitude
                        for i in range(n)])
        # Test the error. This is always less than one.
        err = (target_headloss - headloss) / (target_headloss + headloss)
        # Adjust the total flow in the direction of the error. If there is
        # more headloss than the target headloss, the flow should be
        # reduced, and vice-versa.
        flow = flow + err * flow

    return flow
python
{ "resource": "" }
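The convergence scheme in flow_pipeline can be seen in isolation with a toy headloss model h(Q) = c * Q**2 standing in for pc.headloss; the model and constants are illustrative assumptions, not the real pipe physics:

target = 2.0    # m of headloss to match
c = 5.0         # toy loss coefficient (assumed)
flow = 1.0      # deliberately poor initial guess
err = 1.0
while abs(err) > 0.01:
    headloss = c * flow**2
    # bounded in (-1, 1), so each correction is a fractional adjustment
    err = (target - headloss) / (target + headloss)
    flow += err * flow
print(round(flow, 4))   # -> ~0.6324, i.e. sqrt(target / c)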
q1512
LFOM.n_rows
train
def n_rows(self):
    """This equation states that the open area corresponding to one row can
    be set equal to two orifices of diameter = row height. If there are more
    than two orifices per row at the top of the LFOM, then there are more
    orifices than are convenient to drill and more than necessary for good
    accuracy. Thus this relationship can be used to increase the spacing
    between the rows and thus increase the diameter of the orifices. This
    spacing function also sets the lower depth on the high flow rate LFOM
    with no accurate flows below a depth equal to the first row height.

    But it might be better to always set the number of rows to 10. The
    challenge is to figure out a reasonable system of constraints that
    reliably returns a valid solution.
    """
    N_estimated = (self.hl * np.pi /
                   (2 * self.stout_w_per_flow(self.hl) * self.q)).to(u.dimensionless)
    variablerow = min(10, max(4, math.trunc(N_estimated.magnitude)))
    return variablerow
python
{ "resource": "" }
q1513
LFOM.area_pipe_min
train
def area_pipe_min(self): """The minimum cross-sectional area of the LFOM pipe that assures a safety factor.""" return (self.safety_factor * self.q / self.vel_critical).to(u.cm**2)
python
{ "resource": "" }
q1514
LFOM.nom_diam_pipe
train
def nom_diam_pipe(self): """The nominal diameter of the LFOM pipe""" ID = pc.diam_circle(self.area_pipe_min) return pipe.ND_SDR_available(ID, self.sdr)
python
{ "resource": "" }
q1515
LFOM.orifice_diameter
train
def orifice_diameter(self): """The actual orifice diameter. We don't let the diameter extend beyond its row space. """ maxdrill = min(self.b_rows, self.d_orifice_max) return ut.floor_nearest(maxdrill, self.drill_bits)
python
{ "resource": "" }
q1516
LFOM.flow_ramp
train
def flow_ramp(self): """An equally spaced array representing flow at each row.""" return np.linspace(1 / self.n_rows, 1, self.n_rows)*self.q
python
{ "resource": "" }
q1517
LFOM.n_orifices_per_row
train
def n_orifices_per_row(self):
    """Calculate number of orifices at each level given an orifice diameter."""
    # H is distance from the bottom of the next row of orifices to the
    # center of the current row of orifices
    H = self.b_rows - 0.5*self.orifice_diameter
    flow_per_orifice = pc.flow_orifice_vert(self.orifice_diameter, H,
                                            con.VC_ORIFICE_RATIO)
    n = np.zeros(self.n_rows)
    for i in range(self.n_rows):
        # calculate the ideal number of orifices at the current row without
        # constraining to an integer
        flow_needed = self.flow_ramp[i] - self.flow_actual(i, n)
        n_orifices_real = (flow_needed / flow_per_orifice).to(u.dimensionless)
        # constrain number of orifices to be less than the max per row and
        # greater than or equal to 0
        n[i] = min((max(0, round(n_orifices_real))), self.n_orifices_per_row_max)
    return n
python
{ "resource": "" }
q1518
LFOM.error_per_row
train
def error_per_row(self): """This function calculates the error of the design based on the differences between the predicted flow rate and the actual flow rate through the LFOM.""" FLOW_lfom_error = np.zeros(self.n_rows) for i in range(self.n_rows): actual_flow = self.flow_actual(i, self.n_orifices_per_row) FLOW_lfom_error[i] = (((actual_flow - self.flow_ramp[i]) / self.flow_ramp[i]).to(u.dimensionless)).magnitude return FLOW_lfom_error
python
{ "resource": "" }
q1519
get_drill_bits_d_imperial
train
def get_drill_bits_d_imperial(): """Return array of possible drill diameters in imperial.""" step_32nd = np.arange(0.03125, 0.25, 0.03125) step_8th = np.arange(0.25, 1.0, 0.125) step_4th = np.arange(1.0, 2.0, 0.25) maximum = [2.0] return np.concatenate((step_32nd, step_8th, step_4th, maximum)) * u.inch
python
{ "resource": "" }
q1520
get_drill_bits_d_metric
train
def get_drill_bits_d_metric(): """Return array of possible drill diameters in metric.""" return np.concatenate((np.arange(1.0, 10.0, 0.1), np.arange(10.0, 18.0, 0.5), np.arange(18.0, 36.0, 1.0), np.arange(40.0, 55.0, 5.0))) * u.mm
python
{ "resource": "" }
q1521
Variable_C_Stock.C_stock
train
def C_stock(self): """Return the required concentration of material in the stock given a reactor's desired system flow rate, system concentration, and stock flow rate. :return: Concentration of material in the stock :rtype: float """ return self._C_sys * (self._Q_sys / self._Q_stock).to(u.dimensionless)
python
{ "resource": "" }
q1522
Variable_Q_Stock.Q_stock
train
def Q_stock(self): """Return the required flow rate from the stock of material given a reactor's desired system flow rate, system concentration, and stock concentration. :return: Flow rate from the stock of material :rtype: float """ return self._Q_sys * (self._C_sys / self._C_stock).to(u.dimensionless)
python
{ "resource": "" }
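Q_stock and C_stock above are two rearrangements of the same steady-state mass balance, Q_sys * C_sys = Q_stock * C_stock. A plain-number sketch with illustrative values:

Q_sys = 2.0      # system flow, mL/s (assumed)
C_sys = 1.4      # target system concentration, mg/L (assumed)
C_stock = 70.0   # stock concentration, mg/L (assumed)
Q_stock = Q_sys * C_sys / C_stock   # flow the stock pump must deliver
print(round(Q_stock, 6))            # -> 0.04 mL/s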
q1523
Variable_Q_Stock.rpm
train
def rpm(self, vol_per_rev): """Return the pump speed required for the reactor's stock of material given the volume of fluid output per revolution by the stock's pump. :param vol_per_rev: Volume of fluid pumped per revolution (dependent on pump and tubing) :type vol_per_rev: float :return: Pump speed for the material stock, in revolutions per minute :rtype: float """ return Stock.rpm(self, vol_per_rev, self.Q_stock()).to(u.rev/u.min)
python
{ "resource": "" }
q1524
Flocculator.draw
train
def draw(self): """Draw the Onshape flocculator model based off of this object.""" from onshapepy import Part CAD = Part( 'https://cad.onshape.com/documents/b4cfd328713460beeb3125ac/w/3928b5c91bb0a0be7858d99e/e/6f2eeada21e494cebb49515f' ) CAD.params = { 'channel_L': self.channel_L, 'channel_W': self.channel_W, 'channel_H': self.downstream_H, 'channel_pairs': self.channel_n/2, 'baffle_S': self.baffle_S, }
python
{ "resource": "" }
q1525
max_linear_flow
train
def max_linear_flow(Diam, HeadlossCDC, Ratio_Error, KMinor): """Return the maximum flow that will meet the linear requirement. Maximum flow that can be put through a tube of a given diameter without exceeding the allowable deviation from linear head loss behavior """ flow = (pc.area_circle(Diam)).magnitude * np.sqrt((2 * Ratio_Error * HeadlossCDC * pc.gravity)/ KMinor) return flow.magnitude
python
{ "resource": "" }
q1526
_len_tube
train
def _len_tube(Flow, Diam, HeadLoss, conc_chem, temp, en_chem, KMinor):
    """Length of tube required to get desired head loss at maximum flow
    based on the Hagen-Poiseuille equation."""
    num1 = pc.gravity.magnitude * HeadLoss * np.pi * (Diam**4)
    denom1 = 128 * viscosity_kinematic_chem(conc_chem, temp, en_chem) * Flow
    num2 = Flow * KMinor
    denom2 = 16 * np.pi * viscosity_kinematic_chem(conc_chem, temp, en_chem)
    length = ((num1/denom1) - (num2/denom2))
    return length.magnitude
python
{ "resource": "" }
q1527
_length_cdc_tube_array
train
def _length_cdc_tube_array(FlowPlant, ConcDoseMax, ConcStock,
                           DiamTubeAvail, HeadlossCDC, temp, en_chem, KMinor):
    """Calculate the length of each diameter tube given the corresponding
    flow rate and coagulant. Choose the tube that is shorter than the
    maximum length tube."""
    Flow = _flow_cdc_tube(FlowPlant, ConcDoseMax, ConcStock, DiamTubeAvail,
                          HeadlossCDC, Ratio_Error, KMinor).magnitude
    return _len_tube(Flow, DiamTubeAvail, HeadlossCDC, ConcStock,
                     temp, en_chem, KMinor).magnitude
python
{ "resource": "" }
q1528
len_cdc_tube
train
def len_cdc_tube(FlowPlant, ConcDoseMax, ConcStock, DiamTubeAvail, HeadlossCDC, LenCDCTubeMax, temp, en_chem, KMinor): """The length of tubing may be longer than the max specified if the stock concentration is too high to give a viable solution with the specified length of tubing.""" index = i_cdc(FlowPlant, ConcDoseMax, ConcStock, DiamTubeAvail, HeadlossCDC, LenCDCTubeMax, temp, en_chem, KMinor) len_cdc_tube = (_length_cdc_tube_array(FlowPlant, ConcDoseMax, ConcStock, DiamTubeAvail, HeadlossCDC, temp, en_chem, KMinor))[index].magnitude return len_cdc_tube
python
{ "resource": "" }
q1529
dens_alum_nanocluster
train
def dens_alum_nanocluster(coag): """Return the density of the aluminum in the nanocluster. This is useful for determining the volume of nanoclusters given a concentration of aluminum. """ density = (coag.PrecipDensity * MOLEC_WEIGHT_ALUMINUM * coag.PrecipAluminumMPM / coag.PrecipMolecWeight) return density
python
{ "resource": "" }
q1530
dens_pacl_solution
train
def dens_pacl_solution(ConcAluminum, temp): """Return the density of the PACl solution. From Stock Tank Mixing report Fall 2013: https://confluence.cornell.edu/download/attachments/137953883/20131213_Research_Report.pdf """ return ((0.492 * ConcAluminum * PACl.MolecWeight / (PACl.AluminumMPM * MOLEC_WEIGHT_ALUMINUM) ) + pc.density_water(temp).magnitude )
python
{ "resource": "" }
q1531
particle_number_concentration
train
def particle_number_concentration(ConcMat, material): """Return the number of particles in suspension. :param ConcMat: Concentration of the material :type ConcMat: float :param material: The material in solution :type material: floc_model.Material """ return ConcMat.to(material.Density.units) / ((material.Density * np.pi * material.Diameter**3) / 6)
python
{ "resource": "" }
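The relation in particle_number_concentration is the mass concentration divided by the mass of one spherical particle. A unit-free sketch with illustrative clay values:

import numpy as np

conc = 0.1          # suspended mass concentration, kg/m^3 (assumed)
density = 2650.0    # particle density, kg/m^3 (assumed)
diameter = 7e-6     # particle diameter, m (assumed)
mass_per_particle = density * np.pi * diameter**3 / 6
print(conc / mass_per_particle)   # -> ~2.1e11 particles per m^3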
q1532
sep_dist_clay
train
def sep_dist_clay(ConcClay, material): """Return the separation distance between clay particles.""" return ((material.Density/ConcClay)*((np.pi * material.Diameter ** 3)/6))**(1/3)
python
{ "resource": "" }
q1533
num_nanoclusters
train
def num_nanoclusters(ConcAluminum, coag): """Return the number of Aluminum nanoclusters.""" return (ConcAluminum / (dens_alum_nanocluster(coag).magnitude * np.pi * coag.Diameter**3))
python
{ "resource": "" }
q1534
frac_vol_floc_initial
train
def frac_vol_floc_initial(ConcAluminum, ConcClay, coag, material): """Return the volume fraction of flocs initially present, accounting for both suspended particles and coagulant precipitates. :param ConcAluminum: Concentration of aluminum in solution :type ConcAluminum: float :param ConcClay: Concentration of particle in suspension :type ConcClay: float :param coag: Type of coagulant in solution :type coag: float :param material: Type of particles in suspension, e.g. floc_model.Clay :type material: floc_model.Material :return: Volume fraction of particles initially present :rtype: float """ return ((conc_precipitate(ConcAluminum, coag).magnitude/coag.PrecipDensity) + (ConcClay / material.Density))
python
{ "resource": "" }
q1535
num_coll_reqd
train
def num_coll_reqd(DIM_FRACTAL, material, DiamTarget): """Return the number of doubling collisions required. Calculates the number of doubling collisions required to produce a floc of diameter DiamTarget. """ return DIM_FRACTAL * np.log2(DiamTarget/material.Diameter)
python
{ "resource": "" }
q1536
sep_dist_floc
train
def sep_dist_floc(ConcAluminum, ConcClay, coag, material, DIM_FRACTAL, DiamTarget): """Return separation distance as a function of floc size.""" return (material.Diameter * (np.pi/(6 * frac_vol_floc_initial(ConcAluminum, ConcClay, coag, material) ))**(1/3) * (DiamTarget / material.Diameter)**(DIM_FRACTAL / 3) )
python
{ "resource": "" }
q1537
frac_vol_floc
train
def frac_vol_floc(ConcAluminum, ConcClay, coag, DIM_FRACTAL, material, DiamTarget): """Return the floc volume fraction.""" return (frac_vol_floc_initial(ConcAluminum, ConcClay, coag, material) * (DiamTarget / material.Diameter)**(3-DIM_FRACTAL) )
python
{ "resource": "" }
q1538
dens_floc_init
train
def dens_floc_init(ConcAluminum, ConcClay, coag, material): """Return the density of the initial floc. Initial floc is made primarily of the primary colloid and nanoglobs. """ return (conc_floc(ConcAluminum, ConcClay, coag).magnitude / frac_vol_floc_initial(ConcAluminum, ConcClay, coag, material) )
python
{ "resource": "" }
q1539
ratio_area_clay_total
train
def ratio_area_clay_total(ConcClay, material, DiamTube, RatioHeightDiameter): """Return the surface area of clay normalized by total surface area. Total surface area is a combination of clay and reactor wall surface areas. This function is used to estimate how much coagulant actually goes to the clay. :param ConcClay: Concentration of clay in suspension :type ConcClay: float :param material: Type of clay in suspension, e.g. floc_model.Clay :type material: floc_model.Material :param DiamTube: Diameter of flocculator tube (assumes tube flocculator for calculation of reactor surface area) :type DiamTube: float :param RatioHeightDiameter: Dimensionless ratio describing ratio of clay height to clay diameter :type RatioHeightDiameter: float :return: The ratio of clay surface area to total available surface area (accounting for reactor walls) :rtype: float """ return (1 / (1 + (2 * material.Diameter / (3 * DiamTube * ratio_clay_sphere(RatioHeightDiameter) * (ConcClay / material.Density) ) ) ) )
python
{ "resource": "" }
q1540
gamma_coag
train
def gamma_coag(ConcClay, ConcAluminum, coag, material,
               DiamTube, RatioHeightDiameter):
    """Return the coverage of clay with nanoglobs.

    This function accounts for loss to the tube flocculator walls and a
    Poisson distribution on the clay given random hits by the nanoglobs.
    The Poisson distribution results in the coverage only gradually
    approaching full coverage as coagulant dose increases.

    :param ConcClay: Concentration of clay in suspension
    :type ConcClay: float
    :param ConcAluminum: Concentration of aluminum in solution
    :type ConcAluminum: float
    :param coag: Type of coagulant in solution, e.g. floc_model.PACl
    :type coag: floc_model.Material
    :param material: Type of clay in suspension, e.g. floc_model.Clay
    :type material: floc_model.Material
    :param DiamTube: Diameter of flocculator tube (assumes tube flocculator for calculation of reactor surface area)
    :type DiamTube: float
    :param RatioHeightDiameter: Dimensionless ratio of clay height to clay diameter
    :type RatioHeightDiameter: float

    :return: Fraction of the clay surface area that is coated with coagulant precipitates
    :rtype: float
    """
    return (1 - np.exp((
                (-frac_vol_floc_initial(ConcAluminum, 0*u.kg/u.m**3, coag, material)
                 * material.Diameter)
                / (frac_vol_floc_initial(0*u.kg/u.m**3, ConcClay, coag, material)
                   * coag.Diameter))
                * (1 / np.pi)
                * (ratio_area_clay_total(ConcClay, material,
                                         DiamTube, RatioHeightDiameter)
                   / ratio_clay_sphere(RatioHeightDiameter))
            ))
python
{ "resource": "" }
q1541
gamma_humic_acid_to_coag
train
def gamma_humic_acid_to_coag(ConcAl, ConcNatOrgMat, NatOrgMat, coag):
    """Return the fraction of the coagulant that is coated with humic acid.

    :param ConcAl: Concentration of aluminum in solution
    :type ConcAl: float
    :param ConcNatOrgMat: Concentration of natural organic matter in solution
    :type ConcNatOrgMat: float
    :param NatOrgMat: Type of natural organic matter, e.g. floc_model.HumicAcid
    :type NatOrgMat: floc_model.Material
    :param coag: Type of coagulant in solution, e.g. floc_model.PACl
    :type coag: floc_model.Material

    :return: fraction of the coagulant that is coated with humic acid
    :rtype: float
    """
    return min(((ConcNatOrgMat / conc_precipitate(ConcAl, coag).magnitude)
                * (coag.Density / NatOrgMat.Density)
                * (coag.Diameter / (4 * NatOrgMat.Diameter))
                ),
               1)
python
{ "resource": "" }
q1542
pacl_term
train
def pacl_term(DiamTube, ConcClay, ConcAl, ConcNatOrgMat, NatOrgMat,
              coag, material, RatioHeightDiameter):
    """Return the fraction of the surface area that is covered with
    coagulant that is not covered with humic acid.

    :param DiamTube: Diameter of the dosing tube
    :type DiamTube: float
    :param ConcClay: Concentration of clay in solution
    :type ConcClay: float
    :param ConcAl: Concentration of aluminum in solution
    :type ConcAl: float
    :param ConcNatOrgMat: Concentration of natural organic matter in solution
    :type ConcNatOrgMat: float
    :param NatOrgMat: Type of natural organic matter, e.g. floc_model.HumicAcid
    :type NatOrgMat: floc_model.Material
    :param coag: Type of coagulant in solution, e.g. floc_model.PACl
    :type coag: floc_model.Material
    :param material: Type of clay in suspension, e.g. floc_model.Clay
    :type material: floc_model.Material
    :param RatioHeightDiameter: Dimensionless ratio of clay height to clay diameter
    :type RatioHeightDiameter: float

    :return: fraction of the surface area that is covered with coagulant that is not covered with humic acid
    :rtype: float
    """
    return (gamma_coag(ConcClay, ConcAl, coag, material, DiamTube,
                       RatioHeightDiameter)
            * (1 - gamma_humic_acid_to_coag(ConcAl, ConcNatOrgMat,
                                            NatOrgMat, coag))
            )
python
{ "resource": "" }
q1543
dens_floc
train
def dens_floc(ConcAl, ConcClay, DIM_FRACTAL, DiamTarget, coag, material, Temp): """Calculate floc density as a function of size.""" WaterDensity = pc.density_water(Temp).magnitude return ((dens_floc_init(ConcAl, ConcClay, coag, material).magnitude - WaterDensity ) * (material.Diameter / DiamTarget)**(3 - DIM_FRACTAL) + WaterDensity )
python
{ "resource": "" }
q1544
vel_term_floc
train
def vel_term_floc(ConcAl, ConcClay, coag, material, DIM_FRACTAL, DiamTarget, Temp): """Calculate floc terminal velocity.""" WaterDensity = pc.density_water(Temp).magnitude return (((pc.gravity.magnitude * material.Diameter**2) / (18 * PHI_FLOC * pc.viscosity_kinematic(Temp).magnitude) ) * ((dens_floc_init(ConcAl, ConcClay, coag, material).magnitude - WaterDensity ) / WaterDensity ) * (DiamTarget / material.Diameter) ** (DIM_FRACTAL - 1) )
python
{ "resource": "" }
q1545
diam_floc_vel_term
train
def diam_floc_vel_term(ConcAl, ConcClay, coag, material,
                       DIM_FRACTAL, VelTerm, Temp):
    """Calculate floc diameter as a function of terminal velocity."""
    WaterDensity = pc.density_water(Temp).magnitude
    return (material.Diameter * (((18 * VelTerm * PHI_FLOC
                                   * pc.viscosity_kinematic(Temp).magnitude)
                                  / (pc.gravity.magnitude * material.Diameter**2))
                                 * (WaterDensity
                                    / (dens_floc_init(ConcAl, ConcClay, coag,
                                                      material).magnitude
                                       - WaterDensity))
                                 ) ** (1 / (DIM_FRACTAL - 1))
            )
python
{ "resource": "" }
q1546
time_col_laminar
train
def time_col_laminar(EnergyDis, Temp, ConcAl, ConcClay, coag, material,
                     DiamTarget, DiamTube, DIM_FRACTAL, RatioHeightDiameter):
    """Calculate single collision time for laminar flow mediated collisions.

    Calculated as a function of floc size.
    """
    return (((1/6) * ((6/np.pi)**(1/3))
             * frac_vol_floc_initial(ConcAl, ConcClay, coag, material) ** (-2/3)
             * (pc.viscosity_kinematic(Temp).magnitude / EnergyDis) ** (1/2)
             * (DiamTarget / material.Diameter) ** (2*DIM_FRACTAL/3 - 2)
             )  # end of the numerator
            / (gamma_coag(ConcClay, ConcAl, coag, material,
                          DiamTube, RatioHeightDiameter)
               )  # end of the denominator
            )
python
{ "resource": "" }
q1547
time_col_turbulent
train
def time_col_turbulent(EnergyDis, ConcAl, ConcClay, coag, material, DiamTarget, DIM_FRACTAL): """Calculate single collision time for turbulent flow mediated collisions. Calculated as a function of floc size. """ return((1/6) * (6/np.pi)**(1/9) * EnergyDis**(-1/3) * DiamTarget**(2/3) * frac_vol_floc_initial(ConcAl, ConcClay, coag, material)**(-8/9) * (DiamTarget / material.Diameter)**((8*(DIM_FRACTAL-3)) / 9) )
python
{ "resource": "" }
q1548
diam_kolmogorov
train
def diam_kolmogorov(EnergyDis, Temp, ConcAl, ConcClay, coag, material, DIM_FRACTAL): """Return the size of the floc with separation distances equal to the Kolmogorov length and the inner viscous length scale. """ return (material.Diameter * ((eta_kolmogorov(EnergyDis, Temp).magnitude / material.Diameter) * ((6 * frac_vol_floc_initial(ConcAl, ConcClay, coag, material)) / np.pi )**(1/3) )**(3 / DIM_FRACTAL) )
python
{ "resource": "" }
q1549
dean_number
train
def dean_number(PlantFlow, IDTube, RadiusCoil, Temp): """Return the Dean Number. The Dean Number is a dimensionless parameter that is the unfortunate combination of Reynolds and tube curvature. It would have been better to keep the Reynolds number and define a simple dimensionless geometric parameter. """ return (reynolds_rapid_mix(PlantFlow, IDTube, Temp) * (IDTube / (2 * RadiusCoil))**(1/2) )
python
{ "resource": "" }
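A plain-number sketch of the Dean number relation above, De = Re * sqrt(ID / (2 * Rc)); the Reynolds number and geometry are illustrative assumptions:

import numpy as np

Re = 2000.0   # Reynolds number in the tube (assumed)
ID = 0.01     # tube inner diameter, m (assumed)
Rc = 0.05     # coil radius, m (assumed)
De = Re * np.sqrt(ID / (2 * Rc))
print(round(De, 1))   # -> 632.5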
q1550
g_coil
train
def g_coil(FlowPlant, IDTube, RadiusCoil, Temp): """We need a reference for this. Karen's thesis likely has this equation and the reference. """ return (g_straight(FlowPlant, IDTube).magnitude * (1 + 0.033 * np.log10(dean_number(FlowPlant, IDTube, RadiusCoil, Temp) ) ** 4 ) ** (1/2) )
python
{ "resource": "" }
q1551
g_time_res
train
def g_time_res(FlowPlant, IDTube, RadiusCoil, LengthTube, Temp): """G Residence Time calculated for a coiled tube flocculator.""" return (g_coil(FlowPlant, IDTube, RadiusCoil, Temp).magnitude * time_res_tube(IDTube, LengthTube, FlowPlant).magnitude )
python
{ "resource": "" }
q1552
Chemical.define_Precip
train
def define_Precip(self, diameter, density, molecweight, alumMPM):
    """Define a precipitate for the chemical.

    :param diameter: Diameter of the precipitate in particulate form
    :type diameter: float
    :param density: Density of the material (mass/volume)
    :type density: float
    :param molecweight: Molecular weight of the material (mass/mole)
    :type molecweight: float
    :param alumMPM: Moles of aluminum per mole of precipitate
    :type alumMPM: float
    """
    self.PrecipDiameter = diameter
    self.PrecipDensity = density
    self.PrecipMolecWeight = molecweight
    self.PrecipAluminumMPM = alumMPM
python
{ "resource": "" }
q1553
Plant.ent_tank_a
train
def ent_tank_a(self):
    """Calculate the planview area of the entrance tank, given the volume of
    the flocculator.

    :returns: The planview area of the entrance tank.
    :rtype: float * u.m ** 2
    """
    # first guess planview area
    a_new = 1 * u.m**2
    a_ratio = 2  # set to >1+tolerance to start while loop
    tolerance = 0.01
    a_floc_pv = (
        self.floc.vol /
        (self.floc.downstream_H + (self.floc.HL / 2))
    )
    while a_ratio > (1 + tolerance):
        a_et_pv = a_new
        a_etf_pv = a_et_pv + a_floc_pv
        w_tot = a_etf_pv / self.floc.max_L
        w_chan = w_tot / self.floc.channel_n
        a_new = self.floc.max_L * w_chan
        a_ratio = a_new / a_et_pv
    return a_new
python
{ "resource": "" }
q1554
n_lfom_rows
train
def n_lfom_rows(FLOW, HL_LFOM):
    """This equation states that the open area corresponding to one row can
    be set equal to two orifices of diameter = row height. If there are more
    than two orifices per row at the top of the LFOM, then there are more
    orifices than are convenient to drill and more than necessary for good
    accuracy. Thus this relationship can be used to increase the spacing
    between the rows and thus increase the diameter of the orifices. This
    spacing function also sets the lower depth on the high flow rate LFOM
    with no accurate flows below a depth equal to the first row height.

    But it might be better to always set the number of rows to 10. The
    challenge is to figure out a reasonable system of constraints that
    reliably returns a valid solution.
    """
    N_estimated = (HL_LFOM*np.pi/(2*width_stout(HL_LFOM, HL_LFOM)*FLOW))
    variablerow = min(10, max(4, math.trunc(N_estimated.magnitude)))
    # Force the LFOM to have either 4 or 8 rows, for design purposes.
    # If the hydraulic calculation finds that there should be 4 rows, then
    # there will be 4 rows; any other result is assigned 8 rows.
    # This can be improved in the future.
    if variablerow != 4:
        variablerow = 8
    return variablerow
python
{ "resource": "" }
q1555
flow_lfom_actual
train
def flow_lfom_actual(FLOW, HL_LFOM, drill_bits, Row_Index_Submerged, N_LFOM_Orifices):
    """Calculates the flow for a given number of submerged rows of orifices.

    harray is the distance from the water level to the center of the
    orifices when the water is at the max level.
    """
    D_LFOM_Orifices = orifice_diameter(FLOW, HL_LFOM, drill_bits).magnitude
    row_height = dist_center_lfom_rows(FLOW, HL_LFOM).magnitude
    harray = (np.linspace(row_height, HL_LFOM,
                          n_lfom_rows(FLOW, HL_LFOM))) - 0.5 * D_LFOM_Orifices
    FLOW_new = 0
    for i in range(Row_Index_Submerged + 1):
        FLOW_new = FLOW_new + (N_LFOM_Orifices[i] * (
            pc.flow_orifice_vert(D_LFOM_Orifices,
                                 harray[Row_Index_Submerged - i],
                                 con.VC_ORIFICE_RATIO).magnitude))
    return FLOW_new
python
{ "resource": "" }
q1556
round_sf
train
def round_sf(number, digits):
    """Returns the inputted value rounded to the desired number of
    significant figures.

    :param number: Value to be rounded
    :type number: float
    :param digits: number of significant digits to be rounded to
    :type digits: int
    """
    units = None
    try:
        num = number.magnitude
        units = number.units
    except AttributeError:
        num = number

    try:
        if units is not None:
            rounded_num = round(num, digits - int(floor(log10(abs(num)))) - 1) * units
        else:
            rounded_num = round(num, digits - int(floor(log10(abs(num)))) - 1)
        return rounded_num
    except ValueError:  # prevents an error with log10(0)
        if units is not None:
            return 0 * units
        else:
            return 0
python
{ "resource": "" }
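A standalone sketch of the significant-figure rounding rule used by round_sf, for plain floats without the Pint branch (the helper name round_sig is hypothetical):

from math import floor, log10

def round_sig(num, digits):
    # shift the rounding position so that `digits` significant figures survive
    if num == 0:
        return 0
    return round(num, digits - int(floor(log10(abs(num)))) - 1)

print(round_sig(1234.567, 3))   # -> 1230.0
print(round_sig(0.0012345, 2))  # -> 0.0012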
q1557
stepceil_with_units
train
def stepceil_with_units(param, step, unit): """This function returns the smallest multiple of 'step' greater than or equal to 'param' and outputs the result in Pint units. This function is unit-aware and functions without requiring translation so long as 'param' and 'unit' are of the same dimensionality. """ counter = 0 * unit while counter < param.to(unit): counter += step * unit return counter
python
{ "resource": "" }
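For plain numbers, the loop in stepceil_with_units computes the smallest multiple of step that is greater than or equal to param, equivalent (up to float precision) to this sketch (the name stepceil is hypothetical):

import math

def stepceil(param, step):
    # smallest multiple of step that is >= param
    return math.ceil(param / step) * step

print(stepceil(7.2, 2.5))   # -> 7.5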
q1558
list_handler
train
def list_handler(HandlerResult="nparray"):
    """Wraps a function to handle list inputs."""
    def decorate(func):
        def wrapper(*args, **kwargs):
            """Run through the wrapped function once for each array element.

            :param HandlerResult: output type. Defaults to numpy arrays.
            """
            sequences = []
            enumsUnitCheck = enumerate(args)
            argsList = list(args)
            # This for loop identifies pint unit objects and strips them
            # of their units.
            for num, arg in enumsUnitCheck:
                if type(arg) == type(1 * u.m):
                    argsList[num] = arg.to_base_units().magnitude
            enumsUnitless = enumerate(argsList)
            # This for loop identifies arguments that are sequences and
            # adds their index location to the list 'sequences'.
            for num, arg in enumsUnitless:
                if isinstance(arg, (list, tuple, np.ndarray)):
                    sequences.append(num)
            # If there are no sequences to iterate through, simply return
            # the function.
            if len(sequences) == 0:
                result = func(*args, **kwargs)
            else:
                # iterant keeps track of how many times we've iterated and
                # limiter stops the loop once we've iterated as many times
                # as there are list elements. Without this check, a few
                # erroneous runs will occur, appending the last couple
                # values to the end of the list multiple times.
                #
                # We only care about the length of sequences[0] because this
                # function is recursive, and sequences[0] is always the
                # relevant sequence for any given run.
                limiter = len(argsList[sequences[0]])
                iterant = 0
                result = []
                for num in sequences:
                    for arg in argsList[num]:
                        if iterant >= limiter:
                            break
                        # We can safely replace the entire list argument
                        # with a single element from it because of the
                        # looping we're doing. We redefine the object, but
                        # that definition remains within this namespace and
                        # does not penetrate further up the function.
                        argsList[num] = arg
                        # Here we dive down the rabbit hole. This ends up
                        # creating a multi-dimensional array shaped by the
                        # sizes and shapes of the lists passed.
                        result.append(wrapper(*argsList,
                                              HandlerResult=HandlerResult,
                                              **kwargs))
                        iterant += 1
                # HandlerResult allows the user to specify what type to
                # return the generated sequence as. It defaults to numpy
                # arrays because functions tend to handle them better, but
                # if the user does not wish to import numpy the base Python
                # options are available to them.
                if HandlerResult == "nparray":
                    result = np.array(result)
                elif HandlerResult == "tuple":
                    result = tuple(result)
                elif HandlerResult == "list":
                    result = list(result)
            return result
        return wrapper
    return decorate
python
{ "resource": "" }
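A usage sketch for list_handler above, assuming the module context it was written for (a Pint registry named u and numpy imported as np); the decorated function here is hypothetical:

@list_handler()
def circle_area(diam):
    return np.pi * diam**2 / 4

# a scalar argument passes straight through; a list argument is mapped
print(circle_area(2.0))          # -> 3.14159...
print(circle_area([1.0, 2.0]))   # -> array([0.785..., 3.141...])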
q1559
check_range
train
def check_range(*args):
    """
    Check whether passed parameters fall within approved ranges.

    Does not return anything, but will raise an error if a parameter falls
    outside of its defined range.

    Input should be passed as an array of sequences, with each sequence
    having three elements:
        [0] is the value being checked,
        [1] is the range parameter(s) within which the value should fall, and
        [2] is the name of the parameter, for better error messages.
    If [2] is not supplied, "Input" will be appended as a generic name.

    Range requests that this function understands are listed in the
    knownChecks sequence.
    """
    knownChecks = ('>0', '>=0', '0-1', '<0', '<=0', 'int', 'boolean')
    for arg in args:
        # Converts arg to a mutable list
        arg = [*arg]
        if len(arg) == 1:
            # arg[1] details what range the parameter should fall within; if
            # len(arg) is 1 that means a validity was not specified and the
            # parameter should not have been passed in its current form
            raise TypeError("No range-validity parameter provided.")
        elif len(arg) == 2:
            # Appending "Input" to the end allows us to give more
            # descriptive error messages that do not fail if no description
            # was supplied.
            arg.append("Input")
        # This ensures that all whitespace is removed before checking if the
        # request is understood
        arg[1] = "".join(arg[1].lower().split())
        # This block checks that each range request is understood.
        # If the request is a compound one, it must be separated into
        # individual requests for validity comprehension
        for i in arg[1].split(","):
            if i not in knownChecks:
                raise RuntimeError("Unknown parameter validation "
                                   "request: {0}.".format(i))
        if not isinstance(arg[0], (list, tuple, np.ndarray)):
            arg[0] = [arg[0]]
        for i in arg[0]:
            if '>0' in arg[1] and i <= 0:
                raise ValueError("{1} is {0} but must be greater than "
                                 "0.".format(i, arg[2]))
            if '>=0' in arg[1] and i < 0:
                raise ValueError("{1} is {0} but must be 0 or "
                                 "greater.".format(i, arg[2]))
            if '0-1' in arg[1] and not 0 <= i <= 1:
                raise ValueError("{1} is {0} but must be between 0 and "
                                 "1.".format(i, arg[2]))
            if '<0' in arg[1] and i >= 0:
                raise ValueError("{1} is {0} but must be less than "
                                 "0.".format(i, arg[2]))
            if '<=0' in arg[1] and i > 0:
                raise ValueError("{1} is {0} but must be 0 or "
                                 "less.".format(i, arg[2]))
            if 'int' in arg[1] and int(i) != i:
                raise TypeError("{1} is {0} but must be a numeric "
                                "integer.".format(i, arg[2]))
            if 'boolean' in arg[1] and type(i) != bool:
                raise TypeError("{1} is {0} but must be a "
                                "boolean.".format(i, arg[2]))
python
{ "resource": "" }
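A usage sketch for check_range: each argument is a (value, range-request, name) sequence, and compound requests are comma-separated (the parameter names here are illustrative):

check_range((0.25, '0-1', 'Velocity ratio'),
            (5, '>0,int', 'Number of tubes'))   # passes silently

check_range((-1, '>=0', 'Depth'))
# -> ValueError: Depth is -1 but must be 0 or greater.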
q1560
Clusterer.update_clusterer
train
def update_clusterer(self, inst): """ Updates the clusterer with the instance. :param inst: the Instance to update the clusterer with :type inst: Instance """ if self.is_updateable: javabridge.call(self.jobject, "updateClusterer", "(Lweka/core/Instance;)V", inst.jobject) else: logger.critical(classes.get_classname(self.jobject) + " is not updateable!")
python
{ "resource": "" }
q1561
Clusterer.update_finished
train
def update_finished(self): """ Signals the clusterer that updating with new data has finished. """ if self.is_updateable: javabridge.call(self.jobject, "updateFinished", "()V") else: logger.critical(classes.get_classname(self.jobject) + " is not updateable!")
python
{ "resource": "" }
q1562
Clusterer.distribution_for_instance
train
def distribution_for_instance(self, inst):
    """
    Performs a prediction, returning the cluster distribution.

    :param inst: the Instance to get the cluster distribution for
    :type inst: Instance
    :return: the cluster distribution
    :rtype: float[]
    """
    pred = self.__distribution(inst.jobject)
    return javabridge.get_env().get_double_array_elements(pred)
python
{ "resource": "" }
q1563
ClusterEvaluation.cluster_assignments
train
def cluster_assignments(self): """ Return an array of cluster assignments corresponding to the most recent set of instances clustered. :return: the cluster assignments :rtype: ndarray """ array = javabridge.call(self.jobject, "getClusterAssignments", "()[D") if array is None: return None else: return javabridge.get_env().get_double_array_elements(array)
python
{ "resource": "" }
q1564
ClusterEvaluation.crossvalidate_model
train
def crossvalidate_model(cls, clusterer, data, num_folds, rnd): """ Cross-validates the clusterer and returns the loglikelihood. :param clusterer: the clusterer instance to evaluate :type clusterer: Clusterer :param data: the data to evaluate on :type data: Instances :param num_folds: the number of folds :type num_folds: int :param rnd: the random number generator to use :type rnd: Random :return: the cross-validated loglikelihood :rtype: float """ return javabridge.static_call( "Lweka/clusterers/ClusterEvaluation;", "crossValidateModel", "(Lweka/clusterers/DensityBasedClusterer;Lweka/core/Instances;ILjava/util/Random;)D", clusterer.jobject, data.jobject, num_folds, rnd.jobject)
python
{ "resource": "" }
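A usage sketch in python-weka-wrapper style for the cross-validation call above; it assumes a local iris.arff and a running JVM:

import weka.core.jvm as jvm
from weka.core.converters import Loader
from weka.core.classes import Random
from weka.clusterers import Clusterer, ClusterEvaluation

jvm.start()
data = Loader(classname="weka.core.converters.ArffLoader").load_file("iris.arff")
data.delete_last_attribute()   # clusterers need the class attribute removed
clusterer = Clusterer(classname="weka.clusterers.EM")
loglikelihood = ClusterEvaluation.crossvalidate_model(clusterer, data, 10, Random(1))
print(loglikelihood)
jvm.stop()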
q1565
read_all
train
def read_all(filename):
    """
    Reads the serialized objects from disk. Caller must wrap objects in
    appropriate Python wrapper classes.

    :param filename: the file with the serialized objects
    :type filename: str
    :return: the list of JB_Objects
    :rtype: list
    """
    array = javabridge.static_call(
        "Lweka/core/SerializationHelper;", "readAll",
        "(Ljava/lang/String;)[Ljava/lang/Object;",
        filename)
    if array is None:
        return None
    else:
        return javabridge.get_env().get_object_array_elements(array)
python
{ "resource": "" }
q1566
write
train
def write(filename, jobject): """ Serializes the object to disk. JavaObject instances get automatically unwrapped. :param filename: the file to serialize the object to :type filename: str :param jobject: the object to serialize :type jobject: JB_Object or JavaObject """ if isinstance(jobject, JavaObject): jobject = jobject.jobject javabridge.static_call( "Lweka/core/SerializationHelper;", "write", "(Ljava/lang/String;Ljava/lang/Object;)V", filename, jobject)
python
{ "resource": "" }
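A usage sketch for the serialization helpers above, assuming they live in weka.core.serialization as in python-weka-wrapper, with a running JVM and a local iris.arff; the file path is arbitrary:

import weka.core.jvm as jvm
from weka.core.converters import Loader
from weka.core.serialization import write, read_all

jvm.start()
data = Loader(classname="weka.core.converters.ArffLoader").load_file("iris.arff")
write("/tmp/data.ser", data)          # JavaObject instances are unwrapped automatically
objects = read_all("/tmp/data.ser")   # -> list of JB_Object to re-wrap, e.g. Instances(objects[0])
jvm.stop()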
q1567
Item.decrease_frequency
train
def decrease_frequency(self, frequency=None): """ Decreases the frequency. :param frequency: the frequency to decrease by, 1 if None :type frequency: int """ if frequency is None: javabridge.call(self.jobject, "decreaseFrequency", "()V") else: javabridge.call(self.jobject, "decreaseFrequency", "(I)V", frequency)
python
{ "resource": "" }
q1568
Item.increase_frequency
train
def increase_frequency(self, frequency=None): """ Increases the frequency. :param frequency: the frequency to increase by, 1 if None :type frequency: int """ if frequency is None: javabridge.call(self.jobject, "increaseFrequency", "()V") else: javabridge.call(self.jobject, "increaseFrequency", "(I)V", frequency)
python
{ "resource": "" }
q1569
AssociationRule.consequence
train
def consequence(self): """ Get the the consequence. :return: the consequence, list of Item objects :rtype: list """ items = javabridge.get_collection_wrapper( javabridge.call(self.jobject, "getConsequence", "()Ljava/util/Collection;")) result = [] for item in items: result.append(Item(item)) return result
python
{ "resource": "" }
q1570
Associator.can_produce_rules
train
def can_produce_rules(self): """ Checks whether association rules can be generated. :return: whether scheme implements AssociationRulesProducer interface and association rules can be generated :rtype: bool """ if not self.check_type(self.jobject, "weka.associations.AssociationRulesProducer"): return False return javabridge.call(self.jobject, "canProduceRules", "()Z")
python
{ "resource": "" }
q1571
Associator.association_rules
train
def association_rules(self): """ Returns association rules that were generated. Only if implements AssociationRulesProducer. :return: the association rules that were generated :rtype: AssociationRules """ if not self.check_type(self.jobject, "weka.associations.AssociationRulesProducer"): return None return AssociationRules( javabridge.call(self.jobject, "getAssociationRules", "()Lweka/associations/AssociationRules;"))
python
{ "resource": "" }
q1572
Associator.rule_metric_names
train
def rule_metric_names(self): """ Returns the rule metric names of the association rules. Only if implements AssociationRulesProducer. :return: the metric names :rtype: list """ if not self.check_type(self.jobject, "weka.associations.AssociationRulesProducer"): return None return string_array_to_list( javabridge.call(self.jobject, "getRuleMetricNames", "()[Ljava/lang/String;"))
python
{ "resource": "" }
q1573
loader_for_file
train
def loader_for_file(filename):
    """
    Returns a Loader that can load the specified file, based on the file
    extension. None if failed to determine.

    :param filename: the filename to get the loader for
    :type filename: str
    :return: the associated loader instance or None if none found
    :rtype: Loader
    """
    loader = javabridge.static_call(
        "weka/core/converters/ConverterUtils", "getLoaderForFile",
        "(Ljava/lang/String;)Lweka/core/converters/AbstractFileLoader;", filename)
    if loader is None:
        return None
    else:
        return Loader(jobject=loader)
python
{ "resource": "" }
q1574
saver_for_file
train
def saver_for_file(filename):
    """
    Returns a Saver that can save the specified file, based on the file
    extension. None if failed to determine.

    :param filename: the filename to get the saver for
    :type filename: str
    :return: the associated saver instance or None if none found
    :rtype: Saver
    """
    saver = javabridge.static_call(
        "weka/core/converters/ConverterUtils", "getSaverForFile",
        "(Ljava/lang/String;)Lweka/core/converters/AbstractFileSaver;", filename)
    if saver is None:
        return None
    else:
        return Saver(jobject=saver)
python
{ "resource": "" }
q1575
save_any_file
train
def save_any_file(data, filename):
    """
    Determines a Saver based on the file extension. Returns whether
    successfully saved.

    :param filename: the name of the file to save
    :type filename: str
    :param data: the data to save
    :type data: Instances
    :return: whether successfully saved
    :rtype: bool
    """
    saver = saver_for_file(filename)
    if saver is None:
        return False
    else:
        saver.save_file(data, filename)
        return True
python
{ "resource": "" }
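A usage sketch tying the three converter helpers together (python-weka-wrapper conventions; assumes a local iris.arff and a running JVM):

import weka.core.jvm as jvm
from weka.core.converters import loader_for_file, save_any_file

jvm.start()
loader = loader_for_file("iris.arff")   # picks ArffLoader from the extension
data = loader.load_file("iris.arff")
ok = save_any_file(data, "iris.csv")    # picks CSVSaver from the extension
print(ok)                               # -> True
jvm.stop()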
q1576
ndarray_to_instances
train
def ndarray_to_instances(array, relation, att_template="Att-#", att_list=None):
    """
    Converts the numpy matrix into an Instances object and returns it.

    :param array: the numpy ndarray to convert
    :type array: numpy.ndarray
    :param relation: the name of the dataset
    :type relation: str
    :param att_template: the prefix to use for the attribute names, "#" is the 1-based index, "!" is the 0-based index, "@" the relation name
    :type att_template: str
    :param att_list: the list of attribute names to use
    :type att_list: list
    :return: the generated instances object
    :rtype: Instances
    """
    if len(numpy.shape(array)) != 2:
        raise Exception("Number of array dimensions must be 2!")
    rows, cols = numpy.shape(array)

    # header
    atts = []
    if att_list is not None:
        if len(att_list) != cols:
            raise Exception(
                "Number of columns and provided attribute names differ: "
                + str(cols) + " != " + str(len(att_list)))
        for name in att_list:
            att = Attribute.create_numeric(name)
            atts.append(att)
    else:
        for i in range(cols):
            name = att_template.replace("#", str(i+1)).replace("!", str(i)).replace("@", relation)
            att = Attribute.create_numeric(name)
            atts.append(att)
    result = Instances.create_instances(relation, atts, rows)

    # data
    for i in range(rows):
        inst = Instance.create_instance(array[i])
        result.add_instance(inst)
    return result
python
{ "resource": "" }
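A usage sketch for ndarray_to_instances, with the function above in scope and a running JVM assumed; the relation name is arbitrary:

import numpy
import weka.core.jvm as jvm

jvm.start()
mat = numpy.array([[1.0, 2.0], [3.0, 4.0]])
data = ndarray_to_instances(mat, "toy", att_template="Att-#")
print(data.num_instances)   # -> 2
print(data.num_attributes)  # -> 2 (numeric attributes Att-1, Att-2)
jvm.stop()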
q1577
Loader.load_file
train
def load_file(self, dfile, incremental=False):
    """
    Loads the specified file and returns the Instances object.
    In case of incremental loading, only the structure.

    :param dfile: the file to load
    :type dfile: str
    :param incremental: whether to load the dataset incrementally
    :type incremental: bool
    :return: the full dataset or the header (if incremental)
    :rtype: Instances
    :raises Exception: if the file does not exist
    """
    self.enforce_type(self.jobject, "weka.core.converters.FileSourcedConverter")
    self.incremental = incremental
    if not javabridge.is_instance_of(dfile, "Ljava/io/File;"):
        dfile = javabridge.make_instance(
            "Ljava/io/File;", "(Ljava/lang/String;)V",
            javabridge.get_env().new_string_utf(str(dfile)))
    javabridge.call(self.jobject, "reset", "()V")
    # check whether file exists, otherwise a previously set file gets loaded again
    sfile = javabridge.to_string(dfile)
    if not os.path.exists(sfile):
        raise Exception("Dataset file does not exist: " + str(sfile))
    javabridge.call(self.jobject, "setFile", "(Ljava/io/File;)V", dfile)
    if incremental:
        self.structure = Instances(javabridge.call(self.jobject, "getStructure",
                                                   "()Lweka/core/Instances;"))
        return self.structure
    else:
        return Instances(javabridge.call(self.jobject, "getDataSet",
                                         "()Lweka/core/Instances;"))
python
{ "resource": "" }
q1578
Loader.load_url
train
def load_url(self, url, incremental=False): """ Loads the specified URL and returns the Instances object. In case of incremental loading, only the structure. :param url: the URL to load the data from :type url: str :param incremental: whether to load the dataset incrementally :type incremental: bool :return: the full dataset or the header (if incremental) :rtype: Instances """ self.enforce_type(self.jobject, "weka.core.converters.URLSourcedLoader") self.incremental = incremental javabridge.call(self.jobject, "reset", "()V") javabridge.call(self.jobject, "setURL", "(Ljava/lang/String;)V", str(url)) if incremental: self.structure = Instances(javabridge.call(self.jobject, "getStructure", "()Lweka/core/Instances;")) return self.structure else: return Instances(javabridge.call(self.jobject, "getDataSet", "()Lweka/core/Instances;"))
python
{ "resource": "" }
q1579
TextDirectoryLoader.load
train
def load(self): """ Loads the text files from the specified directory and returns the Instances object. In case of incremental loading, only the structure. :return: the full dataset or the header (if incremental) :rtype: Instances """ javabridge.call(self.jobject, "reset", "()V") return Instances(javabridge.call(self.jobject, "getDataSet", "()Lweka/core/Instances;"))
python
{ "resource": "" }
q1580
Saver.save_file
train
def save_file(self, data, dfile): """ Saves the Instances object in the specified file. :param data: the data to save :type data: Instances :param dfile: the file to save the data to :type dfile: str """ self.enforce_type(self.jobject, "weka.core.converters.FileSourcedConverter") if not javabridge.is_instance_of(dfile, "Ljava/io/File;"): dfile = javabridge.make_instance( "Ljava/io/File;", "(Ljava/lang/String;)V", javabridge.get_env().new_string_utf(str(dfile))) javabridge.call(self.jobject, "setFile", "(Ljava/io/File;)V", dfile) javabridge.call(self.jobject, "setInstances", "(Lweka/core/Instances;)V", data.jobject) javabridge.call(self.jobject, "writeBatch", "()V")
python
{ "resource": "" }
q1581
string_array_to_list
train
def string_array_to_list(a): """ Turns the Java string array into Python unicode string list. :param a: the string array to convert :type a: JB_Object :return: the string list :rtype: list """ result = [] length = javabridge.get_env().get_array_length(a) wrapped = javabridge.get_env().get_object_array_elements(a) for i in range(length): result.append(javabridge.get_env().get_string(wrapped[i])) return result
python
{ "resource": "" }
q1582
string_list_to_array
train
def string_list_to_array(l): """ Turns a Python unicode string list into a Java String array. :param l: the string list :type: list :rtype: java string array :return: JB_Object """ result = javabridge.get_env().make_object_array(len(l), javabridge.get_env().find_class("java/lang/String")) for i in range(len(l)): javabridge.get_env().set_object_array_element(result, i, javabridge.get_env().new_string_utf(l[i])) return result
python
{ "resource": "" }
q1583
enumeration_to_list
train
def enumeration_to_list(enm): """ Turns the java.util.Enumeration into a list. :param enm: the enumeration to convert :type enm: JB_Object :return: the list :rtype: list """ result = [] while javabridge.call(enm, "hasMoreElements", "()Z"): result.append(javabridge.call(enm, "nextElement", "()Ljava/lang/Object;")) return result
python
{ "resource": "" }
q1584
ASSearch.search
train
def search(self, evaluation, data):
    """
    Performs the search and returns the indices of the selected attributes.

    :param evaluation: the evaluation algorithm to use
    :type evaluation: ASEvaluation
    :param data: the data to use
    :type data: Instances
    :return: the selected attributes (0-based indices)
    :rtype: ndarray
    """
    array = javabridge.call(
        self.jobject, "search", "(Lweka/attributeSelection/ASEvaluation;Lweka/core/Instances;)[I",
        evaluation.jobject, data.jobject)
    if array is None:
        return None
    else:
        return javabridge.get_env().get_int_array_elements(array)
python
{ "resource": "" }
q1585
ASEvaluation.post_process
train
def post_process(self, indices):
    """
    Post-processes the evaluator with the selected attribute indices.

    :param indices: the attribute indices list to use
    :type indices: ndarray
    :return: the processed indices
    :rtype: ndarray
    """
    array = javabridge.call(self.jobject, "postProcess", "([I)[I", indices)
    if array is None:
        return None
    else:
        return javabridge.get_env().get_int_array_elements(array)
python
{ "resource": "" }
q1586
AttributeSelection.selected_attributes
train
def selected_attributes(self):
    """
    Returns the selected attributes from the last run.

    :return: the Numpy array of 0-based indices
    :rtype: ndarray
    """
    array = javabridge.call(self.jobject, "selectedAttributes", "()[I")
    if array is None:
        return None
    else:
        return javabridge.get_env().get_int_array_elements(array)
python
{ "resource": "" }
q1587
AttributeSelection.reduce_dimensionality
train
def reduce_dimensionality(self, data):
    """
    Reduces the dimensionality of the provided Instance or Instances object.

    :param data: the data to process
    :type data: Instance or Instances
    :return: the reduced data
    :rtype: Instance or Instances
    """
    if type(data) is Instance:
        return Instance(
            javabridge.call(
                self.jobject, "reduceDimensionality",
                "(Lweka/core/Instance;)Lweka/core/Instance;", data.jobject))
    else:
        return Instances(
            javabridge.call(
                self.jobject, "reduceDimensionality",
                "(Lweka/core/Instances;)Lweka/core/Instances;", data.jobject))
python
{ "resource": "" }
q1588
generate_thresholdcurve_data
train
def generate_thresholdcurve_data(evaluation, class_index):
    """
    Generates the threshold curve data from the evaluation object's predictions.

    :param evaluation: the evaluation to obtain the predictions from
    :type evaluation: Evaluation
    :param class_index: the 0-based index of the class-label to create the plot for
    :type class_index: int
    :return: the generated threshold curve data
    :rtype: Instances
    """
    jtc = JavaObject.new_instance("weka.classifiers.evaluation.ThresholdCurve")
    pred = javabridge.call(evaluation.jobject, "predictions", "()Ljava/util/ArrayList;")
    result = Instances(
        javabridge.call(jtc, "getCurve", "(Ljava/util/ArrayList;I)Lweka/core/Instances;", pred, class_index))
    return result
python
{ "resource": "" }
q1589
get_thresholdcurve_data
train
def get_thresholdcurve_data(data, xname, yname):
    """
    Retrieves the x and y columns from the data generated by the
    weka.classifiers.evaluation.ThresholdCurve class.

    :param data: the threshold curve data
    :type data: Instances
    :param xname: the name of the X column
    :type xname: str
    :param yname: the name of the Y column
    :type yname: str
    :return: tuple of x and y arrays
    :rtype: tuple
    """
    xi = data.attribute_by_name(xname).index
    yi = data.attribute_by_name(yname).index
    x = []
    y = []
    for i in range(data.num_instances):
        inst = data.get_instance(i)
        x.append(inst.get_value(xi))
        y.append(inst.get_value(yi))
    return x, y
python
{ "resource": "" }
q1590
install_package
train
def install_package(pkge, version="Latest"):
    """
    Installs the specified package.

    :param pkge: the name of the repository package, a URL (http/https) or a zip file
    :type pkge: str
    :param version: in case of the repository packages, the version
    :type version: str
    :return: whether successfully installed
    :rtype: bool
    """
    establish_cache()
    if pkge.startswith("http://") or pkge.startswith("https://"):
        url = javabridge.make_instance(
            "java/net/URL", "(Ljava/lang/String;)V", javabridge.get_env().new_string_utf(pkge))
        return javabridge.static_call(
            "weka/core/WekaPackageManager", "installPackageFromURL",
            "(Ljava/net/URL;[Ljava/io/PrintStream;)Ljava/lang/String;", url, []) is not None
    elif pkge.lower().endswith(".zip"):
        return javabridge.static_call(
            "weka/core/WekaPackageManager", "installPackageFromArchive",
            "(Ljava/lang/String;[Ljava/io/PrintStream;)Ljava/lang/String;", pkge, []) is not None
    else:
        return javabridge.static_call(
            "weka/core/WekaPackageManager", "installPackageFromRepository",
            "(Ljava/lang/String;Ljava/lang/String;[Ljava/io/PrintStream;)Z", pkge, version, [])
python
{ "resource": "" }
q1591
is_installed
train
def is_installed(name):
    """
    Checks whether a package with the name is already installed.

    :param name: the name of the package
    :type name: str
    :return: whether the package is installed
    :rtype: bool
    """
    pkgs = installed_packages()
    for pkge in pkgs:
        if pkge.name == name:
            return True
    return False
python
{ "resource": "" }
q1592
Package.dependencies
train
def dependencies(self):
    """
    Returns the dependencies of the package.

    :return: the list of Dependency objects
    :rtype: list of Dependency
    """
    result = []
    dependencies = javabridge.get_collection_wrapper(
        javabridge.call(self.jobject, "getDependencies", "()Ljava/util/List;"))
    for dependency in dependencies:
        result.append(Dependency(dependency))
    return result
python
{ "resource": "" }
q1593
PackageConstraint.check_constraint
train
def check_constraint(self, pkge=None, constr=None):
    """
    Checks the constraints.

    :param pkge: the package to check
    :type pkge: Package
    :param constr: the package constraint to check
    :type constr: PackageConstraint
    """
    if pkge is not None:
        return javabridge.call(
            self.jobject, "checkConstraint", "(Lweka/core/packageManagement/Package;)Z", pkge.jobject)
    if constr is not None:
        return javabridge.call(
            self.jobject, "checkConstraint", "(Lweka/core/packageManagement/PackageConstraint;)Z",
            constr.jobject)
    raise Exception("Either package or package constraint must be provided!")
python
{ "resource": "" }
q1594
InstanceQuery.custom_properties
train
def custom_properties(self, props):
    """
    Sets the custom properties file to use.

    :param props: the props file
    :type props: str
    """
    fprops = javabridge.make_instance("java/io/File", "(Ljava/lang/String;)V", props)
    javabridge.call(self.jobject, "setCustomPropsFile", "(Ljava/io/File;)V", fprops)
python
{ "resource": "" }
q1595
Capabilities.owner
train
def owner(self):
    """
    Returns the owner of these capabilities, if any.

    :return: the owner, can be None
    :rtype: JavaObject
    """
    obj = javabridge.call(self.jobject, "getOwner", "()Lweka/core/CapabilitiesHandler;")
    if obj is None:
        return None
    else:
        return JavaObject(jobject=obj)
python
{ "resource": "" }
q1596
Capabilities.dependencies
train
def dependencies(self):
    """
    Returns all the dependencies.

    :return: the dependency list
    :rtype: list
    """
    result = []
    iterator = javabridge.iterate_java(javabridge.call(self.jobject, "dependencies", "()Ljava/util/Iterator;"))
    for c in iterator:
        result.append(Capability(c))
    return result
python
{ "resource": "" }
q1597
Capabilities.for_instances
train
def for_instances(cls, data, multi=None):
    """
    Returns a Capabilities object specific for this data. The minimum number of
    instances is not set, the check for multi-instance data is optional.

    :param data: the data to generate the capabilities for
    :type data: Instances
    :param multi: whether to check for multi-instance data, too
    :type multi: bool
    :return: the generated capabilities
    :rtype: Capabilities
    """
    if multi is None:
        return Capabilities(javabridge.static_call(
            "weka/core/Capabilities", "forInstances",
            "(Lweka/core/Instances;)Lweka/core/Capabilities;", data.jobject))
    else:
        return Capabilities(javabridge.static_call(
            "weka/core/Capabilities", "forInstances",
            "(Lweka/core/Instances;Z)Lweka/core/Capabilities;", data.jobject, multi))
python
{ "resource": "" }
q1598
scatter_plot
train
def scatter_plot(data, index_x, index_y, percent=100.0, seed=1, size=50, title=None, outfile=None, wait=True):
    """
    Plots two attributes against each other.
    TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html

    :param data: the dataset
    :type data: Instances
    :param index_x: the 0-based index of the attribute on the x axis
    :type index_x: int
    :param index_y: the 0-based index of the attribute on the y axis
    :type index_y: int
    :param percent: the percentage of the dataset to use for plotting
    :type percent: float
    :param seed: the seed value to use for subsampling
    :type seed: int
    :param size: the size of the circles in point
    :type size: int
    :param title: an optional title
    :type title: str
    :param outfile: the (optional) file to save the generated plot to. The extension determines the file format.
    :type outfile: str
    :param wait: whether to wait for the user to close the plot
    :type wait: bool
    """
    if not plot.matplotlib_available:
        logger.error("Matplotlib is not installed, plotting unavailable!")
        return

    # create subsample
    data = plot.create_subsample(data, percent=percent, seed=seed)

    # collect data
    x = []
    y = []
    if data.class_index == -1:
        c = None
    else:
        c = []
    for i in range(data.num_instances):
        inst = data.get_instance(i)
        x.append(inst.get_value(index_x))
        y.append(inst.get_value(index_y))
        if c is not None:
            c.append(inst.get_value(inst.class_index))

    # plot data
    fig, ax = plt.subplots()
    if c is None:
        ax.scatter(x, y, s=size, alpha=0.5)
    else:
        ax.scatter(x, y, c=c, s=size, alpha=0.5)
    ax.set_xlabel(data.attribute(index_x).name)
    ax.set_ylabel(data.attribute(index_y).name)
    if title is None:
        title = "Attribute scatter plot"
    if percent != 100:
        title += " (%0.1f%%)" % percent
    ax.set_title(title)
    ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c="0.3")
    ax.grid(True)
    fig.canvas.set_window_title(data.relationname)
    plt.draw()
    if outfile is not None:
        plt.savefig(outfile)
    if wait:
        plt.show()
python
{ "resource": "" }
q1599
line_plot
train
def line_plot(data, atts=None, percent=100.0, seed=1, title=None, outfile=None, wait=True):
    """
    Uses the internal format to plot the dataset, one line per instance.

    :param data: the dataset
    :type data: Instances
    :param atts: the list of 0-based attribute indices of attributes to plot
    :type atts: list
    :param percent: the percentage of the dataset to use for plotting
    :type percent: float
    :param seed: the seed value to use for subsampling
    :type seed: int
    :param title: an optional title
    :type title: str
    :param outfile: the (optional) file to save the generated plot to. The extension determines the file format.
    :type outfile: str
    :param wait: whether to wait for the user to close the plot
    :type wait: bool
    """
    if not plot.matplotlib_available:
        logger.error("Matplotlib is not installed, plotting unavailable!")
        return

    # create subsample
    data = plot.create_subsample(data, percent=percent, seed=seed)

    fig = plt.figure()

    if atts is None:
        x = []
        for i in range(data.num_attributes):
            x.append(i)
    else:
        x = atts

    ax = fig.add_subplot(111)
    ax.set_xlabel("attributes")
    ax.set_ylabel("value")
    ax.grid(True)
    for index_y in range(data.num_instances):
        y = []
        for index_x in x:
            y.append(data.get_instance(index_y).get_value(index_x))
        ax.plot(x, y, "o-", alpha=0.5)
    if title is None:
        title = data.relationname
    if percent != 100:
        title += " (%0.1f%%)" % percent
    fig.canvas.set_window_title(title)
    plt.draw()
    if outfile is not None:
        plt.savefig(outfile)
    if wait:
        plt.show()
python
{ "resource": "" }