body_hash (stringlengths 64–64) | body (stringlengths 23–109k) | docstring (stringlengths 1–57k) | path (stringlengths 4–198) | name (stringlengths 1–115) | repository_name (stringlengths 7–111) | repository_stars (float64 0–191k) | lang (stringclasses 1 value) | body_without_docstring (stringlengths 14–108k) | unified (stringlengths 45–133k) |
---|---|---|---|---|---|---|---|---|---|
e03f165ce174f6c37eb7431d5ef36aab7a71f3c80fea7d92453dffbe24c85274 | def real_sph_harm(l, zero_m_only=True, spherical_coordinates=True):
'\n Computes formula strings of the real part of the spherical harmonics up to order l (excluded).\n Variables are either cartesian coordinates x,y,z on the unit sphere or spherical coordinates phi and theta.\n '
if (not zero_m_only):
S_m = [0]
C_m = [1]
for i in range(1, l):
x = sym.symbols('x')
y = sym.symbols('y')
S_m += [((x * S_m[(i - 1)]) + (y * C_m[(i - 1)]))]
C_m += [((x * C_m[(i - 1)]) - (y * S_m[(i - 1)]))]
P_l_m = associated_legendre_polynomials(l, zero_m_only)
if spherical_coordinates:
theta = sym.symbols('theta')
z = sym.symbols('z')
for i in range(len(P_l_m)):
for j in range(len(P_l_m[i])):
if (type(P_l_m[i][j]) != int):
P_l_m[i][j] = P_l_m[i][j].subs(z, sym.cos(theta))
if (not zero_m_only):
phi = sym.symbols('phi')
for i in range(len(S_m)):
S_m[i] = S_m[i].subs(x, (sym.sin(theta) * sym.cos(phi))).subs(y, (sym.sin(theta) * sym.sin(phi)))
for i in range(len(C_m)):
C_m[i] = C_m[i].subs(x, (sym.sin(theta) * sym.cos(phi))).subs(y, (sym.sin(theta) * sym.sin(phi)))
Y_func_l_m = [(['0'] * ((2 * j) + 1)) for j in range(l)]
for i in range(l):
Y_func_l_m[i][0] = sym.simplify((sph_harm_prefactor(i, 0) * P_l_m[i][0]))
if (not zero_m_only):
for i in range(1, l):
for j in range(1, (i + 1)):
Y_func_l_m[i][j] = sym.simplify(((((2 ** 0.5) * sph_harm_prefactor(i, j)) * C_m[j]) * P_l_m[i][j]))
for i in range(1, l):
for j in range(1, (i + 1)):
Y_func_l_m[i][(- j)] = sym.simplify(((((2 ** 0.5) * sph_harm_prefactor(i, (- j))) * S_m[j]) * P_l_m[i][j]))
return Y_func_l_m | Computes formula strings of the real part of the spherical harmonics up to order l (excluded).
Variables are either cartesian coordinates x,y,z on the unit sphere or spherical coordinates phi and theta. | nff/utils/functions.py | real_sph_harm | jkaraguesian/NeuralForceField | 0 | python | def real_sph_harm(l, zero_m_only=True, spherical_coordinates=True):
'\n Computes formula strings of the real part of the spherical harmonics up to order l (excluded).\n Variables are either cartesian coordinates x,y,z on the unit sphere or spherical coordinates phi and theta.\n '
if (not zero_m_only):
S_m = [0]
C_m = [1]
for i in range(1, l):
x = sym.symbols('x')
y = sym.symbols('y')
S_m += [((x * S_m[(i - 1)]) + (y * C_m[(i - 1)]))]
C_m += [((x * C_m[(i - 1)]) - (y * S_m[(i - 1)]))]
P_l_m = associated_legendre_polynomials(l, zero_m_only)
if spherical_coordinates:
theta = sym.symbols('theta')
z = sym.symbols('z')
for i in range(len(P_l_m)):
for j in range(len(P_l_m[i])):
if (type(P_l_m[i][j]) != int):
P_l_m[i][j] = P_l_m[i][j].subs(z, sym.cos(theta))
if (not zero_m_only):
phi = sym.symbols('phi')
for i in range(len(S_m)):
S_m[i] = S_m[i].subs(x, (sym.sin(theta) * sym.cos(phi))).subs(y, (sym.sin(theta) * sym.sin(phi)))
for i in range(len(C_m)):
C_m[i] = C_m[i].subs(x, (sym.sin(theta) * sym.cos(phi))).subs(y, (sym.sin(theta) * sym.sin(phi)))
Y_func_l_m = [(['0'] * ((2 * j) + 1)) for j in range(l)]
for i in range(l):
Y_func_l_m[i][0] = sym.simplify((sph_harm_prefactor(i, 0) * P_l_m[i][0]))
if (not zero_m_only):
for i in range(1, l):
for j in range(1, (i + 1)):
Y_func_l_m[i][j] = sym.simplify(((((2 ** 0.5) * sph_harm_prefactor(i, j)) * C_m[j]) * P_l_m[i][j]))
for i in range(1, l):
for j in range(1, (i + 1)):
Y_func_l_m[i][(- j)] = sym.simplify(((((2 ** 0.5) * sph_harm_prefactor(i, (- j))) * S_m[j]) * P_l_m[i][j]))
return Y_func_l_m | def real_sph_harm(l, zero_m_only=True, spherical_coordinates=True):
'\n Computes formula strings of the real part of the spherical harmonics up to order l (excluded).\n Variables are either cartesian coordinates x,y,z on the unit sphere or spherical coordinates phi and theta.\n '
if (not zero_m_only):
S_m = [0]
C_m = [1]
for i in range(1, l):
x = sym.symbols('x')
y = sym.symbols('y')
S_m += [((x * S_m[(i - 1)]) + (y * C_m[(i - 1)]))]
C_m += [((x * C_m[(i - 1)]) - (y * S_m[(i - 1)]))]
P_l_m = associated_legendre_polynomials(l, zero_m_only)
if spherical_coordinates:
theta = sym.symbols('theta')
z = sym.symbols('z')
for i in range(len(P_l_m)):
for j in range(len(P_l_m[i])):
if (type(P_l_m[i][j]) != int):
P_l_m[i][j] = P_l_m[i][j].subs(z, sym.cos(theta))
if (not zero_m_only):
phi = sym.symbols('phi')
for i in range(len(S_m)):
S_m[i] = S_m[i].subs(x, (sym.sin(theta) * sym.cos(phi))).subs(y, (sym.sin(theta) * sym.sin(phi)))
for i in range(len(C_m)):
C_m[i] = C_m[i].subs(x, (sym.sin(theta) * sym.cos(phi))).subs(y, (sym.sin(theta) * sym.sin(phi)))
Y_func_l_m = [(['0'] * ((2 * j) + 1)) for j in range(l)]
for i in range(l):
Y_func_l_m[i][0] = sym.simplify((sph_harm_prefactor(i, 0) * P_l_m[i][0]))
if (not zero_m_only):
for i in range(1, l):
for j in range(1, (i + 1)):
Y_func_l_m[i][j] = sym.simplify(((((2 ** 0.5) * sph_harm_prefactor(i, j)) * C_m[j]) * P_l_m[i][j]))
for i in range(1, l):
for j in range(1, (i + 1)):
Y_func_l_m[i][(- j)] = sym.simplify(((((2 ** 0.5) * sph_harm_prefactor(i, (- j))) * S_m[j]) * P_l_m[i][j]))
return Y_func_l_m<|docstring|>Computes formula strings of the real part of the spherical harmonics up to order l (excluded).
Variables are either cartesian coordinates x,y,z on the unit sphere or spherical coordinates phi and theta.<|endoftext|> |
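The row above stores a symbolic generator for real spherical harmonics. A minimal usage sketch, assuming the NeuralForceField package is installed so that the module in the `path` column (`nff/utils/functions.py`) and its `associated_legendre_polynomials` / `sph_harm_prefactor` helpers are importable:

```python
import sympy as sym
from nff.utils.functions import real_sph_harm  # assumed import path, taken from the row's `path` column

# Symbolic real spherical harmonics for l = 0, 1, 2, keeping only the m = 0 terms,
# written in the spherical coordinate theta.
Y = real_sph_harm(3, zero_m_only=True, spherical_coordinates=True)

theta = sym.symbols('theta')
y_1_0 = sym.lambdify(theta, Y[1][0], modules='numpy')  # turn the (l=1, m=0) formula into a callable
print(Y[1][0], y_1_0(0.0))
```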
7264554eeb7d16d8fb871985c43591bf0bd03b0accd6cf8143487f74eb5d7ddb | def get_subject(db: Session, subcode: str) -> SubjectReport:
'Get Subject From Code\n\n Args:\n db (Session): SQLAlchemy Session.\n subcode (str): Subject Code.\n\n Raises:\n NoResultFound\n\n Returns:\n SubjectReport: Details Of the Requested Subject.\n '
res = db.query(Subject).filter((Subject.Code == subcode)).one()
rep = SubjectReport.from_orm(res)
return rep | Get Subject From Code
Args:
db (Session): SQLAlchemy Session.
subcode (str): Subject Code.
Raises:
NoResultFound
Returns:
SubjectReport: Details Of the Requested Subject. | semesterstat/crud/subject.py | get_subject | Rushyanth111/Semester-Stats | 0 | python | def get_subject(db: Session, subcode: str) -> SubjectReport:
'Get Subject From Code\n\n Args:\n db (Session): SQLAlchemy Session.\n subcode (str): Subject Code.\n\n Raises:\n NoResultFound\n\n Returns:\n SubjectReport: Details Of the Requested Subject.\n '
res = db.query(Subject).filter((Subject.Code == subcode)).one()
rep = SubjectReport.from_orm(res)
return rep | def get_subject(db: Session, subcode: str) -> SubjectReport:
'Get Subject From Code\n\n Args:\n db (Session): SQLAlchemy Session.\n subcode (str): Subject Code.\n\n Raises:\n NoResultFound\n\n Returns:\n SubjectReport: Details Of the Requested Subject.\n '
res = db.query(Subject).filter((Subject.Code == subcode)).one()
rep = SubjectReport.from_orm(res)
return rep<|docstring|>Get Subject From Code
Args:
db (Session): SQLAlchemy Session.
subcode (str): Subject Code.
Raises:
NoResultFound
Returns:
SubjectReport: Details Of the Requested Subject.<|endoftext|> |
45cfea7cb4f55310bb04ae6fd84033c0171925308c68a5a2a58dc78d765b27bd | def put_subject(db: Session, sub: SubjectReport) -> None:
'Add a Subject to the Database\n\n Args:\n db (Session): SQLAlchemy Session.\n sub (SubjectReport): Subject Report Object.\n\n Raises:\n IntegrityError\n '
ipt = Subject(Code=sub.Code, Name=sub.Name, Semester=sub.Semester, Scheme=sub.Scheme, Department=sub.Department)
db.add(ipt)
db.commit() | Add a Subject to the Database
Args:
db (Session): SQLAlchemy Session.
sub (SubjectReport): Subject Report Object.
Raises:
IntegrityError | semesterstat/crud/subject.py | put_subject | Rushyanth111/Semester-Stats | 0 | python | def put_subject(db: Session, sub: SubjectReport) -> None:
'Add a Subject to the Database\n\n Args:\n db (Session): SQLAlchemy Session.\n sub (SubjectReport): Subject Report Object.\n\n Raises:\n IntegrityError\n '
ipt = Subject(Code=sub.Code, Name=sub.Name, Semester=sub.Semester, Scheme=sub.Scheme, Department=sub.Department)
db.add(ipt)
db.commit() | def put_subject(db: Session, sub: SubjectReport) -> None:
'Add a Subject to the Database\n\n Args:\n db (Session): SQLAlchemy Session.\n sub (SubjectReport): Subject Report Object.\n\n Raises:\n IntegrityError\n '
ipt = Subject(Code=sub.Code, Name=sub.Name, Semester=sub.Semester, Scheme=sub.Scheme, Department=sub.Department)
db.add(ipt)
db.commit()<|docstring|>Add a Subject to the Database
Args:
db (Session): SQLAlchemy Session.
sub (SubjectReport): Subject Report Object.
Raises:
IntegrityError<|endoftext|> |
ff19f77d291f830cd02680b17ba0dda4d46e33ee22aa111450c711f8981d9d9b | def update_subject(db: Session, old_sub: str, new_sub: SubjectReport) -> None:
'Update a Subject\n\n Args:\n db (Session): SQLAlchemy Session.\n old_sub (str): Old Subject Code.\n new_sub (SubjectReport): Subject Details to Change\n\n Raises:\n IntegrityError\n '
upd = db.query(Subject).filter((Subject.Code == old_sub)).first()
upd.Code = new_sub.Code
upd.Name = new_sub.Name
upd.Semester = new_sub.Semester
upd.Scheme = new_sub.Scheme
upd.Department = new_sub.Department
db.commit() | Update a Subject
Args:
db (Session): SQLAlchemy Session.
old_sub (str): Old Subject Code.
new_sub (SubjectReport): Subject Details to Change
Raises:
IntegrityError | semesterstat/crud/subject.py | update_subject | Rushyanth111/Semester-Stats | 0 | python | def update_subject(db: Session, old_sub: str, new_sub: SubjectReport) -> None:
'Update a Subject\n\n Args:\n db (Session): SQLAlchemy Session.\n old_sub (str): Old Subject Code.\n new_sub (SubjectReport): Subject Details to Change\n\n Raises:\n IntegrityError\n '
upd = db.query(Subject).filter((Subject.Code == old_sub)).first()
upd.Code = new_sub.Code
upd.Name = new_sub.Name
upd.Semester = new_sub.Semester
upd.Scheme = new_sub.Scheme
upd.Department = new_sub.Department
db.commit() | def update_subject(db: Session, old_sub: str, new_sub: SubjectReport) -> None:
'Update a Subject\n\n Args:\n db (Session): SQLAlchemy Session.\n old_sub (str): Old Subject Code.\n new_sub (SubjectReport): Subject Details to Change\n\n Raises:\n IntegrityError\n '
upd = db.query(Subject).filter((Subject.Code == old_sub)).first()
upd.Code = new_sub.Code
upd.Name = new_sub.Name
upd.Semester = new_sub.Semester
upd.Scheme = new_sub.Scheme
upd.Department = new_sub.Department
db.commit()<|docstring|>Update a Subject
Args:
db (Session): SQLAlchemy Session.
old_sub (str): Old Subject Code.
new_sub (SubjectReport): Subject Details to Change
Raises:
IntegrityError<|endoftext|> |
e635eb670f694dc3f4f66643ababeab181279c1f30bec027a15b4ee03bcfffd2 | def is_subject_exist(db: Session, subcode: str) -> bool:
'Checks if Subject Exists.\n\n Args:\n db (Session): SQLAlchemy Session.\n subcode (str): Subject Code.\n\n Returns:\n bool: True if Present, Else False.\n '
equery = db.query(Subject).filter((Subject.Code == subcode))
res = db.query(equery.exists()).scalar()
return res | Checks if Subject Exists.
Args:
db (Session): SQLAlchemy Session.
subcode (str): Subject Code.
Returns:
bool: True if Present, Else False. | semesterstat/crud/subject.py | is_subject_exist | Rushyanth111/Semester-Stats | 0 | python | def is_subject_exist(db: Session, subcode: str) -> bool:
'Checks if Subject Exists.\n\n Args:\n db (Session): SQLAlchemy Session.\n subcode (str): Subject Code.\n\n Returns:\n bool: True if Present, Else False.\n '
equery = db.query(Subject).filter((Subject.Code == subcode))
res = db.query(equery.exists()).scalar()
return res | def is_subject_exist(db: Session, subcode: str) -> bool:
'Checks if Subject Exists.\n\n Args:\n db (Session): SQLAlchemy Session.\n subcode (str): Subject Code.\n\n Returns:\n bool: True if Present, Else False.\n '
equery = db.query(Subject).filter((Subject.Code == subcode))
res = db.query(equery.exists()).scalar()
return res<|docstring|>Checks if Subject Exists.
Args:
db (Session): SQLAlchemy Session.
subcode (str): Subject Code.
Returns:
bool: True if Present, Else False.<|endoftext|> |
6793e3fd551e601ef4ea16f9936b50504fc3706c0608d086e8d5a95aef44649b | def get_subjects(db: Session, batch: int=None, dept: str=None, sem: int=None) -> List[str]:
'Obtains a List of Subjects According to Optional Params\n\n Args:\n db (Session): SQLAlchemy Session.\n batch (int, optional): Batch that Attended That Subject. Defaults to None.\n dept (str, optional): Department of the Subject(Includes "XX" By default).\n Defaults to None.\n sem (int, optional): Semester Of the Subject. Defaults to None.\n\n Raises:\n NoResultFound\n\n Returns:\n List[str]: List of the Subject Codes Searched.\n '
res = db.query(Subject)
if (batch is not None):
scheme = get_scheme(db, batch)
if (scheme is None):
return []
res = res.filter((Subject.Scheme == scheme))
if (dept is not None):
if (dept != 'XX'):
res = res.filter(or_((Subject.Department == dept), (Subject.Department == 'XX')))
else:
res = res.filter((Subject.Department == 'XX'))
if (sem is not None):
res = res.filter((Subject.Semester == sem))
subcodes = [sub.Code for sub in res]
return subcodes | Obtains a List of Subjects According to Optional Params
Args:
db (Session): SQLAlchemy Session.
batch (int, optional): Batch that Attended That Subject. Defaults to None.
dept (str, optional): Department of the Subject(Includes "XX" By default).
Defaults to None.
sem (int, optional): Semester Of the Subject. Defaults to None.
Raises:
NoResultFound
Returns:
List[str]: List of the Subject Codes Searched. | semesterstat/crud/subject.py | get_subjects | Rushyanth111/Semester-Stats | 0 | python | def get_subjects(db: Session, batch: int=None, dept: str=None, sem: int=None) -> List[str]:
'Obtains a List of Subjects According to Optional Params\n\n Args:\n db (Session): SQLAlchemy Session.\n batch (int, optional): Batch that Attended That Subject. Defaults to None.\n dept (str, optional): Department of the Subject(Includes "XX" By default).\n Defaults to None.\n sem (int, optional): Semester Of the Subject. Defaults to None.\n\n Raises:\n NoResultFound\n\n Returns:\n List[str]: List of the Subject Codes Searched.\n '
res = db.query(Subject)
if (batch is not None):
scheme = get_scheme(db, batch)
if (scheme is None):
return []
res = res.filter((Subject.Scheme == scheme))
if (dept is not None):
if (dept != 'XX'):
res = res.filter(or_((Subject.Department == dept), (Subject.Department == 'XX')))
else:
res = res.filter((Subject.Department == 'XX'))
if (sem is not None):
res = res.filter((Subject.Semester == sem))
subcodes = [sub.Code for sub in res]
return subcodes | def get_subjects(db: Session, batch: int=None, dept: str=None, sem: int=None) -> List[str]:
'Obtains a List of Subjects According to Optional Params\n\n Args:\n db (Session): SQLAlchemy Session.\n batch (int, optional): Batch that Attended That Subject. Defaults to None.\n dept (str, optional): Department of the Subject(Includes "XX" By default).\n Defaults to None.\n sem (int, optional): Semester Of the Subject. Defaults to None.\n\n Raises:\n NoResultFound\n\n Returns:\n List[str]: List of the Subject Codes Searched.\n '
res = db.query(Subject)
if (batch is not None):
scheme = get_scheme(db, batch)
if (scheme is None):
return []
res = res.filter((Subject.Scheme == scheme))
if (dept is not None):
if (dept != 'XX'):
res = res.filter(or_((Subject.Department == dept), (Subject.Department == 'XX')))
else:
res = res.filter((Subject.Department == 'XX'))
if (sem is not None):
res = res.filter((Subject.Semester == sem))
subcodes = [sub.Code for sub in res]
return subcodes<|docstring|>Obtains a List of Subjects According to Optional Params
Args:
db (Session): SQLAlchemy Session.
batch (int, optional): Batch that Attended That Subject. Defaults to None.
dept (str, optional): Department of the Subject(Includes "XX" By default).
Defaults to None.
sem (int, optional): Semester Of the Subject. Defaults to None.
Raises:
NoResultFound
Returns:
List[str]: List of the Subject Codes Searched.<|endoftext|> |
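The five rows above form the subject CRUD layer of `semesterstat/crud/subject.py`. A hedged usage sketch; the import locations of the declarative `Base` and the `SubjectReport` schema are assumptions (they do not appear in these rows) and would need to match the real package layout:

```python
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from semesterstat.crud.subject import (          # path taken from the rows above
    get_subject, get_subjects, is_subject_exist, put_subject,
)
from semesterstat.database import Base           # assumed location of the declarative Base
from semesterstat.common.reports import SubjectReport  # assumed location of the schema

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)                 # create the Subject table
db = sessionmaker(bind=engine)()

put_subject(db, SubjectReport(Code="15CS32", Name="Data Structures",
                              Semester=3, Scheme=2015, Department="CS"))

assert is_subject_exist(db, "15CS32")
print(get_subject(db, "15CS32"))
print(get_subjects(db, dept="CS", sem=3))        # -> ['15CS32']
```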
060be6ba08659a4bbf08bd1c936a95439bb8e851431ca1817d69bf1b4bf8a266 | def __init__(self, num_rollouts=1):
'\n\n Args:\n num_rollouts: the number of rollouts we simulate\n '
self.num_rollouts = num_rollouts | Args:
num_rollouts: the number of rollouts we simulate | connect_four/agents/flat_ucb.py | __init__ | rpachauri/connect4 | 0 | python | def __init__(self, num_rollouts=1):
'\n\n Args:\n num_rollouts: the number of rollouts we simulate\n '
self.num_rollouts = num_rollouts | def __init__(self, num_rollouts=1):
'\n\n Args:\n num_rollouts: the number of rollouts we simulate\n '
self.num_rollouts = num_rollouts<|docstring|>Args:
num_rollouts: the number of rollouts we simulate<|endoftext|> |
3fff461e04cd9337ed780e98e5bd01e11e7c50ee4475ced472da14480e4a3be4 | def action(self, env, last_action=None):
'Returns an action.\n\n Args:\n env: a plannable gym.Env\n last_action: the last_action we took. None by default.\n Requires:\n - env must implement env_variables, which returns a variable that can\n be passed to env.reset() to restore a state (this supports planning agents)\n - env is a deterministic environment.\n - action space of env is finite.\n Returns:\n the best action after performing num_rollouts simulations\n '
action_total_values = np.zeros(env.action_space)
action_visits = np.zeros(env.action_space)
env_variables = env.env_variables
for _ in range(self.num_rollouts):
action = self._select_action_for_rollout(action_total_values, action_visits)
value = self.rollout(env, action)
action_total_values[action] += value
action_visits[action] += 1
env.reset(env_variables)
print('action_visits =', action_visits, '=>', np.sum(action_visits))
best_action = np.argmax(action_visits)
return best_action | Returns an action.
Args:
env: a plannable gym.Env
last_action: the last_action we took. None by default.
Requires:
- env must implement env_variables, which returns a variable that can
be passed to env.reset() to restore a state (this supports planning agents)
- env is a deterministic environment.
- action space of env is finite.
Returns:
the best action after performing num_rollouts simulations | connect_four/agents/flat_ucb.py | action | rpachauri/connect4 | 0 | python | def action(self, env, last_action=None):
'Returns an action.\n\n Args:\n env: a plannable gym.Env\n last_action: the last_action we took. None by default.\n Requires:\n - env must implement env_variables, which returns a variable that can\n be passed to env.reset() to restore a state (this supports planning agents)\n - env is a deterministic environment.\n - action space of env is finite.\n Returns:\n the best action after performing num_rollouts simulations\n '
action_total_values = np.zeros(env.action_space)
action_visits = np.zeros(env.action_space)
env_variables = env.env_variables
for _ in range(self.num_rollouts):
action = self._select_action_for_rollout(action_total_values, action_visits)
value = self.rollout(env, action)
action_total_values[action] += value
action_visits[action] += 1
env.reset(env_variables)
print('action_visits =', action_visits, '=>', np.sum(action_visits))
best_action = np.argmax(action_visits)
return best_action | def action(self, env, last_action=None):
'Returns an action.\n\n Args:\n env: a plannable gym.Env\n last_action: the last_action we took. None by default.\n Requires:\n - env must implement env_variables, which returns a variable that can\n be passed to env.reset() to restore a state (this supports planning agents)\n - env is a deterministic environment.\n - action space of env is finite.\n Returns:\n the best action after performing num_rollouts simulations\n '
action_total_values = np.zeros(env.action_space)
action_visits = np.zeros(env.action_space)
env_variables = env.env_variables
for _ in range(self.num_rollouts):
action = self._select_action_for_rollout(action_total_values, action_visits)
value = self.rollout(env, action)
action_total_values[action] += value
action_visits[action] += 1
env.reset(env_variables)
print('action_visits =', action_visits, '=>', np.sum(action_visits))
best_action = np.argmax(action_visits)
return best_action<|docstring|>Returns an action.
Args:
env: a plannable gym.Env
last_action: the last_action we took. None by default.
Requires:
- env must implement env_variables, which returns a variable that can
be passed to env.reset() to restore a state (this supports planning agents)
- env is a deterministic environment.
- action space of env is finite.
Returns:
the best action after performing num_rollouts simulations<|endoftext|> |
ba65a04ee73019c8235d4ed80a88344d1c5c01f3b0f7e5481b230507f008fc28 | @staticmethod
def rollout(env, action) -> float:
"Obtains a sample estimate of the action-value for the current\n environment's player.\n\n Args:\n env (gym.Env): a gym.Env object. Note that this function modifies env\n and env will reach a terminal state. Assumes a terminal state\n is reachable through uniform random move selection.\n action (int): The action to obtain a sample estimate of the action-value for.\n\n Returns:\n value (float): the total return after performing rollout from the state env is in\n "
(_, r, done, _) = env.step(action)
value = r
while (not done):
all_actions = np.arange(env.action_space)
action = np.random.choice(all_actions)
(_, r, done, _) = env.step(action)
value += r
value *= (- 1)
return value | Obtains a sample estimate of the action-value for the current
environment's player.
Args:
env (gym.Env): a gym.Env object. Note that this function modifies env
and env will reach a terminal state. Assumes a terminal state
is reachable through uniform random move selection.
action (int): The action to obtain a sample estimate of the action-value for.
Returns:
value (float): the total return after performing rollout from the state env is in | connect_four/agents/flat_ucb.py | rollout | rpachauri/connect4 | 0 | python | @staticmethod
def rollout(env, action) -> float:
"Obtains a sample estimate of the action-value for the current\n environment's player.\n\n Args:\n env (gym.Env): a gym.Env object. Note that this function modifies env\n and env will reach a terminal state. Assumes a terminal state\n is reachable through uniform random move selection.\n action (int): The action to obtain a sample estimate of the action-value for.\n\n Returns:\n value (float): the total return after performing rollout from the state env is in\n "
(_, r, done, _) = env.step(action)
value = r
while (not done):
all_actions = np.arange(env.action_space)
action = np.random.choice(all_actions)
(_, r, done, _) = env.step(action)
value += r
value *= (- 1)
return value | @staticmethod
def rollout(env, action) -> float:
"Obtains a sample estimate of the action-value for the current\n environment's player.\n\n Args:\n env (gym.Env): a gym.Env object. Note that this function modifies env\n and env will reach a terminal state. Assumes a terminal state\n is reachable through uniform random move selection.\n action (int): The action to obtain a sample estimate of the action-value for.\n\n Returns:\n value (float): the total return after performing rollout from the state env is in\n "
(_, r, done, _) = env.step(action)
value = r
while (not done):
all_actions = np.arange(env.action_space)
action = np.random.choice(all_actions)
(_, r, done, _) = env.step(action)
value += r
value *= (- 1)
return value<|docstring|>Obtains a sample estimate of the action-value for the current
environment's player.
Args:
env (gym.Env): a gym.Env object. Note that this function modifies env
and env will reach a terminal state. Assumes a terminal state
is reachable through uniform random move selection.
action (int): The action to obtain a sample estimate of the action-value for.
Returns:
value (float): the total return after performing rollout from the state env is in<|endoftext|> |
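Together, the three rows above implement a flat UCB planner: `action` repeatedly picks a candidate move with `_select_action_for_rollout` (not shown in this table), scores it with `rollout`, and finally returns the most-visited move. A hedged self-play sketch; the agent class name (`FlatUCB`) and the environment class and import path are assumptions, standing in for any plannable env with an integer `action_space`, `env_variables`, `reset(env_variables)` and a 4-tuple `step(action)`:

```python
from connect_four.agents.flat_ucb import FlatUCB   # assumed class name; module path from the rows above
from connect_four.envs import ConnectFourEnv       # assumed environment class and module

env = ConnectFourEnv()
agent = FlatUCB(num_rollouts=200)

env.reset()
done = False
while not done:
    action = agent.action(env)          # plans with 200 simulated rollouts, then restores env
    _, reward, done, _ = env.step(action)
```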
25c16a7931595ed6302d146ec01618dab4c2d570d635b78d806a15795a71ac73 | def __init__(self, grid_size=(16, 16), random_spawn=False, seed=None):
" Game ends if the snake's head touches its body or goes out of\n bounds. Eat apples to get a reward and grow the snakes body by one\n segment. Agents should call `actions()` and `feature_space()` to get\n valid actions and boundaries, and then `step()` with an `action` to\n advance one frame in the game.\n\n Args:\n - grid_size: (tuple) play area dimensions x, y\n - random_spawn: (bool) if False, player will begin centered\n - seed: if not None, randomly generated numbers will be repeatable\n "
(self.width, self.height) = grid_size
self.random_spawn = random_spawn
self.seed = seed
self.reset() | Game ends if the snake's head touches its body or goes out of
bounds. Eat apples to get a reward and grow the snakes body by one
segment. Agents should call `actions()` and `feature_space()` to get
valid actions and boundaries, and then `step()` with an `action` to
advance one frame in the game.
Args:
- grid_size: (tuple) play area dimensions x, y
- random_spawn: (bool) if False, player will begin centered
- seed: if not None, randomly generated numbers will be repeatable | snake.py | __init__ | tyoungNIO/snake-python | 0 | python | def __init__(self, grid_size=(16, 16), random_spawn=False, seed=None):
" Game ends if the snake's head touches its body or goes out of\n bounds. Eat apples to get a reward and grow the snakes body by one\n segment. Agents should call `actions()` and `feature_space()` to get\n valid actions and boundaries, and then `step()` with an `action` to\n advance one frame in the game.\n\n Args:\n - grid_size: (tuple) play area dimensions x, y\n - random_spawn: (bool) if False, player will begin centered\n - seed: if not None, randomly generated numbers will be repeatable\n "
(self.width, self.height) = grid_size
self.random_spawn = random_spawn
self.seed = seed
self.reset() | def __init__(self, grid_size=(16, 16), random_spawn=False, seed=None):
" Game ends if the snake's head touches its body or goes out of\n bounds. Eat apples to get a reward and grow the snakes body by one\n segment. Agents should call `actions()` and `feature_space()` to get\n valid actions and boundaries, and then `step()` with an `action` to\n advance one frame in the game.\n\n Args:\n - grid_size: (tuple) play area dimensions x, y\n - random_spawn: (bool) if False, player will begin centered\n - seed: if not None, randomly generated numbers will be repeatable\n "
(self.width, self.height) = grid_size
self.random_spawn = random_spawn
self.seed = seed
self.reset()<|docstring|>Game ends if the snake's head touches its body or goes out of
bounds. Eat apples to get a reward and grow the snakes body by one
segment. Agents should call `actions()` and `feature_space()` to get
valid actions and boundaries, and then `step()` with an `action` to
advance one frame in the game.
Args:
- grid_size: (tuple) play area dimensions x, y
- random_spawn: (bool) if False, player will begin centered
- seed: if not None, randomly generated numbers will be repeatable<|endoftext|> |
d8ec4486917a448b1b5d92189bf3414a0fc42d2bf823066d8a71619934769758 | def actions(self):
' Returns a map of valid actions as `{key: action}` pairs.'
return {'LEFT': ((- 1), 0), 'RIGHT': (1, 0), 'UP': (0, (- 1)), 'DOWN': (0, 1)} | Returns a map of valid actions as `{key: action}` pairs. | snake.py | actions | tyoungNIO/snake-python | 0 | python | def actions(self):
' '
return {'LEFT': ((- 1), 0), 'RIGHT': (1, 0), 'UP': (0, (- 1)), 'DOWN': (0, 1)} | def actions(self):
' '
return {'LEFT': ((- 1), 0), 'RIGHT': (1, 0), 'UP': (0, (- 1)), 'DOWN': (0, 1)}<|docstring|>Returns a map of valid actions as `{key: action}` pairs.<|endoftext|> |
e25718fb34cb508397954c2fd73eaeaab53cd34828af8208d7ede466cdbd05e7 | def feature_space(self):
' Returns the play area dimensions.'
return (self.width, self.height) | Returns the play area dimensions. | snake.py | feature_space | tyoungNIO/snake-python | 0 | python | def feature_space(self):
' '
return (self.width, self.height) | def feature_space(self):
' '
return (self.width, self.height)<|docstring|>Returns the play area dimensions.<|endoftext|> |
5ef1c4df82f1520ba430d5c202b3103f9d38d4e5669c5f0de0b9bbcef1e8c9ac | def game_state(self):
' Returns the game state.'
return (self.apple, self.snake, self.score) | Returns the game state. | snake.py | game_state | tyoungNIO/snake-python | 0 | python | def game_state(self):
' '
return (self.apple, self.snake, self.score) | def game_state(self):
' '
return (self.apple, self.snake, self.score)<|docstring|>Returns the game state.<|endoftext|> |
693b998739d6972cc6b1c27c8356cb4f72afa7e91715bc3fb7b7f84c899c9d50 | def reset(self):
' Call to reset the game. If a random seed has been set random\n numbers, such as the sequence of apples, will be repeated exactly.\n '
if (self.seed != None):
random.seed(self.seed)
if self.random_spawn:
self.snake = [self._random_coords()]
else:
center = ((self.width // 2), (self.height // 2))
self.snake = [center]
self.apple = self._random_coords(self.snake)
self.game_over = False
self.score = 0 | Call to reset the game. If a random seed has been set random
numbers, such as the sequence of apples, will be repeated exactly. | snake.py | reset | tyoungNIO/snake-python | 0 | python | def reset(self):
' Call to reset the game. If a random seed has been set random\n numbers, such as the sequence of apples, will be repeated exactly.\n '
if (self.seed != None):
random.seed(self.seed)
if self.random_spawn:
self.snake = [self._random_coords()]
else:
center = ((self.width // 2), (self.height // 2))
self.snake = [center]
self.apple = self._random_coords(self.snake)
self.game_over = False
self.score = 0 | def reset(self):
' Call to reset the game. If a random seed has been set random\n numbers, such as the sequence of apples, will be repeated exactly.\n '
if (self.seed != None):
random.seed(self.seed)
if self.random_spawn:
self.snake = [self._random_coords()]
else:
center = ((self.width // 2), (self.height // 2))
self.snake = [center]
self.apple = self._random_coords(self.snake)
self.game_over = False
self.score = 0<|docstring|>Call to reset the game. If a random seed has been set random
numbers, such as the sequence of apples, will be repeated exactly.<|endoftext|> |
b994183357ceefd09c8cd0e9ce2d2ce260aab1d34eec8cdab1b2d57988d54033 | def step(self, action):
" Advance one frame in the game.\n\n Args:\n - action: (tuple) direction to move, must be a value from\n `actions()` otherwise an `ValueError` is raised\n Returns:\n - apple: (tuple) coordinates of the apple\n - snake: (list) tuples of coordinate pairs of each segment of the\n snake's body, ordered from head to tail\n - reward: (int) 1 if an apple is consumed, -1 if the player dies,\n otherwise 0\n - game_over: (bool) if True further calls to this method will raise\n an `Exception` until `reset()` has been called\n "
if self.game_over:
raise Exception('Game Over. Score: {}'.format(self.score))
if (action not in self.actions().values()):
raise ValueError('Invalid action "{}"'.format(action))
reward = 0
current_head = self.snake[0]
new_head = ((current_head[0] + action[0]), (current_head[1] + action[1]))
if (new_head == self.apple):
self.score += 1
reward = 1
excluded = ([new_head] + self.snake[:self.score])
self.apple = self._random_coords(excluded)
self.snake = ([new_head] + self.snake[:self.score])
if ((new_head in self.snake[1:]) or (new_head[0] not in range(self.width)) or (new_head[1] not in range(self.height))):
self.game_over = True
reward = (- 1)
return (reward, self.game_over) | Advance one frame in the game.
Args:
- action: (tuple) direction to move, must be a value from
`actions()` otherwise an `ValueError` is raised
Returns:
- apple: (tuple) coordinates of the apple
- snake: (list) tuples of coordinate pairs of each segment of the
snake's body, ordered from head to tail
- reward: (int) 1 if an apple is consumed, -1 if the player dies,
otherwise 0
- game_over: (bool) if True further calls to this method will raise
an `Exception` until `reset()` has been called | snake.py | step | tyoungNIO/snake-python | 0 | python | def step(self, action):
" Advance one frame in the game.\n\n Args:\n - action: (tuple) direction to move, must be a value from\n `actions()` otherwise an `ValueError` is raised\n Returns:\n - apple: (tuple) coordinates of the apple\n - snake: (list) tuples of coordinate pairs of each segment of the\n snake's body, ordered from head to tail\n - reward: (int) 1 if an apple is consumed, -1 if the player dies,\n otherwise 0\n - game_over: (bool) if True further calls to this method will raise\n an `Exception` until `reset()` has been called\n "
if self.game_over:
raise Exception('Game Over. Score: {}'.format(self.score))
if (action not in self.actions().values()):
raise ValueError('Invalid action "{}"'.format(action))
reward = 0
current_head = self.snake[0]
new_head = ((current_head[0] + action[0]), (current_head[1] + action[1]))
if (new_head == self.apple):
self.score += 1
reward = 1
excluded = ([new_head] + self.snake[:self.score])
self.apple = self._random_coords(excluded)
self.snake = ([new_head] + self.snake[:self.score])
if ((new_head in self.snake[1:]) or (new_head[0] not in range(self.width)) or (new_head[1] not in range(self.height))):
self.game_over = True
reward = (- 1)
return (reward, self.game_over) | def step(self, action):
" Advance one frame in the game.\n\n Args:\n - action: (tuple) direction to move, must be a value from\n `actions()` otherwise an `ValueError` is raised\n Returns:\n - apple: (tuple) coordinates of the apple\n - snake: (list) tuples of coordinate pairs of each segment of the\n snake's body, ordered from head to tail\n - reward: (int) 1 if an apple is consumed, -1 if the player dies,\n otherwise 0\n - game_over: (bool) if True further calls to this method will raise\n an `Exception` until `reset()` has been called\n "
if self.game_over:
raise Exception('Game Over. Score: {}'.format(self.score))
if (action not in self.actions().values()):
raise ValueError('Invalid action "{}"'.format(action))
reward = 0
current_head = self.snake[0]
new_head = ((current_head[0] + action[0]), (current_head[1] + action[1]))
if (new_head == self.apple):
self.score += 1
reward = 1
excluded = ([new_head] + self.snake[:self.score])
self.apple = self._random_coords(excluded)
self.snake = ([new_head] + self.snake[:self.score])
if ((new_head in self.snake[1:]) or (new_head[0] not in range(self.width)) or (new_head[1] not in range(self.height))):
self.game_over = True
reward = (- 1)
return (reward, self.game_over)<|docstring|>Advance one frame in the game.
Args:
- action: (tuple) direction to move, must be a value from
`actions()` otherwise an `ValueError` is raised
Returns:
- apple: (tuple) coordinates of the apple
- snake: (list) tuples of coordinate pairs of each segment of the
snake's body, ordered from head to tail
- reward: (int) 1 if an apple is consumed, -1 if the player dies,
otherwise 0
- game_over: (bool) if True further calls to this method will raise
an `Exception` until `reset()` has been called<|endoftext|> |
61d263ea948cbc84634c4b1a01c2d8afdfcd1ad075de340a4ac3a5efa5a5e362 | def _random_coords(self, excluded=[]):
' Create a random coordinate pair. Does not return until a random\n coordinate is found that is not in `excluded`, potentially forever if\n a valid value cannot be found.\n\n Args:\n - excluded: a list of coordinate tuples that will not be returned\n Returns:\n - coords: (tuple) x, y integers\n '
def new_coords():
return (random.randint(0, (self.width - 1)), random.randint(0, (self.height - 1)))
coords = new_coords()
while (coords in excluded):
coords = new_coords()
return coords | Create a random coordinate pair. Does not return until a random
coordinate is found that is not in `excluded`, potentially forever if
a valid value cannot be found.
Args:
- excluded: a list of coordinate tuples that will not be returned
Returns:
- coords: (tuple) x, y integers | snake.py | _random_coords | tyoungNIO/snake-python | 0 | python | def _random_coords(self, excluded=[]):
' Create a random coordinate pair. Does not return until a random\n coordinate is found that is not in `excluded`, potentially forever if\n a valid value cannot be found.\n\n Args:\n - excluded: a list of coordinate tuples that will not be returned\n Returns:\n - coords: (tuple) x, y integers\n '
def new_coords():
return (random.randint(0, (self.width - 1)), random.randint(0, (self.height - 1)))
coords = new_coords()
while (coords in excluded):
coords = new_coords()
return coords | def _random_coords(self, excluded=[]):
' Create a random coordinate pair. Does not return until a random\n coordinate is found that is not in `excluded`, potentially forever if\n a valid value cannot be found.\n\n Args:\n - excluded: a list of coordinate tuples that will not be returned\n Returns:\n - coords: (tuple) x, y integers\n '
def new_coords():
return (random.randint(0, (self.width - 1)), random.randint(0, (self.height - 1)))
coords = new_coords()
while (coords in excluded):
coords = new_coords()
return coords<|docstring|>Create a random coordinate pair. Does not return until a random
coordinate is found that is not in `excluded`, potentially forever if
a valid value cannot be found.
Args:
- excluded: a list of coordinate tuples that will not be returned
Returns:
- coords: (tuple) x, y integers<|endoftext|> |
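The rows above describe a small Snake environment: `actions()` and `feature_space()` expose the interface, `step()` advances one frame and returns `(reward, game_over)`, and `reset()` restarts an optionally seeded game. A hedged sketch of a random-agent loop; the class name `Snake` is an assumption, since only the module (`snake.py`) and method names appear in the rows:

```python
import random
from snake import Snake      # assumed class name; module taken from the `path` column

env = Snake(grid_size=(16, 16), seed=7)
moves = list(env.actions().values())          # [(-1, 0), (1, 0), (0, -1), (0, 1)]

game_over = False
while not game_over:
    reward, game_over = env.step(random.choice(moves))

apple, body, score = env.game_state()
print("score:", score, "snake length:", len(body))
```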
5ac6f7358f6fb5ce6d39a5a20d75dc94bc3123fd8bc5f590af1d3f6df8185b54 | def __init__(self, degree=1, cross=False, reference_simulations=None, previous_statistics=None):
'\n Initialization of the parent class. All sub-classes must call this at the end of their __init__,\n as it takes care of initializing the correct attributes to self for the other methods to work.\n\n `degree` and `cross` specify the polynomial expansion you want to apply to the statistics.\n\n If `reference_simulations` are provided, the standard deviation of the different statistics on the set \n of reference simulations is computed and stored; these will then be used to rescale\n the statistics for each new simulation or observation. \n If no set of reference simulations are provided, then this is not done.\n \n `previous_statistics` allows different Statistics object to be pipelined. Specifically, if the final \n statistic to be used is determined by the\n composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it\n is sufficient to call the `statistics` method of the second one, and that will automatically apply both\n transformations. \n \n Parameters\n ----------\n degree: integer, optional\n Of polynomial expansion. The default value is 2 meaning second order polynomial expansion.\n cross: boolean, optional\n Defines whether to include the cross-product terms. The default value is True, meaning the cross product term\n is included.\n reference_simulations: array, optional\n A numpy array with shape (n_samples, output_size) containing a set of reference simulations. If provided, \n statistics are computed at initialization for all reference simulations, and the standard deviation of the \n different statistics is extracted. The standard deviation is then used to standardize the summary \n statistics each time they are compute on a new observation or simulation. Defaults to None, in which case \n standardization is not applied.\n previous_statistics: abcpy.statistics.Statistics, optional\n It allows pipelining of Statistics. Specifically, if the final statistic to be used is determined by the\n composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it\n is sufficient to call the `statistics` method of the second one, and that will automatically apply both\n transformations.\n '
self.degree = degree
self.cross = cross
self.previous_statistics = previous_statistics
if (reference_simulations is not None):
training_statistics = self.statistics([reference_simulations[i] for i in range(reference_simulations.shape[0])])
self.std_statistics = np.std(training_statistics, axis=0) | Initialization of the parent class. All sub-classes must call this at the end of their __init__,
as it takes care of initializing the correct attributes to self for the other methods to work.
`degree` and `cross` specify the polynomial expansion you want to apply to the statistics.
If `reference_simulations` are provided, the standard deviation of the different statistics on the set
of reference simulations is computed and stored; these will then be used to rescale
the statistics for each new simulation or observation.
If no set of reference simulations are provided, then this is not done.
`previous_statistics` allows different Statistics object to be pipelined. Specifically, if the final
statistic to be used is determined by the
composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it
is sufficient to call the `statistics` method of the second one, and that will automatically apply both
transformations.
Parameters
----------
degree: integer, optional
Of polynomial expansion. The default value is 2 meaning second order polynomial expansion.
cross: boolean, optional
Defines whether to include the cross-product terms. The default value is True, meaning the cross product term
is included.
reference_simulations: array, optional
A numpy array with shape (n_samples, output_size) containing a set of reference simulations. If provided,
statistics are computed at initialization for all reference simulations, and the standard deviation of the
different statistics is extracted. The standard deviation is then used to standardize the summary
statistics each time they are compute on a new observation or simulation. Defaults to None, in which case
standardization is not applied.
previous_statistics: abcpy.statistics.Statistics, optional
It allows pipelining of Statistics. Specifically, if the final statistic to be used is determined by the
composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it
is sufficient to call the `statistics` method of the second one, and that will automatically apply both
transformations. | abcpy/statistics.py | __init__ | LoryPack/abcpy | 89 | python | def __init__(self, degree=1, cross=False, reference_simulations=None, previous_statistics=None):
'\n Initialization of the parent class. All sub-classes must call this at the end of their __init__,\n as it takes care of initializing the correct attributes to self for the other methods to work.\n\n `degree` and `cross` specify the polynomial expansion you want to apply to the statistics.\n\n If `reference_simulations` are provided, the standard deviation of the different statistics on the set \n of reference simulations is computed and stored; these will then be used to rescale\n the statistics for each new simulation or observation. \n If no set of reference simulations are provided, then this is not done.\n \n `previous_statistics` allows different Statistics object to be pipelined. Specifically, if the final \n statistic to be used is determined by the\n composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it\n is sufficient to call the `statistics` method of the second one, and that will automatically apply both\n transformations. \n \n Parameters\n ----------\n degree: integer, optional\n Of polynomial expansion. The default value is 2 meaning second order polynomial expansion.\n cross: boolean, optional\n Defines whether to include the cross-product terms. The default value is True, meaning the cross product term\n is included.\n reference_simulations: array, optional\n A numpy array with shape (n_samples, output_size) containing a set of reference simulations. If provided, \n statistics are computed at initialization for all reference simulations, and the standard deviation of the \n different statistics is extracted. The standard deviation is then used to standardize the summary \n statistics each time they are compute on a new observation or simulation. Defaults to None, in which case \n standardization is not applied.\n previous_statistics: abcpy.statistics.Statistics, optional\n It allows pipelining of Statistics. Specifically, if the final statistic to be used is determined by the\n composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it\n is sufficient to call the `statistics` method of the second one, and that will automatically apply both\n transformations.\n '
self.degree = degree
self.cross = cross
self.previous_statistics = previous_statistics
if (reference_simulations is not None):
training_statistics = self.statistics([reference_simulations[i] for i in range(reference_simulations.shape[0])])
self.std_statistics = np.std(training_statistics, axis=0) | def __init__(self, degree=1, cross=False, reference_simulations=None, previous_statistics=None):
'\n Initialization of the parent class. All sub-classes must call this at the end of their __init__,\n as it takes care of initializing the correct attributes to self for the other methods to work.\n\n `degree` and `cross` specify the polynomial expansion you want to apply to the statistics.\n\n If `reference_simulations` are provided, the standard deviation of the different statistics on the set \n of reference simulations is computed and stored; these will then be used to rescale\n the statistics for each new simulation or observation. \n If no set of reference simulations are provided, then this is not done.\n \n `previous_statistics` allows different Statistics object to be pipelined. Specifically, if the final \n statistic to be used is determined by the\n composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it\n is sufficient to call the `statistics` method of the second one, and that will automatically apply both\n transformations. \n \n Parameters\n ----------\n degree: integer, optional\n Of polynomial expansion. The default value is 2 meaning second order polynomial expansion.\n cross: boolean, optional\n Defines whether to include the cross-product terms. The default value is True, meaning the cross product term\n is included.\n reference_simulations: array, optional\n A numpy array with shape (n_samples, output_size) containing a set of reference simulations. If provided, \n statistics are computed at initialization for all reference simulations, and the standard deviation of the \n different statistics is extracted. The standard deviation is then used to standardize the summary \n statistics each time they are compute on a new observation or simulation. Defaults to None, in which case \n standardization is not applied.\n previous_statistics: abcpy.statistics.Statistics, optional\n It allows pipelining of Statistics. Specifically, if the final statistic to be used is determined by the\n composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it\n is sufficient to call the `statistics` method of the second one, and that will automatically apply both\n transformations.\n '
self.degree = degree
self.cross = cross
self.previous_statistics = previous_statistics
if (reference_simulations is not None):
training_statistics = self.statistics([reference_simulations[i] for i in range(reference_simulations.shape[0])])
self.std_statistics = np.std(training_statistics, axis=0)<|docstring|>Initialization of the parent class. All sub-classes must call this at the end of their __init__,
as it takes care of initializing the correct attributes to self for the other methods to work.
`degree` and `cross` specify the polynomial expansion you want to apply to the statistics.
If `reference_simulations` are provided, the standard deviation of the different statistics on the set
of reference simulations is computed and stored; these will then be used to rescale
the statistics for each new simulation or observation.
If no set of reference simulations are provided, then this is not done.
`previous_statistics` allows different Statistics object to be pipelined. Specifically, if the final
statistic to be used is determined by the
composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it
is sufficient to call the `statistics` method of the second one, and that will automatically apply both
transformations.
Parameters
----------
degree: integer, optional
Of polynomial expansion. The default value is 2 meaning second order polynomial expansion.
cross: boolean, optional
Defines whether to include the cross-product terms. The default value is True, meaning the cross product term
is included.
reference_simulations: array, optional
A numpy array with shape (n_samples, output_size) containing a set of reference simulations. If provided,
statistics are computed at initialization for all reference simulations, and the standard deviation of the
different statistics is extracted. The standard deviation is then used to standardize the summary
statistics each time they are compute on a new observation or simulation. Defaults to None, in which case
standardization is not applied.
previous_statistics: abcpy.statistics.Statistics, optional
It allows pipelining of Statistics. Specifically, if the final statistic to be used is determined by the
composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it
is sufficient to call the `statistics` method of the second one, and that will automatically apply both
transformations.<|endoftext|> |
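The `degree` and `cross` arguments described above control a plain polynomial feature expansion. A self-contained illustration of the effect (it mirrors the `_polynomial_expansion` helper shown in the last row of this table and is not the library's own code): with p = 2 statistics, `degree=2` and `cross=True` map `[x1, x2]` to `[x1, x2, x1**2, x2**2, x1*x2]`.

```python
import numpy as np

def expand(stats, degree=2, cross=True):
    """Append powers up to `degree` and, optionally, pairwise cross-products."""
    result = stats
    for d in range(2, degree + 1):
        result = np.column_stack((result, np.power(stats, d)))
    if cross and stats.shape[1] > 1:
        for i in range(stats.shape[1]):
            for j in range(i + 1, stats.shape[1]):
                result = np.column_stack((result, stats[:, i] * stats[:, j]))
    return result

x = np.array([[1.0, 2.0],
              [3.0, 4.0]])
print(expand(x))
# [[ 1.  2.  1.  4.  2.]
#  [ 3.  4.  9. 16. 12.]]
```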
a50843d66d08469acb56d9ba9462e2dd074c95615d995028eef6015c0e1c8493 | @abstractmethod
def statistics(self, data: object) -> object:
'To be overwritten by any sub-class: should extract statistics from the\n data set data. It is assumed that data is a list of n same type\n elements(eg., The data can be a list containing n timeseries, n graphs or n np.ndarray).\n\n All statistics implementation should follow this structure:\n\n >>> # need to call this first which takes care of calling the\n >>> # previous statistics if that is defined and of properly\n >>> # formatting data\n >>> data = self._preprocess(data)\n >>>\n >>> # !!! here do all the processing on the statistics (data) !!!\n >>>\n >>> # Expand the data with polynomial expansion\n >>> result = self._polynomial_expansion(data)\n >>>\n >>> # now call the _rescale function which automatically rescales\n >>> # the different statistics using the standard\n >>> # deviation of them on the training set provided at initialization.\n >>> result = self._rescale(result)\n\n\n Parameters\n ----------\n data: python list\n Contains n data sets with length p.\n Returns\n -------\n numpy.ndarray\n nxp matrix where for each of the n data points p statistics are calculated.\n\n '
raise NotImplementedError | To be overwritten by any sub-class: should extract statistics from the
data set data. It is assumed that data is a list of n same type
elements(eg., The data can be a list containing n timeseries, n graphs or n np.ndarray).
All statistics implementation should follow this structure:
>>> # need to call this first which takes care of calling the
>>> # previous statistics if that is defined and of properly
>>> # formatting data
>>> data = self._preprocess(data)
>>>
>>> # !!! here do all the processing on the statistics (data) !!!
>>>
>>> # Expand the data with polynomial expansion
>>> result = self._polynomial_expansion(data)
>>>
>>> # now call the _rescale function which automatically rescales
>>> # the different statistics using the standard
>>> # deviation of them on the training set provided at initialization.
>>> result = self._rescale(result)
Parameters
----------
data: python list
Contains n data sets with length p.
Returns
-------
numpy.ndarray
nxp matrix where for each of the n data points p statistics are calculated. | abcpy/statistics.py | statistics | LoryPack/abcpy | 89 | python | @abstractmethod
def statistics(self, data: object) -> object:
'To be overwritten by any sub-class: should extract statistics from the\n data set data. It is assumed that data is a list of n same type\n elements(eg., The data can be a list containing n timeseries, n graphs or n np.ndarray).\n\n All statistics implementation should follow this structure:\n\n >>> # need to call this first which takes care of calling the\n >>> # previous statistics if that is defined and of properly\n >>> # formatting data\n >>> data = self._preprocess(data)\n >>>\n >>> # !!! here do all the processing on the statistics (data) !!!\n >>>\n >>> # Expand the data with polynomial expansion\n >>> result = self._polynomial_expansion(data)\n >>>\n >>> # now call the _rescale function which automatically rescales\n >>> # the different statistics using the standard\n >>> # deviation of them on the training set provided at initialization.\n >>> result = self._rescale(result)\n\n\n Parameters\n ----------\n data: python list\n Contains n data sets with length p.\n Returns\n -------\n numpy.ndarray\n nxp matrix where for each of the n data points p statistics are calculated.\n\n '
raise NotImplementedError | @abstractmethod
def statistics(self, data: object) -> object:
'To be overwritten by any sub-class: should extract statistics from the\n data set data. It is assumed that data is a list of n same type\n elements(eg., The data can be a list containing n timeseries, n graphs or n np.ndarray).\n\n All statistics implementation should follow this structure:\n\n >>> # need to call this first which takes care of calling the\n >>> # previous statistics if that is defined and of properly\n >>> # formatting data\n >>> data = self._preprocess(data)\n >>>\n >>> # !!! here do all the processing on the statistics (data) !!!\n >>>\n >>> # Expand the data with polynomial expansion\n >>> result = self._polynomial_expansion(data)\n >>>\n >>> # now call the _rescale function which automatically rescales\n >>> # the different statistics using the standard\n >>> # deviation of them on the training set provided at initialization.\n >>> result = self._rescale(result)\n\n\n Parameters\n ----------\n data: python list\n Contains n data sets with length p.\n Returns\n -------\n numpy.ndarray\n nxp matrix where for each of the n data points p statistics are calculated.\n\n '
raise NotImplementedError<|docstring|>To be overwritten by any sub-class: should extract statistics from the
data set data. It is assumed that data is a list of n same type
elements(eg., The data can be a list containing n timeseries, n graphs or n np.ndarray).
All statistics implementation should follow this structure:
>>> # need to call this first which takes care of calling the
>>> # previous statistics if that is defined and of properly
>>> # formatting data
>>> data = self._preprocess(data)
>>>
>>> # !!! here do all the processing on the statistics (data) !!!
>>>
>>> # Expand the data with polynomial expansion
>>> result = self._polynomial_expansion(data)
>>>
>>> # now call the _rescale function which automatically rescales
>>> # the different statistics using the standard
>>> # deviation of them on the training set provided at initialization.
>>> result = self._rescale(result)
Parameters
----------
data: python list
Contains n data sets with length p.
Returns
-------
numpy.ndarray
nxp matrix where for each of the n data points p statistics are calculated.<|endoftext|> |
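A hedged sketch of a minimal concrete subclass that follows the skeleton given in the docstring above. It assumes the base class is importable from `abcpy.statistics` (the `path` column) and that `_preprocess` and `_rescale` behave as the docstring describes; the subclass name is made up for illustration.

```python
import numpy as np
from abcpy.statistics import Statistics   # path taken from the rows above

class RawStatistics(Statistics):
    """Uses the (preprocessed) data itself as the summary statistics."""

    def statistics(self, data):
        data = self._preprocess(data)               # pipelining + formatting, per the skeleton
        result = self._polynomial_expansion(data)   # degree / cross terms
        return self._rescale(result)                # standardize if reference simulations were given

stat = RawStatistics(degree=2, cross=True)
summaries = stat.statistics([np.array([1.0, 2.0]), np.array([3.0, 4.0])])
print(summaries.shape)   # expected (2, 5): two data sets, p=2 expanded to 5 statistics
```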
4845b389d7fc4ad0de59974790d02aa8e7c458815e539025a7fadc7cbf4ac928 | def _polynomial_expansion(self, summary_statistics):
'Helper function that does the polynomial expansion and includes cross-product\n terms of summary_statistics, already calculated summary statistics. It is typically called in the `statistics`\n method of a `Statistics` class, after the statistics have been computed from data but before the statistics\n are (optionally) rescaled.\n\n Parameters\n ----------\n summary_statistics: numpy.ndarray\n nxp matrix where n is the number of data points in the data set and p the number of\n summary statistics calculated.\n Returns\n -------\n numpy.ndarray\n nx(p+degree*p+cross*nchoosek(p,2)) matrix where for each of the n points with\n p statistics, degree*p polynomial expansion terms and cross*nchoosek(p,2) many\n cross-product terms are calculated.\n\n '
if (not isinstance(summary_statistics, np.ndarray)):
raise TypeError('Summary statistics is not of allowed types')
result = summary_statistics
for ind in range(2, (self.degree + 1)):
result = np.column_stack((result, np.power(summary_statistics, ind)))
if (self.cross and (summary_statistics.shape[1] > 1)):
for ind1 in range(0, summary_statistics.shape[1]):
for ind2 in range((ind1 + 1), summary_statistics.shape[1]):
result = np.column_stack((result, (summary_statistics[(:, ind1)] * summary_statistics[(:, ind2)])))
return result | Helper function that does the polynomial expansion and includes cross-product
terms of summary_statistics, the already calculated summary statistics. It is typically called in the `statistics`
method of a `Statistics` class, after the statistics have been computed from data but before the statistics
are (optionally) rescaled.
Parameters
----------
summary_statistics: numpy.ndarray
nxp matrix where n is the number of data points in the data set and p the number of
summary statistics calculated.
Returns
-------
numpy.ndarray
nx(p+degree*p+cross*nchoosek(p,2)) matrix where for each of the n points with
p statistics, degree*p polynomial expansion terms and cross*nchoosek(p,2)
cross-product terms are calculated. | abcpy/statistics.py | _polynomial_expansion | LoryPack/abcpy | 89 | python | def _polynomial_expansion(self, summary_statistics):
'Helper function that does the polynomial expansion and includes cross-product\n terms of summary_statistics, already calculated summary statistics. It is tipically called in the `statistics`\n method of a `Statistics` class, after the statistics have been computed from data but before the statistics\n are (optionally) rescaled.\n\n Parameters\n ----------\n summary_statistics: numpy.ndarray\n nxp matrix where n is number of data points in the datasets data set and p number os\n summary statistics calculated.\n Returns\n -------\n numpy.ndarray\n nx(p+degree*p+cross*nchoosek(p,2)) matrix where for each of the n points with\n p statistics, degree*p polynomial expansion term and cross*nchoosek(p,2) many\n cross-product terms are calculated.\n\n '
if (not isinstance(summary_statistics, np.ndarray)):
raise TypeError('Summary statistics is not of allowed types')
result = summary_statistics
for ind in range(2, (self.degree + 1)):
result = np.column_stack((result, np.power(summary_statistics, ind)))
if (self.cross and (summary_statistics.shape[1] > 1)):
for ind1 in range(0, summary_statistics.shape[1]):
for ind2 in range((ind1 + 1), summary_statistics.shape[1]):
result = np.column_stack((result, (summary_statistics[:, ind1] * summary_statistics[:, ind2])))
return result | def _polynomial_expansion(self, summary_statistics):
'Helper function that does the polynomial expansion and includes cross-product\n terms of summary_statistics, already calculated summary statistics. It is tipically called in the `statistics`\n method of a `Statistics` class, after the statistics have been computed from data but before the statistics\n are (optionally) rescaled.\n\n Parameters\n ----------\n summary_statistics: numpy.ndarray\n nxp matrix where n is number of data points in the datasets data set and p number os\n summary statistics calculated.\n Returns\n -------\n numpy.ndarray\n nx(p+degree*p+cross*nchoosek(p,2)) matrix where for each of the n points with\n p statistics, degree*p polynomial expansion term and cross*nchoosek(p,2) many\n cross-product terms are calculated.\n\n '
if (not isinstance(summary_statistics, np.ndarray)):
raise TypeError('Summary statistics is not of allowed types')
result = summary_statistics
for ind in range(2, (self.degree + 1)):
result = np.column_stack((result, np.power(summary_statistics, ind)))
if (self.cross and (summary_statistics.shape[1] > 1)):
for ind1 in range(0, summary_statistics.shape[1]):
for ind2 in range((ind1 + 1), summary_statistics.shape[1]):
result = np.column_stack((result, (summary_statistics[:, ind1] * summary_statistics[:, ind2])))
return result<|docstring|>Helper function that does the polynomial expansion and includes cross-product
terms of summary_statistics, the already calculated summary statistics. It is typically called in the `statistics`
method of a `Statistics` class, after the statistics have been computed from data but before the statistics
are (optionally) rescaled.
Parameters
----------
summary_statistics: numpy.ndarray
nxp matrix where n is the number of data points in the data set and p the number of
summary statistics calculated.
Returns
-------
numpy.ndarray
nx(p+degree*p+cross*nchoosek(p,2)) matrix where for each of the n points with
p statistics, degree*p polynomial expansion terms and cross*nchoosek(p,2)
cross-product terms are calculated.<|endoftext|> |
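A small self-contained sketch (plain NumPy, not calling ABCpy) of what this expansion produces for degree=2 and cross=True on a 3x2 matrix of statistics; the numbers are arbitrary.

import numpy as np

stats = np.array([[1.0, 2.0],
                  [3.0, 4.0],
                  [5.0, 6.0]])          # n=3 data points, p=2 statistics each

degree, cross = 2, True
result = stats
for d in range(2, degree + 1):
    result = np.column_stack((result, np.power(stats, d)))                 # powers 2..degree of each column
if cross and stats.shape[1] > 1:
    for i in range(stats.shape[1]):
        for j in range(i + 1, stats.shape[1]):
            result = np.column_stack((result, stats[:, i] * stats[:, j]))  # pairwise cross products

print(result.shape)  # (3, 5): columns are [s1, s2, s1**2, s2**2, s1*s2]

Note that the loop only adds powers 2..degree, so the resulting number of columns is degree*p plus, if cross is True, nchoosek(p, 2).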
b22d320460bd1f2f90c2405dba333efc94fe0f437b9597025a45792c03761a64 | def _rescale(self, result):
'Rescales the final summary statistics using the standard deviations computed at initialization on the set of\n reference simulations. If that was not done, no rescaling is done.\n\n Parameters\n ----------\n result: numpy.ndarray\n Final summary statistics (after polynomial expansion)\n\n Returns\n -------\n numpy.ndarray\n Rescaled summary statistics, with the same shape as the input.\n '
if hasattr(self, 'std_statistics'):
if (result.shape[(- 1)] != self.std_statistics.shape[(- 1)]):
raise RuntimeError('The size of the statistics is not the same as the stored standard deviations for rescaling! Please check that you initialized the statistics with the correct set of reference samples.')
result = (result / self.std_statistics)
return result | Rescales the final summary statistics using the standard deviations computed at initialization on the set of
reference simulations. If no reference simulations were provided at initialization, no rescaling is done.
Parameters
----------
result: numpy.ndarray
Final summary statistics (after polynomial expansion)
Returns
-------
numpy.ndarray
Rescaled summary statistics, with the same shape as the input. | abcpy/statistics.py | _rescale | LoryPack/abcpy | 89 | python | def _rescale(self, result):
'Rescales the final summary statistics using the standard deviations computed at initialization on the set of\n reference simulations. If that was not done, no rescaling is done.\n\n Parameters\n ----------\n result: numpy.ndarray\n Final summary statistics (after polynomial expansion)\n\n Returns\n -------\n numpy.ndarray\n Rescaled summary statistics, with the same shape as the input.\n '
if hasattr(self, 'std_statistics'):
if (result.shape[(- 1)] != self.std_statistics.shape[(- 1)]):
raise RuntimeError('The size of the statistics is not the same as the stored standard deviations for rescaling! Please check that you initialized the statistics with the correct set of reference samples.')
result = (result / self.std_statistics)
return result | def _rescale(self, result):
'Rescales the final summary statistics using the standard deviations computed at initialization on the set of\n reference simulations. If that was not done, no rescaling is done.\n\n Parameters\n ----------\n result: numpy.ndarray\n Final summary statistics (after polynomial expansion)\n\n Returns\n -------\n numpy.ndarray\n Rescaled summary statistics, with the same shape as the input.\n '
if hasattr(self, 'std_statistics'):
if (result.shape[(- 1)] != self.std_statistics.shape[(- 1)]):
raise RuntimeError('The size of the statistics is not the same as the stored standard deviations for rescaling! Please check that you initialized the statistics with the correct set of reference samples.')
result = (result / self.std_statistics)
return result<|docstring|>Rescales the final summary statistics using the standard deviations computed at initialization on the set of
reference simulations. If no reference simulations were provided at initialization, no rescaling is done.
Parameters
----------
result: numpy.ndarray
Final summary statistics (after polynomial expansion)
Returns
-------
numpy.ndarray
Rescaled summary statistics, with the same shape as the input.<|endoftext|> |
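A sketch of the rescaling idea in plain NumPy, assuming std_statistics was computed at initialization as the column-wise standard deviation of the statistics of the reference simulations; the values are arbitrary.

import numpy as np

rng = np.random.default_rng(0)
scales = np.array([1.0, 10.0, 0.1])
reference_stats = rng.normal(size=(100, 3)) * scales   # statistics of the reference simulations
std_statistics = np.std(reference_stats, axis=0)       # stored at initialization

new_stats = rng.normal(size=(5, 3)) * scales           # statistics of new simulations or observations
rescaled = new_stats / std_statistics                  # all columns are now on a comparable scale
print(rescaled.shape)  # (5, 3)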
722be8cb5955fdad8eb9f1e8c8fe835a02ea81eaf712c3996f8fda692fb1741c | def _preprocess(self, data):
'Utility which needs to be called at the beginning of the `statistics` method for all `Statistics` classes.\n It takes care of calling the `previous_statistics` if that is available (pipelining)\n and of correctly formatting the data.\n\n Parameters\n ----------\n data: python list\n Contains n data sets with length p.\n\n Returns\n -------\n numpy.ndarray\n Formatted statistics after pipelining.\n '
if (self.previous_statistics is not None):
data = self.previous_statistics.statistics(data)
else:
data = self._check_and_transform_input(data)
return data | Utility which needs to be called at the beginning of the `statistics` method for all `Statistics` classes.
It takes care of calling the `previous_statistics` if that is available (pipelining)
and of correctly formatting the data.
Parameters
----------
data: python list
Contains n data sets with length p.
Returns
-------
numpy.ndarray
Formatted statistics after pipelining. | abcpy/statistics.py | _preprocess | LoryPack/abcpy | 89 | python | def _preprocess(self, data):
'Utility which needs to be called at the beginning of the `statistics` method for all `Statistics` classes.\n It takes care of calling the `previous_statistics` if that is available (pipelining)\n and of correctly formatting the data.\n\n Parameters\n ----------\n data: python list\n Contains n data sets with length p.\n\n Returns\n -------\n numpy.ndarray\n Formatted statistics after pipelining.\n '
if (self.previous_statistics is not None):
data = self.previous_statistics.statistics(data)
else:
data = self._check_and_transform_input(data)
return data | def _preprocess(self, data):
'Utility which needs to be called at the beginning of the `statistics` method for all `Statistics` classes.\n It takes care of calling the `previous_statistics` if that is available (pipelining)\n and of correctly formatting the data.\n\n Parameters\n ----------\n data: python list\n Contains n data sets with length p.\n\n Returns\n -------\n numpy.ndarray\n Formatted statistics after pipelining.\n '
if (self.previous_statistics is not None):
data = self.previous_statistics.statistics(data)
else:
data = self._check_and_transform_input(data)
return data<|docstring|>Utility which needs to be called at the beginning of the `statistics` method for all `Statistics` classes.
It takes care of calling the `previous_statistics` if that is available (pipelining)
and of correctly formatting the data.
Parameters
----------
data: python list
Contains n data sets with length p.
Returns
-------
numpy.ndarray
Formatted statistics after pipelining.<|endoftext|> |
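A brief usage sketch of the pipelining mechanism, assuming the Identity and LinearTransformation classes of this module (abcpy.statistics); the coefficient values and data are arbitrary.

import numpy as np
from abcpy.statistics import Identity, LinearTransformation

first = Identity(degree=2, cross=True)              # expands each 2-dim data point into 5 statistics
coefficients = np.random.randn(5, 2)                # rows must match the 5 statistics produced above
second = LinearTransformation(coefficients, previous_statistics=first)

data = [np.array([0.1, 0.2]) for _ in range(10)]    # 10 data points of length 2
summaries = second.statistics(data)                 # Identity is applied first, then the linear map
print(summaries.shape)  # (10, 2)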
b53e397f0f4fe89ea6bf42f705fab91839ccda1f456a23f24114e71b9edd31d4 | def _check_and_transform_input(self, data):
' Formats the input in the correct way for computing summary statistics; specifically takes as input a\n list and returns a numpy.ndarray.\n\n Parameters\n ----------\n data: python list\n Contains n data sets with length p.\n\n Returns\n -------\n numpy.ndarray\n Formatted statistics after pipelining.\n '
if isinstance(data, list):
if (np.array(data).shape == (len(data),)):
if (len(data) == 1):
data = np.array(data).reshape(1, 1)
data = np.array(data).reshape(len(data), 1)
else:
data = np.concatenate(data).reshape(len(data), (- 1))
else:
raise TypeError('Input data should be of type list, but found type {}'.format(type(data)))
return data | Formats the input in the correct way for computing summary statistics; specifically takes as input a
list and returns a numpy.ndarray.
Parameters
----------
data: python list
Contains n data sets with length p.
Returns
-------
numpy.ndarray
The data formatted as an (n, p) numpy array.
' Formats the input in the correct way for computing summary statistics; specifically takes as input a\n list and returns a numpy.ndarray.\n\n Parameters\n ----------\n data: python list\n Contains n data sets with length p.\n\n Returns\n -------\n numpy.ndarray\n Formatted statistics after pipelining.\n '
if isinstance(data, list):
if (np.array(data).shape == (len(data),)):
if (len(data) == 1):
data = np.array(data).reshape(1, 1)
data = np.array(data).reshape(len(data), 1)
else:
data = np.concatenate(data).reshape(len(data), (- 1))
else:
raise TypeError('Input data should be of type list, but found type {}'.format(type(data)))
return data | def _check_and_transform_input(self, data):
' Formats the input in the correct way for computing summary statistics; specifically takes as input a\n list and returns a numpy.ndarray.\n\n Parameters\n ----------\n data: python list\n Contains n data sets with length p.\n\n Returns\n -------\n numpy.ndarray\n Formatted statistics after pipelining.\n '
if isinstance(data, list):
if (np.array(data).shape == (len(data),)):
if (len(data) == 1):
data = np.array(data).reshape(1, 1)
data = np.array(data).reshape(len(data), 1)
else:
data = np.concatenate(data).reshape(len(data), (- 1))
else:
raise TypeError('Input data should be of type list, but found type {}'.format(type(data)))
return data<|docstring|>Formats the input in the correct way for computing summary statistics; specifically takes as input a
list and returns a numpy.ndarray.
Parameters
----------
data: python list
Contains n data sets with length p.
Returns
-------
numpy.ndarray
The data formatted as an (n, p) numpy array.<|endoftext|>
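A quick illustration of the two formatting branches with hypothetical values: a list of n scalars becomes an (n, 1) array, while a list of n arrays of length p becomes an (n, p) array.

import numpy as np

scalars = [1.0, 2.0, 3.0]                                       # a list of n scalars
print(np.array(scalars).reshape(len(scalars), 1).shape)         # (3, 1)

arrays = [np.array([1.0, 2.0]), np.array([3.0, 4.0])]           # a list of n arrays of length p
print(np.concatenate(arrays).reshape(len(arrays), -1).shape)    # (2, 2)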
e82a23e90a6bcc84615f052435b52ddf34bbfbf9d2a27280afb1321af302dfaa | def statistics(self, data):
'\n Parameters\n ----------\n data: python list\n Contains n data sets with length p.\n Returns\n -------\n numpy.ndarray\n nx(p+degree*p+cross*nchoosek(p,2)) matrix where for each of the n data points with length p,\n (p+degree*p+cross*nchoosek(p,2)) statistics are calculated.\n '
data = self._preprocess(data)
result = self._polynomial_expansion(data)
result = self._rescale(result)
return result | Parameters
----------
data: python list
Contains n data sets with length p.
Returns
-------
numpy.ndarray
nx(p+degree*p+cross*nchoosek(p,2)) matrix where for each of the n data points with length p,
(p+degree*p+cross*nchoosek(p,2)) statistics are calculated. | abcpy/statistics.py | statistics | LoryPack/abcpy | 89 | python | def statistics(self, data):
'\n Parameters\n ----------\n data: python list\n Contains n data sets with length p.\n Returns\n -------\n numpy.ndarray\n nx(p+degree*p+cross*nchoosek(p,2)) matrix where for each of the n data points with length p,\n (p+degree*p+cross*nchoosek(p,2)) statistics are calculated.\n '
data = self._preprocess(data)
result = self._polynomial_expansion(data)
result = self._rescale(result)
return result | def statistics(self, data):
'\n Parameters\n ----------\n data: python list\n Contains n data sets with length p.\n Returns\n -------\n numpy.ndarray\n nx(p+degree*p+cross*nchoosek(p,2)) matrix where for each of the n data points with length p,\n (p+degree*p+cross*nchoosek(p,2)) statistics are calculated.\n '
data = self._preprocess(data)
result = self._polynomial_expansion(data)
result = self._rescale(result)
return result<|docstring|>Parameters
----------
data: python list
Contains n data sets with length p.
Returns
-------
numpy.ndarray
nx(p+degree*p+cross*nchoosek(p,2)) matrix where for each of the n data points with length p,
(p+degree*p+cross*nchoosek(p,2)) statistics are calculated.<|endoftext|> |
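A usage sketch, assuming this statistics method belongs to a concrete class such as Identity in this module; the data values are arbitrary.

import numpy as np
from abcpy.statistics import Identity

stat = Identity(degree=2, cross=True)
data = [np.array([0.5, 1.5, 2.5]) for _ in range(4)]   # n=4 data points of length p=3
summaries = stat.statistics(data)
print(summaries.shape)  # (4, 9): 3 original values, 3 squares, 3 pairwise cross products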
0c0487d8d00aa8b76b618750cb78a6452a443583755b30a296daef842781da1a | def __init__(self, coefficients, degree=1, cross=False, reference_simulations=None, previous_statistics=None):
'\n `degree` and `cross` specify the polynomial expansion you want to apply to the statistics.\n\n If `reference_simulations` are provided, the standard deviation of the different statistics on the set \n of reference simulations is computed and stored; these will then be used to rescale\n the statistics for each new simulation or observation. \n If no set of reference simulations are provided, then this is not done.\n\n `previous_statistics` allows different Statistics object to be pipelined. Specifically, if the final \n statistic to be used is determined by the\n composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it\n is sufficient to call the `statistics` method of the second one, and that will automatically apply both\n transformations. \n\n Parameters\n ----------\n coefficients: coefficients is a matrix with size d x p, where d is the dimension of the summary statistic that\n is obtained after applying the linear transformation (i.e. before a possible polynomial expansion is\n applied), while d is the dimension of each data.\n degree : integer, optional\n Of polynomial expansion. The default value is 2 meaning second order polynomial expansion.\n cross : boolean, optional\n Defines whether to include the cross-product terms. The default value is True, meaning the cross product term\n is included.\n reference_simulations: array, optional\n A numpy array with shape (n_samples, output_size) containing a set of reference simulations. If provided, \n statistics are computed at initialization for all reference simulations, and the standard deviation of the \n different statistics is extracted. The standard deviation is then used to standardize the summary \n statistics each time they are compute on a new observation or simulation. Defaults to None, in which case \n standardization is not applied.\n previous_statistics : abcpy.statistics.Statistics, optional\n It allows pipelining of Statistics. Specifically, if the final statistic to be used is determined by the\n composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it\n is sufficient to call the `statistics` method of the second one, and that will automatically apply both\n transformations.\n '
self.coefficients = coefficients
super(LinearTransformation, self).__init__(degree, cross, reference_simulations, previous_statistics) | `degree` and `cross` specify the polynomial expansion you want to apply to the statistics.
If `reference_simulations` are provided, the standard deviation of the different statistics on the set
of reference simulations is computed and stored; these will then be used to rescale
the statistics for each new simulation or observation.
If no set of reference simulations are provided, then this is not done.
`previous_statistics` allows different Statistics object to be pipelined. Specifically, if the final
statistic to be used is determined by the
composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it
is sufficient to call the `statistics` method of the second one, and that will automatically apply both
transformations.
Parameters
----------
coefficients: coefficients is a matrix with size p x d, where p is the dimension of each data point and d is
the dimension of the summary statistic obtained after applying the linear transformation (i.e. before a
possible polynomial expansion is applied).
degree : integer, optional
Of polynomial expansion. The default value is 1, meaning no polynomial expansion is applied.
cross : boolean, optional
Defines whether to include the cross-product terms. The default value is False, meaning the cross-product
terms are not included.
reference_simulations: array, optional
A numpy array with shape (n_samples, output_size) containing a set of reference simulations. If provided,
statistics are computed at initialization for all reference simulations, and the standard deviation of the
different statistics is extracted. The standard deviation is then used to standardize the summary
statistics each time they are computed on a new observation or simulation. Defaults to None, in which case
standardization is not applied.
previous_statistics : abcpy.statistics.Statistics, optional
It allows pipelining of Statistics. Specifically, if the final statistic to be used is determined by the
composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it
is sufficient to call the `statistics` method of the second one, and that will automatically apply both
transformations. | abcpy/statistics.py | __init__ | LoryPack/abcpy | 89 | python | def __init__(self, coefficients, degree=1, cross=False, reference_simulations=None, previous_statistics=None):
'\n `degree` and `cross` specify the polynomial expansion you want to apply to the statistics.\n\n If `reference_simulations` are provided, the standard deviation of the different statistics on the set \n of reference simulations is computed and stored; these will then be used to rescale\n the statistics for each new simulation or observation. \n If no set of reference simulations are provided, then this is not done.\n\n `previous_statistics` allows different Statistics object to be pipelined. Specifically, if the final \n statistic to be used is determined by the\n composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it\n is sufficient to call the `statistics` method of the second one, and that will automatically apply both\n transformations. \n\n Parameters\n ----------\n coefficients: coefficients is a matrix with size d x p, where d is the dimension of the summary statistic that\n is obtained after applying the linear transformation (i.e. before a possible polynomial expansion is\n applied), while d is the dimension of each data.\n degree : integer, optional\n Of polynomial expansion. The default value is 2 meaning second order polynomial expansion.\n cross : boolean, optional\n Defines whether to include the cross-product terms. The default value is True, meaning the cross product term\n is included.\n reference_simulations: array, optional\n A numpy array with shape (n_samples, output_size) containing a set of reference simulations. If provided, \n statistics are computed at initialization for all reference simulations, and the standard deviation of the \n different statistics is extracted. The standard deviation is then used to standardize the summary \n statistics each time they are compute on a new observation or simulation. Defaults to None, in which case \n standardization is not applied.\n previous_statistics : abcpy.statistics.Statistics, optional\n It allows pipelining of Statistics. Specifically, if the final statistic to be used is determined by the\n composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it\n is sufficient to call the `statistics` method of the second one, and that will automatically apply both\n transformations.\n '
self.coefficients = coefficients
super(LinearTransformation, self).__init__(degree, cross, reference_simulations, previous_statistics) | def __init__(self, coefficients, degree=1, cross=False, reference_simulations=None, previous_statistics=None):
'\n `degree` and `cross` specify the polynomial expansion you want to apply to the statistics.\n\n If `reference_simulations` are provided, the standard deviation of the different statistics on the set \n of reference simulations is computed and stored; these will then be used to rescale\n the statistics for each new simulation or observation. \n If no set of reference simulations are provided, then this is not done.\n\n `previous_statistics` allows different Statistics object to be pipelined. Specifically, if the final \n statistic to be used is determined by the\n composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it\n is sufficient to call the `statistics` method of the second one, and that will automatically apply both\n transformations. \n\n Parameters\n ----------\n coefficients: coefficients is a matrix with size d x p, where d is the dimension of the summary statistic that\n is obtained after applying the linear transformation (i.e. before a possible polynomial expansion is\n applied), while d is the dimension of each data.\n degree : integer, optional\n Of polynomial expansion. The default value is 2 meaning second order polynomial expansion.\n cross : boolean, optional\n Defines whether to include the cross-product terms. The default value is True, meaning the cross product term\n is included.\n reference_simulations: array, optional\n A numpy array with shape (n_samples, output_size) containing a set of reference simulations. If provided, \n statistics are computed at initialization for all reference simulations, and the standard deviation of the \n different statistics is extracted. The standard deviation is then used to standardize the summary \n statistics each time they are compute on a new observation or simulation. Defaults to None, in which case \n standardization is not applied.\n previous_statistics : abcpy.statistics.Statistics, optional\n It allows pipelining of Statistics. Specifically, if the final statistic to be used is determined by the\n composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it\n is sufficient to call the `statistics` method of the second one, and that will automatically apply both\n transformations.\n '
self.coefficients = coefficients
super(LinearTransformation, self).__init__(degree, cross, reference_simulations, previous_statistics)<|docstring|>`degree` and `cross` specify the polynomial expansion you want to apply to the statistics.
If `reference_simulations` are provided, the standard deviation of the different statistics on the set
of reference simulations is computed and stored; these will then be used to rescale
the statistics for each new simulation or observation.
If no set of reference simulations are provided, then this is not done.
`previous_statistics` allows different Statistics object to be pipelined. Specifically, if the final
statistic to be used is determined by the
composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it
is sufficient to call the `statistics` method of the second one, and that will automatically apply both
transformations.
Parameters
----------
coefficients: coefficients is a matrix with size p x d, where p is the dimension of each data point and d is
the dimension of the summary statistic obtained after applying the linear transformation (i.e. before a
possible polynomial expansion is applied).
degree : integer, optional
Of polynomial expansion. The default value is 1, meaning no polynomial expansion is applied.
cross : boolean, optional
Defines whether to include the cross-product terms. The default value is False, meaning the cross-product
terms are not included.
reference_simulations: array, optional
A numpy array with shape (n_samples, output_size) containing a set of reference simulations. If provided,
statistics are computed at initialization for all reference simulations, and the standard deviation of the
different statistics is extracted. The standard deviation is then used to standardize the summary
statistics each time they are computed on a new observation or simulation. Defaults to None, in which case
standardization is not applied.
previous_statistics : abcpy.statistics.Statistics, optional
It allows pipelining of Statistics. Specifically, if the final statistic to be used is determined by the
composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it
is sufficient to call the `statistics` method of the second one, and that will automatically apply both
transformations.<|endoftext|> |
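A construction sketch with arbitrary coefficients; as the statistics method below requires, the number of rows of the coefficient matrix must equal the dimension of the (preprocessed) data.

import numpy as np
from abcpy.statistics import LinearTransformation

coefficients = np.random.randn(10, 3)              # 10-dimensional data mapped to 3 summary statistics
lin_stat = LinearTransformation(coefficients)      # degree=1, cross=False: no further expansion

data = [np.random.randn(10) for _ in range(20)]    # 20 data points of length 10
summaries = lin_stat.statistics(data)
print(summaries.shape)  # (20, 3)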
c515c96f7baa14ee6d81e8d0930f4b7fd0716ec73802544a694c366079babf4a | def statistics(self, data):
'\n Parameters\n ----------\n data: python list\n Contains n data sets with length p.\n Returns\n -------\n numpy.ndarray\n nx(d+degree*d+cross*nchoosek(d,2)) matrix where for each of the n data points with length p you apply the\n linear transformation to get to dimension d, from where (d+degree*d+cross*nchoosek(d,2)) statistics are\n calculated.\n '
data = self._preprocess(data)
if (not (data.shape[1] == self.coefficients.shape[0])):
raise ValueError('Mismatch in dimension of summary statistics and coefficients')
data = np.dot(data, self.coefficients)
result = self._polynomial_expansion(data)
result = self._rescale(result)
return result | Parameters
----------
data: python list
Contains n data sets with length p.
Returns
-------
numpy.ndarray
nx(d+degree*d+cross*nchoosek(d,2)) matrix where for each of the n data points with length p you apply the
linear transformation to get to dimension d, from where (d+degree*d+cross*nchoosek(d,2)) statistics are
calculated. | abcpy/statistics.py | statistics | LoryPack/abcpy | 89 | python | def statistics(self, data):
'\n Parameters\n ----------\n data: python list\n Contains n data sets with length p.\n Returns\n -------\n numpy.ndarray\n nx(d+degree*d+cross*nchoosek(d,2)) matrix where for each of the n data points with length p you apply the\n linear transformation to get to dimension d, from where (d+degree*d+cross*nchoosek(d,2)) statistics are\n calculated.\n '
data = self._preprocess(data)
if (not (data.shape[1] == self.coefficients.shape[0])):
raise ValueError('Mismatch in dimension of summary statistics and coefficients')
data = np.dot(data, self.coefficients)
result = self._polynomial_expansion(data)
result = self._rescale(result)
return result | def statistics(self, data):
'\n Parameters\n ----------\n data: python list\n Contains n data sets with length p.\n Returns\n -------\n numpy.ndarray\n nx(d+degree*d+cross*nchoosek(d,2)) matrix where for each of the n data points with length p you apply the\n linear transformation to get to dimension d, from where (d+degree*d+cross*nchoosek(d,2)) statistics are\n calculated.\n '
data = self._preprocess(data)
if (not (data.shape[1] == self.coefficients.shape[0])):
raise ValueError('Mismatch in dimension of summary statistics and coefficients')
data = np.dot(data, self.coefficients)
result = self._polynomial_expansion(data)
result = self._rescale(result)
return result<|docstring|>Parameters
----------
data: python list
Contains n data sets with length p.
Returns
-------
numpy.ndarray
nx(d+degree*d+cross*nchoosek(d,2)) matrix where for each of the n data points with length p you apply the
linear transformation to get to dimension d, from where (d+degree*d+cross*nchoosek(d,2)) statistics are
calculated.<|endoftext|> |
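A short sketch of the dimension check above: if the (preprocessed) data dimension does not match the number of rows of coefficients, a ValueError is raised; the sizes are arbitrary.

import numpy as np
from abcpy.statistics import LinearTransformation

lin_stat = LinearTransformation(np.random.randn(4, 2))
bad_data = [np.random.randn(7) for _ in range(3)]   # data dimension 7 != 4 rows of coefficients
try:
    lin_stat.statistics(bad_data)
except ValueError as err:
    print(err)  # Mismatch in dimension of summary statistics and coefficients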
6af95dde22170762c565c8b710e3b550116e52825433224860f8ecaac8649ba0 | def __init__(self, net, degree=1, cross=False, reference_simulations=None, previous_statistics=None):
'\n `degree` and `cross` specify the polynomial expansion you want to apply to the statistics.\n\n If `reference_simulations` are provided, the standard deviation of the different statistics on the set\n of reference simulations is computed and stored; these will then be used to rescale\n the statistics for each new simulation or observation. \n If no set of reference simulations are provided, then this is not done.\n\n `previous_statistics` allows different Statistics object to be pipelined. Specifically, if the final \n statistic to be used is determined by the\n composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it\n is sufficient to call the `statistics` method of the second one, and that will automatically apply both\n transformations.\n\n Parameters\n ----------\n net : torch.nn object\n the embedding neural network. The input size of the neural network must coincide with the size of each of\n the datapoints.\n degree: integer, optional\n Of polynomial expansion. The default value is 2 meaning second order polynomial expansion.\n cross: boolean, optional\n Defines whether to include the cross-product terms. The default value is True, meaning the cross product term\n is included.\n reference_simulations: array, optional\n A numpy array with shape (n_samples, output_size) containing a set of reference simulations. If provided, \n statistics are computed at initialization for all reference simulations, and the standard deviation of the \n different statistics is extracted. The standard deviation is then used to standardize the summary \n statistics each time they are compute on a new observation or simulation. Defaults to None, in which case \n standardization is not applied.\n previous_statistics: abcpy.statistics.Statistics, optional\n It allows pipelining of Statistics. Specifically, if the final statistic to be used is determined by the\n composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it\n is sufficient to call the `statistics` method of the second one, and that will automatically apply both\n transformations.\n '
if (not has_torch):
raise ImportError('Pytorch is required to instantiate an element of the {} class, in order to handle neural networks. Please install it. '.format(self.__class__.__name__))
self.net = net
super(NeuralEmbedding, self).__init__(degree, cross, reference_simulations, previous_statistics) | `degree` and `cross` specify the polynomial expansion you want to apply to the statistics.
If `reference_simulations` are provided, the standard deviation of the different statistics on the set
of reference simulations is computed and stored; these will then be used to rescale
the statistics for each new simulation or observation.
If no set of reference simulations are provided, then this is not done.
`previous_statistics` allows different Statistics object to be pipelined. Specifically, if the final
statistic to be used is determined by the
composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it
is sufficient to call the `statistics` method of the second one, and that will automatically apply both
transformations.
Parameters
----------
net : torch.nn object
the embedding neural network. The input size of the neural network must coincide with the size of each of
the datapoints.
degree: integer, optional
Of polynomial expansion. The default value is 1, meaning no polynomial expansion is applied.
cross: boolean, optional
Defines whether to include the cross-product terms. The default value is False, meaning the cross-product
terms are not included.
reference_simulations: array, optional
A numpy array with shape (n_samples, output_size) containing a set of reference simulations. If provided,
statistics are computed at initialization for all reference simulations, and the standard deviation of the
different statistics is extracted. The standard deviation is then used to standardize the summary
statistics each time they are computed on a new observation or simulation. Defaults to None, in which case
standardization is not applied.
previous_statistics: abcpy.statistics.Statistics, optional
It allows pipelining of Statistics. Specifically, if the final statistic to be used is determined by the
composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it
is sufficient to call the `statistics` method of the second one, and that will automatically apply both
transformations. | abcpy/statistics.py | __init__ | LoryPack/abcpy | 89 | python | def __init__(self, net, degree=1, cross=False, reference_simulations=None, previous_statistics=None):
'\n `degree` and `cross` specify the polynomial expansion you want to apply to the statistics.\n\n If `reference_simulations` are provided, the standard deviation of the different statistics on the set\n of reference simulations is computed and stored; these will then be used to rescale\n the statistics for each new simulation or observation. \n If no set of reference simulations are provided, then this is not done.\n\n `previous_statistics` allows different Statistics object to be pipelined. Specifically, if the final \n statistic to be used is determined by the\n composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it\n is sufficient to call the `statistics` method of the second one, and that will automatically apply both\n transformations.\n\n Parameters\n ----------\n net : torch.nn object\n the embedding neural network. The input size of the neural network must coincide with the size of each of\n the datapoints.\n degree: integer, optional\n Of polynomial expansion. The default value is 2 meaning second order polynomial expansion.\n cross: boolean, optional\n Defines whether to include the cross-product terms. The default value is True, meaning the cross product term\n is included.\n reference_simulations: array, optional\n A numpy array with shape (n_samples, output_size) containing a set of reference simulations. If provided, \n statistics are computed at initialization for all reference simulations, and the standard deviation of the \n different statistics is extracted. The standard deviation is then used to standardize the summary \n statistics each time they are compute on a new observation or simulation. Defaults to None, in which case \n standardization is not applied.\n previous_statistics: abcpy.statistics.Statistics, optional\n It allows pipelining of Statistics. Specifically, if the final statistic to be used is determined by the\n composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it\n is sufficient to call the `statistics` method of the second one, and that will automatically apply both\n transformations.\n '
if (not has_torch):
raise ImportError('Pytorch is required to instantiate an element of the {} class, in order to handle neural networks. Please install it. '.format(self.__class__.__name__))
self.net = net
super(NeuralEmbedding, self).__init__(degree, cross, reference_simulations, previous_statistics) | def __init__(self, net, degree=1, cross=False, reference_simulations=None, previous_statistics=None):
'\n `degree` and `cross` specify the polynomial expansion you want to apply to the statistics.\n\n If `reference_simulations` are provided, the standard deviation of the different statistics on the set\n of reference simulations is computed and stored; these will then be used to rescale\n the statistics for each new simulation or observation. \n If no set of reference simulations are provided, then this is not done.\n\n `previous_statistics` allows different Statistics object to be pipelined. Specifically, if the final \n statistic to be used is determined by the\n composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it\n is sufficient to call the `statistics` method of the second one, and that will automatically apply both\n transformations.\n\n Parameters\n ----------\n net : torch.nn object\n the embedding neural network. The input size of the neural network must coincide with the size of each of\n the datapoints.\n degree: integer, optional\n Of polynomial expansion. The default value is 2 meaning second order polynomial expansion.\n cross: boolean, optional\n Defines whether to include the cross-product terms. The default value is True, meaning the cross product term\n is included.\n reference_simulations: array, optional\n A numpy array with shape (n_samples, output_size) containing a set of reference simulations. If provided, \n statistics are computed at initialization for all reference simulations, and the standard deviation of the \n different statistics is extracted. The standard deviation is then used to standardize the summary \n statistics each time they are compute on a new observation or simulation. Defaults to None, in which case \n standardization is not applied.\n previous_statistics: abcpy.statistics.Statistics, optional\n It allows pipelining of Statistics. Specifically, if the final statistic to be used is determined by the\n composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it\n is sufficient to call the `statistics` method of the second one, and that will automatically apply both\n transformations.\n '
if (not has_torch):
raise ImportError('Pytorch is required to instantiate an element of the {} class, in order to handle neural networks. Please install it. '.format(self.__class__.__name__))
self.net = net
super(NeuralEmbedding, self).__init__(degree, cross, reference_simulations, previous_statistics)<|docstring|>`degree` and `cross` specify the polynomial expansion you want to apply to the statistics.
If `reference_simulations` are provided, the standard deviation of the different statistics on the set
of reference simulations is computed and stored; these will then be used to rescale
the statistics for each new simulation or observation.
If no set of reference simulations are provided, then this is not done.
`previous_statistics` allows different Statistics object to be pipelined. Specifically, if the final
statistic to be used is determined by the
composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it
is sufficient to call the `statistics` method of the second one, and that will automatically apply both
transformations.
Parameters
----------
net : torch.nn object
the embedding neural network. The input size of the neural network must coincide with the size of each of
the datapoints.
degree: integer, optional
Of polynomial expansion. The default value is 1, meaning no polynomial expansion is applied.
cross: boolean, optional
Defines whether to include the cross-product terms. The default value is False, meaning the cross-product
terms are not included.
reference_simulations: array, optional
A numpy array with shape (n_samples, output_size) containing a set of reference simulations. If provided,
statistics are computed at initialization for all reference simulations, and the standard deviation of the
different statistics is extracted. The standard deviation is then used to standardize the summary
statistics each time they are computed on a new observation or simulation. Defaults to None, in which case
standardization is not applied.
previous_statistics: abcpy.statistics.Statistics, optional
It allows pipelining of Statistics. Specifically, if the final statistic to be used is determined by the
composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it
is sufficient to call the `statistics` method of the second one, and that will automatically apply both
transformations.<|endoftext|> |
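A minimal sketch of wrapping a small PyTorch network; the architecture is arbitrary, the only constraint stated above being that the network input size matches the data dimension. It assumes PyTorch is installed and that the statistics method converts the formatted numpy array to a tensor internally before applying the network.

import numpy as np
import torch.nn as nn
from abcpy.statistics import NeuralEmbedding

net = nn.Sequential(nn.Linear(10, 30), nn.ReLU(), nn.Linear(30, 2))  # embeds 10-dim data into 2 statistics
stat = NeuralEmbedding(net)                                          # degree=1, cross=False by default

data = [np.random.randn(10) for _ in range(5)]
summaries = stat.statistics(data)   # expected shape (5, 2) under the assumptions above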
b5ee3c0fb94431e4f956d95caff1e2ef10e759e8748c3f9914b41a293a5306b0 | @classmethod
def fromFile(cls, path_to_net_state_dict, network_class=None, path_to_scaler=None, input_size=None, output_size=None, hidden_sizes=None, degree=1, cross=False, reference_simulations=None, previous_statistics=None):
'If the neural network state_dict was saved to the disk, this method can be used to instantiate a\n NeuralEmbedding object with that neural network.\n\n In order for the state_dict to be read correctly, the network class is needed. Therefore, we provide 2 options:\n 1) the Pytorch neural network class can be passed (if the user defined it, for instance)\n 2) if the neural network was defined by using the DefaultNN class in abcpy.NN_utilities.networks, you can\n provide arguments `input_size`, `output_size` and `hidden_sizes` (the latter is optional) that define\n the sizes of a fully connected network; then a DefaultNN is instantiated with those sizes. This can be used\n if for instance the neural network was trained using the utilities in abcpy.statisticslearning and you did\n not provide explicitly the neural network class there, but defined it through the sizes of the different layers.\n\n In both cases, note that the input size of the neural network must coincide with the size of each of the\n datapoints generated from the model (unless some other statistics are computed beforehand).\n\n Note that if the neural network was of the class `ScalerAndNet`, ie a scaler was applied before the data is fed\n through it, you need to pass `path_to_scaler` as well. Then this method will instantiate the network in the\n correct way.\n\n Parameters\n ----------\n path_to_net_state_dict : basestring\n the path where the state-dict is saved\n network_class : torch.nn class, optional\n if the neural network class is known explicitly (for instance if the used defined it), then it has to be\n passed here. This must not be provided together with `input_size` or `output_size`.\n path_to_scaler: basestring, optional\n The path where the scaler which was applied before the neural network is saved. Note that if the neural\n network was trained on scaled data and now you do not pass the correct scaler, the behavior will not be\n correct, leading to wrong inference. Default to None.\n input_size : integer, optional\n if the neural network is an instance of abcpy.NN_utilities.networks.DefaultNN with some input and\n output size, then you should provide here the input size of the network. It has to be provided together with\n the corresponding output_size, and it must not be provided with `network_class`.\n output_size : integer, optional\n if the neural network is an instance of abcpy.NN_utilities.networks.DefaultNN with some input and\n output size, then you should provide here the output size of the network. It has to be provided together\n with the corresponding input_size, and it must not be provided with `network_class`.\n hidden_sizes : array-like, optional\n if the neural network is an instance of abcpy.NN_utilities.networks.DefaultNN with some input and\n output size, then you can provide here an array-like with the size of the hidden layers (for instance\n [5,7,5] denotes 3 hidden layers with correspondingly 5,7,5 neurons). In case this parameter is not provided,\n the hidden sizes are determined from the input and output sizes as determined in\n abcpy.NN_utilities.networks.DefaultNN. Note that this must not be provided together with `network_class`.\n degree: integer, optional\n Of polynomial expansion. The default value is 2 meaning second order polynomial expansion.\n cross: boolean, optional\n Defines whether to include the cross-product terms. 
The default value is True, meaning the cross product term\n is included.\n reference_simulations: array, optional\n A numpy array with shape (n_samples, output_size) containing a set of reference simulations. If provided,\n statistics are computed at initialization for all reference simulations, and the standard deviation of the\n different statistics is extracted. The standard deviation is then used to standardize the summary\n statistics each time they are compute on a new observation or simulation. Defaults to None, in which case\n standardization is not applied.\n previous_statistics : abcpy.statistics.Statistics, optional\n It allows pipelining of Statistics. Specifically, if the final statistic to be used is determined by the\n composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it\n is sufficient to call the `statistics` method of the second one, and that will automatically apply both\n transformations. In this case, this is the statistics that has to be computed before the neural network\n transformation is applied.\n Returns\n -------\n abcpy.statistics.NeuralEmbedding\n the `NeuralEmbedding` object with the neural network obtained from the specified file.\n '
if (not has_torch):
raise ImportError('Pytorch is required to instantiate an element of the {} class, in order to handle neural networks. Please install it. '.format(cls.__name__))
if ((network_class is None) and ((input_size is None) or (output_size is None))):
raise RuntimeError('You need to pass either network class or both input_size and output_size.')
if ((network_class is not None) and ((input_size is not None) or (output_size is not None))):
raise RuntimeError("You can't pass together network_class and one of input_size, output_size")
if ((network_class is not None) and (hidden_sizes is not None)):
raise RuntimeError('You passed hidden_sizes as an argument, but that may be passed only if you are passing input_size and output_size as well, and you are not passing network_class.')
if (network_class is None):
network_class = createDefaultNN(input_size=input_size, output_size=output_size, hidden_sizes=hidden_sizes)
try:
net = load_net(path_to_net_state_dict, network_class)
except RuntimeError:
net = load_net(path_to_net_state_dict, DiscardLastOutputNet, network_class())
if (path_to_scaler is not None):
f = open(path_to_scaler, 'rb')
scaler = cloudpickle.load(f)
f.close()
net = ScalerAndNet(net, scaler)
statistic_object = cls(net, degree=degree, cross=cross, reference_simulations=reference_simulations, previous_statistics=previous_statistics)
return statistic_object | If the neural network state_dict was saved to the disk, this method can be used to instantiate a
NeuralEmbedding object with that neural network.
In order for the state_dict to be read correctly, the network class is needed. Therefore, we provide 2 options:
1) the Pytorch neural network class can be passed (if the user defined it, for instance)
2) if the neural network was defined by using the DefaultNN class in abcpy.NN_utilities.networks, you can
provide arguments `input_size`, `output_size` and `hidden_sizes` (the latter is optional) that define
the sizes of a fully connected network; then a DefaultNN is instantiated with those sizes. This can be used
if for instance the neural network was trained using the utilities in abcpy.statisticslearning and you did
not provide explicitly the neural network class there, but defined it through the sizes of the different layers.
In both cases, note that the input size of the neural network must coincide with the size of each of the
datapoints generated from the model (unless some other statistics are computed beforehand).
Note that if the neural network was of the class `ScalerAndNet`, i.e. a scaler was applied before the data is fed
through it, you need to pass `path_to_scaler` as well. Then this method will instantiate the network in the
correct way.
Parameters
----------
path_to_net_state_dict : basestring
the path where the state-dict is saved
network_class : torch.nn class, optional
if the neural network class is known explicitly (for instance if the user defined it), then it has to be
passed here. This must not be provided together with `input_size` or `output_size`.
path_to_scaler: basestring, optional
The path where the scaler which was applied before the neural network is saved. Note that if the neural
network was trained on scaled data and now you do not pass the correct scaler, the behavior will not be
correct, leading to wrong inference. Defaults to None.
input_size : integer, optional
if the neural network is an instance of abcpy.NN_utilities.networks.DefaultNN with some input and
output size, then you should provide here the input size of the network. It has to be provided together with
the corresponding output_size, and it must not be provided with `network_class`.
output_size : integer, optional
if the neural network is an instance of abcpy.NN_utilities.networks.DefaultNN with some input and
output size, then you should provide here the output size of the network. It has to be provided together
with the corresponding input_size, and it must not be provided with `network_class`.
hidden_sizes : array-like, optional
if the neural network is an instance of abcpy.NN_utilities.networks.DefaultNN with some input and
output size, then you can provide here an array-like with the size of the hidden layers (for instance
[5,7,5] denotes 3 hidden layers with correspondingly 5,7,5 neurons). In case this parameter is not provided,
the hidden sizes are determined from the input and output sizes as determined in
abcpy.NN_utilities.networks.DefaultNN. Note that this must not be provided together with `network_class`.
degree: integer, optional
Of polynomial expansion. The default value is 1, meaning no polynomial expansion is applied.
cross: boolean, optional
Defines whether to include the cross-product terms. The default value is False, meaning the cross-product
terms are not included.
reference_simulations: array, optional
A numpy array with shape (n_samples, output_size) containing a set of reference simulations. If provided,
statistics are computed at initialization for all reference simulations, and the standard deviation of the
different statistics is extracted. The standard deviation is then used to standardize the summary
statistics each time they are computed on a new observation or simulation. Defaults to None, in which case
standardization is not applied.
previous_statistics : abcpy.statistics.Statistics, optional
It allows pipelining of Statistics. Specifically, if the final statistic to be used is determined by the
composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it
is sufficient to call the `statistics` method of the second one, and that will automatically apply both
transformations. In this case, this is the statistics that has to be computed before the neural network
transformation is applied.
Returns
-------
abcpy.statistics.NeuralEmbedding
the `NeuralEmbedding` object with the neural network obtained from the specified file. | abcpy/statistics.py | fromFile | LoryPack/abcpy | 89 | python | @classmethod
def fromFile(cls, path_to_net_state_dict, network_class=None, path_to_scaler=None, input_size=None, output_size=None, hidden_sizes=None, degree=1, cross=False, reference_simulations=None, previous_statistics=None):
'If the neural network state_dict was saved to the disk, this method can be used to instantiate a\n NeuralEmbedding object with that neural network.\n\n In order for the state_dict to be read correctly, the network class is needed. Therefore, we provide 2 options:\n 1) the Pytorch neural network class can be passed (if the user defined it, for instance)\n 2) if the neural network was defined by using the DefaultNN class in abcpy.NN_utilities.networks, you can\n provide arguments `input_size`, `output_size` and `hidden_sizes` (the latter is optional) that define\n the sizes of a fully connected network; then a DefaultNN is instantiated with those sizes. This can be used\n if for instance the neural network was trained using the utilities in abcpy.statisticslearning and you did\n not provide explicitly the neural network class there, but defined it through the sizes of the different layers.\n\n In both cases, note that the input size of the neural network must coincide with the size of each of the\n datapoints generated from the model (unless some other statistics are computed beforehand).\n\n Note that if the neural network was of the class `ScalerAndNet`, ie a scaler was applied before the data is fed\n through it, you need to pass `path_to_scaler` as well. Then this method will instantiate the network in the\n correct way.\n\n Parameters\n ----------\n path_to_net_state_dict : basestring\n the path where the state-dict is saved\n network_class : torch.nn class, optional\n if the neural network class is known explicitly (for instance if the used defined it), then it has to be\n passed here. This must not be provided together with `input_size` or `output_size`.\n path_to_scaler: basestring, optional\n The path where the scaler which was applied before the neural network is saved. Note that if the neural\n network was trained on scaled data and now you do not pass the correct scaler, the behavior will not be\n correct, leading to wrong inference. Default to None.\n input_size : integer, optional\n if the neural network is an instance of abcpy.NN_utilities.networks.DefaultNN with some input and\n output size, then you should provide here the input size of the network. It has to be provided together with\n the corresponding output_size, and it must not be provided with `network_class`.\n output_size : integer, optional\n if the neural network is an instance of abcpy.NN_utilities.networks.DefaultNN with some input and\n output size, then you should provide here the output size of the network. It has to be provided together\n with the corresponding input_size, and it must not be provided with `network_class`.\n hidden_sizes : array-like, optional\n if the neural network is an instance of abcpy.NN_utilities.networks.DefaultNN with some input and\n output size, then you can provide here an array-like with the size of the hidden layers (for instance\n [5,7,5] denotes 3 hidden layers with correspondingly 5,7,5 neurons). In case this parameter is not provided,\n the hidden sizes are determined from the input and output sizes as determined in\n abcpy.NN_utilities.networks.DefaultNN. Note that this must not be provided together with `network_class`.\n degree: integer, optional\n Of polynomial expansion. The default value is 2 meaning second order polynomial expansion.\n cross: boolean, optional\n Defines whether to include the cross-product terms. 
The default value is True, meaning the cross product term\n is included.\n reference_simulations: array, optional\n A numpy array with shape (n_samples, output_size) containing a set of reference simulations. If provided,\n statistics are computed at initialization for all reference simulations, and the standard deviation of the\n different statistics is extracted. The standard deviation is then used to standardize the summary\n statistics each time they are compute on a new observation or simulation. Defaults to None, in which case\n standardization is not applied.\n previous_statistics : abcpy.statistics.Statistics, optional\n It allows pipelining of Statistics. Specifically, if the final statistic to be used is determined by the\n composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it\n is sufficient to call the `statistics` method of the second one, and that will automatically apply both\n transformations. In this case, this is the statistics that has to be computed before the neural network\n transformation is applied.\n Returns\n -------\n abcpy.statistics.NeuralEmbedding\n the `NeuralEmbedding` object with the neural network obtained from the specified file.\n '
if (not has_torch):
raise ImportError('Pytorch is required to instantiate an element of the {} class, in order to handle neural networks. Please install it. '.format(cls.__name__))
if ((network_class is None) and ((input_size is None) or (output_size is None))):
raise RuntimeError('You need to pass either network class or both input_size and output_size.')
if ((network_class is not None) and ((input_size is not None) or (output_size is not None))):
raise RuntimeError("You can't pass together network_class and one of input_size, output_size")
if ((network_class is not None) and (hidden_sizes is not None)):
raise RuntimeError('You passed hidden_sizes as an argument, but that may be passed only if you are passing input_size and output_size as well, and you are not passing network_class.')
if (network_class is None):
network_class = createDefaultNN(input_size=input_size, output_size=output_size, hidden_sizes=hidden_sizes)
try:
net = load_net(path_to_net_state_dict, network_class)
except RuntimeError:
net = load_net(path_to_net_state_dict, DiscardLastOutputNet, network_class())
if (path_to_scaler is not None):
f = open(path_to_scaler, 'rb')
scaler = cloudpickle.load(f)
f.close()
net = ScalerAndNet(net, scaler)
statistic_object = cls(net, degree=degree, cross=cross, reference_simulations=reference_simulations, previous_statistics=previous_statistics)
return statistic_object | @classmethod
def fromFile(cls, path_to_net_state_dict, network_class=None, path_to_scaler=None, input_size=None, output_size=None, hidden_sizes=None, degree=1, cross=False, reference_simulations=None, previous_statistics=None):
'If the neural network state_dict was saved to the disk, this method can be used to instantiate a\n NeuralEmbedding object with that neural network.\n\n In order for the state_dict to be read correctly, the network class is needed. Therefore, we provide 2 options:\n 1) the Pytorch neural network class can be passed (if the user defined it, for instance)\n 2) if the neural network was defined by using the DefaultNN class in abcpy.NN_utilities.networks, you can\n provide arguments `input_size`, `output_size` and `hidden_sizes` (the latter is optional) that define\n the sizes of a fully connected network; then a DefaultNN is instantiated with those sizes. This can be used\n if for instance the neural network was trained using the utilities in abcpy.statisticslearning and you did\n not provide explicitly the neural network class there, but defined it through the sizes of the different layers.\n\n In both cases, note that the input size of the neural network must coincide with the size of each of the\n datapoints generated from the model (unless some other statistics are computed beforehand).\n\n Note that if the neural network was of the class `ScalerAndNet`, ie a scaler was applied before the data is fed\n through it, you need to pass `path_to_scaler` as well. Then this method will instantiate the network in the\n correct way.\n\n Parameters\n ----------\n path_to_net_state_dict : basestring\n the path where the state-dict is saved\n network_class : torch.nn class, optional\n if the neural network class is known explicitly (for instance if the used defined it), then it has to be\n passed here. This must not be provided together with `input_size` or `output_size`.\n path_to_scaler: basestring, optional\n The path where the scaler which was applied before the neural network is saved. Note that if the neural\n network was trained on scaled data and now you do not pass the correct scaler, the behavior will not be\n correct, leading to wrong inference. Default to None.\n input_size : integer, optional\n if the neural network is an instance of abcpy.NN_utilities.networks.DefaultNN with some input and\n output size, then you should provide here the input size of the network. It has to be provided together with\n the corresponding output_size, and it must not be provided with `network_class`.\n output_size : integer, optional\n if the neural network is an instance of abcpy.NN_utilities.networks.DefaultNN with some input and\n output size, then you should provide here the output size of the network. It has to be provided together\n with the corresponding input_size, and it must not be provided with `network_class`.\n hidden_sizes : array-like, optional\n if the neural network is an instance of abcpy.NN_utilities.networks.DefaultNN with some input and\n output size, then you can provide here an array-like with the size of the hidden layers (for instance\n [5,7,5] denotes 3 hidden layers with correspondingly 5,7,5 neurons). In case this parameter is not provided,\n the hidden sizes are determined from the input and output sizes as determined in\n abcpy.NN_utilities.networks.DefaultNN. Note that this must not be provided together with `network_class`.\n degree: integer, optional\n Of polynomial expansion. The default value is 2 meaning second order polynomial expansion.\n cross: boolean, optional\n Defines whether to include the cross-product terms. 
The default value is True, meaning the cross product term\n is included.\n reference_simulations: array, optional\n A numpy array with shape (n_samples, output_size) containing a set of reference simulations. If provided,\n statistics are computed at initialization for all reference simulations, and the standard deviation of the\n different statistics is extracted. The standard deviation is then used to standardize the summary\n statistics each time they are compute on a new observation or simulation. Defaults to None, in which case\n standardization is not applied.\n previous_statistics : abcpy.statistics.Statistics, optional\n It allows pipelining of Statistics. Specifically, if the final statistic to be used is determined by the\n composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it\n is sufficient to call the `statistics` method of the second one, and that will automatically apply both\n transformations. In this case, this is the statistics that has to be computed before the neural network\n transformation is applied.\n Returns\n -------\n abcpy.statistics.NeuralEmbedding\n the `NeuralEmbedding` object with the neural network obtained from the specified file.\n '
if (not has_torch):
raise ImportError('Pytorch is required to instantiate an element of the {} class, in order to handle neural networks. Please install it. '.format(cls.__name__))
if ((network_class is None) and ((input_size is None) or (output_size is None))):
raise RuntimeError('You need to pass either network class or both input_size and output_size.')
if ((network_class is not None) and ((input_size is not None) or (output_size is not None))):
raise RuntimeError("You can't pass together network_class and one of input_size, output_size")
if ((network_class is not None) and (hidden_sizes is not None)):
raise RuntimeError('You passed hidden_sizes as an argument, but that may be passed only if you are passing input_size and output_size as well, and you are not passing network_class.')
if (network_class is None):
network_class = createDefaultNN(input_size=input_size, output_size=output_size, hidden_sizes=hidden_sizes)
try:
net = load_net(path_to_net_state_dict, network_class)
except RuntimeError:
net = load_net(path_to_net_state_dict, DiscardLastOutputNet, network_class())
if (path_to_scaler is not None):
f = open(path_to_scaler, 'rb')
scaler = cloudpickle.load(f)
f.close()
net = ScalerAndNet(net, scaler)
statistic_object = cls(net, degree=degree, cross=cross, reference_simulations=reference_simulations, previous_statistics=previous_statistics)
return statistic_object<|docstring|>If the neural network state_dict was saved to the disk, this method can be used to instantiate a
NeuralEmbedding object with that neural network.
In order for the state_dict to be read correctly, the network class is needed. Therefore, we provide 2 options:
1) the Pytorch neural network class can be passed (if the user defined it, for instance)
2) if the neural network was defined by using the DefaultNN class in abcpy.NN_utilities.networks, you can
provide arguments `input_size`, `output_size` and `hidden_sizes` (the latter is optional) that define
the sizes of a fully connected network; then a DefaultNN is instantiated with those sizes. This can be used
if for instance the neural network was trained using the utilities in abcpy.statisticslearning and you did
not provide explicitly the neural network class there, but defined it through the sizes of the different layers.
In both cases, note that the input size of the neural network must coincide with the size of each of the
datapoints generated from the model (unless some other statistics are computed beforehand).
Note that if the neural network was of the class `ScalerAndNet`, ie a scaler was applied before the data is fed
through it, you need to pass `path_to_scaler` as well. Then this method will instantiate the network in the
correct way.
Parameters
----------
path_to_net_state_dict : basestring
the path where the state-dict is saved
network_class : torch.nn class, optional
if the neural network class is known explicitly (for instance if the user defined it), then it has to be
passed here. This must not be provided together with `input_size` or `output_size`.
path_to_scaler: basestring, optional
The path where the scaler which was applied before the neural network is saved. Note that if the neural
network was trained on scaled data and now you do not pass the correct scaler, the behavior will not be
correct, leading to wrong inference. Defaults to None.
input_size : integer, optional
if the neural network is an instance of abcpy.NN_utilities.networks.DefaultNN with some input and
output size, then you should provide here the input size of the network. It has to be provided together with
the corresponding output_size, and it must not be provided with `network_class`.
output_size : integer, optional
if the neural network is an instance of abcpy.NN_utilities.networks.DefaultNN with some input and
output size, then you should provide here the output size of the network. It has to be provided together
with the corresponding input_size, and it must not be provided with `network_class`.
hidden_sizes : array-like, optional
if the neural network is an instance of abcpy.NN_utilities.networks.DefaultNN with some input and
output size, then you can provide here an array-like with the size of the hidden layers (for instance
[5,7,5] denotes 3 hidden layers with correspondingly 5,7,5 neurons). In case this parameter is not provided,
the hidden sizes are determined from the input and output sizes as determined in
abcpy.NN_utilities.networks.DefaultNN. Note that this must not be provided together with `network_class`.
degree: integer, optional
Degree of the polynomial expansion. The default value is 1, meaning no higher-order polynomial expansion is applied.
cross: boolean, optional
Defines whether to include the cross-product terms. The default value is False, meaning the cross-product terms
are not included.
reference_simulations: array, optional
A numpy array with shape (n_samples, output_size) containing a set of reference simulations. If provided,
statistics are computed at initialization for all reference simulations, and the standard deviation of the
different statistics is extracted. The standard deviation is then used to standardize the summary
statistics each time they are computed on a new observation or simulation. Defaults to None, in which case
standardization is not applied.
previous_statistics : abcpy.statistics.Statistics, optional
It allows pipelining of Statistics. Specifically, if the final statistic to be used is determined by the
composition of two Statistics, you can pass the first here; then, whenever the final statistic is needed, it
is sufficient to call the `statistics` method of the second one, and that will automatically apply both
transformations. In this case, this is the statistics that has to be computed before the neural network
transformation is applied.
Returns
-------
abcpy.statistics.NeuralEmbedding
the `NeuralEmbedding` object with the neural network obtained from the specified file.<|endoftext|> |
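A minimal usage sketch of the fromFile loader documented above. The file paths and layer sizes are placeholders, not values from the record; the input_size/output_size route is shown, which is only valid when network_class is not passed.

from abcpy.statistics import NeuralEmbedding

# Placeholder paths and sizes -- adjust to the saved model.
statistic = NeuralEmbedding.fromFile(
    path_to_net_state_dict="net_state_dict.pth",
    input_size=10,                 # must match the size of each simulated datapoint
    output_size=3,                 # number of learned summary statistics
    hidden_sizes=[30, 20],         # optional; omit to use the DefaultNN sizing rule
    path_to_scaler="scaler.pkl",   # only if the saved network was a ScalerAndNet
)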
c1d25646ea3f1cbaef9e442cb0e386b7db23bd8788e4320a355ecc57cc56906d | def save_net(self, path_to_net_state_dict, path_to_scaler=None):
'Method to save the neural network state dict to a file. If the network is of the class ScalerAndNet, ie a\n scaler is applied before the data is fed through the network, then you are required to pass the path where you\n want the scaler to be saved.\n\n Parameters\n ----------\n path_to_net_state_dict: basestring\n Path where the state dict of the neural network is saved.\n path_to_scaler: basestring\n Path where the scaler is saved (with pickle); this is required if the neural network is of the class\n ScalerAndNet, and is ignored otherwise.\n '
if (hasattr(self.net, 'scaler') and (path_to_scaler is None)):
raise RuntimeError('You did not specify path_to_scaler, which is required as the neural network is an element of the class `ScalerAndNet`, ie a scaler is applied before the data is fed through the network')
if hasattr(self.net, 'scaler'):
save_net(path_to_net_state_dict, self.net.net)
f = open(path_to_scaler, 'wb')
cloudpickle.dump(self.net.scaler, f)
f.close()
else:
save_net(path_to_net_state_dict, self.net) | Method to save the neural network state dict to a file. If the network is of the class ScalerAndNet, ie a
scaler is applied before the data is fed through the network, then you are required to pass the path where you
want the scaler to be saved.
Parameters
----------
path_to_net_state_dict: basestring
Path where the state dict of the neural network is saved.
path_to_scaler: basestring
Path where the scaler is saved (with pickle); this is required if the neural network is of the class
ScalerAndNet, and is ignored otherwise. | abcpy/statistics.py | save_net | LoryPack/abcpy | 89 | python | def save_net(self, path_to_net_state_dict, path_to_scaler=None):
'Method to save the neural network state dict to a file. If the network is of the class ScalerAndNet, ie a\n scaler is applied before the data is fed through the network, then you are required to pass the path where you\n want the scaler to be saved.\n\n Parameters\n ----------\n path_to_net_state_dict: basestring\n Path where the state dict of the neural network is saved.\n path_to_scaler: basestring\n Path where the scaler is saved (with pickle); this is required if the neural network is of the class\n ScalerAndNet, and is ignored otherwise.\n '
if (hasattr(self.net, 'scaler') and (path_to_scaler is None)):
raise RuntimeError('You did not specify path_to_scaler, which is required as the neural network is an element of the class `ScalerAndNet`, ie a scaler is applied before the data is fed through the network')
if hasattr(self.net, 'scaler'):
save_net(path_to_net_state_dict, self.net.net)
f = open(path_to_scaler, 'wb')
cloudpickle.dump(self.net.scaler, f)
f.close()
else:
save_net(path_to_net_state_dict, self.net) | def save_net(self, path_to_net_state_dict, path_to_scaler=None):
'Method to save the neural network state dict to a file. If the network is of the class ScalerAndNet, ie a\n scaler is applied before the data is fed through the network, then you are required to pass the path where you\n want the scaler to be saved.\n\n Parameters\n ----------\n path_to_net_state_dict: basestring\n Path where the state dict of the neural network is saved.\n path_to_scaler: basestring\n Path where the scaler is saved (with pickle); this is required if the neural network is of the class\n ScalerAndNet, and is ignored otherwise.\n '
if (hasattr(self.net, 'scaler') and (path_to_scaler is None)):
raise RuntimeError('You did not specify path_to_scaler, which is required as the neural network is an element of the class `ScalerAndNet`, ie a scaler is applied before the data is fed through the network')
if hasattr(self.net, 'scaler'):
save_net(path_to_net_state_dict, self.net.net)
f = open(path_to_scaler, 'wb')
cloudpickle.dump(self.net.scaler, f)
f.close()
else:
save_net(path_to_net_state_dict, self.net)<|docstring|>Method to save the neural network state dict to a file. If the network is of the class ScalerAndNet, ie a
scaler is applied before the data is fed through the network, then you are required to pass the path where you
want the scaler to be saved.
Parameters
----------
path_to_net_state_dict: basestring
Path where the state dict of the neural network is saved.
path_to_scaler: basestring
Path where the scaler is saved (with pickle); this is required if the neural network is of the class
ScalerAndNet, and is ignored otherwise.<|endoftext|> |
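A short sketch of the save path described above, assuming statistic is a NeuralEmbedding instance and the file names are placeholders; path_to_scaler is only required when the wrapped network is a ScalerAndNet.

# If statistic.net is a ScalerAndNet, both files are written; otherwise the
# scaler path is ignored and only the state dict is saved.
statistic.save_net("net_state_dict.pth", path_to_scaler="scaler.pkl")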
a40c193648eb3126ad3c7fc09b7e6157175739eb564490e9f6deeae281f8196e | def statistics(self, data):
'\n Parameters\n ----------\n data: python list\n Contains n data sets with length p.\n\n Returns\n -------\n numpy.ndarray\n the statistics computed by applying the neural network.\n '
data = self._preprocess(data)
data = torch.from_numpy(data.astype('float32'))
if next(self.net.parameters()).is_cuda:
data = data.cuda()
try:
data = self.net(data).cpu().detach().numpy()
except (IndexError, RuntimeError, ValueError) as e:
raise RuntimeError('There was an error in passing the data through the network, likely due to the data not being of the right size.')
data = np.array(data)
result = self._polynomial_expansion(data)
result = self._rescale(result)
return result | Parameters
----------
data: python list
Contains n data sets with length p.
Returns
-------
numpy.ndarray
the statistics computed by applying the neural network. | abcpy/statistics.py | statistics | LoryPack/abcpy | 89 | python | def statistics(self, data):
'\n Parameters\n ----------\n data: python list\n Contains n data sets with length p.\n\n Returns\n -------\n numpy.ndarray\n the statistics computed by applying the neural network.\n '
data = self._preprocess(data)
data = torch.from_numpy(data.astype('float32'))
if next(self.net.parameters()).is_cuda:
data = data.cuda()
try:
data = self.net(data).cpu().detach().numpy()
except (IndexError, RuntimeError, ValueError) as e:
raise RuntimeError('There was an error in passing the data through the network, likely due to the data not being of the right size.')
data = np.array(data)
result = self._polynomial_expansion(data)
result = self._rescale(result)
return result | def statistics(self, data):
'\n Parameters\n ----------\n data: python list\n Contains n data sets with length p.\n\n Returns\n -------\n numpy.ndarray\n the statistics computed by applying the neural network.\n '
data = self._preprocess(data)
data = torch.from_numpy(data.astype('float32'))
if next(self.net.parameters()).is_cuda:
data = data.cuda()
try:
data = self.net(data).cpu().detach().numpy()
except (IndexError, RuntimeError, ValueError) as e:
raise RuntimeError('There was an error in passing the data through the network, likely due to the data not being of the right size.')
data = np.array(data)
result = self._polynomial_expansion(data)
result = self._rescale(result)
return result<|docstring|>Parameters
----------
data: python list
Contains n data sets with length p.
Returns
-------
numpy.ndarray
the statistics computed by applying the neural network.<|endoftext|> |
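A usage sketch of the statistics method above, assuming statistic is a NeuralEmbedding whose network expects datapoints of length 10 (a made-up size) and that each dataset can be passed as a 1-D array.

import numpy as np

data = [np.random.randn(10) for _ in range(5)]   # 5 toy datasets of length p = 10
summaries = statistic.statistics(data)           # ndarray with one row of learned statistics per dataset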
832b8985af6bf113f14cd68b2851b09b2a19d0a69911598351e867c2e727fe1a | @abstractmethod
def __call__(self, population, target_population_size):
'Performs selection on individuals.\n\n Parameters\n ----------\n population : list of chromosomes\n The population on which to perform selection\n target_population_size : int\n Target size of the population after selection\n\n Returns\n -------\n list of chromosomes :\n A subset of the input population\n '
raise NotImplementedError | Performs selection on individuals.
Parameters
----------
population : list of chromosomes
The population on which to perform selection
target_population_size : int
Target size of the population after selection
Returns
-------
list of chromosomes :
A subset of the input population | bingo/selection/selection.py | __call__ | nolanstr/bingo_nolan_fork | 37 | python | @abstractmethod
def __call__(self, population, target_population_size):
'Performs selection on individuals.\n\n Parameters\n ----------\n population : list of chromosomes\n The population on which to perform selection\n target_population_size : int\n Target size of the population after selection\n\n Returns\n -------\n list of chromosomes :\n A subset of the input population\n '
raise NotImplementedError | @abstractmethod
def __call__(self, population, target_population_size):
'Performs selection on individuals.\n\n Parameters\n ----------\n population : list of chromosomes\n The population on which to perform selection\n target_population_size : int\n Target size of the population after selection\n\n Returns\n -------\n list of chromosomes :\n A subset of the input population\n '
raise NotImplementedError<|docstring|>Performs selection on individuals.
Parameters
----------
population : list of chromosomes
The population on which to perform selection
target_population_size : int
Target size of the population after selection
Returns
-------
list of chromosomes :
A subset of the input population<|endoftext|> |
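A minimal concrete sketch of the abstract __call__ above; truncation-style selection is used purely for illustration, and it assumes the base class is named Selection and that each chromosome exposes a fitness attribute (neither is stated in the record).

class TruncationSelection(Selection):
    """Keep the target number of individuals with the best (lowest) fitness."""

    def __call__(self, population, target_population_size):
        ranked = sorted(population, key=lambda chromosome: chromosome.fitness)
        return ranked[:target_population_size]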
672b8a9f52efa179a75a27fbe695f8e20e256fbf34e5a740abb1e708aef601a7 | def get_time_created(self, instance):
'get time the article was created and return in iso format'
return instance.time_created.isoformat() | get time the article was created and return in iso format | authors/apps/articles/serializers.py | get_time_created | andela/ah-codeofduty | 0 | python | def get_time_created(self, instance):
return instance.time_created.isoformat() | def get_time_created(self, instance):
return instance.time_created.isoformat()<|docstring|>get time the article was created and return in iso format<|endoftext|> |
58a3c900ffbeca028cb31bd5094c2a0c0bfab378b522aae25217c44b8a3427fb | def get_time_updated(self, instance):
'get time the article was updated and return in iso format'
return instance.time_updated.isoformat() | get time the article was updated and return in iso format | authors/apps/articles/serializers.py | get_time_updated | andela/ah-codeofduty | 0 | python | def get_time_updated(self, instance):
return instance.time_updated.isoformat() | def get_time_updated(self, instance):
return instance.time_updated.isoformat()<|docstring|>get time the article was updated and return in iso format<|endoftext|> |
5c492574cad3f3e60b7a62cbf511389691d30693bfa6c3bc9ed409bf627294be | def get_time_to_read(self, text, images):
'method calculating time it takes to read'
average_image_view_time = 0
if images:
average_image_view_time = (len(images) * 0.2)
return math.ceil(((len(text.split()) / 200) + average_image_view_time)) | method calculating time it takes to read | authors/apps/articles/serializers.py | get_time_to_read | andela/ah-codeofduty | 0 | python | def get_time_to_read(self, text, images):
average_image_view_time = 0
if images:
average_image_view_time = (len(images) * 0.2)
return math.ceil(((len(text.split()) / 200) + average_image_view_time)) | def get_time_to_read(self, text, images):
average_image_view_time = 0
if images:
average_image_view_time = (len(images) * 0.2)
return math.ceil(((len(text.split()) / 200) + average_image_view_time))<|docstring|>method calculating time it takes to read<|endoftext|> |
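A worked example of the reading-time formula above (200 words per minute plus 0.2 minutes per image): a 450-word body with 3 images gives ceil(450/200 + 3*0.2) = ceil(2.85) = 3 minutes.

import math

words, images = 450, 3
time_to_read = math.ceil(words / 200 + images * 0.2)   # -> 3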
634c71c94ae623e15813dfa8fa1271bf594a4a742846cad8681721845e72ff04 | def create(self, validated_data):
'method creating articles'
email = self.context.get('email')
user = User.objects.get(email=email)
validated_data['author'] = user
images = validated_data.get('images', None)
tags = validated_data.pop('tags', [])
slug = slugify(validated_data['title'])
num = 1
while Article.objects.filter(slug=slug).exists():
slug = (slug + '{}'.format(num))
num += 1
validated_data['slug'] = slug
validated_data['time_to_read'] = self.get_time_to_read(validated_data['body'], images)
article = Article.objects.create(**validated_data)
for tag in tags:
article.tags.add(tag)
return article | method creating articles | authors/apps/articles/serializers.py | create | andela/ah-codeofduty | 0 | python | def create(self, validated_data):
email = self.context.get('email')
user = User.objects.get(email=email)
validated_data['author'] = user
images = validated_data.get('images', None)
tags = validated_data.pop('tags', [])
slug = slugify(validated_data['title'])
num = 1
while Article.objects.filter(slug=slug).exists():
slug = (slug + '{}'.format(num))
num += 1
validated_data['slug'] = slug
validated_data['time_to_read'] = self.get_time_to_read(validated_data['body'], images)
article = Article.objects.create(**validated_data)
for tag in tags:
article.tags.add(tag)
return article | def create(self, validated_data):
email = self.context.get('email')
user = User.objects.get(email=email)
validated_data['author'] = user
images = validated_data.get('images', None)
tags = validated_data.pop('tags', [])
slug = slugify(validated_data['title'])
num = 1
while Article.objects.filter(slug=slug).exists():
slug = (slug + '{}'.format(num))
num += 1
validated_data['slug'] = slug
validated_data['time_to_read'] = self.get_time_to_read(validated_data['body'], images)
article = Article.objects.create(**validated_data)
for tag in tags:
article.tags.add(tag)
return article<|docstring|>method creating articles<|endoftext|> |
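A standalone sketch of the slug de-duplication loop used in create above, with the Article.objects.filter lookup replaced by an in-memory set for illustration; note that the numeric suffix is appended cumulatively (my-title, my-title1, my-title12, ...) rather than recomputed from the base slug.

from django.utils.text import slugify

existing = {"my-title", "my-title1"}        # stand-in for slugs already in the database

slug = slugify("My Title")                  # "my-title"
num = 1
while slug in existing:
    slug = slug + "{}".format(num)          # "my-title1", then "my-title12", ...
    num += 1
# slug == "my-title12"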
0a9eccfb37123be9f8550d44bee44c6816ee93809693749de8a51caee4acbd0e | def update(self, instance, validated_data):
'method updating articles'
email = self.context.get('email')
tags = validated_data.get('tags', None)
if (email != instance.author):
raise PermissionDenied
instance.title = validated_data.get('title', instance.title)
instance.body = validated_data.get('body', instance.body)
instance.description = validated_data.get('description', instance.description)
if tags:
instance.tags.set(tags)
Tag.edit_tags()
instance.images = validated_data.get('images', instance.images)
instance.time_to_read = self.get_time_to_read(instance.body, instance.images)
instance.save()
return instance | method updating articles | authors/apps/articles/serializers.py | update | andela/ah-codeofduty | 0 | python | def update(self, instance, validated_data):
email = self.context.get('email')
tags = validated_data.get('tags', None)
if (email != instance.author):
raise PermissionDenied
instance.title = validated_data.get('title', instance.title)
instance.body = validated_data.get('body', instance.body)
instance.description = validated_data.get('description', instance.description)
if tags:
instance.tags.set(tags)
Tag.edit_tags()
instance.images = validated_data.get('images', instance.images)
instance.time_to_read = self.get_time_to_read(instance.body, instance.images)
instance.save()
return instance | def update(self, instance, validated_data):
email = self.context.get('email')
tags = validated_data.get('tags', None)
if (email != instance.author):
raise PermissionDenied
instance.title = validated_data.get('title', instance.title)
instance.body = validated_data.get('body', instance.body)
instance.description = validated_data.get('description', instance.description)
if tags:
instance.tags.set(tags)
Tag.edit_tags()
instance.images = validated_data.get('images', instance.images)
instance.time_to_read = self.get_time_to_read(instance.body, instance.images)
instance.save()
return instance<|docstring|>method updating articles<|endoftext|> |
4e0d6e13440b86741854bdb9a1310ead4b3c88e120f1e5b0a7945ca8d655facf | def count_likes(self, instance):
'Returns the total likes of an article'
request = self.context.get('request')
liked_by_me = False
if ((request is not None) and request.user.is_authenticated):
user_id = request.user.id
liked_by_me = (instance.likes.all().filter(id=user_id).count() == 1)
return {'count': instance.likes.count(), 'me': liked_by_me} | Returns the total likes of an article | authors/apps/articles/serializers.py | count_likes | andela/ah-codeofduty | 0 | python | def count_likes(self, instance):
request = self.context.get('request')
liked_by_me = False
if ((request is not None) and request.user.is_authenticated):
user_id = request.user.id
liked_by_me = (instance.likes.all().filter(id=user_id).count() == 1)
return {'count': instance.likes.count(), 'me': liked_by_me} | def count_likes(self, instance):
request = self.context.get('request')
liked_by_me = False
if ((request is not None) and request.user.is_authenticated):
user_id = request.user.id
liked_by_me = (instance.likes.all().filter(id=user_id).count() == 1)
return {'count': instance.likes.count(), 'me': liked_by_me}<|docstring|>Returns the total likes of an article<|endoftext|> |
2e70e78483fc44276ea49d019761c83c67973536c3f5a18c839dc0b92a246875 | def count_dislikes(self, instance):
'Returns the total dislikes of an article'
request = self.context.get('request')
disliked_by_me = False
if ((request is not None) and request.user.is_authenticated):
user_id = request.user.id
disliked_by_me = (instance.dislikes.all().filter(id=user_id).count() == 1)
return {'count': instance.dislikes.count(), 'me': disliked_by_me} | Returns the total dislikes of an article | authors/apps/articles/serializers.py | count_dislikes | andela/ah-codeofduty | 0 | python | def count_dislikes(self, instance):
request = self.context.get('request')
disliked_by_me = False
if ((request is not None) and request.user.is_authenticated):
user_id = request.user.id
disliked_by_me = (instance.dislikes.all().filter(id=user_id).count() == 1)
return {'count': instance.dislikes.count(), 'me': disliked_by_me} | def count_dislikes(self, instance):
request = self.context.get('request')
disliked_by_me = False
if ((request is not None) and request.user.is_authenticated):
user_id = request.user.id
disliked_by_me = (instance.dislikes.all().filter(id=user_id).count() == 1)
return {'count': instance.dislikes.count(), 'me': disliked_by_me}<|docstring|>Returns the total dislikes of an article<|endoftext|> |
337465f2a18d5b8631cd587e807466e5c844734912d376db61b2355dfb7c62ea | def update(self, instance, valid_input, **kwargs):
'\n Update and return a comment instance, given valid_input\n '
instance.body = valid_input.get('body', instance.body)
instance.save()
return instance | Update and return a comment instance, given valid_input | authors/apps/articles/serializers.py | update | andela/ah-codeofduty | 0 | python | def update(self, instance, valid_input, **kwargs):
'\n \n '
instance.body = valid_input.get('body', instance.body)
instance.save()
return instance | def update(self, instance, valid_input, **kwargs):
'\n \n '
instance.body = valid_input.get('body', instance.body)
instance.save()
return instance<|docstring|>Update and return a comment instance, given valid_input<|endoftext|> |
a7f38946c66b697dba0736ebd6b27b9f596eb9a85acd8feb133c4f598bc855fe | def create(self, valid_input):
'\n Create and return a new comment instance, given a valid_input\n '
parent = self.context.get('parent', None)
instance = Comment.objects.create(parent=parent, **valid_input)
return instance | Create and return a new comment instance, given a valid_input | authors/apps/articles/serializers.py | create | andela/ah-codeofduty | 0 | python | def create(self, valid_input):
'\n \n '
parent = self.context.get('parent', None)
instance = Comment.objects.create(parent=parent, **valid_input)
return instance | def create(self, valid_input):
'\n \n '
parent = self.context.get('parent', None)
instance = Comment.objects.create(parent=parent, **valid_input)
return instance<|docstring|>Create and return a new comment instance, given a valid_input<|endoftext|> |
64dabd31a72f85a95e202bb92b5e171ae0306ca7d0c8127e6b235badfb670cb5 | def count_likes(self, instance):
'Returns the total likes of a comment'
request = self.context.get('request')
liked_by_me = False
if ((request is not None) and request.user.is_authenticated):
user_id = request.user.id
liked_by_me = (instance.likes.all().filter(id=user_id).count() == 1)
return {'count': instance.likes.count(), 'me': liked_by_me} | Returns the total likes of a comment | authors/apps/articles/serializers.py | count_likes | andela/ah-codeofduty | 0 | python | def count_likes(self, instance):
request = self.context.get('request')
liked_by_me = False
if ((request is not None) and request.user.is_authenticated):
user_id = request.user.id
liked_by_me = (instance.likes.all().filter(id=user_id).count() == 1)
return {'count': instance.likes.count(), 'me': liked_by_me} | def count_likes(self, instance):
request = self.context.get('request')
liked_by_me = False
if ((request is not None) and request.user.is_authenticated):
user_id = request.user.id
liked_by_me = (instance.likes.all().filter(id=user_id).count() == 1)
return {'count': instance.likes.count(), 'me': liked_by_me}<|docstring|>Returns the total likes of a comment<|endoftext|> |
744e39b8e617ad3e363bb2975798093e1b1ac937af20c4e961e40ce4fd408ef1 | def create(self, validated_data):
'method creating a new highlight'
validated_data['highlighter'] = self.context.get('highlighter')
validated_data['article'] = self.context.get('article')
highlight_text = validated_data['article'].body[validated_data['index_start']:validated_data['index_stop']]
if (not highlight_text):
raise serializers.ValidationError("Text doesn't exist on this article")
validated_data['highlighted_article_piece'] = highlight_text
return Highlight.objects.create(**validated_data) | method creating a new highlight | authors/apps/articles/serializers.py | create | andela/ah-codeofduty | 0 | python | def create(self, validated_data):
validated_data['highlighter'] = self.context.get('highlighter')
validated_data['article'] = self.context.get('article')
highlight_text = validated_data['article'].body[validated_data['index_start']:validated_data['index_stop']]
if (not highlight_text):
raise serializers.ValidationError("Text doesn't exist on this article")
validated_data['highlighted_article_piece'] = highlight_text
return Highlight.objects.create(**validated_data) | def create(self, validated_data):
validated_data['highlighter'] = self.context.get('highlighter')
validated_data['article'] = self.context.get('article')
highlight_text = validated_data['article'].body[validated_data['index_start']:validated_data['index_stop']]
if (not highlight_text):
raise serializers.ValidationError("Text doesn't exist on this article")
validated_data['highlighted_article_piece'] = highlight_text
return Highlight.objects.create(**validated_data)<|docstring|>method creating a new highlight<|endoftext|> |
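A small illustration of how index_start and index_stop select highlighted_article_piece from the article body in the create method above (the body text is made up).

body = "The quick brown fox jumps over the lazy dog"
index_start, index_stop = 4, 9
highlighted_article_piece = body[index_start:index_stop]   # "quick"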
c1a784110f18e495947876422d6f5b038c0d4fa405c0a1209476b452be10880e | def update(self, instance, validated_data):
'method updating highlights'
user = self.context.get('user')
if (user != instance.highlighter):
raise PermissionDenied
index_start = validated_data.get('index_start', instance.index_start)
index_stop = validated_data.get('index_stop', instance.index_stop)
highlight_text = instance.article.body[index_start:index_stop]
if (not highlight_text):
raise serializers.ValidationError("Text doesn't exist on this article")
instance.comment = validated_data.get('comment', instance.comment)
instance.index_start = index_start
instance.index_stop = index_stop
instance.highlighted_article_piece = highlight_text
instance.save()
return instance | method updating highlights | authors/apps/articles/serializers.py | update | andela/ah-codeofduty | 0 | python | def update(self, instance, validated_data):
user = self.context.get('user')
if (user != instance.highlighter):
raise PermissionDenied
index_start = validated_data.get('index_start', instance.index_start)
index_stop = validated_data.get('index_stop', instance.index_stop)
highlight_text = instance.article.body[index_start:index_stop]
if (not highlight_text):
raise serializers.ValidationError("Text doesn't exist on this article")
instance.comment = validated_data.get('comment', instance.comment)
instance.index_start = index_start
instance.index_stop = index_stop
instance.highlighted_article_piece = highlight_text
instance.save()
return instance | def update(self, instance, validated_data):
user = self.context.get('user')
if (user != instance.highlighter):
raise PermissionDenied
index_start = validated_data.get('index_start', instance.index_start)
index_stop = validated_data.get('index_stop', instance.index_stop)
highlight_text = instance.article.body[index_start:index_stop]
if (not highlight_text):
raise serializers.ValidationError("Text doesn't exist on this article")
instance.comment = validated_data.get('comment', instance.comment)
instance.index_start = index_start
instance.index_stop = index_stop
instance.highlighted_article_piece = highlight_text
instance.save()
return instance<|docstring|>method updating highlights<|endoftext|> |
7102e9da0f8d192b3ad77eb0a8bf14e8efcbd6a8a6e6b7dff8f292f3844a8d31 | def __init__(self, triad, uid, model, device, batch_size=32) -> None:
'The client calls fit to perform training.\n\n Args:\n triad: the training triples (3-tuples)\n batch_size : batch size for local training; defaults to 32; -1 means use the whole local dataset as a single batch\n '
super().__init__(device, model)
self.triad = triad
self.uid = uid
self.loss_list = []
self.n_item = len(triad)
self.batch_size = (batch_size if (batch_size != (- 1)) else self.n_item)
self.data_loader = DataLoader(ToTorchDataset(self.triad), batch_size=self.batch_size) | The client calls fit to perform training.
Args:
triad: the training triples (3-tuples)
batch_size : batch size for local training; defaults to 32; -1 means use the whole local dataset as a single batch | models/FedNeuMF/client.py | __init__ | TD21forever/QoS-Predcition-Algorithm-library | 2 | python | def __init__(self, triad, uid, model, device, batch_size=32) -> None:
'The client calls fit to perform training.\n\n Args:\n triad: the training triples (3-tuples)\n batch_size : batch size for local training; defaults to 32; -1 means use the whole local dataset as a single batch\n '
super().__init__(device, model)
self.triad = triad
self.uid = uid
self.loss_list = []
self.n_item = len(triad)
self.batch_size = (batch_size if (batch_size != (- 1)) else self.n_item)
self.data_loader = DataLoader(ToTorchDataset(self.triad), batch_size=self.batch_size) | def __init__(self, triad, uid, model, device, batch_size=32) -> None:
'The client calls fit to perform training.\n\n Args:\n triad: the training triples (3-tuples)\n batch_size : batch size for local training; defaults to 32; -1 means use the whole local dataset as a single batch\n '
super().__init__(device, model)
self.triad = triad
self.uid = uid
self.loss_list = []
self.n_item = len(triad)
self.batch_size = (batch_size if (batch_size != (- 1)) else self.n_item)
self.data_loader = DataLoader(ToTorchDataset(self.triad), batch_size=self.batch_size)<|docstring|>The client calls fit to perform training.
Args:
triad: the training triples (3-tuples)
batch_size : batch size for local training; defaults to 32; -1 means use the whole local dataset as a single batch<|endoftext|>
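A small sketch of the batch-size rule in the client __init__ above: -1 falls back to full-batch loading over the local triples (the triad values are made up).

triad = [(0, 1, 0.42), (0, 2, 1.30), (0, 3, 0.97)]          # hypothetical (user, item, value) triples
batch_size = -1
n_item = len(triad)
effective_bs = batch_size if batch_size != -1 else n_item   # -> 3, i.e. one full local batch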
67fa8690549df7f13b3484529ef9cfd0a3258d517904dde2ffe1a3c2e21aca53 | def test_subclass_of_Request(self):
'\n Test that HEADREQUEST is a subclass of urllib2.Request.\n '
assert issubclass(util.HEADREQUEST, Request) | Test that HEADREQUEST is a subclass of urllib2.Request. | tests/util/test_requests.py | test_subclass_of_Request | unt-libraries/codalib | 0 | python | def test_subclass_of_Request(self):
'\n \n '
assert issubclass(util.HEADREQUEST, Request) | def test_subclass_of_Request(self):
'\n \n '
assert issubclass(util.HEADREQUEST, Request)<|docstring|>Test that HEADREQUEST is a subclass of urllib2.Request.<|endoftext|> |
6024bb9644f0c58a51b9bf1e7b784acc62c0d61e0b67f08d12a91779b2727230 | def test_get_method(self):
'\n Verify the HTTP method is HEAD.\n '
request = util.HEADREQUEST('http://example.com')
assert (request.get_method() == 'HEAD') | Verify the HTTP method is HEAD. | tests/util/test_requests.py | test_get_method | unt-libraries/codalib | 0 | python | def test_get_method(self):
'\n \n '
request = util.HEADREQUEST('http://example.com')
assert (request.get_method() == 'HEAD') | def test_get_method(self):
'\n \n '
request = util.HEADREQUEST('http://example.com')
assert (request.get_method() == 'HEAD')<|docstring|>Verify the HTTP method is HEAD.<|endoftext|> |
a11f2e5b8192e19dbda4ef8d7bf5956e8f5f190169be05a6f20aaf7e45823951 | def test_subclass_of_Request(self):
'\n Test that PUTREQUEST is a subclass of urllib2.Request.\n '
assert issubclass(util.PUTREQUEST, Request) | Test that PUTREQUEST is a subclass of urllib2.Request. | tests/util/test_requests.py | test_subclass_of_Request | unt-libraries/codalib | 0 | python | def test_subclass_of_Request(self):
'\n \n '
assert issubclass(util.PUTREQUEST, Request) | def test_subclass_of_Request(self):
'\n \n '
assert issubclass(util.PUTREQUEST, Request)<|docstring|>Test that PUTREQUEST is a subclass of urllib2.Request.<|endoftext|> |
4ad1e6cff8b49857425f0f0aa2228694e7f32e1c5c6aea7e3e12593a53f149ce | def test_get_method(self):
'\n Verify the HTTP method is PUT.\n '
request = util.PUTREQUEST('http://example.com')
assert (request.get_method() == 'PUT') | Verify the HTTP method is PUT. | tests/util/test_requests.py | test_get_method | unt-libraries/codalib | 0 | python | def test_get_method(self):
'\n \n '
request = util.PUTREQUEST('http://example.com')
assert (request.get_method() == 'PUT') | def test_get_method(self):
'\n \n '
request = util.PUTREQUEST('http://example.com')
assert (request.get_method() == 'PUT')<|docstring|>Verify the HTTP method is PUT.<|endoftext|> |
575b52e73eddae4c5b8277316436a66e972b3ae72e22eac856af22eeac000bcb | def test_subclass_of_Request(self):
'\n Test that DELETEREQUEST is a subclass of urllib2.Request.\n '
assert issubclass(util.DELETEREQUEST, Request) | Test that DELETEREQUEST is a subclass of urllib2.Request. | tests/util/test_requests.py | test_subclass_of_Request | unt-libraries/codalib | 0 | python | def test_subclass_of_Request(self):
'\n \n '
assert issubclass(util.DELETEREQUEST, Request) | def test_subclass_of_Request(self):
'\n \n '
assert issubclass(util.DELETEREQUEST, Request)<|docstring|>Test that DELETEREQUEST is a subclass of urllib2.Request.<|endoftext|> |
4b0f3582f0137e5d32e5523bbfe19783c9cca078dab60ce074245d875cd65abd | def test_get_method(self):
'\n Verify the HTTP method is DELETE.\n '
request = util.DELETEREQUEST('http://example.com')
assert (request.get_method() == 'DELETE') | Verify the HTTP method is DELETE. | tests/util/test_requests.py | test_get_method | unt-libraries/codalib | 0 | python | def test_get_method(self):
'\n \n '
request = util.DELETEREQUEST('http://example.com')
assert (request.get_method() == 'DELETE') | def test_get_method(self):
'\n \n '
request = util.DELETEREQUEST('http://example.com')
assert (request.get_method() == 'DELETE')<|docstring|>Verify the HTTP method is DELETE.<|endoftext|> |
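The three tests above only pin down the behaviour of codalib's request classes; a minimal sketch of how such a Request subclass is typically written (an assumption, not codalib's actual source) is:

from urllib.request import Request   # urllib2.Request on Python 2


class HEADREQUEST(Request):
    """Request subclass that issues HTTP HEAD instead of GET."""

    def get_method(self):
        return "HEAD"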
6e0719fbe59931d3065d54ee5ec328d5cde624e8f1010794f23229e9338337e8 | def on_cancel(self) -> None:
'\n Helpful when TaskManager.shutdown is called.\n E.g. your task is working with a file; in this case you have time to save and close it.\n '
pass | Helpful when TaskManager.shutdown is called.
E.g. your task is working with a file; in this case you have time to save and close it. | ben_ten_adventure/schedulers.py | on_cancel | Ben-10-Secret-of-the-Omnitrix-Game/Ben-10-Adventure | 1 | python | def on_cancel(self) -> None:
'\n Helpful when TaskManager.shutdown is called.\n E.g. your task is working with a file; in this case you have time to save and close it.\n '
pass | def on_cancel(self) -> None:
'\n Helpful when TaskManager.shutdown is called.\n E.g. your task is working with a file; in this case you have time to save and close it.\n '
pass<|docstring|>Helpful when TaskManager.shutdown is called.
E.g. your task is working with a file; in this case you have time to save and close it.<|endoftext|>
7ee3f401733668147e6b3122f9a9f13719edbebe6358300fb581eae94b3095d2 | def __init__(self, client):
'\n :param client: HorizonClient\n '
self.client = client | :param client: HorizonClient | src/mf_horizon_client/client/datasets/data_interface.py | __init__ | MF-HORIZON/mf-horizon-python-client | 0 | python | def __init__(self, client):
'\n \n '
self.client = client | def __init__(self, client):
'\n \n '
self.client = client<|docstring|>:param client: HorizonClient<|endoftext|> |
3b8a14946e38f0b81f1434ca8c013903ef599b309a949bc1cce5600d2a5fcaa6 | def upload_data(self, data: pd.DataFrame, name: str, forward_fill_missing_values: bool=False, replace_missing_values: bool=False, align_to_column: str='') -> IndividualDataset:
'\n Uploads the given data set to the Horizon API.\n\n :param align_to_column: Aligns data to column if the data is misaligned. This should be selected as the target\n if data is misaligned or has missing values. Selecting this will also cause missing data in the specified\n column to be dropped.\n :param data: DataFrame to be uploaded\n :param name: Name of the data set to be uploaded\n :param forward_fill_missing_values: Forward-fill missing values\n :param replace_missing_values: Replace missing values\n :return: A summary of the uploaded data set.\n '
str_buffer = io.StringIO(data.to_csv(encoding='utf-8', index=False))
str_buffer.seek(0)
str_buffer.name = name
if (forward_fill_missing_values and (not align_to_column)):
print_warning('Forward-fill select without alignment to column. Please be aware that if you choose a target column that has been forward-filled this will yield scientifically inaccurate results')
options = {'alignTo': align_to_column, 'missingDataStrategy': {'ffill': {'enabled': forward_fill_missing_values}, 'replaceMissing': {'enabled': replace_missing_values, 'replaceWith': 1}}}
request_data = dict(file=str_buffer, follow_redirects=True)
data = dict(options=json.dumps(options))
response = self.client.post(endpoint=Endpoints.UPLOAD_DATA, body=data, files=request_data, on_success_message=f"Data set '{name}' uploaded. Analyzing...")
ingestion_process = IngestionProcess(**convert_dict_from_camel_to_snake(response))
while (ingestion_process.status not in ['completed', 'error']):
sleep(0.5)
response = self.client.get(endpoint=Endpoints.SINGLE_INGESTION_PROCESS(ingestion_process.id_))
ingestion_process = IngestionProcess(**convert_dict_from_camel_to_snake(response))
if (ingestion_process.status == 'error'):
raise ValueError(f'''Error analyzing data
{ingestion_process.error}''')
return self.get_dataset(ingestion_process.dataset_id) | Uploads the given data set to the Horizon API.
:param align_to_column: Aligns data to column if the data is misaligned. This should be selected as the target
if data is misaligned or has missing values. Selecting this will also cause missing data in the specified
column to be dropped.
:param data: DataFrame to be uploaded
:param name: Name of the data set to be uploaded
:param forward_fill_missing_values: Forward-fill missing values
:param replace_missing_values: Replace missing values
:return: A summary of the uploaded data set. | src/mf_horizon_client/client/datasets/data_interface.py | upload_data | MF-HORIZON/mf-horizon-python-client | 0 | python | def upload_data(self, data: pd.DataFrame, name: str, forward_fill_missing_values: bool=False, replace_missing_values: bool=False, align_to_column: str=) -> IndividualDataset:
'\n Uploads the given data set to the Horizon API.\n\n :param align_to_column: Aligns data to column if the data is misaligned. This should be selected as the target\n if data is misaligned or has missing values. Selecting this will also cause missing data in the specified\n column to be dropped.\n :param data: DataFrame to be uploaded\n :param name: Name of the data set to be uploaded\n :param forward_fill_missing_values: Forward-fill missing values\n :param replace_missing_values: Replace missing values\n :return: A summary of the uploaded data set.\n '
str_buffer = io.StringIO(data.to_csv(encoding='utf-8', index=False))
str_buffer.seek(0)
str_buffer.name = name
if (forward_fill_missing_values and (not align_to_column)):
print_warning('Forward-fill select without alignment to column. Please be aware that if you choose a target column that has been forward-filled this will yield scientifically inaccurate results')
options = {'alignTo': align_to_column, 'missingDataStrategy': {'ffill': {'enabled': forward_fill_missing_values}, 'replaceMissing': {'enabled': replace_missing_values, 'replaceWith': 1}}}
request_data = dict(file=str_buffer, follow_redirects=True)
data = dict(options=json.dumps(options))
response = self.client.post(endpoint=Endpoints.UPLOAD_DATA, body=data, files=request_data, on_success_message=f"Data set '{name}' uploaded. Analyzing...")
ingestion_process = IngestionProcess(**convert_dict_from_camel_to_snake(response))
while (ingestion_process.status not in ['completed', 'error']):
sleep(0.5)
response = self.client.get(endpoint=Endpoints.SINGLE_INGESTION_PROCESS(ingestion_process.id_))
ingestion_process = IngestionProcess(**convert_dict_from_camel_to_snake(response))
if (ingestion_process.status == 'error'):
raise ValueError(f'Error analyzing data
{ingestion_process.error}')
return self.get_dataset(ingestion_process.dataset_id) | def upload_data(self, data: pd.DataFrame, name: str, forward_fill_missing_values: bool=False, replace_missing_values: bool=False, align_to_column: str=) -> IndividualDataset:
'\n Uploads the given data set to the Horizon API.\n\n :param align_to_column: Aligns data to column if the data is misaligned. This should be selected as the target\n if data is misaligned or has missing values. Selecting this will also cause missing data in the specified\n column to be dropped.\n :param data: DataFrame to be uploaded\n :param name: Name of the data set to be uploaded\n :param forward_fill_missing_values: Forward-fill missing values\n :param replace_missing_values: Replace missing values\n :return: A summary of the uploaded data set.\n '
str_buffer = io.StringIO(data.to_csv(encoding='utf-8', index=False))
str_buffer.seek(0)
str_buffer.name = name
if (forward_fill_missing_values and (not align_to_column)):
print_warning('Forward-fill select without alignment to column. Please be aware that if you choose a target column that has been forward-filled this will yield scientifically inaccurate results')
options = {'alignTo': align_to_column, 'missingDataStrategy': {'ffill': {'enabled': forward_fill_missing_values}, 'replaceMissing': {'enabled': replace_missing_values, 'replaceWith': 1}}}
request_data = dict(file=str_buffer, follow_redirects=True)
data = dict(options=json.dumps(options))
response = self.client.post(endpoint=Endpoints.UPLOAD_DATA, body=data, files=request_data, on_success_message=f"Data set '{name}' uploaded. Analyzing...")
ingestion_process = IngestionProcess(**convert_dict_from_camel_to_snake(response))
while (ingestion_process.status not in ['completed', 'error']):
sleep(0.5)
response = self.client.get(endpoint=Endpoints.SINGLE_INGESTION_PROCESS(ingestion_process.id_))
ingestion_process = IngestionProcess(**convert_dict_from_camel_to_snake(response))
if (ingestion_process.status == 'error'):
raise ValueError(f'Error analyzing data
{ingestion_process.error}')
return self.get_dataset(ingestion_process.dataset_id)<|docstring|>Uploads the given data set to the Horizon API.
:param align_to_column: Aligns data to column if the data is misaligned. This should be selected as the target
if data is misaligned or has missing values. Selecting this will also cause missing data in the specified
column to be dropped.
:param data: DataFrame to be uploaded
:param name: Name of the data set to be uploaded
:param forward_fill_missing_values: Forward-fill missing values
:param replace_missing_values: Replace missing values
:return: A summary of the uploaded data set.<|endoftext|> |
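A usage sketch for upload_data above; it assumes DataInterface and an authenticated HorizonClient are available from the mf-horizon-python-client package, and the column names and values are placeholders.

import pandas as pd

df = pd.DataFrame({
    "timestamp": pd.date_range("2021-01-01", periods=5, freq="D"),
    "target": [1.0, 2.0, 3.0, 4.0, 5.0],
})

interface = DataInterface(client)          # client: an authenticated HorizonClient (construction not shown here)
dataset = interface.upload_data(
    df,
    name="prices",
    forward_fill_missing_values=True,
    align_to_column="target",              # also drops rows where the target is missing
)
print(dataset.summary.id_)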
8daba2e437c6c6620d9b0e09e549171353a6ca523d2b93d3846e40a583117c7f | @catch_errors
def list_datasets(self) -> List[DatasetSummary]:
'\n requests a list of datasets (DatasetSchema) that have been uploaded into horizon. The data itself is not returned - just\n the metadata.\n\n '
datasets = self.client.get(Endpoints.ALL_DATASETS)
return [DatasetSummary(**convert_dict_from_camel_to_snake(dataset)) for dataset in datasets] | requests a list of datasets (DatasetSchema) that have been uploaded into horizon. The data itself is not returned - just
the metadata. | src/mf_horizon_client/client/datasets/data_interface.py | list_datasets | MF-HORIZON/mf-horizon-python-client | 0 | python | @catch_errors
def list_datasets(self) -> List[DatasetSummary]:
'\n requests a list of datasets (DatasetSchema) that have been uploaded into horizon. The data itself is not returned - just\n the metadata.\n\n '
datasets = self.client.get(Endpoints.ALL_DATASETS)
return [DatasetSummary(**convert_dict_from_camel_to_snake(dataset)) for dataset in datasets] | @catch_errors
def list_datasets(self) -> List[DatasetSummary]:
'\n requests a list of datasets (DatasetSchema) that have been uploaded into horizon. The data itself is not returned - just\n the metadata.\n\n '
datasets = self.client.get(Endpoints.ALL_DATASETS)
return [DatasetSummary(**convert_dict_from_camel_to_snake(dataset)) for dataset in datasets]<|docstring|>requests a list of datasets (DatasetSchema) that have been uploaded into horizon. The data itself is not returned - just
the metadata.<|endoftext|> |
51b1ebe372067050ee77b0d90f287861dc6dfc24dff250d2e122de590d29ef55 | @catch_errors
def delete_datasets(self, identifiers: List[int]=None):
'\n Deletes data sets as identified by their identifiers.\n These may be retrieved by calling DataInterface.list_datasets.\n\n :param identifiers: list of numeric identifiers\n :return:\n '
pbar = tqdm(identifiers)
for identifier in pbar:
pbar.set_description(f'Deleting Data Set with ID: {identifier}')
self.client.delete(Endpoints.SINGLE_DATASET(identifier)) | Deletes data sets as identified by their identifiers.
These may be retrieved by calling DataInterface.list_datasets.
:param identifiers: list of numeric identifiers
:return: | src/mf_horizon_client/client/datasets/data_interface.py | delete_datasets | MF-HORIZON/mf-horizon-python-client | 0 | python | @catch_errors
def delete_datasets(self, identifiers: List[int]=None):
'\n Deletes data sets as identified by their identifiers.\n These may be retrieved by calling DataInterface.list_datasets.\n\n :param identifiers: list of numeric identifiers\n :return:\n '
pbar = tqdm(identifiers)
for identifier in pbar:
pbar.set_description(f'Deleting Data Set with ID: {identifier}')
self.client.delete(Endpoints.SINGLE_DATASET(identifier)) | @catch_errors
def delete_datasets(self, identifiers: List[int]=None):
'\n Deletes data sets as identified by their identifiers.\n These may be retrieved by calling DataInterface.list_datasets.\n\n :param identifiers: list of numeric identifiers\n :return:\n '
pbar = tqdm(identifiers)
for identifier in pbar:
pbar.set_description(f'Deleting Data Set with ID: {identifier}')
self.client.delete(Endpoints.SINGLE_DATASET(identifier))<|docstring|>Deletes data sets as identified by their identifiers.
These may be retrieved by calling DataInterface.list_datasets.
:param identifiers: list of numeric identifiers
:return:<|endoftext|> |
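A sketch combining list_datasets and delete_datasets above, assuming interface is a DataInterface as in the previous sketch; it removes every uploaded data set, which mirrors what delete_all_datasets does.

datasets = interface.list_datasets()
interface.delete_datasets([d.id_ for d in datasets])   # removes each data set (and its associated pipelines)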
6487de2696fcb45c721e8a115fda812cf075a92de3ae1eeac6d86031a1ea4267 | @catch_errors
def delete_all_datasets(self):
'\n Deletes all data sets previously uploaded by the authorised user.\n\n WARNING: All associated pipelines will also be deleted.\n WARNING: Calling this endpoint is effectively the same as resetting Horizon for a user.\n\n :return:\n '
datasets = self.list_datasets()
dataset_ids = [dataset.id_ for dataset in datasets]
self.delete_datasets(dataset_ids)
print_success('All data successfully deleted from Horizon!') | Deletes all data sets previously uploaded by the authorised user.
WARNING: All associated pipelines will also be deleted.
WARNING: Calling this endpoint is effectively the same as resetting Horizon for a user.
:return: | src/mf_horizon_client/client/datasets/data_interface.py | delete_all_datasets | MF-HORIZON/mf-horizon-python-client | 0 | python | @catch_errors
def delete_all_datasets(self):
'\n Deletes all data sets previously uploaded by the authorised user.\n\n WARNING: All associated pipelines will also be deleted.\n WARNING: Calling this endpoint is effectively the same as resetting Horizon for a user.\n\n :return:\n '
datasets = self.list_datasets()
dataset_ids = [dataset.id_ for dataset in datasets]
self.delete_datasets(dataset_ids)
print_success('All data successfully deleted from Horizon!') | @catch_errors
def delete_all_datasets(self):
'\n Deletes all data sets previously uploaded by the authorised user.\n\n WARNING: All associated pipelines will also be deleted.\n WARNING: Calling this endpoint is effectively the same as resetting Horizon for a user.\n\n :return:\n '
datasets = self.list_datasets()
dataset_ids = [dataset.id_ for dataset in datasets]
self.delete_datasets(dataset_ids)
print_success('All data successfully deleted from Horizon!')<|docstring|>Deletes all data sets previously uploaded by the authorised user.
WARNING: All associated pipelines will also be deleted.
WARNING: Calling this endpoint is effectively the same as resetting Horizon for a user.
:return:<|endoftext|> |
92d920687e8c6d4799ba37098356b56c87e3fbfe9632df957a3f4226585f5109 | @catch_errors
def rename_dataset(self, identifier: int, name: str):
'\n Renames an already existing dataset\n :param identifier: id of a dataset\n :param name: The new name for the dataset\n :return:\n '
assert (len(name) < 100), 'Name too long. Please keep to under 100 chars.'
self.client.put(Endpoints.RENAME_DATASET(identifier), body={'newName': name}) | Renames an already existing dataset
:param identifier: id of a dataset
:param name: The new name for the dataset
:return: | src/mf_horizon_client/client/datasets/data_interface.py | rename_dataset | MF-HORIZON/mf-horizon-python-client | 0 | python | @catch_errors
def rename_dataset(self, identifier: int, name: str):
'\n Renames an already existing dataset\n :param identifier: id of a dataset\n :param name: The new name for the dataset\n :return:\n '
assert (len(name) < 100), 'Name too long. Please keep to under 100 chars.'
self.client.put(Endpoints.RENAME_DATASET(identifier), body={'newName': name}) | @catch_errors
def rename_dataset(self, identifier: int, name: str):
'\n Renames an already existing dataset\n :param identifier: id of a dataset\n :param name: The new name for the dataset\n :return:\n '
assert (len(name) < 100), 'Name too long. Please keep to under 100 chars.'
self.client.put(Endpoints.RENAME_DATASET(identifier), body={'newName': name})<|docstring|>Renames an already existing dataset
:param identifier: id of a dataset
:param name: The new name for the dataset
:return:<|endoftext|> |
effe1fb8ceafafddd5abb7be80f6f53321948f8051390055c8c7691873c19d49 | @catch_errors
def get_dataset(self, identifier: int) -> IndividualDataset:
"\n Gets a single data set's meta data.\n\n :param identifier: dataset id as returned from upload_dataset or list_all_datasets.\n :return: Individual data set sans data\n "
response = self.client.get(Endpoints.SINGLE_DATASET(identifier))
individual_dataset_dictionary = response
column_data = [ColumnPassport(**convert_dict_from_camel_to_snake(col)) for col in individual_dataset_dictionary['analysis']]
dataset = IndividualDataset(analysis=column_data, summary=DatasetSummary(**convert_dict_from_camel_to_snake(individual_dataset_dictionary['summary'])))
dataset.summary.columns = [RawColumn(name=col.name, id_=col.id_, is_text=col.is_text, is_binary=col.is_binary) for col in column_data]
return dataset | Gets a single data set's meta data.
:param identifier: dataset id as returned from upload_dataset or list_all_datasets.
:return: Individual data set sans data | src/mf_horizon_client/client/datasets/data_interface.py | get_dataset | MF-HORIZON/mf-horizon-python-client | 0 | python | @catch_errors
def get_dataset(self, identifier: int) -> IndividualDataset:
"\n Gets a single data set's meta data.\n\n :param identifier: dataset id as returned from upload_dataset or list_all_datasets.\n :return: Individual data set sans data\n "
response = self.client.get(Endpoints.SINGLE_DATASET(identifier))
individual_dataset_dictionary = response
column_data = [ColumnPassport(**convert_dict_from_camel_to_snake(col)) for col in individual_dataset_dictionary['analysis']]
dataset = IndividualDataset(analysis=column_data, summary=DatasetSummary(**convert_dict_from_camel_to_snake(individual_dataset_dictionary['summary'])))
dataset.summary.columns = [RawColumn(name=col.name, id_=col.id_, is_text=col.is_text, is_binary=col.is_binary) for col in column_data]
return dataset | @catch_errors
def get_dataset(self, identifier: int) -> IndividualDataset:
"\n Gets a single data set's meta data.\n\n :param identifier: dataset id as returned from upload_dataset or list_all_datasets.\n :return: Individual data set sans data\n "
response = self.client.get(Endpoints.SINGLE_DATASET(identifier))
individual_dataset_dictionary = response
column_data = [ColumnPassport(**convert_dict_from_camel_to_snake(col)) for col in individual_dataset_dictionary['analysis']]
dataset = IndividualDataset(analysis=column_data, summary=DatasetSummary(**convert_dict_from_camel_to_snake(individual_dataset_dictionary['summary'])))
dataset.summary.columns = [RawColumn(name=col.name, id_=col.id_, is_text=col.is_text, is_binary=col.is_binary) for col in column_data]
return dataset<|docstring|>Gets a single data set's meta data.
:param identifier: dataset id as returned from upload_dataset or list_all_datasets.
:return: Individual data set sans data<|endoftext|> |
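A minimal usage sketch tying together the DataInterface methods in the records above (list_datasets, get_dataset, delete_datasets). The data_interface object is assumed to be an already-authenticated instance of the class these methods belong to; its construction is not shown in these records, so treat it as a placeholder.

datasets = data_interface.list_datasets()              # summaries of every uploaded data set
dataset_ids = [dataset.id_ for dataset in datasets]    # same access pattern used by delete_all_datasets above

first = data_interface.get_dataset(dataset_ids[0])     # full metadata for a single data set
print([col.name for col in first.analysis])            # ColumnPassport entries expose .name

data_interface.delete_datasets(dataset_ids[:1])        # takes a list of numeric identifiers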
e93876516d4b50f05af98a8437a57a557bb56963a94037de89cc377b26c59839 | @catch_errors
def get_series_data_sampled(self, dataset_identifier: int, series_identifier: int):
'\n Retrieves sampled data of a particular series in a data set. Suitable for plotting.\n\n In the case of intra-day data this data is aggregated into a daily plot.\n\n :param dataset_identifier: Unique identifier of a dataset.\n :param series_identifier: Unique identifier of a column\n :return:\n '
response = self.client.get(Endpoints.SINGLE_SERIES(dataset_identifier, series_identifier))
return convert_dict_from_camel_to_snake(response) | Retrieves sampled data of a particular series in a data set. Suitable for plotting.
In the case of intra-day data this data is aggregated into a daily plot.
:param dataset_identifier: Unique identifier of a dataset.
:param series_identifier: Unique identifier of a column
:return: | src/mf_horizon_client/client/datasets/data_interface.py | get_series_data_sampled | MF-HORIZON/mf-horizon-python-client | 0 | python | @catch_errors
def get_series_data_sampled(self, dataset_identifier: int, series_identifier: int):
'\n Retrieves sampled data of a particular series in a data set. Suitable for plotting.\n\n In the case of intra-day data this data is aggregated into a daily plot.\n\n :param dataset_identifier: Unique identifier of a dataset.\n :param series_identifier: Unique identifier of a column\n :return:\n '
response = self.client.get(Endpoints.SINGLE_SERIES(dataset_identifier, series_identifier))
return convert_dict_from_camel_to_snake(response) | @catch_errors
def get_series_data_sampled(self, dataset_identifier: int, series_identifier: int):
'\n Retrieves sampled data of a particular series in a data set. Suitable for plotting.\n\n In the case of intra-day data this data is aggregated into a daily plot.\n\n :param dataset_identifier: Unique identifier of a dataset.\n :param series_identifier: Unique identifier of a column\n :return:\n '
response = self.client.get(Endpoints.SINGLE_SERIES(dataset_identifier, series_identifier))
return convert_dict_from_camel_to_snake(response)<|docstring|>Retrieves sampled data of a particular series in a data set. Suitable for plotting.
In the case of intra-day data this data is aggregated into a daily plot.
:param dataset_identifier: Unique identifier of a dataset.
:param series_identifier: Unique identifier of a column
:return:<|endoftext|> |
4dc0e6ddfc8167bdf34ccb9d70bfddf84075b615d17c0f4e20f228ae7203df81 | @catch_errors
def get_correlations(self, dataset_identifier: int, series_identifier: int):
'\n Calculates the pearson correlation of a single series with every other series in a dataset\n\n :param dataset_identifier: Unique identifier of a dataset.\n :param series_identifier: Unique identifier of a column\n :return:\n '
dataset_summary = self.get_dataset(dataset_identifier)
names = [col.name for col in dataset_summary.analysis if (col.id_ == series_identifier)]
if (len(names) == 0):
raise ValueError('Invalid series identifier specified')
series_name = names[0]
correlation_data = self.client.get(Endpoints.SINGLE_SERIES_CORRELATIONS_WITH_OTHER_SERIES(dataset_identifier, series_identifier))
correlations = pd.DataFrame.from_dict(correlation_data['data'])
correlations.columns = ['Series', 'Pearson Correlation']
correlations.name = series_name
return correlations | Calculates the pearson correlation of a single series with every other series in a dataset
:param dataset_identifier: Unique identifier of a dataset.
:param series_identifier: Unique identifier of a column
:return: | src/mf_horizon_client/client/datasets/data_interface.py | get_correlations | MF-HORIZON/mf-horizon-python-client | 0 | python | @catch_errors
def get_correlations(self, dataset_identifier: int, series_identifier: int):
'\n Calculates the pearson correlation of a single series with every other series in a dataset\n\n :param dataset_identifier: Unique identifier of a dataset.\n :param series_identifier: Unique identifier of a column\n :return:\n '
dataset_summary = self.get_dataset(dataset_identifier)
names = [col.name for col in dataset_summary.analysis if (col.id_ == series_identifier)]
if (len(names) == 0):
raise ValueError('Invalid series identifier specified')
series_name = names[0]
correlation_data = self.client.get(Endpoints.SINGLE_SERIES_CORRELATIONS_WITH_OTHER_SERIES(dataset_identifier, series_identifier))
correlations = pd.DataFrame.from_dict(correlation_data['data'])
correlations.columns = ['Series', 'Pearson Correlation']
correlations.name = series_name
return correlations | @catch_errors
def get_correlations(self, dataset_identifier: int, series_identifier: int):
'\n Calculates the pearson correlation of a single series with every other series in a dataset\n\n :param dataset_identifier: Unique identifier of a dataset.\n :param series_identifier: Unique identifier of a column\n :return:\n '
dataset_summary = self.get_dataset(dataset_identifier)
names = [col.name for col in dataset_summary.analysis if (col.id_ == series_identifier)]
if (len(names) == 0):
raise ValueError('Invalid series identifier specified')
series_name = names[0]
correlation_data = self.client.get(Endpoints.SINGLE_SERIES_CORRELATIONS_WITH_OTHER_SERIES(dataset_identifier, series_identifier))
correlations = pd.DataFrame.from_dict(correlation_data['data'])
correlations.columns = ['Series', 'Pearson Correlation']
correlations.name = series_name
return correlations<|docstring|>Calculates the pearson correlation of a single series with every other series in a dataset
:param dataset_identifier: Unique identifier of a dataset.
:param series_identifier: Unique identifier of a column
:return:<|endoftext|> |
11b6fe17c1a17d86db7386342f2e21fa34f02598d339d88bff9c17b35bae4a9d | @catch_errors
def get_autocorrelation(self, dataset_identifier: int, series_identifier: int):
'\n Calculates the autocorrelation function of a single series\n\n :param dataset_identifier: Unique identifier of a dataset.\n :param series_identifier: Unique identifier of a column\n :return:\n '
dataset_summary = self.get_dataset(dataset_identifier)
names = [col.name for col in dataset_summary.analysis if (col.id_ == series_identifier)]
if (len(names) == 0):
raise ValueError('Invalid series identifier specified')
series_name = names[0]
acf = self.client.get(Endpoints.SINGLE_SERIES_AUTOCORRELATION(dataset_identifier, series_identifier))
acf_df = pd.DataFrame(acf['data'])
acf_df.columns = ['Lag', f'Correlation: f{series_name}']
return acf_df | Calculates the autocorrelation function of a single series
:param dataset_identifier: Unique identifier of a dataset.
:param series_identifier: Unique identifier of a column
:return: | src/mf_horizon_client/client/datasets/data_interface.py | get_autocorrelation | MF-HORIZON/mf-horizon-python-client | 0 | python | @catch_errors
def get_autocorrelation(self, dataset_identifier: int, series_identifier: int):
'\n Calculates the autocorrelation function of a single series\n\n :param dataset_identifier: Unique identifier of a dataset.\n :param series_identifier: Unique identifier of a column\n :return:\n '
dataset_summary = self.get_dataset(dataset_identifier)
names = [col.name for col in dataset_summary.analysis if (col.id_ == series_identifier)]
if (len(names) == 0):
raise ValueError('Invalid series identifier specified')
series_name = names[0]
acf = self.client.get(Endpoints.SINGLE_SERIES_AUTOCORRELATION(dataset_identifier, series_identifier))
acf_df = pd.DataFrame(acf['data'])
acf_df.columns = ['Lag', f'Correlation: f{series_name}']
return acf_df | @catch_errors
def get_autocorrelation(self, dataset_identifier: int, series_identifier: int):
'\n Calculates the autocorrelation function of a single series\n\n :param dataset_identifier: Unique identifier of a dataset.\n :param series_identifier: Unique identifier of a column\n :return:\n '
dataset_summary = self.get_dataset(dataset_identifier)
names = [col.name for col in dataset_summary.analysis if (col.id_ == series_identifier)]
if (len(names) == 0):
raise ValueError('Invalid series identifier specified')
series_name = names[0]
acf = self.client.get(Endpoints.SINGLE_SERIES_AUTOCORRELATION(dataset_identifier, series_identifier))
acf_df = pd.DataFrame(acf['data'])
acf_df.columns = ['Lag', f'Correlation: f{series_name}']
return acf_df<|docstring|>Calculates the autocorrelation function of a single series
:param dataset_identifier: Unique identifier of a dataset.
:param series_identifier: Unique identifier of a column
:return:<|endoftext|>
9452842207fe6badf42b0f51021ce5cc4479609275f0d3e8e0da477a77d0df73 | @catch_errors
def get_stationarity_scores(self, dataset_identifier: int) -> pd.DataFrame:
'\n Returns the Augmented Dickey-Fuller (ADF) score of the signals in a data set. For large data a data sample is used to compute this.\n\n :param dataset_identifier: Unique identifier of a dataset\n :return: Dataframe of stationarity scores\n '
dataset = self.get_dataset(identifier=dataset_identifier)
df = pd.DataFrame.from_records([dataclasses.asdict(series) for series in dataset.analysis])[['id_', 'name', 'adf']]
df['id_'] = df['id_'].astype(str)
return df | Returns the Augmented Dickey-Fuller (ADF) score of the signals in a data set. For large data a data sample is used to compute this.
:param dataset_identifier: Unique identifier of a dataset
:return: Dataframe of stationarity scores | src/mf_horizon_client/client/datasets/data_interface.py | get_stationarity_scores | MF-HORIZON/mf-horizon-python-client | 0 | python | @catch_errors
def get_stationarity_scores(self, dataset_identifier: int) -> pd.DataFrame:
'\n Returns the Augmented Dickey-Fuller (ADF) score of the signals in a data set. For large data a data sample is used to compute this.\n\n :param dataset_identifier: Unique identifier of a dataset\n :return: Dataframe of stationarity scores\n '
dataset = self.get_dataset(identifier=dataset_identifier)
df = pd.DataFrame.from_records([dataclasses.asdict(series) for series in dataset.analysis])[['id_', 'name', 'adf']]
df['id_'] = df['id_'].astype(str)
return df | @catch_errors
def get_stationarity_scores(self, dataset_identifier: int) -> pd.DataFrame:
'\n Returns the Augmented Dickey-Fuller (ADF) score of the signals in a data set. For large data a data sample is used to compute this.\n\n :param dataset_identifier: Unique identifier of a dataset\n :return: Dataframe of stationarity scores\n '
dataset = self.get_dataset(identifier=dataset_identifier)
df = pd.DataFrame.from_records([dataclasses.asdict(series) for series in dataset.analysis])[['id_', 'name', 'adf']]
df['id_'] = df['id_'].astype(str)
return df<|docstring|>Returns the Augmented Dickey-Fuller (ADF) score of the signals in a data set. For large data a data sample is used to compute this.
:param dataset_identifier: Unique identifier of a dataset
:return: Dataframe of stationarity scores<|endoftext|> |
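The get_stationarity_scores record above reports an Augmented Dickey-Fuller (ADF) statistic per series as its stationarity score. The sketch below is independent of the Horizon client and only illustrates what that statistic looks like when computed directly; the use of statsmodels here is an assumption for illustration, not something the client does.

import numpy as np
from statsmodels.tsa.stattools import adfuller

rng = np.random.default_rng(0)
stationary = rng.normal(size=500)                 # white noise: stationary, strongly negative ADF statistic
random_walk = np.cumsum(rng.normal(size=500))     # unit root: non-stationary, ADF statistic near zero

for name, series in [("stationary", stationary), ("random_walk", random_walk)]:
    stat, p_value = adfuller(series)[:2]          # first two return values are the test statistic and p-value
    print(f"{name}: ADF statistic = {stat:.2f}, p-value = {p_value:.3f}")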
10f4a328dd69df3499dbc7eb3258bf8a103d393585ab42b0400284cbc975ab64 | @catch_errors
def get_mutual_information(self, dataset_identifier: int, series_identifier: int):
'\n Calculates the mutual information of a single series with all other columns in a dataset\n\n\n :param dataset_identifier: Unique identifier of a dataset.\n :param series_identifier: Unique identifier of a column\n :return:\n '
dataset_summary = self.get_dataset(dataset_identifier)
names = [col.name for col in dataset_summary.analysis if (col.id_ == series_identifier)]
if (len(names) == 0):
raise ValueError('Invalid series identifier specified')
series_name = names[0]
mutual_information_data = self.client.get(Endpoints.SINGLE_SERIES_MUTUAL_INFORMATION_WITH_OTHER_SERIES(dataset_identifier, series_identifier))
mutual_information_data = pd.DataFrame.from_dict(mutual_information_data['data'])
mutual_information_data.columns = ['Series', 'Mutual Information']
mutual_information_data.name = series_name
return mutual_information_data | Calculates the mutual information of a single series with all other columns in a dataset
:param dataset_identifier: Unique identifier of a dataset.
:param series_identifier: Unique identifier of a column
:return: | src/mf_horizon_client/client/datasets/data_interface.py | get_mutual_information | MF-HORIZON/mf-horizon-python-client | 0 | python | @catch_errors
def get_mutual_information(self, dataset_identifier: int, series_identifier: int):
'\n Calculates the mutual information of a single series with all other columns in a dataset\n\n\n :param dataset_identifier: Unique identifier of a dataset.\n :param series_identifier: Unique identifier of a column\n :return:\n '
dataset_summary = self.get_dataset(dataset_identifier)
names = [col.name for col in dataset_summary.analysis if (col.id_ == series_identifier)]
if (len(names) == 0):
raise ValueError('Invalid series identifier specified')
series_name = names[0]
mutual_information_data = self.client.get(Endpoints.SINGLE_SERIES_MUTUAL_INFORMATION_WITH_OTHER_SERIES(dataset_identifier, series_identifier))
mutual_information_data = pd.DataFrame.from_dict(mutual_information_data['data'])
mutual_information_data.columns = ['Series', 'Mutual Information']
mutual_information_data.name = series_name
return mutual_information_data | @catch_errors
def get_mutual_information(self, dataset_identifier: int, series_identifier: int):
'\n Calculates the mutual information of a single series with all other columns in a dataset\n\n\n :param dataset_identifier: Unique identifier of a dataset.\n :param series_identifier: Unique identifier of a column\n :return:\n '
dataset_summary = self.get_dataset(dataset_identifier)
names = [col.name for col in dataset_summary.analysis if (col.id_ == series_identifier)]
if (len(names) == 0):
raise ValueError('Invalid series identifier specified')
series_name = names[0]
mutual_information_data = self.client.get(Endpoints.SINGLE_SERIES_MUTUAL_INFORMATION_WITH_OTHER_SERIES(dataset_identifier, series_identifier))
mutual_information_data = pd.DataFrame.from_dict(mutual_information_data['data'])
mutual_information_data.columns = ['Series', 'Mutual Information']
mutual_information_data.name = series_name
return mutual_information_data<|docstring|>Calculates the mutual information of a single series with all other columns in a dataset
:param dataset_identifier: Unique identifier of a dataset.
:param series_identifier: Unique identifier of a column
:return:<|endoftext|> |
89f77da899964647c9ff1597ef3d87143d63155fa18133e3c0bd83a73d438cb6 | @catch_errors
def upload_data_long_format_as_single_data_set(self, data: pd.DataFrame, name: str, cross_section_column_name: str, date_column_name: str, replace_missing_values: bool=True, forward_fill_missing_values: bool=False) -> IndividualDataset:
'\n Uploads long format data into Horizon. The data frame should have a date column, with a numeric index.\n\n :param data: The dataset in a pandas data frame. Must have a valid date column.\n :param name: Name of the data set to be uploaded\n :param cross_section_column_name: The identifier column that groups the records\n :param date_column_name: The column name of the date index.\n :param forward_fill_missing_values: Forward-fill missing values\n :param replace_missing_values: Replace missing values\n :return: A summary of the uploaded data set.\n :param encode_categorical_data: Categorically encode data that is non-numeric\n :param max_categories: Maximum number of categories per series.\n '
df = data.pivot_table(columns=cross_section_column_name, index=date_column_name)
df.reset_index(inplace=True)
df.columns = ['/'.join(column) for column in df.columns]
return self.upload_data(data=df, name=name, forward_fill_missing_values=forward_fill_missing_values, replace_missing_values=replace_missing_values) | Uploads long format data into Horizon. The data frame should have a date column, with a numeric index.
:param data: The dataset in a pandas data frame. Must have a valid date column.
:param name: Name of the data set to be uploaded
:param cross_section_column_name: The identifier column that groups the records
:param date_column_name: The column name of the date index.
:param forward_fill_missing_values: Forward-fill missing values
:param replace_missing_values: Replace missing values
:return: A summary of the uploaded data set.
:param encode_categorical_data: Categorically encode data that is non-numeric
:param max_categories: Maximum number of categories per series. | src/mf_horizon_client/client/datasets/data_interface.py | upload_data_long_format_as_single_data_set | MF-HORIZON/mf-horizon-python-client | 0 | python | @catch_errors
def upload_data_long_format_as_single_data_set(self, data: pd.DataFrame, name: str, cross_section_column_name: str, date_column_name: str, replace_missing_values: bool=True, forward_fill_missing_values: bool=False) -> IndividualDataset:
'\n Uploads long format data into Horizon. The data frame should have a date column, with a numeric index.\n\n :param data: The dataset in a pandas data frame. Must have a valid date column.\n :param name: Name of the data set to be uploaded\n :param cross_section_column_name: The identifier column that groups the records\n :param date_column_name: The column name of the date index.\n :param forward_fill_missing_values: Forward-fill missing values\n :param replace_missing_values: Replace missing values\n :return: A summary of the uploaded data set.\n :param encode_categorical_data: Categorically encode data that is non-numeric\n :param max_categories: Maximum number of categories per series.\n '
df = data.pivot_table(columns=cross_section_column_name, index=date_column_name)
df.reset_index(inplace=True)
df.columns = ['/'.join(column) for column in df.columns]
return self.upload_data(data=df, name=name, forward_fill_missing_values=forward_fill_missing_values, replace_missing_values=replace_missing_values) | @catch_errors
def upload_data_long_format_as_single_data_set(self, data: pd.DataFrame, name: str, cross_section_column_name: str, date_column_name: str, replace_missing_values: bool=True, forward_fill_missing_values: bool=False) -> IndividualDataset:
'\n Uploads long format data into Horizon. The data frame should have a date column, with a numeric index.\n\n :param data: The dataset in a pandas data frame. Must have a valid date column.\n :param name: Name of the data set to be uploaded\n :param cross_section_column_name: The identifier column that groups the records\n :param date_column_name: The column name of the date index.\n :param forward_fill_missing_values: Forward-fill missing values\n :param replace_missing_values: Replace missing values\n :return: A summary of the uploaded data set.\n :param encode_categorical_data: Categorically encode data that is non-numeric\n :param max_categories: Maximum number of categories per series.\n '
df = data.pivot_table(columns=cross_section_column_name, index=date_column_name)
df.reset_index(inplace=True)
df.columns = ['/'.join(column) for column in df.columns]
return self.upload_data(data=df, name=name, forward_fill_missing_values=forward_fill_missing_values, replace_missing_values=replace_missing_values)<|docstring|>Uploads long format data into Horizon. The data frame should have a date column, with a numeric index.
:param data: The dataset in a pandas data frame. Must have a valid date column.
:param name: Name of the data set to be uploaded
:param cross_section_column_name: The identifier column that groups the records
:param date_column_name: The column name of the date index.
:param forward_fill_missing_values: Forward-fill missing values
:param replace_missing_values: Replace missing values
:return: A summary of the uploaded data set.
:param encode_categorical_data: Categorically encode data that is non-numeric
:param max_categories: Maximum number of categories per series.<|endoftext|> |
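The reshaping performed by upload_data_long_format_as_single_data_set above (pivot_table on the cross-section column, reset_index, then joining the resulting column levels with '/') can be seen in isolation on a tiny made-up frame; the column names below are illustrative only.

import pandas as pd

long_df = pd.DataFrame({
    "date": ["2021-01-01", "2021-01-01", "2021-01-02", "2021-01-02"],
    "ticker": ["AAA", "BBB", "AAA", "BBB"],    # plays the role of cross_section_column_name
    "close": [10.0, 20.0, 11.0, 21.0],
})

wide = long_df.pivot_table(columns="ticker", index="date")
wide.reset_index(inplace=True)
wide.columns = ["/".join(column) for column in wide.columns]
print(wide)    # one wide column per (value, ticker) pair, e.g. close/AAA and close/BBB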
30ec0d9b019ae2249e516843c151aa66c8733a6adaf10cff2d83ffd1ca5f2479 | def reference(t_data: np.array, weights_data: np.array) -> np.array:
'Return result of equivalent calculation of the test in pytorch.\n\n Args:\n t_data (np.array): Input tensor data\n weights_data (np.array): Input tensor weights\n\n Returns:\n np.array: The result of the pytorch operations\n '
t_in = torch.from_numpy(t_data)
weights = torch.from_numpy(weights_data)
t_1 = torch.matmul(t_in, weights)
t_2 = torch.nn.functional.gelu(t_1)
t_out = torch.nn.functional.softmax(t_2, dim=1)
return t_out.numpy() | Return result of equivalent calculation of the test in pytorch.
Args:
t_data (np.array): Input tensor data
weights_data (np.array): Input tensor weights
Returns:
np.array: The result of the pytorch operations | tests/integration/popart.ir/test_fwd_pipeline.py | reference | graphcore/popart | 61 | python | def reference(t_data: np.array, weights_data: np.array) -> np.array:
'Return result of equivalent calculation of the test in pytorch.\n\n Args:\n t_data (np.array): Input tensor data\n weights_data (np.array): Input tensor weights\n\n Returns:\n np.array: The result of the pytorch operations\n '
t_in = torch.from_numpy(t_data)
weights = torch.from_numpy(weights_data)
t_1 = torch.matmul(t_in, weights)
t_2 = torch.nn.functional.gelu(t_1)
t_out = torch.nn.functional.softmax(t_2, dim=1)
return t_out.numpy() | def reference(t_data: np.array, weights_data: np.array) -> np.array:
'Return result of equivalent calculation of the test in pytorch.\n\n Args:\n t_data (np.array): Input tensor data\n weights_data (np.array): Input tensor weights\n\n Returns:\n np.array: The result of the pytorch operations\n '
t_in = torch.from_numpy(t_data)
weights = torch.from_numpy(weights_data)
t_1 = torch.matmul(t_in, weights)
t_2 = torch.nn.functional.gelu(t_1)
t_out = torch.nn.functional.softmax(t_2, dim=1)
return t_out.numpy()<|docstring|>Return result of equivalent calculation of the test in pytorch.
Args:
t_data (np.array): Input tensor data
weights_data (np.array): Input tensor weights
Returns:
np.array: The result of the pytorch operations<|endoftext|> |
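For readers without torch at hand, the chain computed by reference() above (matmul, then GELU, then a row-wise softmax) can also be written directly in numpy. The erf-based GELU below corresponds to torch's default exact formulation; pulling erf from scipy is an assumption of this sketch.

import numpy as np
from scipy.special import erf

def gelu(x):
    return x * 0.5 * (1.0 + erf(x / np.sqrt(2.0)))      # x * Phi(x), with Phi the standard normal CDF

def softmax(x, axis=1):
    shifted = x - x.max(axis=axis, keepdims=True)       # subtract the row max for numerical stability
    e = np.exp(shifted)
    return e / e.sum(axis=axis, keepdims=True)

t_data = np.random.normal(0, 0.1, (2, 16)).astype(np.float32)
weights_data = np.random.normal(0, 0.1, (16, 4)).astype(np.float32)
out = softmax(gelu(t_data @ weights_data))
print(out.shape, out.sum(axis=1))                       # (2, 4), each row summing to ~1.0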
25e63d4d3bc94009640dea82dd6f6d5521d294bedf73605fe7f0d6b90c27e4a0 | def build_model(weights_data: np.array, input_shape: Tuple[int]) -> Tuple[(_ir.Ir, HostToDeviceStream, DeviceToHostStream)]:
'Build the model using popart.ir API.\n \n Args:\n weights_data (np.array): The (non-streamed) data of the weights\n input_shape (tuple): The shape of the streamed input tensor\n\n Returns:\n (tuple): tuple containing:\n\n ir._pb_ir(_ir.Ir): The underlying IR\n t_in_h2d(HostToDeviceStream): The input stream of t_in\n t_out_d2h (DeviceToHostStream): The output stream of t_out\n '
ir = pir.Ir()
main = ir.main_graph()
with main:
weights = pir.variable(weights_data, name='weights')
t_in_h2d = pir.h2d_stream(input_shape, pir.float32, name='t_in_stream')
with pir.virtual_graph(0):
t_in = ops.host_load(t_in_h2d, 't_in')
t_1 = ops.matmul(t_in, weights)
t_1_c = ops.ipu_copy(t_1, 1)
with pir.virtual_graph(1):
t_2 = ops.gelu(t_1_c)
t_2_c = ops.ipu_copy(t_2, 2)
with pir.virtual_graph(2):
t_out = ops.softmax(t_2_c, axis=1)
t_out_d2h = pir.d2h_stream(t_out.shape, pir.float32, name='t_out_stream')
ops.host_store(t_out_d2h, t_out)
return (ir._pb_ir, t_in_h2d, t_out_d2h) | Build the model using popart.ir API.
Args:
weights_data (np.array): The (non-streamed) data of the weights
input_shape (tuple): The shape of the streamed input tensor
Returns:
(tuple): tuple containing:
ir._pb_ir(_ir.Ir): The underlying IR
t_in_h2d(HostToDeviceStream): The input stream of t_in
t_out_d2h (DeviceToHostStream): The output stream of t_out | tests/integration/popart.ir/test_fwd_pipeline.py | build_model | graphcore/popart | 61 | python | def build_model(weights_data: np.array, input_shape: Tuple[int]) -> Tuple[(_ir.Ir, HostToDeviceStream, DeviceToHostStream)]:
'Build the model using popart.ir API.\n \n Args:\n weights_data (np.array): The (non-streamed) data of the weights\n input_shape (tuple): The shape of the streamed input tensor\n\n Returns:\n (tuple): tuple containing:\n\n ir._pb_ir(_ir.Ir): The underlying IR\n t_in_h2d(HostToDeviceStream): The input stream of t_in\n t_out_d2h (DeviceToHostStream): The output stream of t_out\n '
ir = pir.Ir()
main = ir.main_graph()
with main:
weights = pir.variable(weights_data, name='weights')
t_in_h2d = pir.h2d_stream(input_shape, pir.float32, name='t_in_stream')
with pir.virtual_graph(0):
t_in = ops.host_load(t_in_h2d, 't_in')
t_1 = ops.matmul(t_in, weights)
t_1_c = ops.ipu_copy(t_1, 1)
with pir.virtual_graph(1):
t_2 = ops.gelu(t_1_c)
t_2_c = ops.ipu_copy(t_2, 2)
with pir.virtual_graph(2):
t_out = ops.softmax(t_2_c, axis=1)
t_out_d2h = pir.d2h_stream(t_out.shape, pir.float32, name='t_out_stream')
ops.host_store(t_out_d2h, t_out)
return (ir._pb_ir, t_in_h2d, t_out_d2h) | def build_model(weights_data: np.array, input_shape: Tuple[int]) -> Tuple[(_ir.Ir, HostToDeviceStream, DeviceToHostStream)]:
'Build the model using popart.ir API.\n \n Args:\n weights_data (np.array): The (non-streamed) data of the weights\n input_shape (tuple): The shape of the streamed input tensor\n\n Returns:\n (tuple): tuple containing:\n\n ir._pb_ir(_ir.Ir): The underlying IR\n t_in_h2d(HostToDeviceStream): The input stream of t_in\n t_out_d2h (DeviceToHostStream): The output stream of t_out\n '
ir = pir.Ir()
main = ir.main_graph()
with main:
weights = pir.variable(weights_data, name='weights')
t_in_h2d = pir.h2d_stream(input_shape, pir.float32, name='t_in_stream')
with pir.virtual_graph(0):
t_in = ops.host_load(t_in_h2d, 't_in')
t_1 = ops.matmul(t_in, weights)
t_1_c = ops.ipu_copy(t_1, 1)
with pir.virtual_graph(1):
t_2 = ops.gelu(t_1_c)
t_2_c = ops.ipu_copy(t_2, 2)
with pir.virtual_graph(2):
t_out = ops.softmax(t_2_c, axis=1)
t_out_d2h = pir.d2h_stream(t_out.shape, pir.float32, name='t_out_stream')
ops.host_store(t_out_d2h, t_out)
return (ir._pb_ir, t_in_h2d, t_out_d2h)<|docstring|>Build the model using popart.ir API.
Args:
weights_data (np.array): The (non-streamed) data of the weights
input_shape (tuple): The shape of the streamed input tensor
Returns:
(tuple): tuple containing:
ir._pb_ir(_ir.Ir): The underlying IR
t_in_h2d(HostToDeviceStream): The input stream of t_in
t_out_d2h (DeviceToHostStream): The output stream of t_out<|endoftext|> |
65dded8c4074df014917f1f609fa0615353ca2f46bb1f97b2de23571989ab52b | def test_fwd_pipeline():
'\n Test one forward pass of a simple pipeline model in serial.\n\n The test compares the outcome from popart.ir with the outcome from pytorch\n '
input_shape = (2, 16)
w_shape = (input_shape[(- 1)], 4)
weights_data = np.random.normal(0, 0.1, w_shape).astype(np.float32)
t_data = np.random.normal(0, 0.1, input_shape).astype(np.float32)
(ir, t_in_h2d, t_out_d2h) = build_model(weights_data, input_shape)
t_in_id = t_in_h2d.tensor_id()
t_out_id = t_out_d2h.tensor_id()
bps = 1
data_flow = popart.DataFlow(bps, {t_out_id: popart.AnchorReturnType('All')})
ir.setDataFlow(data_flow)
opts = ir.getSessionOptions()
opts.useHostCopyOps = True
opts.virtualGraphMode = popart.VirtualGraphMode.Manual
ir.updateVertices()
session = popart.InferenceSession.fromIr(ir=ir, deviceInfo=tu.create_test_device(numIpus=3))
session.prepareDevice()
anchors = session.initAnchorArrays()
stepio = popart.PyStepIO({t_in_id: t_data}, anchors)
session.weightsFromHost()
session.run(stepio)
expected_t_out = reference(t_data, weights_data)
t_out = anchors[t_out_id]
assert (t_out.shape == expected_t_out.shape)
assert (t_out.dtype == expected_t_out.dtype)
assert np.allclose(t_out, expected_t_out) | Test one forward pass of a simple pipeline model in serial.
The test compares the outcome from popart.ir with the outcome from pytorch | tests/integration/popart.ir/test_fwd_pipeline.py | test_fwd_pipeline | graphcore/popart | 61 | python | def test_fwd_pipeline():
'\n Test one forward pass of a simple pipeline model in serial.\n\n The test compares the outcome from popart.ir with the outcome from pytorch\n '
input_shape = (2, 16)
w_shape = (input_shape[(- 1)], 4)
weights_data = np.random.normal(0, 0.1, w_shape).astype(np.float32)
t_data = np.random.normal(0, 0.1, input_shape).astype(np.float32)
(ir, t_in_h2d, t_out_d2h) = build_model(weights_data, input_shape)
t_in_id = t_in_h2d.tensor_id()
t_out_id = t_out_d2h.tensor_id()
bps = 1
data_flow = popart.DataFlow(bps, {t_out_id: popart.AnchorReturnType('All')})
ir.setDataFlow(data_flow)
opts = ir.getSessionOptions()
opts.useHostCopyOps = True
opts.virtualGraphMode = popart.VirtualGraphMode.Manual
ir.updateVertices()
session = popart.InferenceSession.fromIr(ir=ir, deviceInfo=tu.create_test_device(numIpus=3))
session.prepareDevice()
anchors = session.initAnchorArrays()
stepio = popart.PyStepIO({t_in_id: t_data}, anchors)
session.weightsFromHost()
session.run(stepio)
expected_t_out = reference(t_data, weights_data)
t_out = anchors[t_out_id]
assert (t_out.shape == expected_t_out.shape)
assert (t_out.dtype == expected_t_out.dtype)
assert np.allclose(t_out, expected_t_out) | def test_fwd_pipeline():
'\n Test one forward pass of a simple pipeline model in serial.\n\n The test compares the outcome from popart.ir with the outcome from pytorch\n '
input_shape = (2, 16)
w_shape = (input_shape[(- 1)], 4)
weights_data = np.random.normal(0, 0.1, w_shape).astype(np.float32)
t_data = np.random.normal(0, 0.1, input_shape).astype(np.float32)
(ir, t_in_h2d, t_out_d2h) = build_model(weights_data, input_shape)
t_in_id = t_in_h2d.tensor_id()
t_out_id = t_out_d2h.tensor_id()
bps = 1
data_flow = popart.DataFlow(bps, {t_out_id: popart.AnchorReturnType('All')})
ir.setDataFlow(data_flow)
opts = ir.getSessionOptions()
opts.useHostCopyOps = True
opts.virtualGraphMode = popart.VirtualGraphMode.Manual
ir.updateVertices()
session = popart.InferenceSession.fromIr(ir=ir, deviceInfo=tu.create_test_device(numIpus=3))
session.prepareDevice()
anchors = session.initAnchorArrays()
stepio = popart.PyStepIO({t_in_id: t_data}, anchors)
session.weightsFromHost()
session.run(stepio)
expected_t_out = reference(t_data, weights_data)
t_out = anchors[t_out_id]
assert (t_out.shape == expected_t_out.shape)
assert (t_out.dtype == expected_t_out.dtype)
assert np.allclose(t_out, expected_t_out)<|docstring|>Test one forward pass of a simple pipeline model in serial.
The test compares the outcome from popart.ir with the outcome from pytorch<|endoftext|>
280756f4092f71a40e963d883d02f458784268d1d94921fac04c248e033589eb | def train():
'Train CIFAR-10 for a number of steps.'
g1 = tf.Graph()
with g1.as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
(images, labels) = cifar10.distorted_inputs()
logits = cifar10.inference(images)
loss = cifar10.loss(logits, labels)
grads = cifar10.train_part1(loss, global_step)
only_gradients = [g for (g, _) in grads]
only_vars = [v for (_, v) in grads]
placeholder_gradients = []
for grad_var in grads:
placeholder_gradients.append((tf.placeholder('float', shape=grad_var[0].get_shape()), grad_var[1]))
feed_dict = {}
for (i, grad_var) in enumerate(grads):
feed_dict[placeholder_gradients[i][0]] = np.zeros(placeholder_gradients[i][0].shape)
train_op = cifar10.train_part2(global_step, placeholder_gradients)
class _LoggerHook(tf.train.SessionRunHook):
'Logs loss and runtime.'
def begin(self):
self._step = (- 1)
self._start_time = time.time()
def before_run(self, run_context):
self._step += 1
if ((self._step % 2) == 0):
return tf.train.SessionRunArgs(loss)
else:
return None
def after_run(self, run_context, run_values):
if (((self._step % FLAGS.log_frequency) == 0) and ((self._step % 2) == 0)):
current_time = time.time()
duration = (current_time - self._start_time)
self._start_time = current_time
loss_value = run_values.results
examples_per_sec = ((FLAGS.log_frequency * FLAGS.batch_size) / duration)
sec_per_batch = float((duration / FLAGS.log_frequency))
format_str = '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)'
print((format_str % (datetime.now(), self._step, loss_value, examples_per_sec, sec_per_batch)))
with tf.train.MonitoredTrainingSession(checkpoint_dir=FLAGS.train_dir, hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps), tf.train.NanTensorHook(loss), _LoggerHook()], config=tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while (not mon_sess.should_stop()):
(dummy_loss, gradients) = mon_sess.run([loss, only_gradients], feed_dict=feed_dict)
gradients2 = gradients
feed_dict = {}
for (i, grad_var) in enumerate(gradients2):
feed_dict[placeholder_gradients[i][0]] = gradients2[i]
res = mon_sess.run(train_op, feed_dict=feed_dict) | Train CIFAR-10 for a number of steps. | weightgrad/baseline.py | train | sabuj7177/CovidProject | 0 | python | def train():
g1 = tf.Graph()
with g1.as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
(images, labels) = cifar10.distorted_inputs()
logits = cifar10.inference(images)
loss = cifar10.loss(logits, labels)
grads = cifar10.train_part1(loss, global_step)
only_gradients = [g for (g, _) in grads]
only_vars = [v for (_, v) in grads]
placeholder_gradients = []
for grad_var in grads:
placeholder_gradients.append((tf.placeholder('float', shape=grad_var[0].get_shape()), grad_var[1]))
feed_dict = {}
for (i, grad_var) in enumerate(grads):
feed_dict[placeholder_gradients[i][0]] = np.zeros(placeholder_gradients[i][0].shape)
train_op = cifar10.train_part2(global_step, placeholder_gradients)
class _LoggerHook(tf.train.SessionRunHook):
'Logs loss and runtime.'
def begin(self):
self._step = (- 1)
self._start_time = time.time()
def before_run(self, run_context):
self._step += 1
if ((self._step % 2) == 0):
return tf.train.SessionRunArgs(loss)
else:
return None
def after_run(self, run_context, run_values):
if (((self._step % FLAGS.log_frequency) == 0) and ((self._step % 2) == 0)):
current_time = time.time()
duration = (current_time - self._start_time)
self._start_time = current_time
loss_value = run_values.results
examples_per_sec = ((FLAGS.log_frequency * FLAGS.batch_size) / duration)
sec_per_batch = float((duration / FLAGS.log_frequency))
format_str = '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)'
print((format_str % (datetime.now(), self._step, loss_value, examples_per_sec, sec_per_batch)))
with tf.train.MonitoredTrainingSession(checkpoint_dir=FLAGS.train_dir, hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps), tf.train.NanTensorHook(loss), _LoggerHook()], config=tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while (not mon_sess.should_stop()):
(dummy_loss, gradients) = mon_sess.run([loss, only_gradients], feed_dict=feed_dict)
gradients2 = gradients
feed_dict = {}
for (i, grad_var) in enumerate(gradients2):
feed_dict[placeholder_gradients[i][0]] = gradients2[i]
res = mon_sess.run(train_op, feed_dict=feed_dict) | def train():
g1 = tf.Graph()
with g1.as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
(images, labels) = cifar10.distorted_inputs()
logits = cifar10.inference(images)
loss = cifar10.loss(logits, labels)
grads = cifar10.train_part1(loss, global_step)
only_gradients = [g for (g, _) in grads]
only_vars = [v for (_, v) in grads]
placeholder_gradients = []
for grad_var in grads:
placeholder_gradients.append((tf.placeholder('float', shape=grad_var[0].get_shape()), grad_var[1]))
feed_dict = {}
for (i, grad_var) in enumerate(grads):
feed_dict[placeholder_gradients[i][0]] = np.zeros(placeholder_gradients[i][0].shape)
train_op = cifar10.train_part2(global_step, placeholder_gradients)
class _LoggerHook(tf.train.SessionRunHook):
'Logs loss and runtime.'
def begin(self):
self._step = (- 1)
self._start_time = time.time()
def before_run(self, run_context):
self._step += 1
if ((self._step % 2) == 0):
return tf.train.SessionRunArgs(loss)
else:
return None
def after_run(self, run_context, run_values):
if (((self._step % FLAGS.log_frequency) == 0) and ((self._step % 2) == 0)):
current_time = time.time()
duration = (current_time - self._start_time)
self._start_time = current_time
loss_value = run_values.results
examples_per_sec = ((FLAGS.log_frequency * FLAGS.batch_size) / duration)
sec_per_batch = float((duration / FLAGS.log_frequency))
format_str = '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)'
print((format_str % (datetime.now(), self._step, loss_value, examples_per_sec, sec_per_batch)))
with tf.train.MonitoredTrainingSession(checkpoint_dir=FLAGS.train_dir, hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps), tf.train.NanTensorHook(loss), _LoggerHook()], config=tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while (not mon_sess.should_stop()):
(dummy_loss, gradients) = mon_sess.run([loss, only_gradients], feed_dict=feed_dict)
gradients2 = gradients
feed_dict = {}
for (i, grad_var) in enumerate(gradients2):
feed_dict[placeholder_gradients[i][0]] = gradients2[i]
res = mon_sess.run(train_op, feed_dict=feed_dict)<|docstring|>Train CIFAR-10 for a number of steps.<|endoftext|> |
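The train() record above applies its updates in two phases: one session.run fetches the gradients, which are then fed back in through placeholders and applied by a second run (the point at which they could be inspected, averaged across workers, or otherwise modified). The sketch below strips that pattern down to a toy linear model using the TF1-style API via tf.compat.v1; none of the CIFAR-10 specifics are reproduced, and all names are illustrative.

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.placeholder(tf.float32, shape=(None, 4))
y = tf.placeholder(tf.float32, shape=(None, 1))
w = tf.Variable(tf.zeros((4, 1)))
loss = tf.reduce_mean(tf.square(tf.matmul(x, w) - y))

opt = tf.train.GradientDescentOptimizer(0.1)
grads_and_vars = opt.compute_gradients(loss, var_list=[w])
# one placeholder per gradient, so the values can be modified before being applied
placeholder_gradients = [(tf.placeholder(tf.float32, shape=g.get_shape()), v)
                         for g, v in grads_and_vars]
apply_op = opt.apply_gradients(placeholder_gradients)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = {x: np.random.rand(8, 4), y: np.random.rand(8, 1)}
    grad_vals = sess.run([g for g, _ in grads_and_vars], feed_dict=batch)   # phase 1: fetch gradients
    feed = {ph: val for (ph, _), val in zip(placeholder_gradients, grad_vals)}
    sess.run(apply_op, feed_dict=feed)                                      # phase 2: apply them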
7c8a4705975c9ce7397a64a49f039e51574962ac89566188f05b447acfe3949a | def get_entities(seq, suffix=False):
"Gets entities from sequence.\n Args:\n seq (list): sequence of labels.\n Returns:\n list: list of (chunk_type, chunk_start, chunk_end).\n Example:\n >>> from sagas.nlu.utils import get_entities\n >>> seq = ['B-PER', 'I-PER', 'O', 'B-LOC']\n >>> get_entities(seq)\n [('PER', 0, 1), ('LOC', 3, 3)]\n "
if any((isinstance(s, list) for s in seq)):
seq = [item for sublist in seq for item in (sublist + ['O'])]
prev_tag = 'O'
prev_type = ''
begin_offset = 0
chunks = []
for (i, chunk) in enumerate((seq + ['O'])):
if suffix:
tag = chunk[(- 1)]
type_ = chunk.split('-', maxsplit=1)[0]
else:
tag = chunk[0]
type_ = chunk.split('-', maxsplit=1)[(- 1)]
if end_of_chunk(prev_tag, tag, prev_type, type_):
chunks.append((prev_type, begin_offset, (i - 1)))
if start_of_chunk(prev_tag, tag, prev_type, type_):
begin_offset = i
prev_tag = tag
prev_type = type_
return chunks | Gets entities from sequence.
Args:
seq (list): sequence of labels.
Returns:
list: list of (chunk_type, chunk_start, chunk_end).
Example:
>>> from sagas.nlu.utils import get_entities
>>> seq = ['B-PER', 'I-PER', 'O', 'B-LOC']
>>> get_entities(seq)
[('PER', 0, 1), ('LOC', 3, 3)] | sagas/nlu/utils.py | get_entities | samlet/stack | 3 | python | def get_entities(seq, suffix=False):
"Gets entities from sequence.\n Args:\n seq (list): sequence of labels.\n Returns:\n list: list of (chunk_type, chunk_start, chunk_end).\n Example:\n >>> from sagas.nlu.utils import get_entities\n >>> seq = ['B-PER', 'I-PER', 'O', 'B-LOC']\n >>> get_entities(seq)\n [('PER', 0, 1), ('LOC', 3, 3)]\n "
if any((isinstance(s, list) for s in seq)):
seq = [item for sublist in seq for item in (sublist + ['O'])]
prev_tag = 'O'
prev_type = ''
begin_offset = 0
chunks = []
for (i, chunk) in enumerate((seq + ['O'])):
if suffix:
tag = chunk[(- 1)]
type_ = chunk.split('-', maxsplit=1)[0]
else:
tag = chunk[0]
type_ = chunk.split('-', maxsplit=1)[(- 1)]
if end_of_chunk(prev_tag, tag, prev_type, type_):
chunks.append((prev_type, begin_offset, (i - 1)))
if start_of_chunk(prev_tag, tag, prev_type, type_):
begin_offset = i
prev_tag = tag
prev_type = type_
return chunks | def get_entities(seq, suffix=False):
"Gets entities from sequence.\n Args:\n seq (list): sequence of labels.\n Returns:\n list: list of (chunk_type, chunk_start, chunk_end).\n Example:\n >>> from sagas.nlu.utils import get_entities\n >>> seq = ['B-PER', 'I-PER', 'O', 'B-LOC']\n >>> get_entities(seq)\n [('PER', 0, 1), ('LOC', 3, 3)]\n "
if any((isinstance(s, list) for s in seq)):
seq = [item for sublist in seq for item in (sublist + ['O'])]
prev_tag = 'O'
prev_type = ''
begin_offset = 0
chunks = []
for (i, chunk) in enumerate((seq + ['O'])):
if suffix:
tag = chunk[(- 1)]
type_ = chunk.split('-', maxsplit=1)[0]
else:
tag = chunk[0]
type_ = chunk.split('-', maxsplit=1)[(- 1)]
if end_of_chunk(prev_tag, tag, prev_type, type_):
chunks.append((prev_type, begin_offset, (i - 1)))
if start_of_chunk(prev_tag, tag, prev_type, type_):
begin_offset = i
prev_tag = tag
prev_type = type_
return chunks<|docstring|>Gets entities from sequence.
Args:
seq (list): sequence of labels.
Returns:
list: list of (chunk_type, chunk_start, chunk_end).
Example:
>>> from sagas.nlu.utils import get_entities
>>> seq = ['B-PER', 'I-PER', 'O', 'B-LOC']
>>> get_entities(seq)
[('PER', 0, 1), ('LOC', 3, 3)]<|endoftext|> |
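One behaviour of get_entities above that its docstring example does not show: when the input is a list of sentences (a list of lists), each sentence is flattened with a trailing 'O' so that a chunk can never span a sentence boundary, and the returned offsets refer to that flattened sequence. In the same doctest style as the record above:

>>> seq = [['B-PER', 'I-PER'], ['B-LOC']]   # two sentences
>>> get_entities(seq)                       # flattened internally to ['B-PER', 'I-PER', 'O', 'B-LOC', 'O']
[('PER', 0, 1), ('LOC', 3, 3)]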
4e55ddf7420a02169e6662a4cef5f0342b5ef68a15bce65741d6354742685ed1 | def end_of_chunk(prev_tag, tag, prev_type, type_):
'Checks if a chunk ended between the previous and current word.\n Args:\n prev_tag: previous chunk tag.\n tag: current chunk tag.\n prev_type: previous type.\n type_: current type.\n Returns:\n chunk_end: boolean.\n '
chunk_end = False
if (prev_tag == 'E'):
chunk_end = True
if (prev_tag == 'S'):
chunk_end = True
if ((prev_tag == 'B') and (tag == 'B')):
chunk_end = True
if ((prev_tag == 'B') and (tag == 'S')):
chunk_end = True
if ((prev_tag == 'B') and (tag == 'O')):
chunk_end = True
if ((prev_tag == 'I') and (tag == 'B')):
chunk_end = True
if ((prev_tag == 'I') and (tag == 'S')):
chunk_end = True
if ((prev_tag == 'I') and (tag == 'O')):
chunk_end = True
if ((prev_tag != 'O') and (prev_tag != '.') and (prev_type != type_)):
chunk_end = True
return chunk_end | Checks if a chunk ended between the previous and current word.
Args:
prev_tag: previous chunk tag.
tag: current chunk tag.
prev_type: previous type.
type_: current type.
Returns:
chunk_end: boolean. | sagas/nlu/utils.py | end_of_chunk | samlet/stack | 3 | python | def end_of_chunk(prev_tag, tag, prev_type, type_):
'Checks if a chunk ended between the previous and current word.\n Args:\n prev_tag: previous chunk tag.\n tag: current chunk tag.\n prev_type: previous type.\n type_: current type.\n Returns:\n chunk_end: boolean.\n '
chunk_end = False
if (prev_tag == 'E'):
chunk_end = True
if (prev_tag == 'S'):
chunk_end = True
if ((prev_tag == 'B') and (tag == 'B')):
chunk_end = True
if ((prev_tag == 'B') and (tag == 'S')):
chunk_end = True
if ((prev_tag == 'B') and (tag == 'O')):
chunk_end = True
if ((prev_tag == 'I') and (tag == 'B')):
chunk_end = True
if ((prev_tag == 'I') and (tag == 'S')):
chunk_end = True
if ((prev_tag == 'I') and (tag == 'O')):
chunk_end = True
if ((prev_tag != 'O') and (prev_tag != '.') and (prev_type != type_)):
chunk_end = True
return chunk_end | def end_of_chunk(prev_tag, tag, prev_type, type_):
'Checks if a chunk ended between the previous and current word.\n Args:\n prev_tag: previous chunk tag.\n tag: current chunk tag.\n prev_type: previous type.\n type_: current type.\n Returns:\n chunk_end: boolean.\n '
chunk_end = False
if (prev_tag == 'E'):
chunk_end = True
if (prev_tag == 'S'):
chunk_end = True
if ((prev_tag == 'B') and (tag == 'B')):
chunk_end = True
if ((prev_tag == 'B') and (tag == 'S')):
chunk_end = True
if ((prev_tag == 'B') and (tag == 'O')):
chunk_end = True
if ((prev_tag == 'I') and (tag == 'B')):
chunk_end = True
if ((prev_tag == 'I') and (tag == 'S')):
chunk_end = True
if ((prev_tag == 'I') and (tag == 'O')):
chunk_end = True
if ((prev_tag != 'O') and (prev_tag != '.') and (prev_type != type_)):
chunk_end = True
return chunk_end<|docstring|>Checks if a chunk ended between the previous and current word.
Args:
prev_tag: previous chunk tag.
tag: current chunk tag.
prev_type: previous type.
type_: current type.
Returns:
chunk_end: boolean.<|endoftext|> |
439499348288cf587523b727c1772c5537c8c3f9d53118c03832f13fc61bb6b9 | def start_of_chunk(prev_tag, tag, prev_type, type_):
'Checks if a chunk started between the previous and current word.\n Args:\n prev_tag: previous chunk tag.\n tag: current chunk tag.\n prev_type: previous type.\n type_: current type.\n Returns:\n chunk_start: boolean.\n '
chunk_start = False
if (tag == 'B'):
chunk_start = True
if (tag == 'S'):
chunk_start = True
if ((prev_tag == 'E') and (tag == 'E')):
chunk_start = True
if ((prev_tag == 'E') and (tag == 'I')):
chunk_start = True
if ((prev_tag == 'S') and (tag == 'E')):
chunk_start = True
if ((prev_tag == 'S') and (tag == 'I')):
chunk_start = True
if ((prev_tag == 'O') and (tag == 'E')):
chunk_start = True
if ((prev_tag == 'O') and (tag == 'I')):
chunk_start = True
if ((tag != 'O') and (tag != '.') and (prev_type != type_)):
chunk_start = True
return chunk_start | Checks if a chunk started between the previous and current word.
Args:
prev_tag: previous chunk tag.
tag: current chunk tag.
prev_type: previous type.
type_: current type.
Returns:
chunk_start: boolean. | sagas/nlu/utils.py | start_of_chunk | samlet/stack | 3 | python | def start_of_chunk(prev_tag, tag, prev_type, type_):
'Checks if a chunk started between the previous and current word.\n Args:\n prev_tag: previous chunk tag.\n tag: current chunk tag.\n prev_type: previous type.\n type_: current type.\n Returns:\n chunk_start: boolean.\n '
chunk_start = False
if (tag == 'B'):
chunk_start = True
if (tag == 'S'):
chunk_start = True
if ((prev_tag == 'E') and (tag == 'E')):
chunk_start = True
if ((prev_tag == 'E') and (tag == 'I')):
chunk_start = True
if ((prev_tag == 'S') and (tag == 'E')):
chunk_start = True
if ((prev_tag == 'S') and (tag == 'I')):
chunk_start = True
if ((prev_tag == 'O') and (tag == 'E')):
chunk_start = True
if ((prev_tag == 'O') and (tag == 'I')):
chunk_start = True
if ((tag != 'O') and (tag != '.') and (prev_type != type_)):
chunk_start = True
return chunk_start | def start_of_chunk(prev_tag, tag, prev_type, type_):
'Checks if a chunk started between the previous and current word.\n Args:\n prev_tag: previous chunk tag.\n tag: current chunk tag.\n prev_type: previous type.\n type_: current type.\n Returns:\n chunk_start: boolean.\n '
chunk_start = False
if (tag == 'B'):
chunk_start = True
if (tag == 'S'):
chunk_start = True
if ((prev_tag == 'E') and (tag == 'E')):
chunk_start = True
if ((prev_tag == 'E') and (tag == 'I')):
chunk_start = True
if ((prev_tag == 'S') and (tag == 'E')):
chunk_start = True
if ((prev_tag == 'S') and (tag == 'I')):
chunk_start = True
if ((prev_tag == 'O') and (tag == 'E')):
chunk_start = True
if ((prev_tag == 'O') and (tag == 'I')):
chunk_start = True
if ((tag != 'O') and (tag != '.') and (prev_type != type_)):
chunk_start = True
return chunk_start<|docstring|>Checks if a chunk started between the previous and current word.
Args:
prev_tag: previous chunk tag.
tag: current chunk tag.
prev_type: previous type.
type_: current type.
Returns:
chunk_start: boolean.<|endoftext|> |
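A few worked transitions make the chunk-boundary rules in end_of_chunk and start_of_chunk above concrete (doctest style, with both functions imported as in the earlier get_entities example):

>>> end_of_chunk('B', 'B', 'PER', 'PER')    # two B- tags in a row: the first chunk closes...
True
>>> start_of_chunk('B', 'B', 'PER', 'PER')  # ...and a new one opens immediately
True
>>> end_of_chunk('I', 'I', 'PER', 'LOC')    # a type change inside I- tags also splits the chunk
True
>>> start_of_chunk('I', 'I', 'PER', 'LOC')
True
>>> end_of_chunk('O', 'B', '', 'PER')       # O -> B starts a chunk but closes nothing
False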
26ea27236783214f1a1dcef20ff67ede98f5b3461f37056b158750eb6314d09a | def generate_prediction_data(self, *args, **kwargs):
'Generates data that consumers will use to make predictions for the next trading day.\n\n Currently there is no implementation for this, and calling this method will result in a NotImplementedError\n '
raise NotImplementedError() | Generates data that consumers will use to make predictions for the next trading day.
Currently there is no implementation for this, and calling this method will result in a NotImplementedError | src/data_providing_module/data_providers/clustered_block_provider.py | generate_prediction_data | Freitacr/ML-StockAnalysisProject | 0 | python | def generate_prediction_data(self, *args, **kwargs):
'Generates data that consumers will use to make predictions for the next trading day.\n\n Currently there is no implementation for this, and calling this method will result in a NotImplementedError\n '
raise NotImplementedError() | def generate_prediction_data(self, *args, **kwargs):
'Generates data that consumers will use to make predictions for the next trading day.\n\n Currently there is no implementation for this, and calling this method will result in a NotImplementedError\n '
raise NotImplementedError()<|docstring|>Generates data that consumers will use to make predictions for the next trading day.
Currently there is no implementation for this, and calling this method will result in a NotImplementedError<|endoftext|> |
3036423b60790bf819ea36ab101089c89116e5f6545991c03b28967367bbee31 | def load_configuration(self, parser: 'configparser.ConfigParser'):
'Attempts to load the configurable parameters for this provider from the provided parser.\n\n For more details see abstract class documentation.\n '
section = config_util.create_type_section(parser, self)
if (not parser.has_option(section.name, _ENABLED_CONFIG_ID)):
self.write_default_configuration(section)
enabled = parser.getboolean(section.name, _ENABLED_CONFIG_ID)
if enabled:
data_provider_registry.registry.register_provider(data_provider_static_names.CLUSTERED_BLOCK_PROVIDER_ID, self) | Attempts to load the configurable parameters for this provider from the provided parser.
For more details see abstract class documentation. | src/data_providing_module/data_providers/clustered_block_provider.py | load_configuration | Freitacr/ML-StockAnalysisProject | 0 | python | def load_configuration(self, parser: 'configparser.ConfigParser'):
'Attempts to load the configurable parameters for this provider from the provided parser.\n\n For more details see abstract class documentation.\n '
section = config_util.create_type_section(parser, self)
if (not parser.has_option(section.name, _ENABLED_CONFIG_ID)):
self.write_default_configuration(section)
enabled = parser.getboolean(section.name, _ENABLED_CONFIG_ID)
if enabled:
data_provider_registry.registry.register_provider(data_provider_static_names.CLUSTERED_BLOCK_PROVIDER_ID, self) | def load_configuration(self, parser: 'configparser.ConfigParser'):
'Attempts to load the configurable parameters for this provider from the provided parser.\n\n For more details see abstract class documentation.\n '
section = config_util.create_type_section(parser, self)
if (not parser.has_option(section.name, _ENABLED_CONFIG_ID)):
self.write_default_configuration(section)
enabled = parser.getboolean(section.name, _ENABLED_CONFIG_ID)
if enabled:
data_provider_registry.registry.register_provider(data_provider_static_names.CLUSTERED_BLOCK_PROVIDER_ID, self)<|docstring|>Attempts to load the configurable parameters for this provider from the provided parser.
For more details see abstract class documentation.<|endoftext|> |
497d50d9400920609899e12549fd7101f52a17d554ebc5f3fab2688b09bb9352 | def write_default_configuration(self, section: 'configparser.SectionProxy'):
'Writes default configuration values into the SectionProxy provided.\n\n For more details see abstract class documentation.\n '
section[_ENABLED_CONFIG_ID] = 'True' | Writes default configuration values into the SectionProxy provided.
For more details see abstract class documentation. | src/data_providing_module/data_providers/clustered_block_provider.py | write_default_configuration | Freitacr/ML-StockAnalysisProject | 0 | python | def write_default_configuration(self, section: 'configparser.SectionProxy'):
'Writes default configuration values into the SectionProxy provided.\n\n For more details see abstract class documentation.\n '
section[_ENABLED_CONFIG_ID] = 'True' | def write_default_configuration(self, section: 'configparser.SectionProxy'):
'Writes default configuration values into the SectionProxy provided.\n\n For more details see abstract class documentation.\n '
section[_ENABLED_CONFIG_ID] = 'True'<|docstring|>Writes default configuration values into the SectionProxy provided.
For more details see abstract class documentation.<|endoftext|> |
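A standard-library sketch of the enable-flag pattern that load_configuration and write_default_configuration above implement. The real section name comes from config_util.create_type_section and the real option key from _ENABLED_CONFIG_ID; neither value is shown in these records, so both are placeholders here.

import configparser

parser = configparser.ConfigParser()
section_name = "ClusteredBlockProvider"   # placeholder for the type-derived section name
option_key = "enabled"                    # placeholder for whatever _ENABLED_CONFIG_ID resolves to

if not parser.has_section(section_name):
    parser.add_section(section_name)
if not parser.has_option(section_name, option_key):
    parser[section_name][option_key] = "True"   # mirrors write_default_configuration's default

if parser.getboolean(section_name, option_key):
    print("provider would register itself with the data provider registry")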
f3ce46c57cad01da4cf89fcef09531455a2faaf31eb014a9ff65491636c21684 | def __init__(self):
'Initializes ClusteredBlockProvider and registers it with the global DataProviderRegistry\n\n '
super(ClusteredBlockProvider, self).__init__()
configurable_registry.config_registry.register_configurable(self) | Initializes ClusteredBlockProvider and registers it with the global DataProviderRegistry | src/data_providing_module/data_providers/clustered_block_provider.py | __init__ | Freitacr/ML-StockAnalysisProject | 0 | python | def __init__(self):
'\n\n '
super(ClusteredBlockProvider, self).__init__()
configurable_registry.config_registry.register_configurable(self) | def __init__(self):
'\n\n '
super(ClusteredBlockProvider, self).__init__()
configurable_registry.config_registry.register_configurable(self)<|docstring|>Initializes ClusteredBlockProvider and registers it with the global DataProviderRegistry<|endoftext|> |
7bed1794b3c240a32ee11a9611e0de9e324a756454d4297a211820a70c50592a | def generate_data(self, *args, **kwargs) -> Tuple[(np.ndarray, np.ndarray, np.ndarray, np.ndarray)]:
'Generates data for Consumers to use by clustering together stocks in a time period\n\n The time period for cluster creation is a period of 52 * 4 weeks (approximately 4 years).\n Consumers requiring data from this provider are expected to provide the arguments specified in the\n *args entry of the Arguments section\n\n As a note, the data provided is not separated by cluster. If separation is desired, see SplitBlockProvider.\n\n Arguments:\n *args:\n List of arguments that are expected to be in the following order, with the specified types\n train_columns: List[str]\n List of names of columns from a StockDataTable. These will be used to retrieve data\n from the database and construct the returned data blocks\n expectation_columns: List[int]\n List of integers representing the indices of the columns to be used as the target data\n in the generation of the data blocks\n Returns:\n See StockClusterDataManager.retrieve_training_data_movement_targets\n '
if (len(args) <= 1):
raise ValueError(('Expected at least the first argument from the following list;' + ' train_columns: List["str"], expectation_columns: List["int"]'))
columns = args[0]
expectation_columns = None
if (len(args) == 2):
expectation_columns = args[1]
start_date = (datetime.datetime.now() - datetime.timedelta(weeks=(52 * 4)))
start_date = start_date.isoformat()[:10].replace('-', '/')
end_date = datetime.datetime.now().isoformat()[:10].replace('-', '/')
data_retriever = stock_cluster_data_manager.StockClusterDataManager(start_date, end_date, column_list=columns)
return data_retriever.retrieveTrainingDataMovementTargets(expectation_columns=expectation_columns) | Generates data for Consumers to use by clustering together stocks in a time period
The time period for cluster creation is a period of 52 * 4 weeks (approximately 4 years).
Consumers requiring data from this provider are expected to provide the arguments specified in the
*args entry of the Arguments section
As a note, the data provided is not separated by cluster. If separation is desired, see SplitBlockProvider.
Arguments:
*args:
List of arguments that are expected to be in the following order, with the specified types
train_columns: List[str]
List of names of columns from a StockDataTable. These will be used to retrieve data
from the database and construct the returned data blocks
expectation_columns: List[int]
List of integers representing the indices of the columns to be used as the target data
in the generation of the data blocks
Returns:
See StockClusterDataManager.retrieve_training_data_movement_targets | src/data_providing_module/data_providers/clustered_block_provider.py | generate_data | Freitacr/ML-StockAnalysisProject | 0 | python | def generate_data(self, *args, **kwargs) -> Tuple[(np.ndarray, np.ndarray, np.ndarray, np.ndarray)]:
'Generates data for Consumers to use by clustering together stocks in a time period\n\n The time period for cluster creation is a period of 52 * 4 weeks (approximately 4 years).\n Consumers requiring data from this provider are expected to provide the arguments specified in the\n *args entry of the Arguments section\n\n As a note, the data provided is not separated by cluster. If separation is desired, see SplitBlockProvider.\n\n Arguments:\n *args:\n List of arguments that are expected to be in the following order, with the specified types\n train_columns: List[str]\n List of names of columns from a StockDataTable. These will be used to retrieve data\n from the database and construct the returned data blocks\n expectation_columns: List[int]\n List of integers representing the indices of the columns to be used as the target data\n in the generation of the data blocks\n Returns:\n See StockClusterDataManager.retrieve_training_data_movement_targets\n '
if (len(args) <= 1):
raise ValueError(('Expected at least the first argument from the following list;' + ' train_columns: List["str"], expectation_columns: List["int"]'))
columns = args[0]
expectation_columns = None
if (len(args) == 2):
expectation_columns = args[1]
start_date = (datetime.datetime.now() - datetime.timedelta(weeks=(52 * 4)))
start_date = start_date.isoformat()[:10].replace('-', '/')
end_date = datetime.datetime.now().isoformat()[:10].replace('-', '/')
data_retriever = stock_cluster_data_manager.StockClusterDataManager(start_date, end_date, column_list=columns)
return data_retriever.retrieveTrainingDataMovementTargets(expectation_columns=expectation_columns) | def generate_data(self, *args, **kwargs) -> Tuple[(np.ndarray, np.ndarray, np.ndarray, np.ndarray)]:
'Generates data for Consumers to use by clustering together stocks in a time period\n\n The time period for cluster creation is a period of 52 * 4 weeks (approximately 4 years).\n Consumers requiring data from this provider are expected to provide the arguments specified in the\n *args entry of the Arguments section\n\n As a note, the data provided is not separated by cluster. If separation is desired, see SplitBlockProvider.\n\n Arguments:\n *args:\n List of arguments that are expected to be in the following order, with the specified types\n train_columns: List[str]\n List of names of columns from a StockDataTable. These will be used to retrieve data\n from the database and construct the returned data blocks\n expectation_columns: List[int]\n List of integers representing the indices of the columns to be used as the target data\n in the generation of the data blocks\n Returns:\n See StockClusterDataManager.retrieve_training_data_movement_targets\n '
if (len(args) <= 1):
raise ValueError(('Expected at least the first argument from the following list;' + ' train_columns: List["str"], expectation_columns: List["int"]'))
columns = args[0]
expectation_columns = None
if (len(args) == 2):
expectation_columns = args[1]
start_date = (datetime.datetime.now() - datetime.timedelta(weeks=(52 * 4)))
start_date = start_date.isoformat()[:10].replace('-', '/')
end_date = datetime.datetime.now().isoformat()[:10].replace('-', '/')
data_retriever = stock_cluster_data_manager.StockClusterDataManager(start_date, end_date, column_list=columns)
return data_retriever.retrieveTrainingDataMovementTargets(expectation_columns=expectation_columns)<|docstring|>Generates data for Consumers to use by clustering together stocks in a time period
The time period for cluster creation is a period of 52 * 4 weeks (approximately 4 years).
Consumers requiring data from this provider are expected to provide the arguments specified in the
*args entry of the Arguments section
As a note, the data provided is not separated by cluster. If separation is desired, see SplitBlockProvider.
Arguments:
*args:
List of arguments that are expected to be in the following order, with the specified types
train_columns: List[str]
List of names of columns from a StockDataTable. These will be used to retrieve data
from the database and construct the returned data blocks
expectation_columns: List[int]
List of integers representing the indices of the columns to be used as the target data
in the generation of the data blocks
Returns:
See StockClusterDataManager.retrieve_training_data_movement_targets<|endoftext|> |
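A hedged consumer-side sketch of the *args contract described above. The column names are placeholders rather than columns known to exist in the project's StockDataTable, and the provider call itself is left commented out because it needs a configured database.

import datetime

train_columns = ['high_price', 'low_price', 'adj_close', 'volume']   # placeholder names
expectation_columns = [2]        # target the third requested column

# The same 4-year window the provider builds internally:
start = (datetime.datetime.now() - datetime.timedelta(weeks=52 * 4)).isoformat()[:10].replace('-', '/')
end = datetime.datetime.now().isoformat()[:10].replace('-', '/')
print(start, end)

# provider = ClusteredBlockProvider()                      # needs a configured database
# x_tr, y_tr, x_te, y_te = provider.generate_data(train_columns, expectation_columns)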
e8f12f22bbe4b9496cd603c285bdb5d8401a0d30dcfac13830b4c29625e40fef | def _hashlist(items):
'return sha1 hexdigest for a list'
return hashlib.sha1(str(items)).hexdigest() | return sha1 hexdigest for a list | python/lib/python2.7/site-packages/hgext/chgserver.py | _hashlist | gtfarng/Odoo_migrade | 1 | python | def _hashlist(items):
return hashlib.sha1(str(items)).hexdigest() | def _hashlist(items):
return hashlib.sha1(str(items)).hexdigest()<|docstring|>return sha1 hexdigest for a list<|endoftext|> |
cf96cf357714a09ab483cd16897e5efaad31c8691e58dfd08568bd6ea55a3f4b | def _confighash(ui):
'return a quick hash for detecting config/env changes\n\n confighash is the hash of sensitive config items and environment variables.\n\n for chgserver, it is designed that once confighash changes, the server is\n not qualified to serve its client and should redirect the client to a new\n server. different from mtimehash, confighash change will not mark the\n server outdated and exit since the user can have different configs at the\n same time.\n '
sectionitems = []
for section in _configsections:
sectionitems.append(ui.configitems(section))
sectionhash = _hashlist(sectionitems)
envitems = [(k, v) for (k, v) in os.environ.iteritems() if _envre.match(k)]
envhash = _hashlist(sorted(envitems))
return (sectionhash[:6] + envhash[:6]) | return a quick hash for detecting config/env changes
confighash is the hash of sensitive config items and environment variables.
for chgserver, it is designed that once confighash changes, the server is
not qualified to serve its client and should redirect the client to a new
server. different from mtimehash, confighash change will not mark the
server outdated and exit since the user can have different configs at the
same time. | python/lib/python2.7/site-packages/hgext/chgserver.py | _confighash | gtfarng/Odoo_migrade | 1 | python | def _confighash(ui):
'return a quick hash for detecting config/env changes\n\n confighash is the hash of sensitive config items and environment variables.\n\n for chgserver, it is designed that once confighash changes, the server is\n not qualified to serve its client and should redirect the client to a new\n server. different from mtimehash, confighash change will not mark the\n server outdated and exit since the user can have different configs at the\n same time.\n '
sectionitems = []
for section in _configsections:
sectionitems.append(ui.configitems(section))
sectionhash = _hashlist(sectionitems)
envitems = [(k, v) for (k, v) in os.environ.iteritems() if _envre.match(k)]
envhash = _hashlist(sorted(envitems))
return (sectionhash[:6] + envhash[:6]) | def _confighash(ui):
'return a quick hash for detecting config/env changes\n\n confighash is the hash of sensitive config items and environment variables.\n\n for chgserver, it is designed that once confighash changes, the server is\n not qualified to serve its client and should redirect the client to a new\n server. different from mtimehash, confighash change will not mark the\n server outdated and exit since the user can have different configs at the\n same time.\n '
sectionitems = []
for section in _configsections:
sectionitems.append(ui.configitems(section))
sectionhash = _hashlist(sectionitems)
envitems = [(k, v) for (k, v) in os.environ.iteritems() if _envre.match(k)]
envhash = _hashlist(sorted(envitems))
return (sectionhash[:6] + envhash[:6])<|docstring|>return a quick hash for detecting config/env changes
confighash is the hash of sensitive config items and environment variables.
for chgserver, it is designed that once confighash changes, the server is
not qualified to serve its client and should redirect the client to a new
server. different from mtimehash, confighash change will not mark the
server outdated and exit since the user can have different configs at the
same time.<|endoftext|> |
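A standalone sketch of the confighash idea: hash the sensitive config sections plus the matching environment variables and keep only a short prefix of each digest. The section list and the env-var pattern are assumptions, not the exact lists chgserver uses, and the hashing is adapted to Python 3.

import hashlib
import os
import re

_sections = ['extensions', 'ui']                # assumed "sensitive" sections
_envre = re.compile(r'^(HG|CHG|EDITOR|LANG)')   # assumed variable-name pattern

def _hashlist(items):
    return hashlib.sha1(str(items).encode('utf-8')).hexdigest()

def confighash(configitems):
    # configitems: dict mapping section name -> list of (key, value) pairs
    sectionitems = [configitems.get(s, []) for s in _sections]
    envitems = sorted((k, v) for k, v in os.environ.items() if _envre.match(k))
    return _hashlist(sectionitems)[:6] + _hashlist(envitems)[:6]

print(confighash({'ui': [('editor', 'vim')]}) !=
      confighash({'ui': [('editor', 'emacs')]}))   # True: a sensitive item changed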
42e91411e2681448602d9c8592e4d313b6f8cd1f135f63fffd4f1f3a1fd608f3 | def _getmtimepaths(ui):
'get a list of paths that should be checked to detect change\n\n The list will include:\n - extensions (will not cover all files for complex extensions)\n - mercurial/__version__.py\n - python binary\n '
modules = [m for (n, m) in extensions.extensions(ui)]
try:
from mercurial import __version__
modules.append(__version__)
except ImportError:
pass
files = [sys.executable]
for m in modules:
try:
files.append(inspect.getabsfile(m))
except TypeError:
pass
return sorted(set(files)) | get a list of paths that should be checked to detect change
The list will include:
- extensions (will not cover all files for complex extensions)
- mercurial/__version__.py
- python binary | python/lib/python2.7/site-packages/hgext/chgserver.py | _getmtimepaths | gtfarng/Odoo_migrade | 1 | python | def _getmtimepaths(ui):
'get a list of paths that should be checked to detect change\n\n The list will include:\n - extensions (will not cover all files for complex extensions)\n - mercurial/__version__.py\n - python binary\n '
modules = [m for (n, m) in extensions.extensions(ui)]
try:
from mercurial import __version__
modules.append(__version__)
except ImportError:
pass
files = [sys.executable]
for m in modules:
try:
files.append(inspect.getabsfile(m))
except TypeError:
pass
return sorted(set(files)) | def _getmtimepaths(ui):
'get a list of paths that should be checked to detect change\n\n The list will include:\n - extensions (will not cover all files for complex extensions)\n - mercurial/__version__.py\n - python binary\n '
modules = [m for (n, m) in extensions.extensions(ui)]
try:
from mercurial import __version__
modules.append(__version__)
except ImportError:
pass
files = [sys.executable]
for m in modules:
try:
files.append(inspect.getabsfile(m))
except TypeError:
pass
return sorted(set(files))<|docstring|>get a list of paths that should be checked to detect change
The list will include:
- extensions (will not cover all files for complex extensions)
- mercurial/__version__.py
- python binary<|endoftext|> |
c908bd6f28ca34ac041081ef56cf3934985c162731af713324a4df90bbde3782 | def _mtimehash(paths):
"return a quick hash for detecting file changes\n\n mtimehash calls stat on given paths and calculate a hash based on size and\n mtime of each file. mtimehash does not read file content because reading is\n expensive. therefore it's not 100% reliable for detecting content changes.\n it's possible to return different hashes for same file contents.\n it's also possible to return a same hash for different file contents for\n some carefully crafted situation.\n\n for chgserver, it is designed that once mtimehash changes, the server is\n considered outdated immediately and should no longer provide service.\n\n mtimehash is not included in confighash because we only know the paths of\n extensions after importing them (there is imp.find_module but that faces\n race conditions). We need to calculate confighash without importing.\n "
def trystat(path):
try:
st = os.stat(path)
return (st.st_mtime, st.st_size)
except OSError:
pass
return _hashlist(map(trystat, paths))[:12] | return a quick hash for detecting file changes
mtimehash calls stat on given paths and calculates a hash based on size and
mtime of each file. mtimehash does not read file content because reading is
expensive. therefore it's not 100% reliable for detecting content changes.
it's possible to return different hashes for same file contents.
it's also possible to return a same hash for different file contents for
some carefully crafted situation.
for chgserver, it is designed that once mtimehash changes, the server is
considered outdated immediately and should no longer provide service.
mtimehash is not included in confighash because we only know the paths of
extensions after importing them (there is imp.find_module but that faces
race conditions). We need to calculate confighash without importing. | python/lib/python2.7/site-packages/hgext/chgserver.py | _mtimehash | gtfarng/Odoo_migrade | 1 | python | def _mtimehash(paths):
"return a quick hash for detecting file changes\n\n mtimehash calls stat on given paths and calculate a hash based on size and\n mtime of each file. mtimehash does not read file content because reading is\n expensive. therefore it's not 100% reliable for detecting content changes.\n it's possible to return different hashes for same file contents.\n it's also possible to return a same hash for different file contents for\n some carefully crafted situation.\n\n for chgserver, it is designed that once mtimehash changes, the server is\n considered outdated immediately and should no longer provide service.\n\n mtimehash is not included in confighash because we only know the paths of\n extensions after importing them (there is imp.find_module but that faces\n race conditions). We need to calculate confighash without importing.\n "
def trystat(path):
try:
st = os.stat(path)
return (st.st_mtime, st.st_size)
except OSError:
pass
return _hashlist(map(trystat, paths))[:12] | def _mtimehash(paths):
"return a quick hash for detecting file changes\n\n mtimehash calls stat on given paths and calculate a hash based on size and\n mtime of each file. mtimehash does not read file content because reading is\n expensive. therefore it's not 100% reliable for detecting content changes.\n it's possible to return different hashes for same file contents.\n it's also possible to return a same hash for different file contents for\n some carefully crafted situation.\n\n for chgserver, it is designed that once mtimehash changes, the server is\n considered outdated immediately and should no longer provide service.\n\n mtimehash is not included in confighash because we only know the paths of\n extensions after importing them (there is imp.find_module but that faces\n race conditions). We need to calculate confighash without importing.\n "
def trystat(path):
try:
st = os.stat(path)
return (st.st_mtime, st.st_size)
except OSError:
pass
return _hashlist(map(trystat, paths))[:12]<|docstring|>return a quick hash for detecting file changes
mtimehash calls stat on given paths and calculates a hash based on size and
mtime of each file. mtimehash does not read file content because reading is
expensive. therefore it's not 100% reliable for detecting content changes.
it's possible to return different hashes for same file contents.
it's also possible to return a same hash for different file contents for
some carefully crafted situation.
for chgserver, it is designed that once mtimehash changes, the server is
considered outdated immediately and should no longer provide service.
mtimehash is not included in confighash because we only know the paths of
extensions after importing them (there is imp.find_module but that faces
race conditions). We need to calculate confighash without importing.<|endoftext|> |
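A Python 3 restatement of the stat-based hash sketched above; missing paths contribute None to the hashed list instead of raising, mirroring the trystat fallback. This is an illustration, not the module's code.

import hashlib
import os
import sys

def mtimehash(paths):
    def trystat(path):
        try:
            st = os.stat(path)
            return (st.st_mtime, st.st_size)
        except OSError:
            return None
    return hashlib.sha1(str([trystat(p) for p in paths]).encode('utf-8')).hexdigest()[:12]

print(mtimehash([sys.executable, '/no/such/file']))   # stable until a size or mtime changes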
c29bedf53b552b68737f1553fdb2761a0cb6230e5f7b5260e44f93108f5bfd18 | def attachio(self):
"Attach to client's stdio passed via unix domain socket; all\n channels except cresult will no longer be used\n "
self.clientsock.sendall(struct.pack('>cI', 'I', 1))
clientfds = osutil.recvfds(self.clientsock.fileno())
_log(('received fds: %r\n' % clientfds))
ui = self.ui
ui.flush()
first = self._saveio()
for (fd, (cn, fn, mode)) in zip(clientfds, _iochannels):
assert (fd > 0)
fp = getattr(ui, fn)
os.dup2(fd, fp.fileno())
os.close(fd)
if (not first):
continue
if (fn == 'ferr'):
newfp = fp
else:
if fp.isatty():
bufsize = 1
else:
bufsize = (- 1)
newfp = os.fdopen(fp.fileno(), mode, bufsize)
setattr(ui, fn, newfp)
setattr(self, cn, newfp)
self.cresult.write(struct.pack('>i', len(clientfds))) | Attach to client's stdio passed via unix domain socket; all
channels except cresult will no longer be used | python/lib/python2.7/site-packages/hgext/chgserver.py | attachio | gtfarng/Odoo_migrade | 1 | python | def attachio(self):
"Attach to client's stdio passed via unix domain socket; all\n channels except cresult will no longer be used\n "
self.clientsock.sendall(struct.pack('>cI', 'I', 1))
clientfds = osutil.recvfds(self.clientsock.fileno())
_log(('received fds: %r\n' % clientfds))
ui = self.ui
ui.flush()
first = self._saveio()
for (fd, (cn, fn, mode)) in zip(clientfds, _iochannels):
assert (fd > 0)
fp = getattr(ui, fn)
os.dup2(fd, fp.fileno())
os.close(fd)
if (not first):
continue
if (fn == 'ferr'):
newfp = fp
else:
if fp.isatty():
bufsize = 1
else:
bufsize = (- 1)
newfp = os.fdopen(fp.fileno(), mode, bufsize)
setattr(ui, fn, newfp)
setattr(self, cn, newfp)
self.cresult.write(struct.pack('>i', len(clientfds))) | def attachio(self):
"Attach to client's stdio passed via unix domain socket; all\n channels except cresult will no longer be used\n "
self.clientsock.sendall(struct.pack('>cI', 'I', 1))
clientfds = osutil.recvfds(self.clientsock.fileno())
_log(('received fds: %r\n' % clientfds))
ui = self.ui
ui.flush()
first = self._saveio()
for (fd, (cn, fn, mode)) in zip(clientfds, _iochannels):
assert (fd > 0)
fp = getattr(ui, fn)
os.dup2(fd, fp.fileno())
os.close(fd)
if (not first):
continue
if (fn == 'ferr'):
newfp = fp
else:
if fp.isatty():
bufsize = 1
else:
bufsize = (- 1)
newfp = os.fdopen(fp.fileno(), mode, bufsize)
setattr(ui, fn, newfp)
setattr(self, cn, newfp)
self.cresult.write(struct.pack('>i', len(clientfds)))<|docstring|>Attach to client's stdio passed via unix domain socket; all
channels except cresult will no longer be used<|endoftext|> |
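A sketch of the SCM_RIGHTS file-descriptor passing that the attachio handshake above relies on, using socket.send_fds/recv_fds (Python 3.9+, Unix only). It illustrates the mechanism, not the osutil.recvfds implementation.

import os
import socket

parent, child = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
r, w = os.pipe()

socket.send_fds(parent, [b'I'], [w])              # ship the pipe's write end across
msg, fds, flags, addr = socket.recv_fds(child, 1, 1)

os.write(fds[0], b'hello via passed fd\n')        # the received fd is a fresh descriptor
os.close(fds[0])
os.close(w)
print(os.read(r, 64))                             # b'hello via passed fd\n'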
a181b25f7dcf3e691e84edd6107faf1803e78fa750f9d79028f42b0523c86501 | def validate(self):
'Reload the config and check if the server is up to date\n\n Read a list of \'\x00\' separated arguments.\n Write a non-empty list of \'\x00\' separated instruction strings or \'\x00\'\n if the list is empty.\n An instruction string could be either:\n - "unlink $path", the client should unlink the path to stop the\n outdated server.\n - "redirect $path", the client should attempt to connect to $path\n first. If it does not work, start a new server. It implies\n "reconnect".\n - "exit $n", the client should exit directly with code n.\n This may happen if we cannot parse the config.\n - "reconnect", the client should close the connection and\n reconnect.\n If neither "reconnect" nor "redirect" is included in the instruction\n list, the client can continue with this server after completing all\n the instructions.\n '
args = self._readlist()
try:
(self.ui, lui) = _loadnewui(self.ui, args)
except error.ParseError as inst:
dispatch._formatparse(self.ui.warn, inst)
self.ui.flush()
self.cresult.write('exit 255')
return
newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
insts = []
if (newhash.mtimehash != self.hashstate.mtimehash):
addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
insts.append(('unlink %s' % addr))
if self.hashstate.mtimehash:
insts.append('reconnect')
if (newhash.confighash != self.hashstate.confighash):
addr = _hashaddress(self.baseaddress, newhash.confighash)
insts.append(('redirect %s' % addr))
_log(('validate: %s\n' % insts))
self.cresult.write(('\x00'.join(insts) or '\x00')) | Reload the config and check if the server is up to date
Read a list of '\x00' separated arguments.
Write a non-empty list of '\x00' separated instruction strings or '\x00'
if the list is empty.
An instruction string could be either:
- "unlink $path", the client should unlink the path to stop the
outdated server.
- "redirect $path", the client should attempt to connect to $path
first. If it does not work, start a new server. It implies
"reconnect".
- "exit $n", the client should exit directly with code n.
This may happen if we cannot parse the config.
- "reconnect", the client should close the connection and
reconnect.
If neither "reconnect" nor "redirect" is included in the instruction
list, the client can continue with this server after completing all
the instructions. | python/lib/python2.7/site-packages/hgext/chgserver.py | validate | gtfarng/Odoo_migrade | 1 | python | def validate(self):
'Reload the config and check if the server is up to date\n\n Read a list of \'\x00\' separated arguments.\n Write a non-empty list of \'\x00\' separated instruction strings or \'\x00\'\n if the list is empty.\n An instruction string could be either:\n - "unlink $path", the client should unlink the path to stop the\n outdated server.\n - "redirect $path", the client should attempt to connect to $path\n first. If it does not work, start a new server. It implies\n "reconnect".\n - "exit $n", the client should exit directly with code n.\n This may happen if we cannot parse the config.\n - "reconnect", the client should close the connection and\n reconnect.\n If neither "reconnect" nor "redirect" is included in the instruction\n list, the client can continue with this server after completing all\n the instructions.\n '
args = self._readlist()
try:
(self.ui, lui) = _loadnewui(self.ui, args)
except error.ParseError as inst:
dispatch._formatparse(self.ui.warn, inst)
self.ui.flush()
self.cresult.write('exit 255')
return
newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
insts = []
if (newhash.mtimehash != self.hashstate.mtimehash):
addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
insts.append(('unlink %s' % addr))
if self.hashstate.mtimehash:
insts.append('reconnect')
if (newhash.confighash != self.hashstate.confighash):
addr = _hashaddress(self.baseaddress, newhash.confighash)
insts.append(('redirect %s' % addr))
_log(('validate: %s\n' % insts))
self.cresult.write(('\x00'.join(insts) or '\x00')) | def validate(self):
'Reload the config and check if the server is up to date\n\n Read a list of \'\x00\' separated arguments.\n Write a non-empty list of \'\x00\' separated instruction strings or \'\x00\'\n if the list is empty.\n An instruction string could be either:\n - "unlink $path", the client should unlink the path to stop the\n outdated server.\n - "redirect $path", the client should attempt to connect to $path\n first. If it does not work, start a new server. It implies\n "reconnect".\n - "exit $n", the client should exit directly with code n.\n This may happen if we cannot parse the config.\n - "reconnect", the client should close the connection and\n reconnect.\n If neither "reconnect" nor "redirect" is included in the instruction\n list, the client can continue with this server after completing all\n the instructions.\n '
args = self._readlist()
try:
(self.ui, lui) = _loadnewui(self.ui, args)
except error.ParseError as inst:
dispatch._formatparse(self.ui.warn, inst)
self.ui.flush()
self.cresult.write('exit 255')
return
newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
insts = []
if (newhash.mtimehash != self.hashstate.mtimehash):
addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
insts.append(('unlink %s' % addr))
if self.hashstate.mtimehash:
insts.append('reconnect')
if (newhash.confighash != self.hashstate.confighash):
addr = _hashaddress(self.baseaddress, newhash.confighash)
insts.append(('redirect %s' % addr))
_log(('validate: %s\n' % insts))
self.cresult.write(('\x00'.join(insts) or '\x00'))<|docstring|>Reload the config and check if the server is up to date
Read a list of '\x00' separated arguments.
Write a non-empty list of '\x00' separated instruction strings or '\x00'
if the list is empty.
An instruction string could be either:
- "unlink $path", the client should unlink the path to stop the
outdated server.
- "redirect $path", the client should attempt to connect to $path
first. If it does not work, start a new server. It implies
"reconnect".
- "exit $n", the client should exit directly with code n.
This may happen if we cannot parse the config.
- "reconnect", the client should close the connection and
reconnect.
If neither "reconnect" nor "redirect" is included in the instruction
list, the client can continue with this server after completing all
the instructions.<|endoftext|> |
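A client-side sketch of how the '\x00'-separated instruction strings documented above could be interpreted; the actions are simplified stubs, not chg's actual client.

import os
import sys

def handle_instructions(payload: bytes):
    insts = [i for i in payload.split(b'\x00') if i]
    for inst in insts:
        if inst.startswith(b'unlink '):
            try:
                os.unlink(inst[len(b'unlink '):])     # stop the outdated server's socket
            except OSError:
                pass
        elif inst.startswith(b'redirect '):
            return ('redirect', inst[len(b'redirect '):])
        elif inst.startswith(b'exit '):
            sys.exit(int(inst[len(b'exit '):]))
        elif inst == b'reconnect':
            return ('reconnect', None)
    return ('continue', None)

print(handle_instructions(b'unlink /tmp/chg-old\x00reconnect'))   # ('reconnect', None)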
713ee3640c30cee5d7022b97059a13625889467b10910f4931530bdc16befaf9 | def chdir(self):
'Change current directory\n\n Note that the behavior of --cwd option is bit different from this.\n It does not affect --config parameter.\n '
path = self._readstr()
if (not path):
return
_log(('chdir to %r\n' % path))
os.chdir(path) | Change current directory
Note that the behavior of the --cwd option is a bit different from this.
It does not affect --config parameter. | python/lib/python2.7/site-packages/hgext/chgserver.py | chdir | gtfarng/Odoo_migrade | 1 | python | def chdir(self):
'Change current directory\n\n Note that the behavior of --cwd option is bit different from this.\n It does not affect --config parameter.\n '
path = self._readstr()
if (not path):
return
_log(('chdir to %r\n' % path))
os.chdir(path) | def chdir(self):
'Change current directory\n\n Note that the behavior of --cwd option is bit different from this.\n It does not affect --config parameter.\n '
path = self._readstr()
if (not path):
return
_log(('chdir to %r\n' % path))
os.chdir(path)<|docstring|>Change current directory
Note that the behavior of the --cwd option is a bit different from this.
It does not affect --config parameter.<|endoftext|> |
a90ffe013dc1863b700451bdcfee49be697436950489c4ef7586e3fb67909972 | def setumask(self):
'Change umask'
mask = struct.unpack('>I', self._read(4))[0]
_log(('setumask %r\n' % mask))
os.umask(mask) | Change umask | python/lib/python2.7/site-packages/hgext/chgserver.py | setumask | gtfarng/Odoo_migrade | 1 | python | def setumask(self):
mask = struct.unpack('>I', self._read(4))[0]
_log(('setumask %r\n' % mask))
os.umask(mask) | def setumask(self):
mask = struct.unpack('>I', self._read(4))[0]
_log(('setumask %r\n' % mask))
os.umask(mask)<|docstring|>Change umask<|endoftext|> |
62db587af3b7f9a37634ea257a0de344044f9a51c6b3f7f1dffd5fbb74ae6fc4 | def getpager(self):
"Read cmdargs and write pager command to r-channel if enabled\n\n If pager isn't enabled, this writes '\x00' because channeledoutput\n does not allow to write empty data.\n "
args = self._readlist()
try:
(cmd, _func, args, options, _cmdoptions) = dispatch._parse(self.ui, args)
except (error.Abort, error.AmbiguousCommand, error.CommandError, error.UnknownCommand):
cmd = None
options = {}
if ((not cmd) or ('pager' not in options)):
self.cresult.write('\x00')
return
pagercmd = _setuppagercmd(self.ui, options, cmd)
if pagercmd:
if (util.safehasattr(signal, 'SIGPIPE') and (signal.getsignal(signal.SIGPIPE) == signal.SIG_IGN)):
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
self.cresult.write(pagercmd)
else:
self.cresult.write('\x00') | Read cmdargs and write pager command to r-channel if enabled
If pager isn't enabled, this writes '\x00' because channeledoutput
does not allow to write empty data. | python/lib/python2.7/site-packages/hgext/chgserver.py | getpager | gtfarng/Odoo_migrade | 1 | python | def getpager(self):
"Read cmdargs and write pager command to r-channel if enabled\n\n If pager isn't enabled, this writes '\x00' because channeledoutput\n does not allow to write empty data.\n "
args = self._readlist()
try:
(cmd, _func, args, options, _cmdoptions) = dispatch._parse(self.ui, args)
except (error.Abort, error.AmbiguousCommand, error.CommandError, error.UnknownCommand):
cmd = None
options = {}
if ((not cmd) or ('pager' not in options)):
self.cresult.write('\x00')
return
pagercmd = _setuppagercmd(self.ui, options, cmd)
if pagercmd:
if (util.safehasattr(signal, 'SIGPIPE') and (signal.getsignal(signal.SIGPIPE) == signal.SIG_IGN)):
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
self.cresult.write(pagercmd)
else:
self.cresult.write('\x00') | def getpager(self):
"Read cmdargs and write pager command to r-channel if enabled\n\n If pager isn't enabled, this writes '\x00' because channeledoutput\n does not allow to write empty data.\n "
args = self._readlist()
try:
(cmd, _func, args, options, _cmdoptions) = dispatch._parse(self.ui, args)
except (error.Abort, error.AmbiguousCommand, error.CommandError, error.UnknownCommand):
cmd = None
options = {}
if ((not cmd) or ('pager' not in options)):
self.cresult.write('\x00')
return
pagercmd = _setuppagercmd(self.ui, options, cmd)
if pagercmd:
if (util.safehasattr(signal, 'SIGPIPE') and (signal.getsignal(signal.SIGPIPE) == signal.SIG_IGN)):
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
self.cresult.write(pagercmd)
else:
self.cresult.write('\x00')<|docstring|>Read cmdargs and write pager command to r-channel if enabled
If pager isn't enabled, this writes '\x00' because channeledoutput
does not allow to write empty data.<|endoftext|> |
fcaae9b6fa390ad5488968b20dc3c4677125a394e204adf05ddfe26405914b13 | def setenv(self):
'Clear and update os.environ\n\n Note that not all variables can make an effect on the running process.\n '
l = self._readlist()
try:
newenv = dict((s.split('=', 1) for s in l))
except ValueError:
raise ValueError('unexpected value in setenv request')
_log(('setenv: %r\n' % sorted(newenv.keys())))
os.environ.clear()
os.environ.update(newenv) | Clear and update os.environ
Note that not all variables can make an effect on the running process. | python/lib/python2.7/site-packages/hgext/chgserver.py | setenv | gtfarng/Odoo_migrade | 1 | python | def setenv(self):
'Clear and update os.environ\n\n Note that not all variables can make an effect on the running process.\n '
l = self._readlist()
try:
newenv = dict((s.split('=', 1) for s in l))
except ValueError:
raise ValueError('unexpected value in setenv request')
_log(('setenv: %r\n' % sorted(newenv.keys())))
os.environ.clear()
os.environ.update(newenv) | def setenv(self):
'Clear and update os.environ\n\n Note that not all variables can make an effect on the running process.\n '
l = self._readlist()
try:
newenv = dict((s.split('=', 1) for s in l))
except ValueError:
raise ValueError('unexpected value in setenv request')
_log(('setenv: %r\n' % sorted(newenv.keys())))
os.environ.clear()
os.environ.update(newenv)<|docstring|>Clear and update os.environ
Note that not all variables can make an effect on the running process.<|endoftext|> |
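A side-effect-free sketch of the parse-and-replace step used by setenv above, applied to a throwaway dict instead of the real os.environ.

def apply_env(target: dict, pairs):
    try:
        newenv = dict(s.split('=', 1) for s in pairs)
    except ValueError:
        raise ValueError('unexpected value in setenv request')
    target.clear()
    target.update(newenv)
    return target

env = {'OLD': '1'}
print(apply_env(env, ['PATH=/usr/bin:/bin', 'LANG=C.UTF-8']))
# {'PATH': '/usr/bin:/bin', 'LANG': 'C.UTF-8'}; 'OLD' is gone, mirroring os.environ.clear()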
dd53e240a96f7035e6f34e502d0cd904120513081b8c862e1ac9ed9a761fc90e | @staticmethod
def image_to_lines(image_array: numpy.array, offset: int, rsleep: int, lsleep: int, app: str) -> None:
'\n\t\tConverts an image array to mouseclicks.\n\t\t:param image_array:\n\t\tA numpy array of bools, where False represents a click, and True represents no click.\n\t\t:param offset:\n\t\tAn int which provides spacing between each pixel in image_array. Usefull to adjust for brush size used in whatever this will be outputting for.\n\t\t:param rlseep:\n\t\tint which designates how long in second the mouse will take drawing a line\n\t\t:param rsleep:\n\t\tint which designates how long in seconds to pause at end of row\n\t\t:return:\n\t\t'
(startpositionx, startpositiony) = pyautogui.position()
for row in image_array:
xoffset = 0
white = palette[app][1]
alreadydrawing = [white, 0, 0]
row[(- 1)] = [255, 105, 180]
for value in row:
closest = closest_color(value, palette[app][0])
if (closest == white):
alreadydrawing[0] = closest
xoffset += offset
continue
if (alreadydrawing[0] == closest):
alreadydrawing[2] += offset
xoffset += offset
continue
alreadydrawing[0] = closest
pyautogui.mouseUp()
pyautogui.moveTo((startpositionx + alreadydrawing[1]), startpositiony)
pyautogui.mouseDown()
pyautogui.dragTo(((startpositionx + alreadydrawing[1]) + alreadydrawing[2]), startpositiony, duration=lsleep, button='left')
pyautogui.mouseUp()
alreadydrawing[1] = xoffset
alreadydrawing[2] = 0
change_color(closest, app)
time.sleep(lsleep)
xoffset += offset
pyautogui.mouseUp()
change_color(white, app)
startpositiony += offset
pyautogui.mouseUp()
pyautogui.moveTo(startpositionx, startpositiony)
time.sleep(rsleep) | Converts an image array to mouseclicks.
:param image_array:
A numpy array of bools, where False represents a click, and True represents no click.
:param offset:
An int which provides spacing between each pixel in image_array. Useful to adjust for the brush size used in whatever this will be outputting for.
:param lsleep:
int which designates how long in seconds the mouse will take drawing a line
:param rsleep:
int which designates how long in seconds to pause at end of row
:return: | src/mouse_automate_color.py | image_to_lines | Nekose/Mouseomate | 322 | python | @staticmethod
def image_to_lines(image_array: numpy.array, offset: int, rsleep: int, lsleep: int, app: str) -> None:
'\n\t\tConverts an image array to mouseclicks.\n\t\t:param image_array:\n\t\tA numpy array of bools, where False represents a click, and True represents no click.\n\t\t:param offset:\n\t\tAn int which provides spacing between each pixel in image_array. Usefull to adjust for brush size used in whatever this will be outputting for.\n\t\t:param rlseep:\n\t\tint which designates how long in second the mouse will take drawing a line\n\t\t:param rsleep:\n\t\tint which designates how long in seconds to pause at end of row\n\t\t:return:\n\t\t'
(startpositionx, startpositiony) = pyautogui.position()
for row in image_array:
xoffset = 0
white = palette[app][1]
alreadydrawing = [white, 0, 0]
row[(- 1)] = [255, 105, 180]
for value in row:
closest = closest_color(value, palette[app][0])
if (closest == white):
alreadydrawing[0] = closest
xoffset += offset
continue
if (alreadydrawing[0] == closest):
alreadydrawing[2] += offset
xoffset += offset
continue
alreadydrawing[0] = closest
pyautogui.mouseUp()
pyautogui.moveTo((startpositionx + alreadydrawing[1]), startpositiony)
pyautogui.mouseDown()
pyautogui.dragTo(((startpositionx + alreadydrawing[1]) + alreadydrawing[2]), startpositiony, duration=lsleep, button='left')
pyautogui.mouseUp()
alreadydrawing[1] = xoffset
alreadydrawing[2] = 0
change_color(closest, app)
time.sleep(lsleep)
xoffset += offset
pyautogui.mouseUp()
change_color(white, app)
startpositiony += offset
pyautogui.mouseUp()
pyautogui.moveTo(startpositionx, startpositiony)
time.sleep(rsleep) | @staticmethod
def image_to_lines(image_array: numpy.array, offset: int, rsleep: int, lsleep: int, app: str) -> None:
'\n\t\tConverts an image array to mouseclicks.\n\t\t:param image_array:\n\t\tA numpy array of bools, where False represents a click, and True represents no click.\n\t\t:param offset:\n\t\tAn int which provides spacing between each pixel in image_array. Usefull to adjust for brush size used in whatever this will be outputting for.\n\t\t:param rlseep:\n\t\tint which designates how long in second the mouse will take drawing a line\n\t\t:param rsleep:\n\t\tint which designates how long in seconds to pause at end of row\n\t\t:return:\n\t\t'
(startpositionx, startpositiony) = pyautogui.position()
for row in image_array:
xoffset = 0
white = palette[app][1]
alreadydrawing = [white, 0, 0]
row[(- 1)] = [255, 105, 180]
for value in row:
closest = closest_color(value, palette[app][0])
if (closest == white):
alreadydrawing[0] = closest
xoffset += offset
continue
if (alreadydrawing[0] == closest):
alreadydrawing[2] += offset
xoffset += offset
continue
alreadydrawing[0] = closest
pyautogui.mouseUp()
pyautogui.moveTo((startpositionx + alreadydrawing[1]), startpositiony)
pyautogui.mouseDown()
pyautogui.dragTo(((startpositionx + alreadydrawing[1]) + alreadydrawing[2]), startpositiony, duration=lsleep, button='left')
pyautogui.mouseUp()
alreadydrawing[1] = xoffset
alreadydrawing[2] = 0
change_color(closest, app)
time.sleep(lsleep)
xoffset += offset
pyautogui.mouseUp()
change_color(white, app)
startpositiony += offset
pyautogui.mouseUp()
pyautogui.moveTo(startpositionx, startpositiony)
time.sleep(rsleep)<|docstring|>Converts an image array to mouseclicks.
:param image_array:
A numpy array of bools, where False represents a click, and True represents no click.
:param offset:
An int which provides spacing between each pixel in image_array. Useful to adjust for the brush size used in whatever this will be outputting for.
:param lsleep:
int which designates how long in seconds the mouse will take drawing a line
:param rsleep:
int which designates how long in seconds to pause at end of row
:return:<|endoftext|> |
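A standalone sketch of the core idea in image_to_lines: collapse each pixel row into horizontal runs of the nearest palette colour and skip the background colour. The closest_color below is a plain squared-distance match and may differ from the project's helper.

def closest_color(pixel, palette):
    return min(palette, key=lambda c: sum((a - b) ** 2 for a, b in zip(pixel, c)))

def row_to_runs(row, palette, background, offset=1):
    runs, x = [], 0                     # each run: (colour, start_x, length)
    for pixel in row:
        colour = closest_color(pixel, palette)
        if colour != background:
            if runs and runs[-1][0] == colour and runs[-1][1] + runs[-1][2] == x:
                runs[-1] = (colour, runs[-1][1], runs[-1][2] + offset)
            else:
                runs.append((colour, x, offset))
        x += offset
    return runs

palette = [(255, 255, 255), (0, 0, 0), (255, 0, 0)]
row = [(250, 250, 250), (10, 10, 10), (5, 5, 5), (240, 10, 10)]
print(row_to_runs(row, palette, background=(255, 255, 255)))
# [((0, 0, 0), 1, 2), ((255, 0, 0), 3, 1)]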
2ebca353232bc4513a3e7ebeef18751d033ab3d87f52d55657677be275ac990a | def fold(s):
'auxiliary function: shorten long option values for output'
offset = (64 * ' ')
maxlen = 70
sep = '|'
parts = s.split(sep)
line = ''
out = ''
for f in range(0, len(parts)):
if (f != (len(parts) - 1)):
line = ((line + parts[f]) + sep)
else:
line = (line + parts[f])
if (len(line) >= maxlen):
out = (((out + line) + '\n') + offset)
line = ''
out = (out + line)
return out | auxiliary function: shorten long option values for output | CTANLoad+Out/CTANLoad+Out.py | fold | GuenterPartosch/Convert_CTAN | 1 | python | def fold(s):
offset = (64 * ' ')
maxlen = 70
sep = '|'
parts = s.split(sep)
line = ''
out = ''
for f in range(0, len(parts)):
if (f != (len(parts) - 1)):
line = ((line + parts[f]) + sep)
else:
line = (line + parts[f])
if (len(line) >= maxlen):
out = (((out + line) + '\n') + offset)
line = ''
out = (out + line)
return out | def fold(s):
offset = (64 * ' ')
maxlen = 70
sep = '|'
parts = s.split(sep)
line = ''
out = ''
for f in range(0, len(parts)):
if (f != (len(parts) - 1)):
line = ((line + parts[f]) + sep)
else:
line = (line + parts[f])
if (len(line) >= maxlen):
out = (((out + line) + '\n') + offset)
line = ''
out = (out + line)
return out<|docstring|>auxiliary function: shorten long option values for output<|endoftext|> |
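A small usage illustration for fold() as defined above (assuming it is in scope): a long '|'-separated value is wrapped at roughly 70 characters, with continuation lines pushed right by the 64-space offset.

keys = '|'.join('key{:02d}'.format(i) for i in range(30))   # 179-character value
wrapped = fold(keys)
print(wrapped.count('\n'))   # 2: the value now spans three lines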
f542c41b5b2a3033844e9bba9bb25adee60be06a42dbb95868401469bcf3f523 | def remove_LaTeX_file(t):
'auxiliary function: remove named LaTeX file.'
if delete_temporary_file:
if (t in latex_files):
if path.exists((args.output_name + t)):
os.remove((args.output_name + t))
if verbose:
print("* Warning: LaTeX file '{}' removed".format((args.output_name + t)))
else:
pass | auxiliary function: remove named LaTeX file. | CTANLoad+Out/CTANLoad+Out.py | remove_LaTeX_file | GuenterPartosch/Convert_CTAN | 1 | python | def remove_LaTeX_file(t):
if delete_temporary_file:
if (t in latex_files):
if path.exists((args.output_name + t)):
os.remove((args.output_name + t))
if verbose:
print("* Warning: LaTeX file '{}' removed".format((args.output_name + t)))
else:
pass | def remove_LaTeX_file(t):
if delete_temporary_file:
if (t in latex_files):
if path.exists((args.output_name + t)):
os.remove((args.output_name + t))
if verbose:
print("* Warning: LaTeX file '{}' removed".format((args.output_name + t)))
else:
pass<|docstring|>auxiliary function: remove named LaTeX file.<|endoftext|> |
30904e570217edc220d07a836f712163a0f75d121ee596e876e389972427e1ba | def remove_other_file(t):
'auxiliary function: remove named other file.'
if delete_temporary_file:
if (t in other_files):
if path.exists((args.output_name + t)):
os.remove((args.output_name + t))
if verbose:
print("* Warning: file '{}' removed".format((args.output_name + t)))
else:
pass | auxiliary function: remove named other file. | CTANLoad+Out/CTANLoad+Out.py | remove_other_file | GuenterPartosch/Convert_CTAN | 1 | python | def remove_other_file(t):
if delete_temporary_file:
if (t in other_files):
if path.exists((args.output_name + t)):
os.remove((args.output_name + t))
if verbose:
print("* Warning: file '{}' removed".format((args.output_name + t)))
else:
pass | def remove_other_file(t):
if delete_temporary_file:
if (t in other_files):
if path.exists((args.output_name + t)):
os.remove((args.output_name + t))
if verbose:
print("* Warning: file '{}' removed".format((args.output_name + t)))
else:
pass<|docstring|>auxiliary function: remove named other file.<|endoftext|> |
32da3a9a7a9b8c51056c21c0f48393d9644b7f9f8099023fb1c459a3b6b1df45 | def func_call_load():
'CTANLoad is processed.'
print(('-' * 80))
print('* Info: CTANLoad (Load)')
try:
process_load = subprocess.run(call_load, capture_output=True, universal_newlines=True)
load_message = process_load.stdout
load_errormessage = process_load.stderr
if (len(load_errormessage) > 0):
print('* Error: Error in CTANLoad (Load):')
print(load_errormessage)
sys.exit()
else:
print(load_message)
except:
sys.exit('* Error: Error in CTANLoad (Load)')
if verbose:
print('* Info: CTANLoad (Load) completed') | CTANLoad is processed. | CTANLoad+Out/CTANLoad+Out.py | func_call_load | GuenterPartosch/Convert_CTAN | 1 | python | def func_call_load():
print(('-' * 80))
print('* Info: CTANLoad (Load)')
try:
process_load = subprocess.run(call_load, capture_output=True, universal_newlines=True)
load_message = process_load.stdout
load_errormessage = process_load.stderr
if (len(load_errormessage) > 0):
print('* Error: Error in CTANLoad (Load):')
print(load_errormessage)
sys.exit()
else:
print(load_message)
except:
sys.exit('* Error: Error in CTANLoad (Load)')
if verbose:
print('* Info: CTANLoad (Load) completed') | def func_call_load():
print(('-' * 80))
print('* Info: CTANLoad (Load)')
try:
process_load = subprocess.run(call_load, capture_output=True, universal_newlines=True)
load_message = process_load.stdout
load_errormessage = process_load.stderr
if (len(load_errormessage) > 0):
print('* Error: Error in CTANLoad (Load):')
print(load_errormessage)
sys.exit()
else:
print(load_message)
except:
sys.exit('* Error: Error in CTANLoad (Load)')
if verbose:
print('* Info: CTANLoad (Load) completed')<|docstring|>CTANLoad is processed.<|endoftext|> |
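A generic restatement of the pattern the func_call_* helpers repeat: run a command with captured output, treat anything on stderr as fatal, otherwise print stdout. The command shown is illustrative only; note that a tool which merely warns on stderr would also be treated as a failure, as in the original.

import subprocess
import sys

def run_step(label, cmd):
    print('-' * 80)
    print('* Info: ' + label)
    proc = subprocess.run(cmd, capture_output=True, universal_newlines=True)
    if proc.stderr:
        print('* Error: Error in ' + label)
        print(proc.stderr)
        sys.exit(1)
    print(proc.stdout)
    print('* Info: ' + label + ' completed')

run_step('Python version check', [sys.executable, '-c', 'import sys; print(sys.version)'])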
85065a6c839e453c832770677515160eb330b3024075879d40567ff09c4e20e2 | def func_call_check():
'CTANLoad (Check) is processed.'
print(('-' * 80))
print('* Info: CTANLoad (Check)')
try:
process_check = subprocess.run(call_check, universal_newlines=True)
except:
sys.exit('* Error: Error in CTANLoad (Check)')
if verbose:
print('* Info: CTANLoad (Check) completed') | CTANLoad (Check) is processed. | CTANLoad+Out/CTANLoad+Out.py | func_call_check | GuenterPartosch/Convert_CTAN | 1 | python | def func_call_check():
print(('-' * 80))
print('* Info: CTANLoad (Check)')
try:
process_check = subprocess.run(call_check, universal_newlines=True)
except:
sys.exit('* Error: Error in CTANLoad (Check)')
if verbose:
print('* Info: CTANLoad (Check) completed') | def func_call_check():
print(('-' * 80))
print('* Info: CTANLoad (Check)')
try:
process_check = subprocess.run(call_check, universal_newlines=True)
except:
sys.exit('* Error: Error in CTANLoad (Check)')
if verbose:
print('* Info: CTANLoad (Check) completed')<|docstring|>CTANLoad (Check) is processed.<|endoftext|> |
68806551da68a99b97b6db118a43e13458535bc3175738025ecc6865ca7f546a | def func_call_regeneration():
'CTANLoad (Regeneration) is processed.'
print(('-' * 80))
print('* Info: CTANLoad (Regeneration)')
try:
process_regeneration = subprocess.run(call_regeneration, capture_output=True, universal_newlines=True)
regeneration_errormessage = process_regeneration.stderr
regeneration_message = process_regeneration.stdout
if (len(regeneration_errormessage) > 0):
print('* Error: Error in CTANLoad (Regeneration)')
print(regeneration_errormessage)
sys.exit()
else:
print(regeneration_message)
except:
sys.exit('* Error: Error in CTANLoad (Regeneration)')
if verbose:
print('* Info: CTANLoad (Regeneration) completed') | CTANLoad (Regeneration) is processed. | CTANLoad+Out/CTANLoad+Out.py | func_call_regeneration | GuenterPartosch/Convert_CTAN | 1 | python | def func_call_regeneration():
print(('-' * 80))
print('* Info: CTANLoad (Regeneration)')
try:
process_regeneration = subprocess.run(call_regeneration, capture_output=True, universal_newlines=True)
regeneration_errormessage = process_regeneration.stderr
regeneration_message = process_regeneration.stdout
if (len(regeneration_errormessage) > 0):
print('* Error: Error in CTANLoad (Regeneration)')
print(regeneration_errormessage)
sys.exit()
else:
print(regeneration_message)
except:
sys.exit('* Error: Error in CTANLoad (Regeneration)')
if verbose:
print('* Info: CTANLoad (Regeneration) completed') | def func_call_regeneration():
print(('-' * 80))
print('* Info: CTANLoad (Regeneration)')
try:
process_regeneration = subprocess.run(call_regeneration, capture_output=True, universal_newlines=True)
regeneration_errormessage = process_regeneration.stderr
regeneration_message = process_regeneration.stdout
if (len(regeneration_errormessage) > 0):
print('* Error: Error in CTANLoad (Regeneration)')
print(regeneration_errormessage)
sys.exit()
else:
print(regeneration_message)
except:
sys.exit('* Error: Error in CTANLoad (Regeneration)')
if verbose:
print('* Info: CTANLoad (Regeneration) completed')<|docstring|>CTANLoad (Regeneration) is processed.<|endoftext|> |
0bbec1c080256ccad9da2ed08c01112f8ca1062ea1021649f8f2ece8a0f3bbeb | def func_call_output():
'CTANOut is processed.'
print(('-' * 80))
print('* Info: CTANOut')
if (mode == 'BibLaTeX'):
remove_other_file('.bib')
elif (mode == 'LaTeX'):
remove_LaTeX_file('.tex')
remove_LaTeX_file('.tap')
remove_LaTeX_file('.top')
remove_LaTeX_file('.xref')
elif (mode == 'RIS'):
remove_other_file('.ris')
elif (mode == 'plain'):
remove_other_file('.txt')
elif (mode == 'Excel'):
remove_other_file('.tsv')
else:
pass
try:
process_output = subprocess.run(call_output, capture_output=True, universal_newlines=True)
output_errormessage = process_output.stderr
output_message = process_output.stdout
if (len(output_errormessage) > 0):
print('* Error: Error in CTANOut')
print(output_errormessage)
sys.exit()
else:
print(output_message)
except:
sys.exit('* Error: Error in CTANOut')
if verbose:
print('* Info: CTANOut completed') | CTANOut is processed. | CTANLoad+Out/CTANLoad+Out.py | func_call_output | GuenterPartosch/Convert_CTAN | 1 | python | def func_call_output():
print(('-' * 80))
print('* Info: CTANOut')
if (mode == 'BibLaTeX'):
remove_other_file('.bib')
elif (mode == 'LaTeX'):
remove_LaTeX_file('.tex')
remove_LaTeX_file('.tap')
remove_LaTeX_file('.top')
remove_LaTeX_file('.xref')
elif (mode == 'RIS'):
remove_other_file('.ris')
elif (mode == 'plain'):
remove_other_file('.txt')
elif (mode == 'Excel'):
remove_other_file('.tsv')
else:
pass
try:
process_output = subprocess.run(call_output, capture_output=True, universal_newlines=True)
output_errormessage = process_output.stderr
output_message = process_output.stdout
if (len(output_errormessage) > 0):
print('* Error: Error in CTANOut')
print(output_errormessage)
sys.exit()
else:
print(output_message)
except:
sys.exit('* Error: Error in CTANOut')
if verbose:
print('* Info: CTANOut completed') | def func_call_output():
print(('-' * 80))
print('* Info: CTANOut')
if (mode == 'BibLaTeX'):
remove_other_file('.bib')
elif (mode == 'LaTeX'):
remove_LaTeX_file('.tex')
remove_LaTeX_file('.tap')
remove_LaTeX_file('.top')
remove_LaTeX_file('.xref')
elif (mode == 'RIS'):
remove_other_file('.ris')
elif (mode == 'plain'):
remove_other_file('.txt')
elif (mode == 'Excel'):
remove_other_file('.tsv')
else:
pass
try:
process_output = subprocess.run(call_output, capture_output=True, universal_newlines=True)
output_errormessage = process_output.stderr
output_message = process_output.stdout
if (len(output_errormessage) > 0):
print('* Error: Error in CTANOut')
print(output_errormessage)
sys.exit()
else:
print(output_message)
except:
sys.exit('* Error: Error in CTANOut')
if verbose:
print('* Info: CTANOut completed')<|docstring|>CTANOut is processed.<|endoftext|> |
f0fb1971b3f71b603aa125cf1534e353f24a72a60924ebdf24450d96eb89b5a0 | def func_call_compile():
'Compile the generated LaTeX file.'
print(('-' * 80))
print('* Info: Compilation')
for e in ['.aux', '.idx', '.ind', '.log', '.ilg', '.pdf', '.out']:
remove_LaTeX_file(e)
print('\n* Info: XeLaTeX')
if verbose:
print('* Info: Call:', call_compile)
try:
process_compile1 = subprocess.run(call_compile, capture_output=True, universal_newlines=True)
compile1_errormessage = process_compile1.stderr
compile1_message = process_compile1.stdout
if (len(compile1_errormessage) > 0):
print('* Error: Error in compilation')
print(compile1_errormessage)
sys.exit()
elif verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".log'\n"))
print('* Info: Compilation OK')
except:
if verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".log'"))
sys.exit('* Error: Error in compilation')
print(('.' * 80))
print('* Info: XeLaTeX')
if verbose:
print('* Info: Call:', call_compile)
try:
process_compile2 = subprocess.run(call_compile, capture_output=True, universal_newlines=True)
compile2_errormessage = process_compile2.stderr
compile2_message = process_compile2.stdout
if (len(compile2_errormessage) > 0):
print('* Error: Error in compilation:')
print(compile2_errormessage)
sys.exit()
elif verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".log'\n"))
print('* Info: Compilation OK')
except:
if verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".log'"))
sys.exit('* Error: Error in compilation')
print(('.' * 80))
print('* Info: Makeindex')
if verbose:
print('* Info: Call:', call_index)
try:
process_index = subprocess.run(call_index, capture_output=True, universal_newlines=True)
index_errormessage = process_index.stderr
index_message = process_index.stdout
except:
if verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".ilg'\n"))
sys.exit('* Error: Error in Makeindex')
if verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".ilg'\n"))
print('* Info: Makeindex OK')
print(('.' * 80))
print('* Info: XeLaTeX')
if verbose:
print('* Info: Call:', call_compile)
try:
process_compile3 = subprocess.run(call_compile, capture_output=True, universal_newlines=True)
compile3_errormessage = process_compile3.stderr
compile3_message = process_compile3.stdout
if (len(compile3_errormessage) > 0):
print('* Error: Error in compilation:')
print(compile3_errormessage)
sys.exit()
elif verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".log'"))
print(((("* Info: result in '" + direc) + output_name) + ".pdf'\n"))
print('* Info: Compilation OK')
except:
if verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".log'"))
sys.exit('* Error: Error in compilation')
for e in ['.aux', '.idx', '.ind', '.out']:
remove_LaTeX_file(e) | Compile the generated LaTeX file. | CTANLoad+Out/CTANLoad+Out.py | func_call_compile | GuenterPartosch/Convert_CTAN | 1 | python | def func_call_compile():
print(('-' * 80))
print('* Info: Compilation')
for e in ['.aux', '.idx', '.ind', '.log', '.ilg', '.pdf', '.out']:
remove_LaTeX_file(e)
print('\n* Info: XeLaTeX')
if verbose:
print('* Info: Call:', call_compile)
try:
process_compile1 = subprocess.run(call_compile, capture_output=True, universal_newlines=True)
compile1_errormessage = process_compile1.stderr
compile1_message = process_compile1.stdout
if (len(compile1_errormessage) > 0):
print('* Error: Error in compilation')
print(compile1_errormessage)
sys.exit()
elif verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".log'\n"))
print('* Info: Compilation OK')
except:
if verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".log'"))
sys.exit('* Error: Error in compilation')
print(('.' * 80))
print('* Info: XeLaTeX')
if verbose:
print('* Info: Call:', call_compile)
try:
process_compile2 = subprocess.run(call_compile, capture_output=True, universal_newlines=True)
compile2_errormessage = process_compile2.stderr
compile2_message = process_compile2.stdout
if (len(compile2_errormessage) > 0):
print('* Error: Error in compilation:')
print(compile2_errormessage)
sys.exit()
elif verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".log'\n"))
print('* Info: Compilation OK')
except:
if verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".log'"))
sys.exit('* Error: Error in compilation')
print(('.' * 80))
print('* Info: Makeindex')
if verbose:
print('* Info: Call:', call_index)
try:
process_index = subprocess.run(call_index, capture_output=True, universal_newlines=True)
index_errormessage = process_index.stderr
index_message = process_index.stdout
except:
if verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".ilg'\n"))
sys.exit('* Error: Error in Makeindex')
if verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".ilg'\n"))
print('* Info: Makeindex OK')
print(('.' * 80))
print('* Info: XeLaTeX')
if verbose:
print('* Info: Call:', call_compile)
try:
process_compile3 = subprocess.run(call_compile, capture_output=True, universal_newlines=True)
compile3_errormessage = process_compile3.stderr
compile3_message = process_compile3.stdout
if (len(compile3_errormessage) > 0):
print('* Error: Error in compilation:')
print(compile3_errormessage)
sys.exit()
elif verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".log'"))
print(((("* Info: result in '" + direc) + output_name) + ".pdf'\n"))
print('* Info: Compilation OK')
except:
if verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".log'"))
sys.exit('* Error: Error in compilation')
for e in ['.aux', '.idx', '.ind', '.out']:
remove_LaTeX_file(e) | def func_call_compile():
print(('-' * 80))
print('* Info: Compilation')
for e in ['.aux', '.idx', '.ind', '.log', '.ilg', '.pdf', '.out']:
remove_LaTeX_file(e)
print('\n* Info: XeLaTeX')
if verbose:
print('* Info: Call:', call_compile)
try:
process_compile1 = subprocess.run(call_compile, capture_output=True, universal_newlines=True)
compile1_errormessage = process_compile1.stderr
compile1_message = process_compile1.stdout
if (len(compile1_errormessage) > 0):
print('* Error: Error in compilation')
print(compile1_errormessage)
sys.exit()
elif verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".log'\n"))
print('* Info: Compilation OK')
except:
if verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".log'"))
sys.exit('* Error: Error in compilation')
print(('.' * 80))
print('* Info: XeLaTeX')
if verbose:
print('* Info: Call:', call_compile)
try:
process_compile2 = subprocess.run(call_compile, capture_output=True, universal_newlines=True)
compile2_errormessage = process_compile2.stderr
compile2_message = process_compile2.stdout
if (len(compile2_errormessage) > 0):
print('* Error: Error in compilation:')
print(compile2_errormessage)
sys.exit()
elif verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".log'\n"))
print('* Info: Compilation OK')
except:
if verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".log'"))
sys.exit('* Error: Error in compilation')
print(('.' * 80))
print('* Info: Makeindex')
if verbose:
print('* Info: Call:', call_index)
try:
process_index = subprocess.run(call_index, capture_output=True, universal_newlines=True)
index_errormessage = process_index.stderr
index_message = process_index.stdout
except:
if verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".ilg'\n"))
sys.exit('* Error: Error in Makeindex')
if verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".ilg'\n"))
print('* Info: Makeindex OK')
print(('.' * 80))
print('* Info: XeLaTeX')
if verbose:
print('* Info: Call:', call_compile)
try:
process_compile3 = subprocess.run(call_compile, capture_output=True, universal_newlines=True)
compile3_errormessage = process_compile3.stderr
compile3_message = process_compile3.stdout
if (len(compile3_errormessage) > 0):
print('* Error: Error in compilation:')
print(compile3_errormessage)
sys.exit()
elif verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".log'"))
print(((("* Info: result in '" + direc) + output_name) + ".pdf'\n"))
print('* Info: Compilation OK')
except:
if verbose:
print(((("* Info: more information in '" + direc) + output_name) + ".log'"))
sys.exit('* Error: Error in compilation')
for e in ['.aux', '.idx', '.ind', '.out']:
remove_LaTeX_file(e)<|docstring|>Compile the generated LaTeX file.<|endoftext|> |
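The row above ends a helper that drives XeLaTeX twice, runs MakeIndex, and then runs XeLaTeX once more. Below is a minimal sketch of that cycle, assuming xelatex and makeindex are on the PATH and that the LaTeX job is simply named "all"; the flag choices and file names are illustrative assumptions, not taken from CTANLoad+Out itself.

import subprocess
import sys

def run_tool(cmd, log_hint):
    """Run one external tool; abort with a pointer to its log file on failure."""
    try:
        proc = subprocess.run(cmd, capture_output=True, universal_newlines=True)
    except OSError as err:  # tool missing or not executable
        sys.exit("* Error: could not start {0}: {1}".format(cmd[0], err))
    if proc.returncode != 0:
        sys.exit("* Error: {0} failed, see '{1}'".format(cmd[0], log_hint))
    return proc.stdout

job = "all"  # assumed job name; the real script derives it from its -o option
passes = [
    (["xelatex", "-interaction=nonstopmode", job + ".tex"], job + ".log"),
    (["xelatex", "-interaction=nonstopmode", job + ".tex"], job + ".log"),
    (["makeindex", job + ".idx"], job + ".ilg"),
    (["xelatex", "-interaction=nonstopmode", job + ".tex"], job + ".log"),
]
for cmd, hint in passes:
    print("* Info: Call:", " ".join(cmd))
    run_tool(cmd, hint)
print("* Info: result in '{0}.pdf'".format(job))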
766c070903d870c54f1d1a79c3c7de10a65b0809f08ba7c79627bd0a1d0d8897 | def head():
'Show the given options.'
print('* Info: CTANLoad+Out')
if verbose:
print('* Info: Call:', call)
if (('-c' in call) or ('--check_integrity' in call)):
print(' {0:5} {1:55}'.format('-c', (('(' + integrity_text) + ')')))
if (('-f' in call) or ('--download_files' in call)):
print(' {0:5} {1:55}'.format('-f', (('(' + download_text) + ')')))
if (('-l' in call) or ('--lists' in call)):
print(' {0:5} {1:55}'.format('-l', (('(' + (lists_text + ')')[0:50]) + ellipse)))
if (('-mo' in call) or ('--make_output' in call)):
print(' {0:5} {1:55}'.format('-mo', (('(' + (make_output_text + ')')[0:50]) + ellipse)))
if (('-mt' in call) or ('--make_topics' in call)):
print(' {0:5} {1:55}'.format('-mt', (('(' + (topics_text + ')')[0:50]) + ellipse)))
if (('-p' in call) or ('--pdf_output' in call)):
print(' {0:5} {1:55}'.format('-p', (('(' + pdf_text) + ')')))
if (('-r' in call) or ('--regenerate_pickle_files' in call)):
print(' {0:5} {1:55}'.format('-r', (('(' + regenerate_text) + ')')))
if (('-stat' in call) or ('--statistics' in call)):
print(' {0:5} {1:55}'.format('-stat', (('(' + statistics_text) + ')')))
if (('-v' in call) or ('--verbose' in call)):
print(' {0:5} {1:55}'.format('-v', (('(' + verbose_text) + ')')))
if (('-b' in call) or ('--btype' in call)):
print(' {0:5} {2:55} {1}'.format('-b', btype, (('(' + btype_text) + ')')))
if (('-d' in call) or ('--directory' in call)):
print(' {0:5} {2:55} {1}'.format('-d', direc, (('(' + direc_text) + ')')))
if (('-m' in call) or ('--mode' in call)):
print(' {0:5} {2:55} {1}'.format('-m', mode, (('(' + mode_text) + ')')))
if (('-n' in call) or ('--number' in call)):
print(' {0:5} {2:55} {1}'.format('-n', number, (('(' + number_text) + ')')))
if (('-o' in call) or ('--output' in call)):
print(' {0:5} {2:55} {1}'.format('-o', args.output_name, (('(' + output_text) + ')')))
if (('-k' in call) or ('--key' in call)):
print(' {0:5} {2:55} {1}'.format('-k', fold(key), (('(' + (key_text + ')')[0:50]) + ellipse)))
if (('-kl' in call) or ('--key_load' in call)):
print(' {0:5} {2:55} {1}'.format('-k', fold(key_load), (('(' + key_load_text) + ')')))
if (('-ko' in call) or ('--key_out' in call)):
print(' {0:5} {2:55} {1}'.format('-k', fold(key_out), (('(' + key_out_text) + ')')))
if (('-s' in call) or ('--skip' in call)):
print(' {0:5} {2:55} {1}'.format('-s', skip, (('(' + skip_text) + ')')))
if (('-t' in call) or ('--template' in call)):
print(' {0:5} {2:55} {1}'.format('-t', fold(template), (('(' + template_text) + ')')))
if (('-tl' in call) or ('--template_load' in call)):
print(' {0:5} {2:55} {1}'.format('-tl', template_load, (('(' + template_load_text) + ')')))
if (('-to' in call) or ('--template_out' in call)):
print(' {0:5} {2:55} {1}'.format('-to', template_out, (('(' + template_out_text) + ')')))
if (('-A' in call) or ('--author_template' in call)):
print(' {0:5} {2:55} {1}'.format('-A', fold(author_template), (('(' + author_template_text) + ')')))
if (('-Al' in call) or ('-author_load_template' in call)):
print(' {0:5} {2:55} {1}'.format('-Al', fold(author_load_template), (('(' + author_load_template_text) + ')')))
if (('-Ao' in call) or ('--author_outd_template' in call)):
print(' {0:5} {2:55} {1}'.format('-Ao', fold(author_out_template), (('(' + author_out_template_text) + ')')))
print('\n')
if regeneration:
print('* Info: CTANLoad (Regeneration) to be executed')
if load:
print('* Info: CTANLoad (Load) to be executed')
if check:
print('* Info: CTANLoad (Check) to be executed')
if output:
print('* Info: CTANOut to be executed')
if compile:
print('* Info: XeLaTeX and MakeIndex to be executed')
print('\n') | Show the given options. | CTANLoad+Out/CTANLoad+Out.py | head | GuenterPartosch/Convert_CTAN | 1 | python | def head():
print('* Info: CTANLoad+Out')
if verbose:
print('* Info: Call:', call)
if (('-c' in call) or ('--check_integrity' in call)):
print(' {0:5} {1:55}'.format('-c', (('(' + integrity_text) + ')')))
if (('-f' in call) or ('--download_files' in call)):
print(' {0:5} {1:55}'.format('-f', (('(' + download_text) + ')')))
if (('-l' in call) or ('--lists' in call)):
print(' {0:5} {1:55}'.format('-l', (('(' + (lists_text + ')')[0:50]) + ellipse)))
if (('-mo' in call) or ('--make_output' in call)):
print(' {0:5} {1:55}'.format('-mo', (('(' + (make_output_text + ')')[0:50]) + ellipse)))
if (('-mt' in call) or ('--make_topics' in call)):
print(' {0:5} {1:55}'.format('-mt', (('(' + (topics_text + ')')[0:50]) + ellipse)))
if (('-p' in call) or ('--pdf_output' in call)):
print(' {0:5} {1:55}'.format('-p', (('(' + pdf_text) + ')')))
if (('-r' in call) or ('--regenerate_pickle_files' in call)):
print(' {0:5} {1:55}'.format('-r', (('(' + regenerate_text) + ')')))
if (('-stat' in call) or ('--statistics' in call)):
print(' {0:5} {1:55}'.format('-stat', (('(' + statistics_text) + ')')))
if (('-v' in call) or ('--verbose' in call)):
print(' {0:5} {1:55}'.format('-v', (('(' + verbose_text) + ')')))
if (('-b' in call) or ('--btype' in call)):
print(' {0:5} {2:55} {1}'.format('-b', btype, (('(' + btype_text) + ')')))
if (('-d' in call) or ('--directory' in call)):
print(' {0:5} {2:55} {1}'.format('-d', direc, (('(' + direc_text) + ')')))
if (('-m' in call) or ('--mode' in call)):
print(' {0:5} {2:55} {1}'.format('-m', mode, (('(' + mode_text) + ')')))
if (('-n' in call) or ('--number' in call)):
print(' {0:5} {2:55} {1}'.format('-n', number, (('(' + number_text) + ')')))
if (('-o' in call) or ('--output' in call)):
print(' {0:5} {2:55} {1}'.format('-o', args.output_name, (('(' + output_text) + ')')))
if (('-k' in call) or ('--key' in call)):
print(' {0:5} {2:55} {1}'.format('-k', fold(key), (('(' + (key_text + ')')[0:50]) + ellipse)))
if (('-kl' in call) or ('--key_load' in call)):
print(' {0:5} {2:55} {1}'.format('-k', fold(key_load), (('(' + key_load_text) + ')')))
if (('-ko' in call) or ('--key_out' in call)):
print(' {0:5} {2:55} {1}'.format('-k', fold(key_out), (('(' + key_out_text) + ')')))
if (('-s' in call) or ('--skip' in call)):
print(' {0:5} {2:55} {1}'.format('-s', skip, (('(' + skip_text) + ')')))
if (('-t' in call) or ('--template' in call)):
print(' {0:5} {2:55} {1}'.format('-t', fold(template), (('(' + template_text) + ')')))
if (('-tl' in call) or ('--template_load' in call)):
print(' {0:5} {2:55} {1}'.format('-tl', template_load, (('(' + template_load_text) + ')')))
if (('-to' in call) or ('--template_out' in call)):
print(' {0:5} {2:55} {1}'.format('-to', template_out, (('(' + template_out_text) + ')')))
if (('-A' in call) or ('--author_template' in call)):
print(' {0:5} {2:55} {1}'.format('-A', fold(author_template), (('(' + author_template_text) + ')')))
if (('-Al' in call) or ('-author_load_template' in call)):
print(' {0:5} {2:55} {1}'.format('-Al', fold(author_load_template), (('(' + author_load_template_text) + ')')))
if (('-Ao' in call) or ('--author_outd_template' in call)):
print(' {0:5} {2:55} {1}'.format('-Ao', fold(author_out_template), (('(' + author_out_template_text) + ')')))
print('\n')
if regeneration:
print('* Info: CTANLoad (Regeneration) to be executed')
if load:
print('* Info: CTANLoad (Load) to be executed')
if check:
print('* Info: CTANLoad (Check) to be executed')
if output:
print('* Info: CTANOut to be executed')
if compile:
print('* Info: XeLaTeX and MakeIndex to be executed')
print('\n') | def head():
print('* Info: CTANLoad+Out')
if verbose:
print('* Info: Call:', call)
if (('-c' in call) or ('--check_integrity' in call)):
print(' {0:5} {1:55}'.format('-c', (('(' + integrity_text) + ')')))
if (('-f' in call) or ('--download_files' in call)):
print(' {0:5} {1:55}'.format('-f', (('(' + download_text) + ')')))
if (('-l' in call) or ('--lists' in call)):
print(' {0:5} {1:55}'.format('-l', (('(' + (lists_text + ')')[0:50]) + ellipse)))
if (('-mo' in call) or ('--make_output' in call)):
print(' {0:5} {1:55}'.format('-mo', (('(' + (make_output_text + ')')[0:50]) + ellipse)))
if (('-mt' in call) or ('--make_topics' in call)):
print(' {0:5} {1:55}'.format('-mt', (('(' + (topics_text + ')')[0:50]) + ellipse)))
if (('-p' in call) or ('--pdf_output' in call)):
print(' {0:5} {1:55}'.format('-p', (('(' + pdf_text) + ')')))
if (('-r' in call) or ('--regenerate_pickle_files' in call)):
print(' {0:5} {1:55}'.format('-r', (('(' + regenerate_text) + ')')))
if (('-stat' in call) or ('--statistics' in call)):
print(' {0:5} {1:55}'.format('-stat', (('(' + statistics_text) + ')')))
if (('-v' in call) or ('--verbose' in call)):
print(' {0:5} {1:55}'.format('-v', (('(' + verbose_text) + ')')))
if (('-b' in call) or ('--btype' in call)):
print(' {0:5} {2:55} {1}'.format('-b', btype, (('(' + btype_text) + ')')))
if (('-d' in call) or ('--directory' in call)):
print(' {0:5} {2:55} {1}'.format('-d', direc, (('(' + direc_text) + ')')))
if (('-m' in call) or ('--mode' in call)):
print(' {0:5} {2:55} {1}'.format('-m', mode, (('(' + mode_text) + ')')))
if (('-n' in call) or ('--number' in call)):
print(' {0:5} {2:55} {1}'.format('-n', number, (('(' + number_text) + ')')))
if (('-o' in call) or ('--output' in call)):
print(' {0:5} {2:55} {1}'.format('-o', args.output_name, (('(' + output_text) + ')')))
if (('-k' in call) or ('--key' in call)):
print(' {0:5} {2:55} {1}'.format('-k', fold(key), (('(' + (key_text + ')')[0:50]) + ellipse)))
if (('-kl' in call) or ('--key_load' in call)):
print(' {0:5} {2:55} {1}'.format('-k', fold(key_load), (('(' + key_load_text) + ')')))
if (('-ko' in call) or ('--key_out' in call)):
print(' {0:5} {2:55} {1}'.format('-k', fold(key_out), (('(' + key_out_text) + ')')))
if (('-s' in call) or ('--skip' in call)):
print(' {0:5} {2:55} {1}'.format('-s', skip, (('(' + skip_text) + ')')))
if (('-t' in call) or ('--template' in call)):
print(' {0:5} {2:55} {1}'.format('-t', fold(template), (('(' + template_text) + ')')))
if (('-tl' in call) or ('--template_load' in call)):
print(' {0:5} {2:55} {1}'.format('-tl', template_load, (('(' + template_load_text) + ')')))
if (('-to' in call) or ('--template_out' in call)):
print(' {0:5} {2:55} {1}'.format('-to', template_out, (('(' + template_out_text) + ')')))
if (('-A' in call) or ('--author_template' in call)):
print(' {0:5} {2:55} {1}'.format('-A', fold(author_template), (('(' + author_template_text) + ')')))
if (('-Al' in call) or ('-author_load_template' in call)):
print(' {0:5} {2:55} {1}'.format('-Al', fold(author_load_template), (('(' + author_load_template_text) + ')')))
if (('-Ao' in call) or ('--author_outd_template' in call)):
print(' {0:5} {2:55} {1}'.format('-Ao', fold(author_out_template), (('(' + author_out_template_text) + ')')))
print('\n')
if regeneration:
print('* Info: CTANLoad (Regeneration) to be executed')
if load:
print('* Info: CTANLoad (Load) to be executed')
if check:
print('* Info: CTANLoad (Check) to be executed')
if output:
print('* Info: CTANOut to be executed')
if compile:
print('* Info: XeLaTeX and MakeIndex to be executed')
print('\n')<|docstring|>Show the given options.<|endoftext|> |
c8f72a86bb5bbe0871c64799780fad6bbf9b93ecff5f0cb004ac0f0d3ec55820 | def main():
'Main Function'
print(('=' * 80))
head()
if regeneration:
func_call_regeneration()
if load:
func_call_load()
if check:
func_call_check()
if output:
func_call_output()
if compile:
if path.exists(((direc + output_name) + '.tex')):
func_call_compile()
else:
print("* Warning: LaTeX file '{0}' does not exist".format(((direc + output_name) + '.tex')))
print(('-' * 80)) | Main Function | CTANLoad+Out/CTANLoad+Out.py | main | GuenterPartosch/Convert_CTAN | 1 | python | def main():
print(('=' * 80))
head()
if regeneration:
func_call_regeneration()
if load:
func_call_load()
if check:
func_call_check()
if output:
func_call_output()
if compile:
if path.exists(((direc + output_name) + '.tex')):
func_call_compile()
else:
print("* Warning: LaTeX file '{0}' does not exist".format(((direc + output_name) + '.tex')))
print(('-' * 80)) | def main():
print(('=' * 80))
head()
if regeneration:
func_call_regeneration()
if load:
func_call_load()
if check:
func_call_check()
if output:
func_call_output()
if compile:
if path.exists(((direc + output_name) + '.tex')):
func_call_compile()
else:
print("* Warning: LaTeX file '{0}' does not exist".format(((direc + output_name) + '.tex')))
print(('-' * 80))<|docstring|>Main Function<|endoftext|> |
eff9ceaed33c94753aaf9890050b45a2635dec531a065a7976a89c63921cbfac | def get_model(points, w, mu, sigma, is_training, bn_decay=None, weigth_decay=0.005, add_noise=False, num_classes=40):
' Classification PointNet, input is BxNx3, output Bx40 '
batch_size = points.get_shape()[0].value
n_points = points.get_shape()[1].value
n_gaussians = w.shape[0].value
res = int(np.round(np.power(n_gaussians, (1.0 / 3.0))))
fv = tf_util.get_3dmfv(points, w, mu, sigma, flatten=False)
if add_noise:
noise = tf.cond(is_training, (lambda : tf.random_normal(shape=tf.shape(fv), mean=0.0, stddev=0.01, dtype=tf.float32)), (lambda : tf.zeros(shape=tf.shape(fv))))
fv = (fv + noise)
grid_fisher = tf.reshape(fv, [batch_size, (- 1), res, res, res])
grid_fisher = tf.transpose(grid_fisher, [0, 2, 3, 4, 1])
layer = 1
net = inception_module(grid_fisher, n_filters=64, kernel_sizes=[3, 5], is_training=is_training, bn_decay=bn_decay, scope=('inception' + str(layer)))
layer = (layer + 1)
net = inception_module(net, n_filters=128, kernel_sizes=[3, 5], is_training=is_training, bn_decay=bn_decay, scope=('inception' + str(layer)))
layer = (layer + 1)
net = inception_module(net, n_filters=256, kernel_sizes=[3, 5], is_training=is_training, bn_decay=bn_decay, scope=('inception' + str(layer)))
layer = (layer + 1)
net = tf_util.max_pool3d(net, [2, 2, 2], scope=('maxpool' + str(layer)), stride=[2, 2, 2], padding='SAME')
layer = (layer + 1)
net = inception_module(net, n_filters=256, kernel_sizes=[3, 5], is_training=is_training, bn_decay=bn_decay, scope=('inception' + str(layer)))
layer = (layer + 1)
net = inception_module(net, n_filters=512, kernel_sizes=[3, 5], is_training=is_training, bn_decay=bn_decay, scope=('inception' + str(layer)))
layer = (layer + 1)
net = tf_util.max_pool3d(net, [2, 2, 2], scope=('maxpool' + str(layer)), stride=[2, 2, 2], padding='SAME')
net = tf.reshape(net, [batch_size, (- 1)])
net = tf_util.fully_connected(net, 1024, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay, weigth_decay=weigth_decay)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp1')
net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay, weigth_decay=weigth_decay)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp2')
net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training, scope='fc3', bn_decay=bn_decay, weigth_decay=weigth_decay)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp3')
net = tf_util.fully_connected(net, num_classes, activation_fn=None, scope='fc4', is_training=is_training, weigth_decay=weigth_decay)
return (net, fv) | Classification PointNet, input is BxNx3, output Bx40 | models/3dmfv_net_cls.py | get_model | mhwasil/3DmFV-Net | 172 | python | def get_model(points, w, mu, sigma, is_training, bn_decay=None, weigth_decay=0.005, add_noise=False, num_classes=40):
' '
batch_size = points.get_shape()[0].value
n_points = points.get_shape()[1].value
n_gaussians = w.shape[0].value
res = int(np.round(np.power(n_gaussians, (1.0 / 3.0))))
fv = tf_util.get_3dmfv(points, w, mu, sigma, flatten=False)
if add_noise:
noise = tf.cond(is_training, (lambda : tf.random_normal(shape=tf.shape(fv), mean=0.0, stddev=0.01, dtype=tf.float32)), (lambda : tf.zeros(shape=tf.shape(fv))))
fv = (fv + noise)
grid_fisher = tf.reshape(fv, [batch_size, (- 1), res, res, res])
grid_fisher = tf.transpose(grid_fisher, [0, 2, 3, 4, 1])
layer = 1
net = inception_module(grid_fisher, n_filters=64, kernel_sizes=[3, 5], is_training=is_training, bn_decay=bn_decay, scope=('inception' + str(layer)))
layer = (layer + 1)
net = inception_module(net, n_filters=128, kernel_sizes=[3, 5], is_training=is_training, bn_decay=bn_decay, scope=('inception' + str(layer)))
layer = (layer + 1)
net = inception_module(net, n_filters=256, kernel_sizes=[3, 5], is_training=is_training, bn_decay=bn_decay, scope=('inception' + str(layer)))
layer = (layer + 1)
net = tf_util.max_pool3d(net, [2, 2, 2], scope=('maxpool' + str(layer)), stride=[2, 2, 2], padding='SAME')
layer = (layer + 1)
net = inception_module(net, n_filters=256, kernel_sizes=[3, 5], is_training=is_training, bn_decay=bn_decay, scope=('inception' + str(layer)))
layer = (layer + 1)
net = inception_module(net, n_filters=512, kernel_sizes=[3, 5], is_training=is_training, bn_decay=bn_decay, scope=('inception' + str(layer)))
layer = (layer + 1)
net = tf_util.max_pool3d(net, [2, 2, 2], scope=('maxpool' + str(layer)), stride=[2, 2, 2], padding='SAME')
net = tf.reshape(net, [batch_size, (- 1)])
net = tf_util.fully_connected(net, 1024, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay, weigth_decay=weigth_decay)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp1')
net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay, weigth_decay=weigth_decay)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp2')
net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training, scope='fc3', bn_decay=bn_decay, weigth_decay=weigth_decay)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp3')
net = tf_util.fully_connected(net, num_classes, activation_fn=None, scope='fc4', is_training=is_training, weigth_decay=weigth_decay)
return (net, fv) | def get_model(points, w, mu, sigma, is_training, bn_decay=None, weigth_decay=0.005, add_noise=False, num_classes=40):
' '
batch_size = points.get_shape()[0].value
n_points = points.get_shape()[1].value
n_gaussians = w.shape[0].value
res = int(np.round(np.power(n_gaussians, (1.0 / 3.0))))
fv = tf_util.get_3dmfv(points, w, mu, sigma, flatten=False)
if add_noise:
noise = tf.cond(is_training, (lambda : tf.random_normal(shape=tf.shape(fv), mean=0.0, stddev=0.01, dtype=tf.float32)), (lambda : tf.zeros(shape=tf.shape(fv))))
fv = (fv + noise)
grid_fisher = tf.reshape(fv, [batch_size, (- 1), res, res, res])
grid_fisher = tf.transpose(grid_fisher, [0, 2, 3, 4, 1])
layer = 1
net = inception_module(grid_fisher, n_filters=64, kernel_sizes=[3, 5], is_training=is_training, bn_decay=bn_decay, scope=('inception' + str(layer)))
layer = (layer + 1)
net = inception_module(net, n_filters=128, kernel_sizes=[3, 5], is_training=is_training, bn_decay=bn_decay, scope=('inception' + str(layer)))
layer = (layer + 1)
net = inception_module(net, n_filters=256, kernel_sizes=[3, 5], is_training=is_training, bn_decay=bn_decay, scope=('inception' + str(layer)))
layer = (layer + 1)
net = tf_util.max_pool3d(net, [2, 2, 2], scope=('maxpool' + str(layer)), stride=[2, 2, 2], padding='SAME')
layer = (layer + 1)
net = inception_module(net, n_filters=256, kernel_sizes=[3, 5], is_training=is_training, bn_decay=bn_decay, scope=('inception' + str(layer)))
layer = (layer + 1)
net = inception_module(net, n_filters=512, kernel_sizes=[3, 5], is_training=is_training, bn_decay=bn_decay, scope=('inception' + str(layer)))
layer = (layer + 1)
net = tf_util.max_pool3d(net, [2, 2, 2], scope=('maxpool' + str(layer)), stride=[2, 2, 2], padding='SAME')
net = tf.reshape(net, [batch_size, (- 1)])
net = tf_util.fully_connected(net, 1024, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay, weigth_decay=weigth_decay)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp1')
net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay, weigth_decay=weigth_decay)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp2')
net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training, scope='fc3', bn_decay=bn_decay, weigth_decay=weigth_decay)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp3')
net = tf_util.fully_connected(net, num_classes, activation_fn=None, scope='fc4', is_training=is_training, weigth_decay=weigth_decay)
return (net, fv)<|docstring|>Classification PointNet, input is BxNx3, output Bx40<|endoftext|> |
ae79a826c603bec6cc782fb236dfb8aafc20a201f55ce2df403aea6d397e1266 | def get_loss(pred, label):
' pred: B*NUM_CLASSES,\n label: B, '
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
classify_loss = tf.reduce_mean(loss)
tf.summary.scalar('classify loss', classify_loss)
return classify_loss | pred: B*NUM_CLASSES,
label: B, | models/3dmfv_net_cls.py | get_loss | mhwasil/3DmFV-Net | 172 | python | def get_loss(pred, label):
' pred: B*NUM_CLASSES,\n label: B, '
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
classify_loss = tf.reduce_mean(loss)
tf.summary.scalar('classify loss', classify_loss)
return classify_loss | def get_loss(pred, label):
' pred: B*NUM_CLASSES,\n label: B, '
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
classify_loss = tf.reduce_mean(loss)
tf.summary.scalar('classify loss', classify_loss)
return classify_loss<|docstring|>pred: B*NUM_CLASSES,
label: B,<|endoftext|> |
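Taken together, get_model and get_loss above describe a complete TF1-style classification graph: point clouds enter as BxNx3, a 3DmFV grid representation is built, and an Inception-style 3D CNN produces class logits. Below is a sketch of how the pair might be wired up, assuming the repository's tf_util module is importable (the code above depends on it) and that get_model and get_loss are in scope; the placeholder shapes, the 8x8x8 Gaussian grid, and the constant GMM parameters are assumptions, not the repository's own values.

import numpy as np
import tensorflow as tf  # TF1-style graph mode, matching the code above

BATCH, N_POINTS, NUM_CLASSES = 32, 1024, 40
RES = 8
N_GAUSS = RES ** 3  # get_model assumes the number of Gaussians is a perfect cube

points_pl = tf.placeholder(tf.float32, shape=(BATCH, N_POINTS, 3))
labels_pl = tf.placeholder(tf.int32, shape=(BATCH,))
is_training_pl = tf.placeholder(tf.bool, shape=())

# Stand-in Gaussian mixture on a uniform grid; the repository builds these with
# its own utilities, so the values and shapes here are only placeholders.
w = tf.constant(np.full(N_GAUSS, 1.0 / N_GAUSS), dtype=tf.float32)
mu = tf.constant(np.random.rand(N_GAUSS, 3), dtype=tf.float32)
sigma = tf.constant(np.full((N_GAUSS, 3), 1.0 / RES), dtype=tf.float32)

pred, fv = get_model(points_pl, w, mu, sigma, is_training_pl,
                     bn_decay=0.99, num_classes=NUM_CLASSES)
loss = get_loss(pred, labels_pl)
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)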
8de176db65b4ba73b74069280d1c807523efa8c8e0c8d562b942e8c4203d79f0 | def setUp(self):
'Override to set up a mock database and install the plugins.'
logging.disable()
self.database = Mock()
self.database.reports_overviews.find_one.return_value = dict(_id='id')
self.database.sessions.find_one.return_value = None
self.success = '{"ok": true}'
self.session = dict(user='jadoe', email='[email protected]', session_expiration_datetime=datetime.max.replace(tzinfo=timezone.utc))
self.injection_plugin = bottle.install(InjectionPlugin(self.database, 'database'))
self.auth_plugin = bottle.install(AuthPlugin()) | Override to set up a mock database and install the plugins. | components/server/tests/external/routes/plugins/test_route_auth_plugin.py | setUp | ICTU/quality-time | 33 | python | def setUp(self):
logging.disable()
self.database = Mock()
self.database.reports_overviews.find_one.return_value = dict(_id='id')
self.database.sessions.find_one.return_value = None
self.success = '{"ok": true}'
self.session = dict(user='jadoe', email='[email protected]', session_expiration_datetime=datetime.max.replace(tzinfo=timezone.utc))
self.injection_plugin = bottle.install(InjectionPlugin(self.database, 'database'))
self.auth_plugin = bottle.install(AuthPlugin()) | def setUp(self):
logging.disable()
self.database = Mock()
self.database.reports_overviews.find_one.return_value = dict(_id='id')
self.database.sessions.find_one.return_value = None
self.success = '{"ok": true}'
self.session = dict(user='jadoe', email='[email protected]', session_expiration_datetime=datetime.max.replace(tzinfo=timezone.utc))
self.injection_plugin = bottle.install(InjectionPlugin(self.database, 'database'))
self.auth_plugin = bottle.install(AuthPlugin())<|docstring|>Override to set up a mock database and install the plugins.<|endoftext|> |
43f6671c282a5e15690e16b16ed59536be4ae8afc3c47296bdb694256f9e891c | def tearDown(self):
'Override to remove the plugins and reset the logging.'
bottle.uninstall(self.auth_plugin)
bottle.uninstall(self.injection_plugin)
logging.disable(logging.NOTSET) | Override to remove the plugins and reset the logging. | components/server/tests/external/routes/plugins/test_route_auth_plugin.py | tearDown | ICTU/quality-time | 33 | python | def tearDown(self):
bottle.uninstall(self.auth_plugin)
bottle.uninstall(self.injection_plugin)
logging.disable(logging.NOTSET) | def tearDown(self):
bottle.uninstall(self.auth_plugin)
bottle.uninstall(self.injection_plugin)
logging.disable(logging.NOTSET)<|docstring|>Override to remove the plugins and reset the logging.<|endoftext|> |
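The setUp/tearDown pair above relies on bottle's plugin mechanism: bottle.install() registers a plugin for every route and returns it, and bottle.uninstall() takes that same object back out. Below is a small sketch of the pattern with a stand-in dependency-injection plugin; DemoInjectionPlugin is an illustrative class, not quality-time's actual InjectionPlugin or AuthPlugin.

import bottle

class DemoInjectionPlugin:
    """Hand a fixed object to every route callback under a chosen keyword."""
    api = 2

    def __init__(self, value, keyword):
        self.value = value
        self.keyword = keyword

    def apply(self, callback, route):
        def wrapper(*args, **kwargs):
            kwargs[self.keyword] = self.value  # inject the dependency
            return callback(*args, **kwargs)
        return wrapper

plugin = bottle.install(DemoInjectionPlugin({"reports": []}, "database"))

@bottle.get("/demo")
def demo(database):
    # The injected "database" keyword arrives through the plugin wrapper.
    return {"ok": True, "reports": database["reports"]}

# Mirrors tearDown(): after this the route no longer receives the keyword.
bottle.uninstall(plugin)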