<import_from_future_stmt> absolute_import<import_from_stmt>.kafka_base_monitor KafkaBaseMonitor<class_stmt>StatsMonitor(KafkaBaseMonitor)<block_start>regex="statsrequest:*:*"<def_stmt>setup self settings<block_start>'''
Setup kafka
'''<line_sep>KafkaBaseMonitor.setup(self settings)<block_end><def_stmt>handle self key value<block_start>'''
Processes a valid stats request
@param key: The key that matched the request
@param value: The value associated with the key
'''<line_sep># break down key
elements=key.split(":")<line_sep>stats=elements[1]<line_sep>appid=elements[2]<line_sep>uuid=value<line_sep># log we received the stats request
extras=self.get_log_dict('stats' appid uuid=uuid)<line_sep>self.logger.info('Received {s} stats request'.format(s=stats) extra=extras)<line_sep>extras={}<if_stmt>stats<eq>'all'<block_start>extras=self.get_all_stats()<block_end><elif_stmt>stats<eq>'kafka-monitor'<block_start>extras=self.get_kafka_monitor_stats()<block_end><elif_stmt>stats<eq>'redis-monitor'<block_start>extras=self.get_redis_monitor_stats()<block_end><elif_stmt>stats<eq>'crawler'<block_start>extras=self.get_crawler_stats()<block_end><elif_stmt>stats<eq>'spider'<block_start>extras=self.get_spider_stats()<block_end><elif_stmt>stats<eq>'machine'<block_start>extras=self.get_machine_stats()<block_end><elif_stmt>stats<eq>'queue'<block_start>extras=self.get_queue_stats()<block_end><elif_stmt>stats<eq>'rest'<block_start>extras=self.get_rest_stats()<block_end><else_stmt><block_start>self.logger.warn('Received invalid stats request: {s}'.format(s=stats) extra=extras)<line_sep><return><block_end>extras['stats']=stats<line_sep>extras['appid']=appid<line_sep>extras['uuid']=uuid<line_sep>extras['server_time']=int(self.get_current_time())<if_stmt>self._send_to_kafka(extras)<block_start>extras['success']=<true><line_sep>self.logger.info('Sent stats to kafka' extra=extras)<block_end><else_stmt><block_start>extras['success']=<false><line_sep>self.logger.error('Failed to send stats to kafka' extra=extras)<block_end><block_end><def_stmt>get_all_stats self<block_start>'''
Gather all stats objects
'''<line_sep>self.logger.debug("Gathering all stats")<line_sep>the_dict={}<line_sep>the_dict['kafka-monitor']=self.get_kafka_monitor_stats()<line_sep>the_dict['redis-monitor']=self.get_redis_monitor_stats()<line_sep>the_dict['crawler']=self.get_crawler_stats()<line_sep>the_dict['rest']=self.get_rest_stats()<line_sep><return>the_dict<block_end><def_stmt>get_kafka_monitor_stats self<block_start>'''
Gather Kafka Monitor stats
@return: A dict of stats
'''<line_sep>self.logger.debug("Gathering kafka-monitor stats")<line_sep><return>self._get_plugin_stats('kafka-monitor')<block_end><def_stmt>get_redis_monitor_stats self<block_start>'''
Gather Redis Monitor stats
@return: A dict of stats
'''<line_sep>self.logger.debug("Gathering redis-monitor stats")<line_sep><return>self._get_plugin_stats('redis-monitor')<block_end><def_stmt>get_rest_stats self<block_start>'''
Gather Rest stats
@return: A dict of stats
'''<line_sep>self.logger.debug("Gathering rest stats")<line_sep><return>self._get_plugin_stats('rest')<block_end><def_stmt>_get_plugin_stats self name<block_start>'''
Used for getting stats for plugin-based components, like the Kafka Monitor
and Redis Monitor
@param name: the main class stats name
@return: A formatted dict of stats
'''<line_sep>the_dict={}<line_sep>keys=self.redis_conn.keys('stats:{n}:*'.format(n=name))<for_stmt>key keys# break down key
<block_start>elements=key.split(":")<line_sep>main=elements[2]<line_sep>end=elements[3]<if_stmt>main<eq>'total'<or>main<eq>'fail'<block_start><if_stmt>main<not><in>the_dict<block_start>the_dict[main]={}<block_end>the_dict[main][end]=self._get_key_value(key end<eq>'lifetime')<block_end><elif_stmt>main<eq>'self'<block_start><if_stmt>'nodes'<not><in>the_dict# main is self, end is machine, true_tail is uuid
<block_start>the_dict['nodes']={}<block_end>true_tail=elements[4]<if_stmt>end<not><in>the_dict['nodes']<block_start>the_dict['nodes'][end]=[]<block_end>the_dict['nodes'][end].append(true_tail)<block_end><else_stmt><block_start><if_stmt>'plugins'<not><in>the_dict<block_start>the_dict['plugins']={}<block_end><if_stmt>main<not><in>the_dict['plugins']<block_start>the_dict['plugins'][main]={}<block_end>the_dict['plugins'][main][end]=self._get_key_value(key end<eq>'lifetime')<block_end><block_end><return>the_dict<block_end><def_stmt>_get_key_value self key is_hll=<false><block_start>'''
Returns the proper key value for the stats
@param key: the redis key
@param is_hll: True if the key is a HyperLogLog, otherwise it is treated as a sorted set
'''<if_stmt>is_hll# get hll value
<block_start><return>self.redis_conn.execute_command("PFCOUNT" key)<block_end><else_stmt># get zcard value
<block_start><return>self.redis_conn.zcard(key)<block_end><block_end><def_stmt>get_spider_stats self<block_start>'''
Gather spider based stats
'''<line_sep>self.logger.debug("Gathering spider stats")<line_sep>the_dict={}<line_sep>spider_set=set()<line_sep>total_spider_count=0<line_sep>keys=self.redis_conn.keys('stats:crawler:*:*:*')<for_stmt>key keys# we only care about the spider
<block_start>elements=key.split(":")<line_sep>spider=elements[3]<if_stmt>spider<not><in>the_dict<block_start>the_dict[spider]={}<line_sep>the_dict[spider]['count']=0<block_end><if_stmt>len(elements)<eq>6# got a time based stat
<block_start>response=elements[4]<line_sep>end=elements[5]<if_stmt>response<not><in>the_dict[spider]<block_start>the_dict[spider][response]={}<block_end>the_dict[spider][response][end]=self._get_key_value(key end<eq>'lifetime')<block_end><elif_stmt>len(elements)<eq>5# got a spider identifier
<block_start>the_dict[spider]['count']<augadd>1<line_sep>total_spider_count<augadd>1<line_sep>spider_set.add(spider)<block_end><else_stmt><block_start>self.logger.warn("Unknown crawler stat key" {"key":key})<block_end><block_end># simple counts
the_dict['unique_spider_count']=len(spider_set)<line_sep>the_dict['total_spider_count']=total_spider_count<line_sep>ret_dict={}<line_sep>ret_dict['spiders']=the_dict<line_sep><return>ret_dict<block_end><def_stmt>get_machine_stats self<block_start>'''
Gather machine based stats
'''<line_sep>self.logger.debug("Gathering machine stats")<line_sep>the_dict={}<line_sep>keys=self.redis_conn.keys('stats:crawler:*:*:*:*')<for_stmt>key keys# break down key
<block_start>elements=key.split(":")<line_sep>machine=elements[2]<line_sep>spider=elements[3]<line_sep>response=elements[4]<line_sep>end=elements[5]<line_sep># we only care about the machine, not spider type
<if_stmt>machine<not><in>the_dict<block_start>the_dict[machine]={}<block_end><if_stmt>response<not><in>the_dict[machine]<block_start>the_dict[machine][response]={}<block_end><if_stmt>end<in>the_dict[machine][response]<block_start>the_dict[machine][response][end]=the_dict[machine][response][end]+self._get_key_value(key end<eq>'lifetime')<block_end><else_stmt><block_start>the_dict[machine][response][end]=self._get_key_value(key end<eq>'lifetime')<block_end><block_end># simple count
the_dict['count']=len(list(the_dict.keys()))<line_sep>ret_dict={}<line_sep>ret_dict['machines']=the_dict<line_sep><return>ret_dict<block_end><def_stmt>get_crawler_stats self<block_start>'''
Gather crawler stats
@return: A dict of stats
'''<line_sep>self.logger.debug("Gathering crawler stats")<line_sep>the_dict={}<line_sep>the_dict['spiders']=self.get_spider_stats()['spiders']<line_sep>the_dict['machines']=self.get_machine_stats()['machines']<line_sep>the_dict['queue']=self.get_queue_stats()['queues']<line_sep><return>the_dict<block_end><def_stmt>get_queue_stats self<block_start>'''
Gather queue stats
@return: A dict of stats
'''<line_sep>self.logger.debug("Gathering queue based stats")<line_sep>the_dict={}<line_sep>keys=self.redis_conn.keys('*:*:queue')<line_sep>total_backlog=0<for_stmt>key keys<block_start>elements=key.split(":")<line_sep>spider=elements[0]<line_sep>domain=elements[1]<line_sep>spider='queue_'+spider<if_stmt>spider<not><in>the_dict<block_start>the_dict[spider]={'spider_backlog':0 'num_domains':0 'domains':[]}<block_end>count=self.redis_conn.zcard(key)<line_sep>total_backlog<augadd>count<line_sep>the_dict[spider]['spider_backlog']<augadd>count<line_sep>the_dict[spider]['num_domains']<augadd>1<line_sep>the_dict[spider]['domains'].append({'domain':domain 'backlog':count})<block_end>the_dict['total_backlog']=total_backlog<line_sep>ret_dict={'queues':the_dict}<line_sep><return>ret_dict<block_end><block_end> |
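As a point of reference, here is a minimal plain-Python sketch of how the handle() method above decomposes an incoming stats request; the sample key, appid and uuid values are made up for illustration.

def parse_stats_request(key, value):
    # key layout assumed from the "statsrequest:*:*" regex above
    _, stats, appid = key.split(":")
    return stats, appid, value

# hypothetical request: ask for the 'all' stats bundle for app 'testapp'
assert parse_stats_request("statsrequest:all:testapp", "some-uuid") == ("all", "testapp", "some-uuid")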
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>#
# produce ttSemiLep event hypotheses
#
## geom hypothesis
<import_from_stmt>TopQuarkAnalysis.TopJetCombination.TtSemiLepHypGeom_cff *<line_sep>## wMassDeltaTopMass hypothesis
<import_from_stmt>TopQuarkAnalysis.TopJetCombination.TtSemiLepHypWMassDeltaTopMass_cff *<line_sep>## wMassMaxSumPt hypothesis
<import_from_stmt>TopQuarkAnalysis.TopJetCombination.TtSemiLepHypWMassMaxSumPt_cff *<line_sep>## maxSumPtWMass hypothesis
<import_from_stmt>TopQuarkAnalysis.TopJetCombination.TtSemiLepHypMaxSumPtWMass_cff *<line_sep>## genMatch hypothesis
<import_from_stmt>TopQuarkAnalysis.TopJetCombination.TtSemiLepHypGenMatch_cff *<line_sep>## mvaDisc hypothesis
<import_from_stmt>TopQuarkAnalysis.TopJetCombination.TtSemiLepHypMVADisc_cff *<line_sep>## kinFit hypothesis
<import_from_stmt>TopQuarkAnalysis.TopJetCombination.TtSemiLepHypKinFit_cff *<line_sep>## hitFit hypothesis
<import_from_stmt>TopQuarkAnalysis.TopJetCombination.TtSemiLepHypHitFit_cff *<line_sep>## make all considered event hypotheses
makeTtSemiLepHypothesesTask=cms.Task(makeHypothesis_geomTask makeHypothesis_wMassDeltaTopMassTask makeHypothesis_wMassMaxSumPtTask makeHypothesis_maxSumPtWMassTask makeHypothesis_genMatchTask makeHypothesis_mvaDiscTask makeHypothesis_kinFitTask makeHypothesis_hitFitTask)<line_sep>makeTtSemiLepHypotheses=cms.Sequence(makeTtSemiLepHypothesesTask)<line_sep> |
# coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A Jax version of Sinkhorn's algorithm."""<import_from_stmt>typing Any Dict Optional NamedTuple Union<import_stmt>jax<import_stmt>jax.numpy<as>jnp<import_from_stmt>ott.core fixed_point_loop<import_from_stmt>ott.core problems<import_from_stmt>ott.core sinkhorn<import_from_stmt>ott.geometry epsilon_scheduler<import_from_stmt>ott.geometry geometry<class_stmt>GWOutput(NamedTuple)<block_start>"""Holds the output of the Gromov-Wasserstein solver.
Attributes:
costs: Holds the sequence of regularized GW costs seen through the outer
loop of the solver.
linear_convergence: Holds the sequence of bool convergence flags of the
inner Sinkhorn iterations.
convergence: Bool convergence flag for the outer GW iterations.
errors: Holds sequence of vectors of errors of the Sinkhorn algorithm
at each iteration.
linear_state: State used to solve and store solutions to the local
linearization of GW.
geom: The geometry underlying the local linearization.
transport: The transport matrix.
reg_gw_cost: Regularized optimal transport cost of the linearization.
"""<line_sep>costs:Optional[jnp.ndarray]=<none><line_sep>linear_convergence:Optional[jnp.ndarray]=<none><line_sep>convergence:bool=<false><line_sep>errors:Optional[jnp.ndarray]=<none><line_sep>linear_state:Any=<none><line_sep>geom:geometry.Geometry=<none><def_stmt>set self **kwargs<arrow>'GWOutput'<block_start>"""Returns a copy of self, possibly with overwrites."""<line_sep><return>self._replace(**kwargs)<block_end>@property<def_stmt>transport self<block_start><return>self.linear_state.matrix<block_end>@property<def_stmt>reg_gw_cost self<block_start><return>self.linear_state.reg_ot_cost<block_end><block_end><class_stmt>GWState(NamedTuple)<block_start>"""Holds the state of the Gromov-Wasserstein solver.
Attributes:
costs: Holds the sequence of regularized GW costs seen through the outer
loop of the solver.
linear_convergence: Holds the sequence of bool convergence flags of the
inner Sinkhorn iterations.
errors: Holds sequence of vectors of errors of the Sinkhorn algorithm
at each iteration.
linear_state: State used to solve and store solutions to the local
linearization of GW.
linear_pb: Local linearization of the quadratic GW problem.
"""<line_sep>costs:Optional[jnp.ndarray]=<none><line_sep>linear_convergence:Optional[jnp.ndarray]=<none><line_sep>errors:Optional[jnp.ndarray]=<none><line_sep>linear_state:Any=<none><line_sep>linear_pb:Optional[problems.LinearProblem]=<none><def_stmt>set self **kwargs<arrow>'GWState'<block_start>"""Returns a copy of self, possibly with overwrites."""<line_sep><return>self._replace(**kwargs)<block_end><def_stmt>update self iteration:int linear_sol linear_pb store_errors:bool<block_start>costs=self.costs.at[iteration].set(linear_sol.reg_ot_cost)<line_sep>errors=<none><if_stmt>store_errors<and>self.errors<is><not><none><block_start>errors=self.errors.at[iteration :].set(linear_sol.errors)<block_end>linear_convergence=self.linear_convergence.at[iteration].set(linear_sol.converged)<line_sep><return>self.set(linear_state=linear_sol linear_pb=linear_pb costs=costs linear_convergence=linear_convergence errors=errors)<block_end><block_end>@jax.tree_util.register_pytree_node_class<class_stmt>GromovWasserstein<block_start>"""A Gromov Wasserstein solver."""<def_stmt>__init__ self epsilon:Union[epsilon_scheduler.Epsilon float]=1.0 min_iterations:int=5 max_iterations:int=50 threshold:float=1e-3 jit:bool=<true> store_sinkhorn_errors:bool=<false> linear_ot_solver:sinkhorn.Sinkhorn=sinkhorn.Sinkhorn() **kwargs<block_start>self.epsilon=epsilon<line_sep>self.min_iterations=min_iterations<line_sep>self.max_iterations=max_iterations<line_sep>self.threshold=threshold<line_sep>self.jit=jit<line_sep>self.store_sinkhorn_errors=store_sinkhorn_errors<line_sep>self.linear_ot_solver=linear_ot_solver<line_sep>self._kwargs=kwargs<block_end><def_stmt>tree_flatten self<block_start><return>([self.epsilon self.linear_ot_solver self.threshold] dict(min_iterations=self.min_iterations max_iterations=self.max_iterations jit=self.jit store_sinkhorn_errors=self.store_sinkhorn_errors **self._kwargs))<block_end>@classmethod<def_stmt>tree_unflatten cls aux_data children<block_start><return>cls(epsilon=children[0] linear_ot_solver=children[1] threshold=children[2] **aux_data)<block_end><def_stmt>not_converged self state iteration<block_start>costs,i,tol=state.costs iteration self.threshold<line_sep><return>jnp.logical_or(i<le>2 jnp.logical_and(jnp.isfinite(costs[i-1]) jnp.logical_not(jnp.isclose(costs[i-2] costs[i-1] rtol=tol))))<block_end><def_stmt>__call__ self prob:problems.QuadraticProblem<arrow>GWOutput<block_start><if_stmt><not>prob.is_balanced<block_start><raise>ValueError('Unbalanced Gromov-Wasserstein is not supported yet.')<block_end>gromov_fn=jax.jit(iterations)<if>self.jit<else>iterations<line_sep>out=gromov_fn(self prob)<line_sep># TODO(lpapaxanthos): remove stop_gradient when using backprop
linearization=prob.update_linearization(jax.lax.stop_gradient(out.linear_state) self.epsilon)<line_sep>linear_state=out.linear_state.set_cost(linearization <true> <true>)<line_sep>iteration=jnp.sum(out.costs<ne>0)<line_sep>convergence=jnp.logical_not(self.not_converged(out iteration))<line_sep><return>out.set(linear_state=linear_state convergence=convergence)<block_end><def_stmt>init_state self prob:problems.QuadraticProblem<arrow>GWState<block_start>"""Initializes the state of the Gromov-Wasserstein iterations."""<line_sep>linearization=prob.init_linearization(self.epsilon)<line_sep>linear_state=self.linear_ot_solver(linearization)<line_sep>num_iter=self.max_iterations<if_stmt>self.store_sinkhorn_errors<block_start>errors=-jnp.ones((num_iter self.linear_ot_solver.outer_iterations))<block_end><else_stmt><block_start>errors=<none><block_end><return>GWState(jnp.zeros((num_iter )) jnp.zeros((num_iter )) errors linear_state linearization)<block_end><def_stmt>output_from_state self state<block_start>"""Create an output from a loop state.
Arguments:
state: A GWState.
Returns:
A GWOutput.
"""<line_sep>geom=state.linear_pb.geom<line_sep><return>GWOutput(costs=state.costs linear_convergence=state.linear_convergence errors=state.errors linear_state=state.linear_state geom=geom)<block_end><block_end><def_stmt>iterations solver:GromovWasserstein prob:problems.QuadraticProblem<arrow>GWOutput<block_start>"""A jittable Gromov-Wasserstein outer loop."""<def_stmt>cond_fn iteration constants state<block_start>solver=constants<line_sep><return>solver.not_converged(state iteration)<block_end><def_stmt>body_fn iteration constants state compute_error<block_start><del_stmt>compute_error# Always assumed True for outer loop of GW.
solver=constants<line_sep>linear_pb=prob.update_linearization(state.linear_state solver.epsilon)<line_sep>out=solver.linear_ot_solver(linear_pb)<line_sep><return>state.update(iteration out linear_pb solver.store_sinkhorn_errors)<block_end>state=fixed_point_loop.fixpoint_iter(cond_fn=cond_fn body_fn=body_fn min_iterations=solver.min_iterations max_iterations=solver.max_iterations inner_iterations=1 constants=solver state=solver.init_state(prob))<line_sep><return>solver.output_from_state(state)<block_end><def_stmt>make epsilon:Union[epsilon_scheduler.Epsilon float]=1. max_iterations:int=50 jit:bool=<false> warm_start:bool=<true> store_sinkhorn_errors:bool=<false> sinkhorn_kwargs:Optional[Dict[str Any]]=<none> threshold:float=1e-2 min_iterations:int=1 **kwargs<arrow>GromovWasserstein<block_start>"""Creates a GromovWasserstein solver.
Args:
epsilon: a regularization parameter or an epsilon_scheduler.Epsilon object.
max_iterations: int32, the maximum number of outer iterations for
Gromov Wasserstein.
jit: bool, if True, jits the function.
warm_start: deprecated.
store_sinkhorn_errors: whether or not to return all the errors of the inner
Sinkhorn iterations.
sinkhorn_kwargs: Optionally a dictionary containing the keywords arguments
for calls to the sinkhorn function.
threshold: threshold (progress between two iterate costs) used to stop GW.
min_iterations: see fixed_point_loop.
**kwargs: additional kwargs for epsilon.
Returns:
A GromovWasserstein solver.
"""<del_stmt>warm_start<line_sep>sinkhorn_kwargs={}<if>sinkhorn_kwargs<is><none><else>sinkhorn_kwargs<line_sep>sink=sinkhorn.make(**sinkhorn_kwargs)<line_sep><return>GromovWasserstein(epsilon max_iterations=max_iterations jit=jit linear_ot_solver=sink threshold=threshold store_sinkhorn_errors=store_sinkhorn_errors min_iterations=min_iterations **kwargs)<block_end><def_stmt>gromov_wasserstein geom_x:geometry.Geometry geom_y:geometry.Geometry a:Optional[jnp.ndarray]=<none> b:Optional[jnp.ndarray]=<none> loss:str='sqeucl' **kwargs<arrow>GWOutput<block_start>"""Fits Gromov Wasserstein.
Args:
geom_x: a Geometry object for the first view.
geom_y: a second Geometry object for the second view.
a: jnp.ndarray<float>[num_a,] or jnp.ndarray<float>[batch,num_a] weights.
b: jnp.ndarray<float>[num_b,] or jnp.ndarray<float>[batch,num_b] weights.
loss: str 'sqeucl' or 'kl' to define the GW loss.
**kwargs: keyword arguments to make.
Returns:
A GromovWassersteinState named tuple.
"""<line_sep>losses={'sqeucl':problems.make_square_loss 'kl':problems.make_kl_loss}<line_sep>loss_fn=losses.get(loss <none>)<line_sep>prob=problems.QuadraticProblem(geom_x geom_y a=a b=b loss=loss_fn())<line_sep>solver=make(**kwargs)<line_sep><return>solver(prob)<block_end> |
"""Manage AWS policies."""<import_stmt>json<import_stmt>pathlib<import_stmt>re<import_from_stmt>typing Dict List Tuple<import_from_stmt>xdg xdg_cache_home<import_from_stmt>wonk aws exceptions optimizer<import_from_stmt>wonk.constants MAX_MANAGED_POLICY_SIZE<import_from_stmt>wonk.models Policy Statement canonicalize_resources smallest_json to_set<line_sep>POLICY_CACHE_DIR=xdg_cache_home()/"com.amino.wonk"/"policies"<def_stmt>minify policies:List[Policy]<arrow>List[Statement]<block_start>"""Reduce the input policies to the minimal set of functionally identical equivalents."""<line_sep>internal_statements:List[Statement]=[]<for_stmt>policy policies<block_start>internal_statements.extend(policy.statements)<block_end>this_changed=<true><while_stmt>this_changed<block_start>changed,internal_statements=grouped_actions(internal_statements)<if_stmt><not>changed<block_start>this_changed=<false><block_end>changed,internal_statements=grouped_resources(internal_statements)<if_stmt><not>changed<block_start>this_changed=<false><block_end><block_end><return>internal_statements<block_end><def_stmt>grouped_actions statements:List[Statement]<arrow>Tuple[bool List[Statement]]<block_start>"""Merge similar policies' actions.
Returns a list of statements whose actions have been combined when possible.
"""<line_sep>statement_sets:Dict[str Statement]={}<line_sep>changed=<false><for_stmt>statement statements<block_start>group=statement.grouping_for_actions()<try_stmt><block_start>existing_item=statement_sets[group]<block_end><except_stmt>KeyError<block_start>statement_sets[group]=statement<line_sep><continue><block_end>new_action_value=existing_item.action_value|statement.action_value<if_stmt>existing_item.action_value<ne>new_action_value<block_start>changed=<true><line_sep>statement_sets[group]=existing_item.replace(action_value=new_action_value)<block_end><block_end><return>changed list(statement_sets.values())<block_end><def_stmt>grouped_resources statements:List[Statement]<arrow>Tuple[bool List[Statement]]<block_start>"""Merge similar policies' resources.
Returns a list of statements whose resources have been combined when possible.
"""<line_sep>statement_sets:Dict[str Statement]={}<line_sep>changed=<false><for_stmt>statement statements<block_start>group=statement.grouping_for_resources()<try_stmt><block_start>existing_item=statement_sets[group]<block_end><except_stmt>KeyError<block_start>statement_sets[group]=statement<line_sep><continue><block_end>new_resource_value=canonicalize_resources(to_set(existing_item.resource_value)|to_set(statement.resource_value))<if_stmt>existing_item.resource_value<ne>new_resource_value<block_start>changed=<true><line_sep>statement_sets[group]=existing_item.replace(resource_value=new_resource_value)<block_end><block_end><return>changed list(statement_sets.values())<block_end><def_stmt>combine policies:List[Policy]<arrow>List[Policy]<block_start>"""Combine policy files into the smallest possible set of outputs."""<line_sep>new_policy=Policy(statements=minify(policies))<line_sep># Simplest case: we're able to squeeze everything into a single file. This is the ideal.
<try_stmt><block_start>new_policy.render()<block_end><except_stmt>exceptions.UnshrinkablePolicyError<block_start><pass><block_end><else_stmt><block_start><return>[new_policy]<block_end># Well, that didn't work. Now we need to split the policy into several documents. Subtract the
# length of the tightest packaging of the policy "envelope" from the maximum size, then
# subtract the number of statements[1] (because we might have to glue the results together
# with commas). This is how much room we have to pack statements into.
#
# [1] Why "len(statements) - 2"? Because you can glue n statements together with n-1 commas,
# and it's guaranteed that we can fit at most n-1 statements into a single document because if
# we could fit all n then we wouldn't have made it to this point in the program. And yes, this
# is exactly the part of the program where we start caring about every byte.
minimum_possible_policy_size=len(str(Policy(statements=[])))<line_sep>max_number_of_commas=len(new_policy.statements)-2<line_sep>max_statement_size=(MAX_MANAGED_POLICY_SIZE-minimum_possible_policy_size-max_number_of_commas)<line_sep>packed_list=[]<for_stmt>statement new_policy.statements<block_start>packed=str(statement)<if_stmt>len(packed)<le>max_statement_size<block_start>packed_list.append(packed)<line_sep><continue><block_end><for_stmt>statement_dict statement.split(max_statement_size)<block_start>packed_list.append(smallest_json(statement_dict))<block_end><block_end>statement_sets=optimizer.pack_statements(packed_list max_statement_size 10)<line_sep>policies=[]<for_stmt>statement_set statement_sets# The splitting process above might have resulted in this policy having multiple statements
# that could be merged back together. The easiest way to handle this is to create a new
# policy as-is, then group its statements together into *another* new, optimized policy,
# and emit that one.
<block_start>unmerged_policy=Policy(statements=[Statement(json.loads(statement))<for>statement statement_set])<line_sep>merged_policy=Policy(statements=minify([unmerged_policy]))<line_sep>policies.append(merged_policy)<block_end><return>policies<block_end><def_stmt>policy_set_pattern policy_set:str<arrow>re.Pattern<block_start>"""Return a regexp matching the policy set's name."""<line_sep>final=policy_set.rsplit("/" maxsplit=1)[-1]<line_sep><return>re.compile(rf"^{final}_\d+$")<block_end><def_stmt>write_policy_set output_dir:pathlib.Path base_name:str policies:List[Policy]<block_start>"""Write the packed sets, return the names of the files written, and collect garbage."""<line_sep># Get the list of existing files for this policy set so that we can delete them later. First,
# get a list of candidates with Path.glob() because that's faster and easier than getting a
# list of _every_ file and filtering it with Python. Then use a regular expression to match
# each candidate so that policy set "foo" doesn't unintentionally delete policy set "foo_bar"'s
# files.
pattern=policy_set_pattern(base_name)<line_sep>cleanup={candidate<for>candidate output_dir.glob(f"{base_name}_*")<if>pattern.match(candidate.stem)}<if_stmt>len(cleanup)<g>10# Wonk only creates at most 10 policies for a policy set. If we've found more than 10
# matches then something's gone awry, like the policy set is "*" or such. Either way, pull
# the plug and refuse to delete them.
<block_start><raise>exceptions.TooManyPoliciesError(base_name len(cleanup))<block_end># For consistency, delete all of the pre-existing files before we start so we can't be left
# with a mix of old and new files.
<for_stmt>old cleanup<block_start>old.unlink()<block_end># Write each of the files that file go into this policy set, and create a list of the filenames
# we've written.
output_filenames=[]<for_stmt>i,policy enumerate(policies 1)<block_start>output_path=output_dir/f"{base_name}_{i}.json"<line_sep>output_filenames.append(str(output_path))<line_sep>output_path.write_text(policy.render())<block_end><return>output_filenames<block_end><def_stmt>make_cache_file name:str version:str<arrow>pathlib.Path<block_start>"""Return the path to the document's cache file."""<line_sep>cache_dir=POLICY_CACHE_DIR/name<line_sep>cache_dir.mkdir(parents=<true> exist_ok=<true>)<line_sep><return>cache_dir/f"{version}.json"<block_end><def_stmt>fetch client arn:str force:bool=<false><arrow>str<block_start>"""Return the contents of the policy."""<line_sep>current_version=aws.get_policy_version(client arn)<line_sep>cache_file=make_cache_file(aws.name_for(arn) current_version)<line_sep>policy_doc=<none><try_stmt><block_start><if_stmt><not>force<block_start>policy_doc=cache_file.read_text()<block_end><block_end><except_stmt>FileNotFoundError<block_start><pass><block_end><if_stmt>policy_doc<is><none><block_start>policy_doc=aws.get_policy(client arn current_version)<line_sep>cache_file.write_text(policy_doc)<block_end><return>policy_doc<block_end> |
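To illustrate the cleanup guard in write_policy_set above, a small hedged example of what policy_set_pattern (defined in the same module) matches; the ARN string is hypothetical.

pattern = policy_set_pattern("arn:aws:iam::123456789012:policy/foo")  # hypothetical ARN
assert pattern.match("foo_1")          # files written for this policy set
assert not pattern.match("foo_bar_1")  # a different policy set is left alone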
<import_stmt>os<import_from_stmt>os.path exists join<import_stmt>pickle<import_stmt>numpy<as>np<import_stmt>open3d<import_stmt>cv2<import_stmt>time<class_stmt>ThreeDMatch(object)<block_start>"""
Given point cloud fragments and corresponding pose in '{root}'.
1. Save the aligned point cloud pts in '{savepath}/3DMatch_{downsample}_points.pkl'
2. Calculate the overlap ratio and save in '{savepath}/3DMatch_{downsample}_overlap.pkl'
3. Save the ids of anchor keypoints and positive keypoints in '{savepath}/3DMatch_{downsample}_keypts.pkl'
"""<def_stmt>__init__ self root savepath split downsample<block_start>self.root=root<line_sep>self.savepath=savepath<line_sep>self.split=split<line_sep>self.downsample=downsample<line_sep># dict: from id to pts.
self.pts={}<line_sep># dict: from id_id to overlap_ratio
self.overlap_ratio={}<line_sep># dict: from id_id to anc_keypts id & pos_keypts id
self.keypts_pairs={}<with_stmt>open(os.path.join(root f'scene_list_{split}.txt'))<as>f<block_start>scene_list=f.readlines()<block_end>self.ids_list=[]<line_sep>self.scene_to_ids={}<for_stmt>scene scene_list<block_start>scene=scene.replace("\n" "")<line_sep>self.scene_to_ids[scene]=[]<for_stmt>seq sorted(os.listdir(os.path.join(self.root scene)))<block_start><if_stmt><not>seq.startswith('seq')<block_start><continue><block_end>scene_path=os.path.join(self.root scene+f'/{seq}')<line_sep>ids=[scene+f"/{seq}/"+str(filename.split(".")[0])<for>filename os.listdir(scene_path)<if>filename.endswith('ply')]<line_sep>ids=sorted(ids key=<lambda>x:int(x.split("_")[-1]))<line_sep>self.ids_list<augadd>ids<line_sep>self.scene_to_ids[scene]<augadd>ids<line_sep>print(f"Scene {scene}, seq {seq}: num ply: {len(ids)}")<block_end><block_end>print(f"Total {len(scene_list)} scenes, {len(self.ids_list)} point cloud fragments.")<line_sep>self.idpair_list=[]<line_sep>self.load_all_ply(downsample)<line_sep>self.cal_overlap(downsample)<block_end><def_stmt>load_ply self data_dir ind downsample aligned=<true><block_start>pcd=open3d.io.read_point_cloud(join(data_dir f'{ind}.ply'))<line_sep>pcd=open3d.geometry.PointCloud.voxel_down_sample(pcd voxel_size=downsample)<if_stmt>aligned<is><true><block_start>matrix=np.load(join(data_dir f'{ind}.pose.npy'))<line_sep>pcd.transform(matrix)<block_end><return>pcd<block_end><def_stmt>load_all_ply self downsample<block_start>pts_filename=join(self.savepath f'3DMatch_{self.split}_{downsample:.3f}_points.pkl')<if_stmt>exists(pts_filename)<block_start><with_stmt>open(pts_filename 'rb')<as>file<block_start>self.pts=pickle.load(file)<block_end>print(f"Load pts file from {self.savepath}")<line_sep><return><block_end>self.pts={}<for_stmt>i,anc_id enumerate(self.ids_list)<block_start>anc_pcd=self.load_ply(self.root anc_id downsample=downsample aligned=<true>)<line_sep>points=np.array(anc_pcd.points)<line_sep>print(len(points))<line_sep>self.pts[anc_id]=points<line_sep>print('processing ply: {:.1f}%'.format(100<times>i/len(self.ids_list)))<block_end><with_stmt>open(pts_filename 'wb')<as>file<block_start>pickle.dump(self.pts file)<block_end><block_end><def_stmt>get_matching_indices self anc_pts pos_pts search_voxel_size K=<none><block_start>match_inds=[]<line_sep>bf_matcher=cv2.BFMatcher(cv2.NORM_L2)<line_sep>match=bf_matcher.match(anc_pts pos_pts)<for_stmt>match_val match<block_start><if_stmt>match_val.distance<l>search_voxel_size<block_start>match_inds.append([match_val.queryIdx match_val.trainIdx])<block_end><block_end><return>np.array(match_inds)<block_end><def_stmt>cal_overlap self downsample<block_start>overlap_filename=join(self.savepath f'3DMatch_{self.split}_{downsample:.3f}_overlap.pkl')<line_sep>keypts_filename=join(self.savepath f'3DMatch_{self.split}_{downsample:.3f}_keypts.pkl')<if_stmt>exists(overlap_filename)<and>exists(keypts_filename)<block_start><with_stmt>open(overlap_filename 'rb')<as>file<block_start>self.overlap_ratio=pickle.load(file)<line_sep>print(f"Reload overlap info from {overlap_filename}")<block_end><with_stmt>open(keypts_filename 'rb')<as>file<block_start>self.keypts_pairs=pickle.load(file)<line_sep>print(f"Reload keypts info from {keypts_filename}")<block_end><import_stmt>pdb<line_sep>pdb.set_trace()<line_sep><return><block_end>t0=time.time()<for_stmt>scene,scene_ids self.scene_to_ids.items()<block_start>scene_overlap={}<line_sep>print(f"Begin processing scene {scene}")<for_stmt>i range(0 len(scene_ids))<block_start>anc_id=scene_ids[i]<for_stmt>j range(i+1 
len(scene_ids))<block_start>pos_id=scene_ids[j]<line_sep>anc_pts=self.pts[anc_id].astype(np.float32)<line_sep>pos_pts=self.pts[pos_id].astype(np.float32)<try_stmt><block_start>matching_01=self.get_matching_indices(anc_pts pos_pts self.downsample)<block_end><except_stmt>BaseException<as>e<block_start>print(f"Something wrong with get_matching_indices {e} for {anc_id}, {pos_id}")<line_sep>matching_01=np.array([])<block_end>overlap_ratio=len(matching_01)/len(anc_pts)<line_sep>scene_overlap[f'{anc_id}@{pos_id}']=overlap_ratio<if_stmt>overlap_ratio<g>0.30<block_start>self.keypts_pairs[f'{anc_id}@{pos_id}']=matching_01.astype(np.int32)<line_sep>self.overlap_ratio[f'{anc_id}@{pos_id}']=overlap_ratio<line_sep>print(f'\t {anc_id}, {pos_id} overlap ratio: {overlap_ratio}')<block_end><block_end>print('processing {:s} ply: {:.1f}%'.format(scene 100<times>i/len(scene_ids)))<block_end>print('Finish {:s}, Done in {:.1f}s'.format(scene time.time()-t0))<block_end><with_stmt>open(overlap_filename 'wb')<as>file<block_start>pickle.dump(self.overlap_ratio file)<block_end><with_stmt>open(keypts_filename 'wb')<as>file<block_start>pickle.dump(self.keypts_pairs file)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>ThreeDMatch(root='path to your ply file.' savepath='data/3DMatch' split='train' downsample=0.030)<block_end> |
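A hedged sketch of what get_matching_indices computes, using two tiny hand-made point sets; since the method does not touch self, it is called unbound here purely for illustration.

import numpy as np

anc = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]], dtype=np.float32)
pos = np.array([[0.01, 0.0, 0.0], [5.0, 5.0, 5.0]], dtype=np.float32)
# brute-force nearest neighbours within 0.05: only the first anchor point has a match
matches = ThreeDMatch.get_matching_indices(None, anc, pos, search_voxel_size=0.05)
# matches -> array([[0, 0]])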
<import_from_stmt>torch.nn Parameter<import_from_stmt>.memory Memory<class_stmt>SharedParameterUtil<block_start>_isHijacked=<false><line_sep>_memory=<none><line_sep>_mainNew=<none><line_sep>@staticmethod<def_stmt>_shared_new cls data=<none> requires_grad=<true><block_start><if_stmt>data<is><none><block_start><return>SharedParameterUtil._mainNew(cls data requires_grad)<block_end>mShape=data.shape<line_sep>fSize=1<for_stmt>i mShape<block_start>fSize<augmul>i<block_end>sharedSlot=Memory.obtain(SharedParameterUtil._memory)<line_sep>nT=sharedSlot.reshape(-1)[:fSize]<line_sep>nT=nT.reshape(mShape)<line_sep><return>SharedParameterUtil._mainNew(cls nT requires_grad)<block_end>@staticmethod<def_stmt>hijackParameters memoryKey<block_start><if_stmt>SharedParameterUtil._isHijacked<block_start><raise>RuntimeError("already hijacked, reset first")<block_end>SharedParameterUtil._mainNew=Parameter.__new__<line_sep>SharedParameterUtil._isHijacked=<true><line_sep>SharedParameterUtil._memory=memoryKey<line_sep>Parameter.__new__=SharedParameterUtil._shared_new<block_end>@staticmethod<def_stmt>resetParameters resetMemory=<false><block_start><if_stmt><not>SharedParameterUtil._isHijacked<block_start><return><block_end>Parameter.__new__=SharedParameterUtil._mainNew<line_sep>SharedParameterUtil._isHijacked=<false><line_sep>SharedParameterUtil._mainNew=<none><if_stmt>resetMemory<block_start>Memory.deallocKey(SharedParameterUtil._memory)<block_end>SharedParameterUtil._memory=<none><block_end><block_end> |
<import_stmt>pytest<import_from_stmt>mixer.backend.django mixer<import_from_stmt>projects.models Project ProjectMembership<import_from_stmt>users.models User<line_sep>@pytest.mark.django_db<class_stmt>TestProject<block_start><def_stmt>test_project_create self<block_start>user=mixer.blend(User username='test')<line_sep>proj=mixer.blend(Project owner=user)<assert_stmt>proj.owner<eq>user<block_end><def_stmt>test_project_str self<block_start>proj=mixer.blend(Project)<assert_stmt>str(proj)<eq>proj.title<block_end><block_end>@pytest.mark.django_db<class_stmt>TestProjectMembers<block_start><def_stmt>test_member self<block_start>proj=mixer.blend(Project)<line_sep>user=mixer.blend(User username='test')<line_sep>mixer.blend(ProjectMembership member=user project=proj)<assert_stmt>proj.members.get(username='test')<eq>user<block_end><def_stmt>test_proj_member_str self<block_start>pmem=mixer.blend(ProjectMembership)<assert_stmt>str(pmem)<eq>f'{pmem.member.full_name} , {pmem.project.title}'<block_end><block_end> |
#=========================================================================
# test_utility.py
#=========================================================================
# Author : <NAME>
# Date : Feb 21, 2019
"""Test utilities used by RTLIR tests."""<import_from_stmt>contextlib contextmanager<import_stmt>pytest<line_sep>@pytest.fixture<def_stmt>do_test request<block_start>"""Call `local_do_test` of the requesting module."""<line_sep><return>request.module.local_do_test<block_end>@contextmanager<def_stmt>expected_failure exception=Exception msg=<none><block_start>"""Mark one test case as should-fail.
Not to be confused with pytest.xfail, which is commonly used to mark
tests related to unimplemented functionality. This test only passes when
it throws an expected exception.
"""<try_stmt><block_start><yield><block_end><except_stmt>exception<as>e<block_start><if_stmt>msg<is><none><or>e.args[0].find(msg)<ne>-1<block_start><return><block_end><else_stmt><block_start><raise><block_end><block_end><raise>Exception('expected-to-fail test unexpectedly passed!')<block_end><def_stmt>get_parameter name func<block_start>"""Return the parameter for `name` arg of `func`"""<try_stmt><block_start><for_stmt>mark func.pytestmark<block_start><if_stmt>mark.name<eq>'parametrize'# Find the position of the given name
<block_start>pos=-1<for_stmt>i,arg enumerate(mark.args[0].split())<block_start><if_stmt>arg<eq>name<block_start>pos=i<line_sep><break><block_end><block_end><if_stmt>pos<eq>-1<block_start><raise>Exception(f'{func} does not have parameter named {name}!')<block_end><if_stmt>len(mark.args[0].split())<eq>1<block_start><return>mark.args[1]<block_end><return>list(map(<lambda>x:x[pos] mark.args[1]))<block_end><block_end><block_end><except_stmt>AttributeError<block_start><raise>Exception(f'given function {func} does not have pytest marks!')<block_end><block_end> |
<import_stmt>pytest<import_from_stmt>gitlabform EXIT_INVALID_INPUT<import_from_stmt>gitlabform.configuration.projects_and_groups ConfigurationProjectsAndGroups<import_from_stmt>gitlabform.filter NonEmptyConfigsProvider<def_stmt>test_error_on_missing_key <block_start>config_yaml="""
---
# no key at all
"""<with_stmt>pytest.raises(SystemExit)<as>e<block_start>configuration=ConfigurationProjectsAndGroups(config_string=config_yaml)<line_sep>NonEmptyConfigsProvider(configuration <none> <none>)<block_end><assert_stmt>e.value.code<eq>EXIT_INVALID_INPUT<block_end><def_stmt>test_error_on_empty_key <block_start>config_yaml="""
---
projects_and_groups:
"""<with_stmt>pytest.raises(SystemExit)<as>e<block_start>configuration=ConfigurationProjectsAndGroups(config_string=config_yaml)<line_sep>NonEmptyConfigsProvider(configuration <none> <none>)<block_end><assert_stmt>e.value.code<eq>EXIT_INVALID_INPUT<block_end> |
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep># values tuned also according to slide 3 of :
# https://indico.cern.ch/getFile.py/access?contribId=23&sessionId=2&resId=0&materialId=slides&confId=271548
# selection efficiency of approx 6% for ZMM_8TeV
myZgammaFilter=cms.EDFilter('ZgammaMassFilter' HepMCProduct=cms.InputTag("generator" "unmeared") minPhotonPt=cms.double(7.) minLeptonPt=cms.double(7.) minPhotonEta=cms.double(-3) minLeptonEta=cms.double(-3) maxPhotonEta=cms.double(3) maxLeptonEta=cms.double(3) minDileptonMass=cms.double(30.) minZgMass=cms.double(40.))<line_sep>ZgammaFilter=cms.Sequence(myZgammaFilter)<line_sep> |
#####################################################
# Copyright (c) <NAME> [GitHub D-X-Y], 2021.01 #
#####################################################
# Define complex search space for AutoDL #
#####################################################
<import_from_stmt>.basic_space Categorical<import_from_stmt>.basic_space Continuous<import_from_stmt>.basic_space Integer<import_from_stmt>.basic_space Space<import_from_stmt>.basic_space VirtualNode<import_from_stmt>.basic_op has_categorical<import_from_stmt>.basic_op has_continuous<import_from_stmt>.basic_op is_determined<import_from_stmt>.basic_op get_determined_value<import_from_stmt>.basic_op get_min<import_from_stmt>.basic_op get_max<line_sep> |
<import_stmt>argparse<import_stmt>importlib<import_stmt>os<import_from_stmt>fairseq.models MODEL_REGISTRY ARCH_MODEL_INV_REGISTRY<line_sep># automatically import any Python files in the models/ directory
models_dir=os.path.dirname(__file__)<for_stmt>file os.listdir(models_dir)<block_start>path=os.path.join(models_dir file)<if_stmt><not>file.startswith('_')<and><not>file.startswith('.')<and>(file.endswith('.py')<or>os.path.isdir(path))<block_start>model_name=file[:file.find('.py')]<if>file.endswith('.py')<else>file<line_sep>module=importlib.import_module('infoxlm.models.'+model_name)<line_sep># extra `model_parser` for sphinx
<if_stmt>model_name<in>MODEL_REGISTRY<block_start>parser=argparse.ArgumentParser(add_help=<false>)<line_sep>group_archs=parser.add_argument_group('Named architectures')<line_sep>group_archs.add_argument('--arch' choices=ARCH_MODEL_INV_REGISTRY[model_name])<line_sep>group_args=parser.add_argument_group('Additional command-line arguments')<line_sep>MODEL_REGISTRY[model_name].add_args(group_args)<line_sep>globals()[model_name+'_parser']=parser<block_end><block_end><block_end> |
<import_from_stmt>markupsafe escape<def_stmt>run <block_start>string="Hello World!"<times>1000<line_sep>escape(string)<block_end> |
<import_from_stmt>.cam *<import_from_stmt>.gradcam *<import_from_stmt>.utils *<line_sep> |
<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('dfirtrack_config' '0013_csvimporterconfigmodel') ]<line_sep>operations=[migrations.AddField(model_name='mainconfigmodel' name='main_overview' field=models.CharField(choices=[('main_overview_artifact' 'Artifact') ('main_overview_case' 'Case') ('main_overview_system' 'System') ('main_overview_tag' 'Tag') ('main_overview_task' 'Task') ] default='main_overview_system' max_length=50 ) ) ]<block_end> |
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>CommonTools.PileupAlgos.Puppi_cff *<line_sep>pupuppi=puppi.clone(invertPuppi=<true>)<line_sep> |
# -*- coding: utf-8 -*-
<import_from_stmt>zope.interface Interface<class_stmt>IOPContent(Interface)<block_start>""" Openprocurement Content """<block_end><class_stmt>IContentConfigurator(Interface)<block_start>""" Content configurator """<block_end> |
<import_from_stmt>insights.parsers.neutron_server_log NeutronServerLog<import_from_stmt>insights.tests context_wrap<line_sep>NEUTRON_LOG="""
2016-09-13 05:56:45.155 30586 WARNING keystonemiddleware.auth_token [-] Identity response: {"error": {"message": "Could not find token: b45405915eb44e608885f894028d37b9", "code": 404, "title": "Not Found"}}
2016-09-13 05:56:45.156 30586 WARNING keystonemiddleware.auth_token [-] Authorization failed for token
2016-09-13 06:06:45.884 30588 WARNING keystonemiddleware.auth_token [-] Authorization failed for token
2016-09-13 06:06:45.886 30588 WARNING keystonemiddleware.auth_token [-] Identity response: {"error": {"message": "Could not find token: <PASSWORD>ba1<PASSWORD>", "code": 404, "title": "Not Found"}}
2016-09-13 06:06:45.887 30588 WARNING keystonemiddleware.auth_token [-] Authorization failed for token
2016-09-13 06:06:46.131 30586 WARNING keystonemiddleware.auth_token [-] Authorization failed for token
2016-09-13 06:06:46.131 30586 WARNING keystonemiddleware.auth_token [-] Identity response: {"error": {"message": "Could not find token: <KEY>0", "code": 404, "title": "Not Found"}}
2016-09-13 06:06:46.132 30586 WARNING keystonemiddleware.auth_token [-] Authorization failed for token
""".strip()<def_stmt>test_server_log <block_start>neutron_server=NeutronServerLog(context_wrap(NEUTRON_LOG))<assert_stmt>len(neutron_server.get(["WARNING" "Authorization failed for token"]))<eq>5<assert_stmt>len(neutron_server.get(["Identity response:"]))<eq>3<assert_stmt>len(neutron_server.get("Identity response:"))<eq>3<block_end> |
"""Benchmarking experiments for Q-functions."""<import_from_stmt>garage_benchmarks.experiments.q_functions.continuous_mlp_q_function # isort:skip # noqa: E501
continuous_mlp_q_function <line_sep>__all__=['continuous_mlp_q_function']<line_sep> |
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>os<import_from_stmt>typing List<import_stmt>numpy<import_from_stmt>deepsparse.utils.log log_init<line_sep>__all__=["arrays_to_bytes" "bytes_to_arrays" "verify_outputs" ]<line_sep>log=log_init(os.path.basename(__file__))<def_stmt>arrays_to_bytes arrays:List[numpy.array]<arrow>bytearray<block_start>"""
:param arrays: List of numpy arrays to serialize as bytes
:return: bytearray representation of list of numpy arrays
"""<line_sep>to_return=bytearray()<for_stmt>arr arrays<block_start>arr_dtype=bytearray(str(arr.dtype) "utf-8")<line_sep>arr_shape=bytearray(",".join([str(a)<for>a arr.shape]) "utf-8")<line_sep>sep=bytearray("|" "utf-8")<line_sep>arr_bytes=arr.ravel().tobytes()<line_sep>to_return<augadd>arr_dtype+sep+arr_shape+sep+arr_bytes<block_end><return>to_return<block_end><def_stmt>bytes_to_arrays serialized_arr:bytearray<arrow>List[numpy.array]<block_start>"""
:param serialized_arr: bytearray representation of list of numpy arrays
:return: List of numpy arrays decoded from input
"""<line_sep>sep="|".encode("utf-8")<line_sep>arrays=[]<line_sep>i_start=0<while_stmt>i_start<l>len(serialized_arr)-1<block_start>i_0=serialized_arr.find(sep i_start)<line_sep>i_1=serialized_arr.find(sep i_0+1)<line_sep>arr_dtype=numpy.dtype(serialized_arr[i_start:i_0].decode("utf-8"))<line_sep>arr_shape=tuple([int(a)<for>a serialized_arr[i_0+1:i_1].decode("utf-8").split(",")])<line_sep>arr_num_bytes=numpy.prod(arr_shape)<times>arr_dtype.itemsize<line_sep>arr_str=serialized_arr[i_1+1:arr_num_bytes+(i_1+1)]<line_sep>arr=numpy.frombuffer(arr_str dtype=arr_dtype).reshape(arr_shape)<line_sep>arrays.append(arr.copy())<line_sep>i_start=i_1+arr_num_bytes+1<block_end><return>arrays<block_end><def_stmt>verify_outputs outputs:List[numpy.array] gt_outputs:List[numpy.array] atol:float=8.0e-4 rtol:float=0.0 <arrow>List[float]<block_start>"""
Compares two lists of output tensors, checking that they are sufficiently similar
:param outputs: List of numpy arrays, usually model outputs
:param gt_outputs: List of numpy arrays, usually reference outputs
:param atol: Absolute tolerance for allclose
:param rtol: Relative tolerance for allclose
:return: The list of max differences for each pair of outputs
"""<line_sep>max_diffs=[]<if_stmt>len(outputs)<ne>len(gt_outputs)<block_start><raise>Exception(f"number of outputs doesn't match, {len(outputs)} != {len(gt_outputs)}")<block_end><for_stmt>i range(len(gt_outputs))<block_start>gt_output=gt_outputs[i]<line_sep>output=outputs[i]<if_stmt>output.shape<ne>gt_output.shape<block_start><raise>Exception(f"output shapes don't match, {output.shape} != {gt_output.shape}")<block_end><if_stmt>type(output)<ne>type(gt_output)<block_start><raise>Exception(f"output types don't match, {type(output)} != {type(gt_output)}")<block_end>max_diff=numpy.max(numpy.abs(output-gt_output))<line_sep>max_diffs.append(max_diff)<line_sep>log.info(f"output {i}: {output.shape} {gt_output.shape} MAX DIFF: {max_diff}")<if_stmt><not>numpy.allclose(output gt_output rtol=rtol atol=atol)<block_start><raise>Exception("output data doesn't match\n"<concat>f"output {i}: {output.shape} {gt_output.shape} MAX DIFF: {max_diff}\n"<concat>f" mean = {numpy.mean(output):.5f} {numpy.mean(gt_output):.5f}\n"<concat>f" std = {numpy.std(output):.5f} {numpy.std(gt_output):.5f}\n"<concat>f" max = {numpy.max(output):.5f} {numpy.max(gt_output):.5f}\n"<concat>f" min = {numpy.min(output):.5f} {numpy.min(gt_output):.5f}")<block_end><block_end><return>max_diffs<block_end> |
<import_from_stmt>django.dispatch Signal<line_sep># Page signals
# provides args: instance, revision
page_published=Signal()<line_sep># provides args: instance
page_unpublished=Signal()<line_sep># provides args: instance, parent_page_before, parent_page_after, url_path_before, url_path_after
pre_page_move=Signal()<line_sep># provides args: instance, parent_page_before, parent_page_after, url_path_before, url_path_after
post_page_move=Signal()<line_sep># Workflow signals
# provides args: instance, user
workflow_approved=Signal()<line_sep># provides args: instance, user
workflow_rejected=Signal()<line_sep># provides args: instance, user
workflow_cancelled=Signal()<line_sep># provides args: instance, user
workflow_submitted=Signal()<line_sep># Workflow task signals
# provides args: instance, user
task_approved=Signal()<line_sep># provides args: instance, user
task_rejected=Signal()<line_sep># provides args: instance, user
task_submitted=Signal()<line_sep># provides args: instance, user
task_cancelled=Signal()<line_sep># Locale signals
# Like pre_delete, but sent on deletion before on_delete validation is applied.
# Currently only sent by the Locale model.
# Required as a workaround for https://code.djangoproject.com/ticket/6870
# provides args: sender, instance
pre_validate_delete=Signal()<line_sep> |
<import_stmt>random<import_stmt>os<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>pandas<as>pd<import_from_stmt>mean_average_precision MetricBuilder<import_stmt>pickle<line_sep>classes=['Negative for Pneumonia' 'Typical Appearance' 'Indeterminate Appearance' 'Atypical Appearance']<def_stmt>seed_everything seed=123<block_start>random.seed(seed)<line_sep>os.environ['PYTHONHASHSEED']=str(seed)<line_sep>np.random.seed(seed)<line_sep>torch.manual_seed(seed)<line_sep>torch.cuda.manual_seed(seed)<line_sep>torch.backends.cudnn.deterministic=<true><line_sep>torch.backends.cudnn.benchmark=<true><block_end><def_stmt>refine_det boxes labels scores<block_start>boxes=boxes.clip(0 1)<line_sep>boxes_out=[]<line_sep>labels_out=[]<line_sep>scores_out=[]<for_stmt>box,label,score zip(boxes labels scores)<block_start>x1,y1,x2,y2=box<if_stmt>x1<eq>x2<or>y1<eq>y2<block_start><continue><block_end>box=[min(x1 x2) min(y1 y2) max(x1 x2) max(y1 y2)]<line_sep>boxes_out.append(box)<line_sep>labels_out.append(label)<line_sep>scores_out.append(score)<block_end><return>boxes_out labels_out scores_out<block_end><def_stmt>get_study_map df pred_dict num_classes=6 stride=0.1<block_start><assert_stmt>num_classes<in>[4 6]<line_sep>metric_fn=MetricBuilder.build_evaluation_metric("map_2d" async_mode=<true> num_classes=num_classes)<line_sep>### Study level ###
<for_stmt>studyid,grp df.groupby('studyid')<block_start>gts=[]<for_stmt>clsidx,clsname enumerate(classes)<block_start><assert_stmt>len(np.unique(grp[clsname].values))<eq>1<if_stmt>grp[clsname].values[0]<eq>1<block_start>gts.append([0 0 1 1 clsidx 0 0])<block_end><block_end>gts=np.array(gts)<line_sep>study_preds=[]<for_stmt>_,row grp.iterrows()<block_start>study_preds.append(pred_dict[row['imageid']])<block_end>study_preds=np.array(study_preds)<line_sep>study_preds=np.mean(study_preds axis=0)<line_sep>preds=[]<for_stmt>clsidx range(len(classes))<block_start>preds.append([0 0 1 1 clsidx study_preds[clsidx]])<block_end>preds=np.array(preds)<line_sep>metric_fn.add(preds gts)<block_end>### Image level ###
<if_stmt>num_classes<eq>6<block_start><for_stmt>_,row df.iterrows()<block_start>gts=[]<line_sep>arr=row['label'].split(' ')<line_sep>nums=len(arr)<floordiv>6<for_stmt>i range(nums)<block_start>class_name=arr[6<times>i]<line_sep>conf=int(arr[6<times>i+1])<if_stmt>class_name<eq>'opacity'<block_start>clsid=5<block_end><else_stmt><block_start>clsid=4<block_end>x1=int(float(arr[6<times>i+2]))<line_sep>y1=int(float(arr[6<times>i+3]))<line_sep>x2=int(float(arr[6<times>i+4]))<line_sep>y2=int(float(arr[6<times>i+5]))<line_sep>gts.append([x1 y1 x2 y2 clsid 0 0])<block_end>gts=np.array(gts)<line_sep>preds=np.array([[0 0 1 1 4 1]])<line_sep>metric_fn.add(preds gts)<block_end><block_end>result=metric_fn.value(iou_thresholds=0.5 recall_thresholds=np.arange(0. 1.0+stride stride) mpolicy='soft')<line_sep>average_precision={}<for_stmt>clsid range(num_classes)<block_start>average_precision[clsid]=[]<block_end><for_stmt>k,v result.items()<block_start><if_stmt>k<eq>'mAP'<block_start><continue><block_end><for_stmt>clsid range(num_classes)<block_start>average_precision[clsid].append(v[clsid]['ap'])<block_end><block_end>output={'mAP':result['mAP'] }<for_stmt>clsid range(num_classes)<block_start>average_precision[clsid]=np.mean(average_precision[clsid])<if_stmt>clsid<l>len(classes)<block_start>output[classes[clsid]]=average_precision[clsid]<block_end><elif_stmt>clsid<eq>4<block_start>output['none']=average_precision[clsid]<block_end><else_stmt><block_start>output['opacity']=average_precision[clsid]<block_end><block_end><return>output<block_end><def_stmt>save_dict obj name<block_start><with_stmt>open(name 'wb')<as>f<block_start>pickle.dump(obj f pickle.HIGHEST_PROTOCOL)<block_end><block_end><def_stmt>load_dict name<block_start><with_stmt>open(name 'rb')<as>f<block_start><return>pickle.load(f)<block_end><block_end> |
<import_from_stmt>torch.optim.lr_scheduler CosineAnnealingLR<class_stmt>OneCycleCosineAnnealLR(CosineAnnealingLR)<block_start><def_stmt>__init__ self *args **kwargs<block_start>self.start_epoch=<none><line_sep>self.last_epoch=<none><line_sep>super().__init__(*args **kwargs)<block_end><def_stmt>step self epoch=<none><block_start><if_stmt>self.last_epoch<is><not><none><block_start><if_stmt>self.start_epoch<is><none><block_start>self.start_epoch=self.last_epoch<line_sep>self.last_epoch=0<for_stmt>i range(len(self.base_lrs))<block_start>self.optimizer.param_groups[i]['lr']=self.base_lrs[0]<block_end><block_end><if_stmt>self.last_epoch<ge>self.T_max-1<block_start>self.start_epoch=self.last_epoch<line_sep>self.last_epoch=-1<for_stmt>i range(len(self.base_lrs))<block_start>self.optimizer.param_groups[i]['lr']=self.base_lrs[0]<block_end><block_end><block_end>super().step(epoch)<block_end><block_end>__all__=['OneCycleCosineAnnealLR']<line_sep> |
# regular assignment
foo=7<line_sep>print(foo)<line_sep># annotated assignment
bar:number=9<line_sep>print(bar)<line_sep> |
<import_from_stmt>amazon_paapi.exceptions AsinNotFoundException<import_from_stmt>amazon_paapi.tools get_asin<import_stmt>pytest<def_stmt>test_get_asin <block_start><assert_stmt>get_asin('B01N5IB20Q')<eq>'B01N5IB20Q'<assert_stmt>get_asin('https://www.amazon.es/gp/product/B07PHPXHQS')<eq>'B07PHPXHQS'<assert_stmt>get_asin('https://www.amazon.es/gp/product/B07PHPXHQS?pf_rd_r=3FXDZDV1W6KY83KEE2Z4&pf_rd_p=c6fa5af0-ec7c-40de-8332-fd1421de4244&pd_rd_r=58786171-de0f-4fe1-a2df-ee335d6715ee&pd_rd_w=KND7A&pd_rd_wg=kIr5z&ref_=pd_gw_unk')<eq>'B07PHPXHQS'<assert_stmt>get_asin('https://www.amazon.es/dp/B07PKW4CKF')<eq>'B07PKW4CKF'<assert_stmt>get_asin('https://www.amazon.es/dp/B07PKW4CKF?_encoding=UTF8&ref_=pocs_dp_m_sp_multi_c_more_nooffers_B08D1G2XVX')<eq>'B07PKW4CKF'<with_stmt>pytest.raises(AsinNotFoundException)<block_start>get_asin('https://www.amazon.es/gp/')<block_end><with_stmt>pytest.raises(AsinNotFoundException)<block_start>get_asin('this is not even a URL')<block_end><block_end> |
<import_stmt>hashlib<import_from_stmt>collections defaultdict<import_from_stmt>datetime datetime<import_from_stmt>urllib.parse urlencode<import_from_stmt>babel.dates format_date<import_from_stmt>babel.dates format_datetime<import_from_stmt>babel.dates format_time<import_from_stmt>babel.numbers format_currency<import_from_stmt>pyramid.decorator reify<import_from_stmt>pyramid.i18n get_locale_name<import_from_stmt>pyramid.interfaces ILocation<import_from_stmt>pyramid.location inside<import_from_stmt>pyramid.location lineage<import_from_stmt>pyramid.renderers get_renderer<import_from_stmt>pyramid.renderers render<import_from_stmt>pyramid.settings asbool<import_from_stmt>sqlalchemy and_<import_from_stmt>sqlalchemy not_<import_from_stmt>sqlalchemy or_<import_from_stmt>kotti DBSession<import_from_stmt>kotti get_settings<import_from_stmt>kotti.events objectevent_listeners<import_from_stmt>kotti.interfaces INavigationRoot<import_from_stmt>kotti.resources Content<import_from_stmt>kotti.resources Document<import_from_stmt>kotti.resources Node<import_from_stmt>kotti.resources Tag<import_from_stmt>kotti.resources TagsToContents<import_from_stmt>kotti.resources get_root<import_from_stmt>kotti.sanitizers sanitize<import_from_stmt>kotti.security view_permitted<import_from_stmt>kotti.util TemplateStructure<import_from_stmt>kotti.util render_view<import_from_stmt>kotti.views.site_setup CONTROL_PANEL_LINKS<import_from_stmt>kotti.views.slots slot_events<class_stmt>SettingHasValuePredicate<block_start><def_stmt>__init__ self val config<block_start>self.name,self.value=val<if_stmt><not>isinstance(self.value bool)<block_start><raise>ValueError("Only boolean values supported")<block_end><block_end><def_stmt>text self<block_start><return>f"if_setting_has_value = {self.name} == {self.value}"<block_end>phash=text<def_stmt>__call__ self context request<block_start><return>asbool(request.registry.settings[self.name])<eq>self.value<block_end><block_end><class_stmt>RootOnlyPredicate<block_start><def_stmt>__init__ self val config<block_start>self.val=val<block_end><def_stmt>text self<block_start><return>f"root_only = {self.val}"<block_end>phash=text<def_stmt>__call__ self context request<block_start><return>(context<is>request.root)<eq>self.val<block_end><block_end><def_stmt>template_api context request **kwargs<block_start><return>get_settings()["kotti.templates.api"][0](context request **kwargs)<block_end><def_stmt>add_renderer_globals event<block_start><if_stmt>event.get("renderer_name")<ne>"json"<block_start>request=event["request"]<line_sep>api=getattr(request "template_api" <none>)<if_stmt>api<is><none><and>request<is><not><none><block_start>api=template_api(event["context"] event["request"])<block_end>event["api"]=api<block_end><block_end><class_stmt>Slots<block_start><def_stmt>__init__ self context request<block_start>self.context=context<line_sep>self.request=request<block_end><def_stmt>__getattr__ self key<block_start><for_stmt>event_type slot_events<block_start><if_stmt>event_type.name<eq>key<block_start><break><block_end><block_end><else_stmt><block_start><raise>AttributeError(key)<block_end>value=[]<line_sep>event=event_type(self.context self.request)<for_stmt>snippet objectevent_listeners(event)<block_start><if_stmt>snippet<is><not><none><block_start><if_stmt>isinstance(snippet list)<block_start>value.extend(snippet)<block_end><else_stmt><block_start>value.append(snippet)<block_end><block_end><block_end>setattr(self key 
value)<line_sep><return>value<block_end><block_end><class_stmt>TemplateAPI<block_start>"""This implements the ``api`` object that's passed to all templates.
Use dict-access as a shortcut to retrieve template macros from templates.
"""<line_sep># Instead of overriding these, consider using the
# ``kotti.overrides`` variable.
BARE_MASTER="kotti:templates/master-bare.pt"<line_sep>VIEW_MASTER="kotti:templates/view/master.pt"<line_sep>EDIT_MASTER="kotti:templates/edit/master.pt"<line_sep>SITE_SETUP_MASTER="kotti:templates/site-setup/master.pt"<line_sep>body_css_class=""<def_stmt>__init__ self context request bare=<none> **kwargs<block_start>self.context,self.request=context request<if_stmt>getattr(request "template_api" <none>)<is><none><block_start>request.template_api=self<block_end>self.S=get_settings()<if_stmt>request.is_xhr<and>bare<is><none><block_start>bare=<true># use bare template that renders just the content area
<block_end>self.bare=bare<line_sep>self.slots=Slots(context request)<line_sep>self.__dict__.update(kwargs)<block_end>@staticmethod<def_stmt>is_location context<block_start>"""Does `context` implement :class:`pyramid.interfaces.ILocation`?
:param context: The context.
:type context: kotti.interfaces.INode
:rtype: bool
:returns: True if the context object implements
:class:`pyramid.interfaces.ILocation`.
"""<line_sep><return>ILocation.providedBy(context)<block_end>@reify<def_stmt>edit_needed self<block_start><if_stmt>"kotti.fanstatic.edit_needed"<in>self.S<block_start><return>[r.need()<for>r self.S["kotti.fanstatic.edit_needed"]]<block_end><block_end>@reify<def_stmt>view_needed self<block_start><if_stmt>"kotti.fanstatic.view_needed"<in>self.S<block_start><return>[r.need()<for>r self.S["kotti.fanstatic.view_needed"]]<block_end><block_end><def_stmt>macro self asset_spec macro_name="main"<block_start><if_stmt>self.bare<and>asset_spec<in>(self.VIEW_MASTER self.EDIT_MASTER self.SITE_SETUP_MASTER )<block_start>asset_spec=self.BARE_MASTER<block_end><return>get_renderer(asset_spec).implementation().macros[macro_name]<block_end>@reify<def_stmt>site_title self<block_start>""" The site title.
:result: Value of the ``kotti.site_title`` setting (if specified) or
the root item's ``title`` attribute.
:rtype: str
"""<line_sep>value=get_settings().get("kotti.site_title")<if_stmt><not>value<block_start>value=self.root.title<block_end><return>value<block_end>@reify<def_stmt>page_title self<block_start>"""
Title for the current page as used in the ``<head>`` section of the
default ``master.pt`` template.
:result: '[Human readable view title ]``context.title`` -
:meth:`~TemplateAPI.site_title`''
:rtype: str
"""<line_sep>view_title=self.request.view_name.replace("_" " ").title()<if_stmt>view_title<block_start>view_title<augadd>" "<block_end>view_title<augadd>self.context.title<line_sep><return>f"{view_title} - {self.site_title}"<block_end><def_stmt>url self context=<none> *elements **kwargs<block_start>"""
URL construction helper. Just a convenience wrapper for
:func:`pyramid.request.resource_url` with the same signature. If
``context`` is ``None`` the current context is passed to
``resource_url``.
"""<if_stmt>context<is><none><block_start>context=self.context<block_end><if_stmt><not>ILocation.providedBy(context)<block_start><return>self.request.url<block_end><return>self.request.resource_url(context *elements **kwargs)<block_end>@reify<def_stmt>root self<block_start>"""
The site root.
:result: The root object of the site.
:rtype: :class:`kotti.resources.Node`
"""<if_stmt>ILocation.providedBy(self.context)<block_start><return>self.lineage[-1]<block_end><else_stmt><block_start><return>get_root()<block_end><block_end>@reify<def_stmt>navigation_root self<block_start>"""
The root node for the navigation.
:result: Nearest node in the :meth:`lineage` that provides
:class:`kotti.interfaces.INavigationRoot` or :meth:`root` if
no node provides that interface.
:rtype: :class:`kotti.resources.Node`
"""<for_stmt>o self.lineage<block_start><if_stmt>INavigationRoot.providedBy(o)<block_start><return>o<block_end><block_end><return>self.root<block_end>@reify<def_stmt>lineage self<block_start>"""
Lineage from current context to the root node.
:result: List of nodes.
:rtype: list of :class:`kotti.resources.Node`
"""<line_sep><return>list(lineage(self.context))<block_end>@reify<def_stmt>breadcrumbs self<block_start>"""
List of nodes from the :meth:`navigation_root` to the context.
:result: List of nodes.
:rtype: list of :class:`kotti.resources.Node`
"""<line_sep>breadcrumbs=self.lineage<if_stmt>self.root<ne>self.navigation_root<block_start>index=breadcrumbs.index(self.navigation_root)<line_sep>breadcrumbs=breadcrumbs[:index+1]<block_end><return>reversed(breadcrumbs)<block_end><def_stmt>has_permission self permission context=<none><block_start>""" Convenience wrapper for :func:`pyramid.security.has_permission`
with the same signature. If ``context`` is ``None`` the current
context is passed to ``has_permission``."""<if_stmt>context<is><none><block_start>context=self.context<block_end><return>self.request.has_permission(permission context)<block_end><def_stmt>render_view self name="" context=<none> request=<none> secure=<true> bare=<true><block_start><if_stmt>context<is><none><block_start>context=self.context<block_end><if_stmt>request<is><none><block_start>request=self.request<block_end>before=self.bare<try_stmt><block_start>self.bare=bare<line_sep>html=render_view(context request name secure)<block_end><finally_stmt><block_start>self.bare=before<block_end><return>TemplateStructure(html)<block_end><def_stmt>render_template self renderer **kwargs<block_start><return>TemplateStructure(render(renderer kwargs self.request))<block_end><def_stmt>list_children self context=<none> permission="view"<block_start><if_stmt>context<is><none><block_start>context=self.context<block_end><if_stmt>isinstance(context Node)<block_start><if_stmt>permission<is><none><block_start><return>context.children<block_end><return>context.children_with_permission(self.request permission)<block_end><return>[c<for>c getattr(context "values" <lambda>:[])()<if>(<not>permission<or>self.request.has_permission(permission c))]<block_end>inside=staticmethod(inside)<def_stmt>avatar_url self user=<none> size="14" default_image="identicon"<block_start><if_stmt>user<is><none><block_start>user=self.request.user<block_end>email=user.email<if_stmt><not>email<block_start>email=user.name<block_end>h=hashlib.md5(email.encode("utf8")).hexdigest()<line_sep>query={"default":default_image "size":str(size)}<line_sep>url="https://secure.gravatar.com/avatar/{}?{}".format(h urlencode(query))<line_sep><return>url<block_end>@reify<def_stmt>locale_name self<block_start><return>get_locale_name(self.request)<block_end><def_stmt>format_date self d fmt=<none><block_start><if_stmt>fmt<is><none><block_start>fmt=self.S["kotti.date_format"]<block_end><return>format_date(d format=fmt locale=self.locale_name)<block_end><def_stmt>format_datetime self dt fmt=<none><block_start><if_stmt>fmt<is><none><block_start>fmt=self.S["kotti.datetime_format"]<block_end><if_stmt><not>isinstance(dt datetime)<block_start>dt=datetime.fromtimestamp(dt)<block_end><return>format_datetime(dt format=fmt locale=self.locale_name)<block_end><def_stmt>format_time self t fmt=<none><block_start><if_stmt>fmt<is><none><block_start>fmt=self.S["kotti.time_format"]<block_end><return>format_time(t format=fmt locale=self.locale_name)<block_end><def_stmt>format_currency self n currency fmt=<none><block_start><return>format_currency(n currency format=fmt locale=self.locale_name)<block_end>@staticmethod<def_stmt>get_type name<block_start><for_stmt>class_ get_settings()["kotti.available_types"]<block_start><if_stmt>class_.type_info.name<eq>name<block_start><return>class_<block_end><block_end><block_end><def_stmt>find_edit_view self item<block_start>view_name=self.request.view_name<if_stmt><not>view_permitted(item self.request view_name)<block_start>view_name="edit"<block_end><if_stmt><not>view_permitted(item self.request view_name)<block_start>view_name=""<block_end><return>view_name<block_end>@reify<def_stmt>edit_links self<block_start><if_stmt><not>hasattr(self.context "type_info")<block_start><return>[]<block_end><return>[link<for>link self.context.type_info.edit_links<if>link.visible(self.context self.request)]<block_end>@reify<def_stmt>site_setup_links self<block_start><return>[link<for>link CONTROL_PANEL_LINKS<if>link.visible(self.root 
self.request)]<block_end>@staticmethod<def_stmt>sanitize html sanitizer="default"<block_start>""" Convenience wrapper for :func:`kotti.sanitizers.sanitize`.
:param html: HTML to be sanitized
:type html: str
:param sanitizer: name of the sanitizer to use.
:type sanitizer: str
:result: sanitized HTML
:rtype: str
"""<line_sep><return>sanitize(html sanitizer)<block_end><block_end><class_stmt>NodesTree<block_start><def_stmt>__init__ self node request item_mapping item_to_children permission<block_start>self._node=node<line_sep>self._request=request<line_sep>self._item_mapping=item_mapping<line_sep>self._item_to_children=item_to_children<line_sep>self._permission=permission<block_end>@property<def_stmt>__parent__ self<block_start><if_stmt>self.parent_id<block_start><return>self._item_mapping[self.parent_id]<block_end><block_end>@property<def_stmt>children self<block_start><return>[NodesTree(child self._request self._item_mapping self._item_to_children self._permission )<for>child self._item_to_children[self.id]<if>self._request.has_permission(self._permission child)]<block_end><def_stmt>_flatten self item# noinspection PyProtectedMember
<block_start><yield>item._node<for_stmt>ch item.children<block_start><yield><from>self._flatten(ch)<block_end><block_end><def_stmt>tolist self<block_start><return>list(self._flatten(self))<block_end><def_stmt>__getattr__ self key<block_start><return>getattr(self._node key)<block_end><block_end><def_stmt>nodes_tree request context=<none> permission="view"<block_start>item_mapping={}<line_sep>item_to_children=defaultdict(<lambda>:[])<for_stmt>node DBSession.query(Content).with_polymorphic(Content)<block_start>item_mapping[node.id]=node<if_stmt>request.has_permission(permission node)<block_start>item_to_children[node.parent_id].append(node)<block_end><block_end><for_stmt>children item_to_children.values()<block_start>children.sort(key=<lambda>ch:ch.position)<block_end><if_stmt>context<is><none><block_start>node=item_to_children[<none>][0]<block_end><else_stmt><block_start>node=context<block_end><return>NodesTree(node request item_mapping item_to_children permission)<block_end><def_stmt>search_content search_term request=<none><block_start><return>get_settings()["kotti.search_content"][0](search_term request)<block_end><def_stmt>default_search_content search_term request=<none># noinspection PyUnresolvedReferences
<block_start>searchstring=f"%{search_term}%"<line_sep># generic_filter can be applied to all Node (and subclassed) objects
generic_filter=or_(Content.name.like(searchstring) Content.title.like(searchstring) Content.description.like(searchstring) )<line_sep>results=(DBSession.query(Content).filter(generic_filter).order_by(Content.title.asc()).all())<line_sep># specific result contain objects matching additional criteria
# but must not match the generic criteria (because these objects
# are already in the generic_results)
document_results=DBSession.query(Document).filter(and_(Document.body.like(searchstring) not_(generic_filter)))<for_stmt>results_set [content_with_tags([searchstring]) document_results.all()]<block_start>[results.append(c)<for>c results_set<if>c<not><in>results]<block_end>result_dicts=[]<for_stmt>result results<block_start><if_stmt>request.has_permission("view" result)<block_start>result_dicts.append(dict(name=result.name title=result.title description=result.description path=request.resource_path(result) ))<block_end><block_end><return>result_dicts<block_end><def_stmt>content_with_tags tag_terms<block_start><return>(DBSession.query(Content).join(TagsToContents).join(Tag).filter(or_(*[Tag.title.like(tag_term)<for>tag_term tag_terms])).all())<block_end><def_stmt>search_content_for_tags tags request=<none><block_start>result_dicts=[]<for_stmt>result content_with_tags(tags)<block_start><if_stmt>request.has_permission("view" result)<block_start>result_dicts.append(dict(name=result.name title=result.title description=result.description path=request.resource_path(result) ))<block_end><block_end><return>result_dicts<block_end><def_stmt>includeme config<block_start>""" Pyramid includeme hook.
:param config: app config
:type config: :class:`pyramid.config.Configurator`
"""<line_sep>config.add_view_predicate("root_only" RootOnlyPredicate)<line_sep>config.add_view_predicate("if_setting_has_value" SettingHasValuePredicate)<block_end> |
<import_stmt>textwrap<import_from_stmt>conans.client.tools.env _environment_add<import_from_stmt>conans.test.utils.conan_v2_tests ConanV2ModeTestCase<class_stmt>CollectLibsTestCase(ConanV2ModeTestCase)<block_start><def_stmt>test_conan_username self<block_start>t=self.get_client()<line_sep>conanfile=textwrap.dedent("""
from conans import ConanFile
class Recipe(ConanFile):
name = "name"
version = "version"
""")<line_sep>t.save({'conanfile.py':conanfile})<with_stmt>_environment_add({'CONAN_USERNAME':"user"})<block_start>t.run('create .' assert_error=<true>)<line_sep>self.assertIn("Conan v2 incompatible: Environment variable 'CONAN_USERNAME' is deprecated" t.out)<block_end><block_end><def_stmt>test_conan_channel self<block_start>t=self.get_client()<line_sep>conanfile=textwrap.dedent("""
from conans import ConanFile
class Recipe(ConanFile):
name = "name"
version = "version"
default_user = "user"
""")<line_sep>t.save({'conanfile.py':conanfile})<with_stmt>_environment_add({'CONAN_CHANNEL':"user"})<block_start>t.run('create .' assert_error=<true>)<line_sep>self.assertIn("Conan v2 incompatible: Environment variable 'CONAN_CHANNEL' is deprecated" t.out)<block_end><block_end><block_end> |
# Generated by Django 2.1.4 on 2019-01-01 13:13
<import_from_stmt>django.conf settings<import_stmt>django.contrib.auth.validators<import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[migrations.swappable_dependency(settings.AUTH_USER_MODEL) ('openbook_invitations' '0001_initial') ]<line_sep>operations=[migrations.CreateModel(name='UserInvite' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('invited_date' models.DateField(verbose_name='invited date')) ('name' models.CharField(blank=<true> max_length=256 null=<true>)) ('email' models.EmailField(max_length=254 unique=<true> verbose_name='email address')) ('username' models.CharField(blank=<true> error_messages={'unique':'A user with that username already exists.'} help_text='Required. 30 characters or fewer. Letters, digits and _ only.' max_length=30 null=<true> unique=<true> validators=[django.contrib.auth.validators.UnicodeUsernameValidator()] verbose_name='username')) ('badge_keyword' models.CharField(blank=<true> max_length=16 null=<true>)) ('token' models.CharField(max_length=256)) ('invited_by' models.ForeignKey(on_delete=django.db.models.deletion.CASCADE related_name='invited_users' to=settings.AUTH_USER_MODEL)) ] ) migrations.RemoveField(model_name='inviteuser' name='invited_by' ) migrations.DeleteModel(name='InviteUser' ) ]<block_end> |
"""
SQL-based result tracker (cannot store actual results, only status).
Using this tracker requires installation of the sqlalchemy package.
Regarding using models from different sources in Flask-SQLAlchemy:
https://stackoverflow.com/questions/28789063/associate-external-class-model-with-flask-sqlalchemy
TODO: Note that this tracker doesn't handle job reruns gracefully yet, because the result field will be
progressively overwritten but not reset when the job is rerun.
Authors:
<NAME>
"""<import_from_stmt>contextlib contextmanager<import_stmt>json<import_stmt>os<import_from_stmt>copy deepcopy<import_from_stmt>sqlalchemy Column Integer String DateTime Text create_engine func <import_from_stmt>sqlalchemy.orm sessionmaker<import_from_stmt>sqlalchemy.ext.declarative declarative_base<import_from_stmt>sqlalchemy.exc DBAPIError<import_from_stmt>sqlalchemy.dialects mysql<import_from_stmt>evcouplings.utils.helpers retry<import_from_stmt>evcouplings.utils.config InvalidParameterError<import_from_stmt>evcouplings.utils.tracker EStatus<import_from_stmt>evcouplings.utils.tracker.base ResultTracker<line_sep># create SQLALchemy declarative base for SQL models
Base=declarative_base()<line_sep>JOB_TABLE_NAME="evcouplings_jobs"<line_sep># work around 65k limitation for mysql (without introducing max length, which would
# cause issues with postgresql)
# see here: https://github.com/sqlalchemy/sqlalchemy/issues/4443
LongText=Text().with_variant(mysql.LONGTEXT() "mysql")<class_stmt>SQLTracker(ResultTracker)<block_start>"""
Tracks compute job results in an SQL backend
"""<def_stmt>__init__ self **kwargs<block_start>"""
Create new SQL-based tracker. For now, this tracker will ignore file_list
and store all file paths in the database except for those in delete_list.
Parameters
----------
connection_string : str
SQLite connection URI. Must include database name,
and username/password if authentication is used.
job_id : str
Unique job identifier of job which should be tracked
prefix : str
Prefix of pipeline job
pipeline : str
Name of pipeline that is running
file_list : list(str)
List of file item keys from outconfig that should
be stored in database. For now, this parameter has no
effect and all file paths will be stored in database.
delete_list : list(str)
List of file item keys from outconfig that will be deleted
after run is finished. These files cannot be stored as paths
to the pipeline result in the output.
config : dict(str)
Entire configuration dictionary of job
retry_max_number : int, optional (default: None)
Maximum number of attempts to perform database queries / updates.
If None, will try forever.
retry_wait : int, optional (default: None)
Time in seconds between retries to connect to database
"""<line_sep>super().__init__(**kwargs)<line_sep># for SQL tracker, job ID may not be longer than 255 chars to not interfere with older SQL DBs
<if_stmt>len(self.job_id)<g>255<block_start><raise>InvalidParameterError("Length of job_id for SQL tracker may not exceed 255 characters for database compatibility reasons")<block_end># create SQLAlchemy engine and session maker to
# instantiate later sessions
self._engine=create_engine(self.connection_string)<line_sep>self._Session=sessionmaker(bind=self._engine)<line_sep># Make sure all tables are there in database
Base.metadata.create_all(bind=self._engine)<block_end>@contextmanager<def_stmt>session_scope self<block_start>"""
Provide a transactional scope around a series of operations.
Source: https://docs.sqlalchemy.org/en/latest/orm/session_basics.html
"""<line_sep>session=self._Session()<try_stmt><block_start><yield>session<line_sep>session.commit()<block_end><except_stmt><block_start>session.rollback()<line_sep><raise><block_end><finally_stmt><block_start>session.close()<block_end><block_end><def_stmt>get self<block_start>"""
Return the current entry tracked by this tracker.
Does not attempt to retry if database connection fails.
"""<with_stmt>self.session_scope()<as>session<block_start>query_res=session.query(ComputeJob).filter_by(job_id=self.job_id).all()<line_sep>q=[deepcopy(x.__dict__)<for>x query_res]<if_stmt>len(q)<eq>0<block_start><return><none><block_end><if_stmt>len(q)<g>1<block_start><raise>ValueError("Job ID not unique, found more than one job.")<block_end><else_stmt><block_start><return>q[0]<block_end><block_end><block_end><def_stmt>_retry_query self func session rollback=<true><block_start>"""
Retry database query until success or maximum number of attempts
is reached
Parameters
----------
func : callable
Query function that will be executed until successful
session : sqlalchemy.orm.session.Session
SQLALchemy database session
rollback : bool, optional (default: True)
Perform rollback of session before reattempt,
can be set to False for read-only queries
Returns
-------
Result of func()
Raises
------
ResourceError
If execution is not successful within maximum
number of attempts
"""<if_stmt>rollback<block_start>retry_action=session.rollback<block_end><else_stmt><block_start>retry_action=<none><block_end><return>retry(func self.retry_max_number self.retry_wait exceptions=DBAPIError retry_action=retry_action)<block_end><def_stmt>_execute_update self session q status=<none> message=<none> stage=<none> results=<none><block_start>"""
Wraps update to SQL database (to allow for retries)
Parameters
----------
session : sqlalchemy.orm.session.Session
SQLALchemy database session
q : sqlalchemy.orm.query.Query
SQLAlchemy query if a job with self.job_id
already exists
For remaining parameters, see update()
"""<line_sep># check if we already have some job
num_rows=len(q.all())<line_sep># create new entry if not already existing
<if_stmt>num_rows<eq>0# Note: do not initialize location here, since this should
# be either set by outside code upon job creation,
# or based on current working dir of running job
<block_start>r=ComputeJob(job_id=self.job_id prefix=self.prefix status=EStatus.INIT config=json.dumps(self.config) pipeline=self.pipeline time_created=func.now())<line_sep>session.add(r)<block_end><else_stmt># can only be one row due to unique constraint
<block_start>r=q.one()<block_end># if status is given, update
<if_stmt>status<is><not><none><block_start>r.status=status<line_sep># if we switch into running state, record
# current time as starting time of actual computation
<if_stmt>status<eq>EStatus.RUN<block_start>r.time_started=func.now()<line_sep># pragmatic hack to filling in the location if not
# already set - can only do this based on current directory
# inside pipeline runner (i.e. when job is started), since
# any other code that creates the job entry may operate in a
# different working directory (e.g. batch submitter in evcouplings app)
<if_stmt>r.location<is><none><block_start>r.location=os.getcwd()<block_end><block_end><block_end># if stage is given, update
<if_stmt>stage<is><not><none><block_start>r.stage=stage<block_end># set termination/fail message
<if_stmt>message<is><not><none><block_start>r.message=str(message)<block_end># update timestamp of last modification
# (will correspond to finished time at the end)
r.time_updated=func.now()<line_sep># finally, also update results (stored as json)
<if_stmt>results<is><not><none># first, extract current state in database to dict
<block_start><if_stmt>r.results<is><not><none><block_start>current_result_state=json.loads(r.results)<block_end><else_stmt><block_start>current_result_state={}<block_end># store everything in database except files that are
# flagged for deletion on filesystem, since we only
# store the file paths to these files
result_update={k:v<for>(k v) results.items()<if>k<not><in>self.delete_list}<line_sep># create result update, make sure update overwrites
# any pre-existing keys
new_result_state={**current_result_state **result_update}<line_sep># finally, add updated result state to database record
r.results=json.dumps(new_result_state)<block_end>session.commit()<block_end><def_stmt>update self status=<none> message=<none> stage=<none> results=<none><block_start><with_stmt>self.session_scope()<as>session# see if we can find the job in the database already
<block_start>q=self._retry_query(<lambda>:session.query(ComputeJob).filter_by(job_id=self.job_id) session=session rollback=<false>)<line_sep># then execute actual update
self._retry_query(<lambda>:self._execute_update(session q status message stage results) session=session rollback=<true>)<block_end><block_end><block_end><class_stmt>ComputeJob(Base)<block_start>"""
Single compute job. Holds general information about job
and its status, but not about individual parameters
(these are stored in config file to keep table schema
stable).
"""<line_sep>__tablename__=JOB_TABLE_NAME<line_sep># internal unique ID of this single compute job
key=Column(Integer primary_key=<true>)<line_sep># human-readable job identifier (must be unique)
job_id=Column(String(255) unique=<true>)<line_sep># job prefix
prefix=Column(String(2048))<line_sep># job pipeline (monomer, complex, ...)
pipeline=Column(String(128))<line_sep># location - e.g., working dir, remote URI, asf
location=Column(String(2048))<line_sep># job status ("pending", "running", "finished",
# "failed", "terminated")
status=Column(String(128))<line_sep># message upon job failure / termination
# (e.g. exception, termination code, ...)
message=Column(LongText)<line_sep># job identifier e.g. on compute cluster
# e.g. if job should be stopped
runner_id=Column(String(2048))<line_sep># stage of computational pipeline
# ("align", "couplings", ...)
stage=Column(String(128))<line_sep># time the job was created
time_created=Column(DateTime())<line_sep># time the job started running
time_started=Column(DateTime())<line_sep># time the job finished running; last
# update corresponds to time job finished
time_updated=Column(DateTime())<line_sep># configuration of job (stringified JSON)
config=Column(LongText)<line_sep># Optional MD5 hash of configuration to identify
# unique job configurations
fingerprint=Column(String(32))<line_sep># results of job (stringified JSON)
results=Column(LongText)<block_end> |
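A minimal usage sketch for the `SQLTracker` defined above, based only on the parameters documented in its `__init__` docstring; the connection URI, job identifier, pipeline name, and configuration values are illustrative assumptions rather than values taken from the original module.

```python
# Hypothetical example: create a tracker and record a status transition.
# All concrete values below are assumptions made for illustration only.
tracker = SQLTracker(
    connection_string="sqlite:///jobs.db",  # any SQLAlchemy-compatible URI
    job_id="job_0001",
    prefix="output/job_0001",
    pipeline="protein_monomer",
    file_list=[],
    delete_list=[],
    config={},
    retry_max_number=3,
    retry_wait=10,
)

# Mark the job as running, then inspect the stored record.
tracker.update(status=EStatus.RUN, stage="align")
print(tracker.get()["status"])
```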
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>os<line_sep># GPU memory garbage collection optimization flags
os.environ['FLAGS_eager_delete_tensor_gb']="0.0"<import_stmt>sys<import_stmt>timeit<import_stmt>argparse<import_stmt>pprint<import_stmt>shutil<import_stmt>functools<import_stmt>paddle<import_stmt>numpy<as>np<import_stmt>paddle.fluid<as>fluid<import_from_stmt>src.utils.metrics ConfusionMatrix<import_from_stmt>src.utils.config cfg<import_from_stmt>src.utils.timer Timer calculate_eta<import_from_stmt>src.utils dist_utils<import_from_stmt>src.datasets build_dataset<import_from_stmt>src.models.model_builder build_model<import_from_stmt>src.models.model_builder ModelPhase<import_from_stmt>src.models.model_builder parse_shape_from_file<import_from_stmt>eval evaluate<import_from_stmt>vis visualize<def_stmt>parse_args <block_start>parser=argparse.ArgumentParser(description='semseg-paddle')<line_sep>parser.add_argument('--cfg' dest='cfg_file' help='Config file for training (and optionally testing)' default=<none> type=str)<line_sep>parser.add_argument('--use_gpu' dest='use_gpu' help='Use gpu or cpu' action='store_true' default=<false>)<line_sep>parser.add_argument('--use_mpio' dest='use_mpio' help='Use multiprocess I/O or not' action='store_true' default=<false>)<line_sep>parser.add_argument('--log_steps' dest='log_steps' help='Display logging information at every log_steps' default=10 type=int)<line_sep>parser.add_argument('--debug' dest='debug' help='debug mode, display detail information of training' action='store_true')<line_sep>parser.add_argument('--use_vdl' dest='use_vdl' help='whether to record the data during training to VisualDL' action='store_true')<line_sep>parser.add_argument('--vdl_log_dir' dest='vdl_log_dir' help='VisualDL logging directory' default=<none> type=str)<line_sep>parser.add_argument('--do_eval' dest='do_eval' help='Evaluation models result on every new checkpoint' action='store_true')<line_sep>parser.add_argument('opts' help='See utils/config.py for all options' default=<none> nargs=argparse.REMAINDER)<line_sep><return>parser.parse_args()<block_end><def_stmt>save_checkpoint exe program ckpt_name<block_start>"""
Save checkpoint for evaluation or resume training
"""<line_sep>filename='{}_{}_{}_epoch_{}.pdparams'.format(str(cfg.MODEL.MODEL_NAME) str(cfg.MODEL.BACKBONE) str(cfg.DATASET.DATASET_NAME) ckpt_name)<line_sep>ckpt_dir=cfg.TRAIN.MODEL_SAVE_DIR<line_sep>print("Save model checkpoint to {}".format(ckpt_dir))<if_stmt><not>os.path.isdir(ckpt_dir)<block_start>os.makedirs(ckpt_dir)<block_end>fluid.io.save_params(exe ckpt_dir program filename)<line_sep><return>ckpt_dir<block_end><def_stmt>load_checkpoint exe program<block_start>"""
Load checkpoint from the pretrained model directory to resume training
"""<line_sep>print('Resume model training from:' cfg.TRAIN.RESUME_MODEL_DIR)<if_stmt><not>os.path.exists(cfg.TRAIN.RESUME_MODEL_DIR)<block_start><raise>ValueError("TRAIN.PRETRAIN_MODEL {} not exist!".format(cfg.TRAIN.RESUME_MODEL_DIR))<block_end>fluid.io.load_persistables(exe cfg.TRAIN.RESUME_MODEL_DIR main_program=program)<line_sep>model_path=cfg.TRAIN.RESUME_MODEL_DIR<line_sep># Check is path ended by path spearator
<if_stmt>model_path[-1]<eq>os.sep<block_start>model_path=model_path[0:-1]<block_end>epoch_name=os.path.basename(model_path)<line_sep># If resume model is final model
<if_stmt>epoch_name<eq>'final'<block_start>begin_epoch=cfg.SOLVER.NUM_EPOCHS<block_end># If resume model path ends with a digit, restore epoch status
<elif_stmt>epoch_name.isdigit()<block_start>epoch=int(epoch_name)<line_sep>begin_epoch=epoch+1<block_end><else_stmt><block_start><raise>ValueError("Resume model path is not valid!")<block_end>print("Model checkpoint loaded successfully!")<line_sep><return>begin_epoch<block_end><def_stmt>print_info *msg<block_start><if_stmt>cfg.TRAINER_ID<eq>0<block_start>print(*msg)<block_end><block_end><def_stmt>train cfg<block_start>startup_prog=fluid.Program()<line_sep>train_prog=fluid.Program()<line_sep>drop_last=<true><line_sep>dataset=build_dataset(cfg.DATASET.DATASET_NAME file_list=cfg.DATASET.TRAIN_FILE_LIST mode=ModelPhase.TRAIN shuffle=<true> data_dir=cfg.DATASET.DATA_DIR base_size=cfg.DATAAUG.BASE_SIZE crop_size=cfg.DATAAUG.CROP_SIZE rand_scale=<true>)<def_stmt>data_generator <block_start><if_stmt>args.use_mpio<block_start>data_gen=dataset.multiprocess_generator(num_processes=cfg.DATALOADER.NUM_WORKERS max_queue_size=cfg.DATALOADER.BUF_SIZE)<block_end><else_stmt><block_start>data_gen=dataset.generator()<block_end>batch_data=[]<for_stmt>b data_gen<block_start>batch_data.append(b)<if_stmt>len(batch_data)<eq>(cfg.TRAIN_BATCH_SIZE<floordiv>cfg.NUM_TRAINERS)<block_start><for_stmt>item batch_data<block_start><yield>item[0] item[1] item[2]<block_end>batch_data=[]<block_end><block_end># If use sync batch norm strategy, drop last batch if number of samples
# in batch_data is less than cfg.BATCH_SIZE, to avoid NCCL hang issues
<if_stmt><not>cfg.TRAIN.SYNC_BATCH_NORM<block_start><for_stmt>item batch_data<block_start><yield>item[0] item[1] item[2]<block_end><block_end><block_end># Get device environment
gpu_id=int(os.environ.get('FLAGS_selected_gpus' 0))<line_sep>place=fluid.CUDAPlace(gpu_id)<if>args.use_gpu<else>fluid.CPUPlace()<line_sep>places=fluid.cuda_places()<if>args.use_gpu<else>fluid.cpu_places()<line_sep># Get number of GPU
dev_count=cfg.NUM_TRAINERS<if>cfg.NUM_TRAINERS<g>1<else>len(places)<line_sep>print_info("#device count: {}".format(dev_count))<line_sep>cfg.TRAIN_BATCH_SIZE=dev_count<times>int(cfg.TRAIN_BATCH_SIZE_PER_GPU)<line_sep>print_info("#train_batch_size: {}".format(cfg.TRAIN_BATCH_SIZE))<line_sep>print_info("#batch_size_per_dev: {}".format(cfg.TRAIN_BATCH_SIZE_PER_GPU))<line_sep>py_reader,avg_loss,lr,pred,grts,masks=build_model(train_prog startup_prog phase=ModelPhase.TRAIN)<line_sep>py_reader.decorate_sample_generator(data_generator batch_size=cfg.TRAIN_BATCH_SIZE_PER_GPU drop_last=drop_last)<line_sep>exe=fluid.Executor(place)<line_sep>exe.run(startup_prog)<line_sep>exec_strategy=fluid.ExecutionStrategy()<line_sep># Clear temporary variables every 100 iteration
<if_stmt>args.use_gpu<block_start>exec_strategy.num_threads=fluid.core.get_cuda_device_count()<block_end>exec_strategy.num_iteration_per_drop_scope=100<line_sep>build_strategy=fluid.BuildStrategy()<if_stmt>cfg.NUM_TRAINERS<g>1<and>args.use_gpu<block_start>dist_utils.prepare_for_multi_process(exe build_strategy train_prog)<line_sep>exec_strategy.num_threads=1<block_end><if_stmt>cfg.TRAIN.SYNC_BATCH_NORM<and>args.use_gpu<block_start><if_stmt>dev_count<g>1# Apply sync batch norm strategy
<block_start>print_info("Sync BatchNorm strategy is effective.")<line_sep>build_strategy.sync_batch_norm=<true><block_end><else_stmt><block_start>print_info("Sync BatchNorm strategy will not be effective if GPU device"<concat>" count <= 1")<block_end><block_end>compiled_train_prog=fluid.CompiledProgram(train_prog).with_data_parallel(loss_name=avg_loss.name exec_strategy=exec_strategy build_strategy=build_strategy)<line_sep># Resume training
begin_epoch=cfg.SOLVER.BEGIN_EPOCH<if_stmt>cfg.TRAIN.RESUME_MODEL_DIR<block_start>begin_epoch=load_checkpoint(exe train_prog)<block_end># Load pretrained model
<elif_stmt>os.path.exists(cfg.TRAIN.PRETRAINED_MODEL_DIR)<block_start>print_info('Pretrained model dir: ' cfg.TRAIN.PRETRAINED_MODEL_DIR)<line_sep>load_vars=[]<line_sep>load_fail_vars=[]<def_stmt>var_shape_matched var shape<block_start>"""
Check whether the persistable variable shape matches the current network
"""<line_sep>var_exist=os.path.exists(os.path.join(cfg.TRAIN.PRETRAINED_MODEL_DIR var.name))<if_stmt>var_exist<block_start>var_shape=parse_shape_from_file(os.path.join(cfg.TRAIN.PRETRAINED_MODEL_DIR var.name))<line_sep><return>var_shape<eq>shape<block_end><return><false><block_end><for_stmt>x train_prog.list_vars()<block_start><if_stmt>isinstance(x fluid.framework.Parameter)<block_start>shape=tuple(fluid.global_scope().find_var(x.name).get_tensor().shape())<if_stmt>var_shape_matched(x shape)<block_start>load_vars.append(x)<block_end><else_stmt><block_start>load_fail_vars.append(x)<block_end><block_end><block_end>fluid.io.load_vars(exe dirname=cfg.TRAIN.PRETRAINED_MODEL_DIR vars=load_vars)<for_stmt>var load_vars<block_start>print_info("Parameter[{}] loaded sucessfully!".format(var.name))<block_end><for_stmt>var load_fail_vars<block_start>print_info("Parameter[{}] don't exist or shape does not match current network, skip"<concat>" to load it.".format(var.name))<block_end>print_info("{}/{} pretrained parameters loaded successfully!".format(len(load_vars) len(load_vars)+len(load_fail_vars)))<block_end><else_stmt><block_start>print_info('Pretrained model dir {} not exists, training from scratch...'.format(cfg.TRAIN.PRETRAINED_MODEL_DIR))<block_end>fetch_list=[avg_loss.name lr.name]<if_stmt>args.debug# Fetch more variable info and use streaming confusion matrix to
# calculate IoU results if in debug mode
<block_start>np.set_printoptions(precision=4 suppress=<true> linewidth=160 floatmode="fixed")<line_sep>fetch_list.extend([pred.name grts.name masks.name])<line_sep>cm=ConfusionMatrix(cfg.DATASET.NUM_CLASSES streaming=<true>)<block_end><if_stmt>args.use_vdl<block_start><if_stmt><not>args.vdl_log_dir<block_start>print_info("Please specify the log directory by --vdl_log_dir.")<line_sep>exit(1)<block_end><import_from_stmt>visualdl LogWriter<line_sep>log_writer=LogWriter(args.vdl_log_dir)<block_end># trainer_id = int(os.getenv("PADDLE_TRAINER_ID", 0))
# num_trainers = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))
step=0<line_sep>all_step=cfg.DATASET.TRAIN_TOTAL_IMAGES<floordiv>cfg.TRAIN_BATCH_SIZE<if_stmt>cfg.DATASET.TRAIN_TOTAL_IMAGES%cfg.TRAIN_BATCH_SIZE<and>drop_last<ne><true><block_start>all_step<augadd>1<block_end>all_step<augmul>(cfg.SOLVER.NUM_EPOCHS-begin_epoch+1)<line_sep>avg_loss=0.0<line_sep>timer=Timer()<line_sep>timer.start()<if_stmt>begin_epoch<g>cfg.SOLVER.NUM_EPOCHS<block_start><raise>ValueError(("begin epoch[{}] is larger than cfg.SOLVER.NUM_EPOCHS[{}]").format(begin_epoch cfg.SOLVER.NUM_EPOCHS))<block_end><if_stmt>args.use_mpio<block_start>print_info("Use multiprocess reader")<block_end><else_stmt><block_start>print_info("Use multi-thread reader")<block_end><for_stmt>epoch range(begin_epoch cfg.SOLVER.NUM_EPOCHS+1)<block_start>py_reader.start()<while_stmt><true><block_start><try_stmt><block_start><if_stmt>args.debug# Print category IoU and accuracy to check whether the
# training process corresponds to expectations
<block_start>loss,lr,pred,grts,masks=exe.run(program=compiled_train_prog fetch_list=fetch_list return_numpy=<true>)<line_sep>cm.calculate(pred grts masks)<line_sep>avg_loss<augadd>np.mean(np.array(loss))<line_sep>step<augadd>1<if_stmt>step%args.log_steps<eq>0<block_start>speed=args.log_steps/timer.elapsed_time()<line_sep>avg_loss<augdiv>args.log_steps<line_sep>category_acc,mean_acc=cm.accuracy()<line_sep>category_iou,mean_iou=cm.mean_iou()<line_sep>print_info(("epoch={}/{} step={}/{} lr={:.5f} loss={:.4f} acc={:.5f} mIoU={:.5f} step/sec={:.3f} | ETA {}").format(epoch cfg.SOLVER.NUM_EPOCHS step all_step lr[0] avg_loss mean_acc mean_iou speed calculate_eta(all_step-step speed)))<line_sep>print_info("Category IoU: " category_iou)<line_sep>print_info("Category Acc: " category_acc)<if_stmt>args.use_vdl<block_start>log_writer.add_scalar('Train/mean_iou' mean_iou step)<line_sep>log_writer.add_scalar('Train/mean_acc' mean_acc step)<line_sep>log_writer.add_scalar('Train/loss' avg_loss step)<line_sep>log_writer.add_scalar('Train/lr' lr[0] step)<line_sep>log_writer.add_scalar('Train/step/sec' speed step)<block_end>sys.stdout.flush()<line_sep>avg_loss=0.0<line_sep>cm.zero_matrix()<line_sep>timer.restart()<block_end><block_end><else_stmt># If not in debug mode, avoid unnessary log and calculate
<block_start>loss,lr=exe.run(program=compiled_train_prog fetch_list=fetch_list return_numpy=<true>)<line_sep>avg_loss<augadd>np.mean(np.array(loss))<line_sep>step<augadd>1<if_stmt>step%args.log_steps<eq>0<and>cfg.TRAINER_ID<eq>0<block_start>avg_loss<augdiv>args.log_steps<line_sep>speed=args.log_steps/timer.elapsed_time()<line_sep>print(("epoch={}/{} step={}/{} lr={:.5f} loss={:.4f} step/sec={:.3f} | ETA {}").format(epoch cfg.SOLVER.NUM_EPOCHS global_step all_step lr[0] avg_loss speed calculate_eta(all_step-global_step speed)))<if_stmt>args.use_vdl<block_start>log_writer.add_scalar('Train/loss' avg_loss step)<line_sep>log_writer.add_scalar('Train/lr' lr[0] step)<line_sep>log_writer.add_scalar('Train/speed' speed step)<block_end>sys.stdout.flush()<line_sep>avg_loss=0.0<line_sep>timer.restart()<block_end><block_end><block_end><except_stmt>fluid.core.EOFException<block_start>py_reader.reset()<line_sep><break><block_end><except_stmt>Exception<as>e<block_start>print(e)<block_end><block_end><if_stmt>epoch%cfg.TRAIN.SNAPSHOT_EPOCH<eq>0<and>cfg.TRAINER_ID<eq>0<block_start>ckpt_dir=save_checkpoint(exe train_prog epoch)<if_stmt>args.do_eval<block_start>print("Evaluation start")<line_sep>_,mean_iou,_,mean_acc=evaluate(cfg=cfg ckpt_dir=ckpt_dir use_gpu=args.use_gpu use_mpio=args.use_mpio)<if_stmt>args.use_vdl<block_start>log_writer.add_scalar('Evaluate/mean_iou' mean_iou step)<line_sep>log_writer.add_scalar('Evaluate/mean_acc' mean_acc step)<block_end><block_end># Use VisualDL to visualize results
<if_stmt>args.use_vdl<and>cfg.DATASET.VIS_FILE_LIST<is><not><none><block_start>visualize(cfg=cfg use_gpu=args.use_gpu vis_file_list=cfg.DATASET.VIS_FILE_LIST vis_dir="visual" ckpt_dir=ckpt_dir log_writer=log_writer)<block_end><block_end><block_end># save final model
<if_stmt>cfg.TRAINER_ID<eq>0<block_start>save_checkpoint(exe train_prog 'final')<block_end><if_stmt>args.use_vdl<block_start>log_writer.close()<block_end><block_end><def_stmt>main args<block_start><if_stmt>args.cfg_file<is><not><none><block_start>cfg.update_from_file(args.cfg_file)<block_end><if_stmt>args.opts<block_start>cfg.update_from_list(args.opts)<block_end>cfg.TRAINER_ID=int(os.getenv("PADDLE_TRAINER_ID" 0))<line_sep>cfg.NUM_TRAINERS=int(os.environ.get('PADDLE_TRAINERS_NUM' 1))<line_sep>cfg.check_and_infer()<line_sep>print_info(pprint.pformat(cfg))<line_sep>train(cfg)<block_end><if_stmt>__name__<eq>'__main__'<block_start>args=parse_args()<line_sep>start=timeit.default_timer()<line_sep>main(args)<line_sep>end=timeit.default_timer()<line_sep>print("training time: {} h".format(1.0<times>(end-start)/3600))<block_end> |
<import_from_stmt>htm htm<line_sep>@htm<def_stmt>html tag props children<block_start><return>tag props children<block_end>result01=html("""
<div>Hello World</div>
""")<line_sep> |
<import_stmt>re<line_sep># Source: https://stackoverflow.com/a/14693789
_ansi_escape=re.compile(r'\x1b\[[0-?]*[ -/]*[@-~]')<def_stmt>uncolor text<block_start><return>_ansi_escape.sub('' text)<block_end> |
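A quick usage sketch for the `uncolor` helper above; the colored input string is an illustrative assumption.

```python
# Strip ANSI color codes from terminal output before logging or diffing.
colored = '\x1b[31merror\x1b[0m: disk full'
assert uncolor(colored) == 'error: disk full'
```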
# Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>typing Optional Tuple Any Union Type Callable List Text<import_stmt>numpy<as>np<import_stmt>tensornetwork.tensor<import_stmt>tensornetwork.backends.abstract_backend<as>abstract_backend<import_from_stmt>tensornetwork backends<line_sep>AbstractBackend=abstract_backend.AbstractBackend<line_sep>Array=Any<line_sep>Tensor=tensornetwork.tensor.Tensor<class_stmt>MatvecCache<block_start>"""
Caches matvec functions so that they have identical function signature
when called repeatedly. This circumvents extraneous recompilations when
Jit is used. Incoming matvec functions should be in terms of Tensor
and have function signature A = matvec(x, *args), where each of the
positional arguments in *args is also a Tensor.
"""<def_stmt>__init__ self<block_start>self.clear()<block_end><def_stmt>clear self<block_start>self.cache={}<block_end><def_stmt>retrieve self backend_name:Text matvec:Callable<block_start><if_stmt>backend_name<not><in>self.cache<block_start>self.cache[backend_name]={}<block_end><if_stmt>matvec<not><in>self.cache[backend_name]<block_start><def_stmt>wrapped x *args<block_start>X=Tensor(x backend=backend_name)<line_sep>Args=[Tensor(a backend=backend_name)<for>a args]<line_sep>Y=matvec(X *Args)<line_sep><return>Y.array<block_end>self.cache[backend_name][matvec]=wrapped<block_end><return>self.cache[backend_name][matvec]<block_end><block_end>KRYLOV_MATVEC_CACHE=MatvecCache()<def_stmt>krylov_error_checks backend:Union[Text AbstractBackend <none>] x0:Union[Tensor <none>] args:Union[List[Tensor] <none>]<block_start>"""
Checks that at least one of backend and x0 are not None; that backend
and x0.backend agree; that if args is not None its elements are Tensors
whose backends also agree. Creates a backend object from backend
and returns the arrays housed by x0 and args.
Args:
backend: A backend, text specifying one, or None.
x0: A tn.Tensor, or None.
args: A list of tn.Tensor, or None.
Returns:
backend: A backend object.
x0_array: x0.array if x0 was supplied, or None.
args_array: Each array in the list of args if it was supplied, or None.
"""<line_sep># If the backend wasn't specified, infer it from x0. If neither was specified
# raise ValueError.
<if_stmt>backend<is><none><block_start><if_stmt>x0<is><none><block_start><raise>ValueError("One of backend or x0 must be specified.")<block_end>backend=x0.backend<block_end><else_stmt><block_start>backend=backends.backend_factory.get_backend(backend)<block_end># If x0 was specified, return the enclosed array. If attempting to do so
# raises AttributeError, instead raise TypeError. If backend was also
# specified, but was different than x0.backend, raise ValueError.
<if_stmt>x0<is><not><none><block_start><try_stmt><block_start>x0_array=x0.array<block_end><except_stmt>AttributeError<as>err<block_start><raise>TypeError("x0 must be a tn.Tensor.")<from>err<block_end><if_stmt>x0.backend.name<ne>backend.name<block_start>errstr=("If both x0 and backend are specified the"<concat>"backends must agree. \n"<concat>f"x0 backend: {x0.backend.name} \n"<concat>f"backend: {backend.name} \n")<line_sep><raise>ValueError(errstr)<block_end><block_end><else_stmt># If x0 was not specified, set x0_array (the returned value) to None.
<block_start>x0_array=<none><block_end># If args were specified, set the returned args_array to be all the enclosed
# arrays. If any of them raise AttributeError during the attempt, raise
# TypeError. If args was not specified, set args_array to None.
<if_stmt>args<is><not><none><block_start><try_stmt><block_start>args_array=[a.array<for>a args]<block_end><except_stmt>AttributeError<as>err<block_start><raise>TypeError("Every element of args must be a tn.Tensor.")<from>err<block_end><block_end><else_stmt><block_start>args_array=<none><block_end><return>(backend x0_array args_array)<block_end><def_stmt>eigsh_lanczos A:Callable backend:Optional[Union[Text AbstractBackend]]=<none> args:Optional[List[Tensor]]=<none> x0:Optional[Tensor]=<none> shape:Optional[Tuple[int <ellipsis>]]=<none> dtype:Optional[Type[np.number]]=<none> num_krylov_vecs:int=20 numeig:int=1 tol:float=1E-8 delta:float=1E-8 ndiag:int=20 reorthogonalize:bool=<false><arrow>Tuple[Tensor List]<block_start>"""
Lanczos method for finding the lowest eigenvector-eigenvalue pairs
of `A`.
Args:
A: A (sparse) implementation of a linear operator.
Call signature of `A` is `res = A(vector, *args)`, where `vector`
can be an arbitrary `Array`, and `res.shape` has to be `vector.shape`.
backend: A backend, text specifying one, or None.
args: A list of arguments to `A`. `A` will be called as
`res = A(x0, *args)`.
x0: An initial vector for the Lanczos algorithm. If `None`,
a random initial vector is created using the `backend.randn` method
shape: The shape of the input-dimension of `A`.
dtype: The dtype of the input `A`. If no `x0` is provided,
a random initial state with shape `shape` and dtype `dtype` is created.
num_krylov_vecs: The number of iterations (number of krylov vectors).
numeig: The number of eigenvector-eigenvalue pairs to be computed.
If `numeig > 1`, `reorthogonalize` has to be `True`.
tol: The desired precision of the eigenvalues. Uses
`backend.norm(eigvalsnew[0:numeig] - eigvalsold[0:numeig]) < tol`
as stopping criterion between two diagonalization steps of the
tridiagonal operator.
delta: Stopping criterion for Lanczos iteration.
If a Krylov vector :math: `x_n` has an L2 norm
:math:`\\lVert x_n\\rVert < delta`, the iteration
is stopped. It means that an (approximate) invariant subspace has
been found.
ndiag: The tridiagonal Operator is diagonalized every `ndiag`
iterations to check convergence.
reorthogonalize: If `True`, Krylov vectors are kept orthogonal by
explicit orthogonalization (more costly than `reorthogonalize=False`)
Returns:
(eigvals, eigvecs)
eigvals: A list of `numeig` lowest eigenvalues
eigvecs: A list of `numeig` lowest eigenvectors
"""<line_sep>backend,x0_array,args_array=krylov_error_checks(backend x0 args)<line_sep>mv=KRYLOV_MATVEC_CACHE.retrieve(backend.name A)<line_sep>result=backend.eigsh_lanczos(mv args=args_array initial_state=x0_array shape=shape dtype=dtype num_krylov_vecs=num_krylov_vecs numeig=numeig tol=tol delta=delta ndiag=ndiag reorthogonalize=reorthogonalize)<line_sep>eigvals,eigvecs=result<line_sep>eigvecsT=[Tensor(ev backend=backend)<for>ev eigvecs]<line_sep><return>eigvals eigvecsT<block_end><def_stmt>eigs A:Callable backend:Optional[Union[Text AbstractBackend]]=<none> args:Optional[List[Tensor]]=<none> x0:Optional[Tensor]=<none> shape:Optional[Tuple[int <ellipsis>]]=<none> dtype:Optional[Type[np.number]]=<none> num_krylov_vecs:int=20 numeig:int=1 tol:float=1E-8 which:Text='LR' maxiter:int=20<arrow>Tuple[Tensor List]<block_start>"""
Implicitly restarted Arnoldi method for finding the lowest
eigenvector-eigenvalue pairs of a linear operator `A`.
`A` is a function implementing the matrix-vector
product.
WARNING: This routine uses jax.jit to reduce runtimes. jitting is triggered
at the first invocation of `eigs`, and on any subsequent calls
if the python `id` of `A` changes, even if the formal definition of `A`
stays the same.
Example: the following will jit once at the beginning, and then never again:
```python
import jax
import numpy as np
def A(H,x):
return jax.np.dot(H,x)
for n in range(100):
H = jax.np.array(np.random.rand(10,10))
x = jax.np.array(np.random.rand(10,10))
res = eigs(A, [H],x) #jitting is triggered only at `n=0`
```
The following code triggers jitting at every iteration, which
results in considerably reduced performance
```python
import jax
import numpy as np
for n in range(100):
def A(H,x):
return jax.np.dot(H,x)
H = jax.np.array(np.random.rand(10,10))
x = jax.np.array(np.random.rand(10,10))
res = eigs(A, [H],x) #jitting is triggered at every step `n`
```
Args:
A: A (sparse) implementation of a linear operator.
Call signature of `A` is `res = A(vector, *args)`, where `vector`
can be an arbitrary `Tensor`, and `res.shape` has to be `vector.shape`.
backend: A backend, text specifying one, or None.
args: A list of arguments to `A`. `A` will be called as
`res = A(initial_state, *args)`.
x0: An initial vector for the algorithm. If `None`,
a random initial `Tensor` is created using the `backend.randn` method
shape: The shape of the input-dimension of `A`.
dtype: The dtype of the input `A`. If no `initial_state` is provided,
a random initial state with shape `shape` and dtype `dtype` is created.
num_krylov_vecs: The number of iterations (number of krylov vectors).
numeig: The number of eigenvector-eigenvalue pairs to be computed.
tol: The desired precision of the eigenvalues. For the jax backend
this currently has no effect, and the precision of eigenvalues is not
guaranteed. This feature may be added at a later point. To increase
precision the caller can either increase `maxiter` or `num_krylov_vecs`.
which: Flag for targeting different types of eigenvalues. Currently
supported are `which = 'LR'` (largest real part) and `which = 'LM'`
(largest magnitude).
maxiter: Maximum number of restarts. For `maxiter=0` the routine becomes
equivalent to a simple Arnoldi method.
Returns:
(eigvals, eigvecs)
eigvals: A list of `numeig` eigenvalues
eigvecs: A list of `numeig` eigenvectors
"""<line_sep>backend,x0_array,args_array=krylov_error_checks(backend x0 args)<line_sep>mv=KRYLOV_MATVEC_CACHE.retrieve(backend.name A)<line_sep>result=backend.eigs(mv args=args_array initial_state=x0_array shape=shape dtype=dtype num_krylov_vecs=num_krylov_vecs numeig=numeig tol=tol which=which maxiter=maxiter)<line_sep>eigvals,eigvecs=result<line_sep>eigvecsT=[Tensor(eV backend=backend)<for>eV eigvecs]<line_sep><return>eigvals eigvecsT<block_end><def_stmt>gmres A_mv:Callable b:Tensor A_args:Optional[List]=<none> x0:Optional[Tensor]=<none> tol:float=1E-05 atol:Optional[float]=<none> num_krylov_vectors:Optional[int]=<none> maxiter:Optional[int]=1 M:Optional[Callable]=<none><arrow>Tuple[Tensor int]<block_start>""" GMRES solves the linear system A @ x = b for x given a vector `b` and
a general (not necessarily symmetric/Hermitian) linear operator `A`.
As a Krylov method, GMRES does not require a concrete matrix representation
of the n by n `A`, but only a function
`vector1 = A_mv(vector0, *A_args, **A_kwargs)`
prescribing a one-to-one linear map from vector0 to vector1 (that is,
A must be square, and thus vector0 and vector1 the same size). If `A` is a
dense matrix, or if it is a symmetric/Hermitian operator, a different
linear solver will usually be preferable.
GMRES works by first constructing the Krylov basis
K = (x0, A_mv@x0, A_mv@A_mv@x0, ..., (A_mv^num_krylov_vectors)@x_0) and then
solving a certain dense linear system K @ q0 = q1 from whose solution x can
be approximated. For `num_krylov_vectors = n` the solution is provably exact
in infinite precision, but the expense is cubic in `num_krylov_vectors` so
one is typically interested in the `num_krylov_vectors << n` case.
The solution can in this case be repeatedly
improved, to a point, by restarting the Arnoldi iterations each time
`num_krylov_vectors` is reached. Unfortunately the optimal parameter choices
balancing expense and accuracy are difficult to predict in advance, so
applying this function requires a degree of experimentation.
In a tensor network code one is typically interested in A_mv implementing
some tensor contraction. This implementation thus allows `b` and `x0` to be
of whatever arbitrary, though identical, shape `b = A_mv(x0, ...)` expects.
Reshaping to and from a matrix problem is handled internally.
Args:
A_mv : A function `v0 = A_mv(v, *A_args, **A_kwargs)` where `v0` and
`v` have the same shape.
b : The `b` in `A @ x = b`; it should be of the shape `A_mv`
operates on.
A_args : Positional arguments to `A_mv`, supplied to this interface
as a list.
Default: None.
x0 : An optional guess solution. Zeros are used by default.
If `x0` is supplied, its shape and dtype must match those of
`b`, or an
error will be thrown.
Default: zeros.
tol, atol: Solution tolerance to achieve,
norm(residual) <= max(tol*norm(b), atol).
Default: tol=1E-05
atol=tol
num_krylov_vectors
: Size of the Krylov space to build at each restart.
Expense is cubic in this parameter. If supplied, it must be
an integer in 0 < num_krylov_vectors <= b.size.
Default: b.size.
maxiter : The Krylov space will be repeatedly rebuilt up to this many
times. Large values of this argument
should be used only with caution, since especially for nearly
symmetric matrices and small `num_krylov_vectors` convergence
might well freeze at a value significantly larger than `tol`.
Default: 1.
M : Inverse of the preconditioner of A; see the docstring for
`scipy.sparse.linalg.gmres`. This is only supported in the
numpy backend. Supplying this argument to other backends will
trigger NotImplementedError.
Default: None.
Raises:
ValueError: -if `x0` is supplied but its shape differs from that of `b`.
-in NumPy, if the ARPACK solver reports a breakdown (which
usually indicates some kind of floating point issue).
-if num_krylov_vectors is 0 or exceeds b.size.
-if tol was negative.
-if M was supplied with any backend but NumPy.
Returns:
x : The converged solution. It has the same shape as `b`.
info : 0 if convergence was achieved, the number of restarts otherwise.
"""<try_stmt><block_start>b_array=b.array<block_end><except_stmt>AttributeError<as>err<block_start><raise>TypeError("b must be a tn.Tensor")<from>err<block_end>backend,x0_array,args_array=krylov_error_checks(b.backend x0 A_args)<line_sep>mv=KRYLOV_MATVEC_CACHE.retrieve(backend.name A_mv)<line_sep>out=backend.gmres(mv b_array A_args=args_array x0=x0_array tol=tol atol=atol num_krylov_vectors=num_krylov_vectors maxiter=maxiter M=M)<line_sep>result,info=out<line_sep>resultT=Tensor(result backend=b.backend)<line_sep><return>(resultT info)<block_end> |
<import_from_stmt>datetime date<import_from_stmt>. GenericCalendarTest<import_from_stmt>..africa.mozambique Mozambique<class_stmt>MozambiqueTest(GenericCalendarTest)<block_start>cal_class=Mozambique<def_stmt>test_year_new_year_shift self<block_start>holidays=self.cal.holidays_set(2019)<line_sep>self.assertIn(date(2019 1 1) holidays)<line_sep>self.assertNotIn(date(2019 1 2) holidays)<line_sep>holidays=self.cal.holidays_set(2020)<line_sep>self.assertIn(date(2020 1 1) holidays)<line_sep>self.assertNotIn(date(2020 1 2) holidays)<block_end><def_stmt>test_n_holidays self<block_start>n_holidays=len(self.cal.holidays_set(2019))<for_stmt>holiday self.cal.get_calendar_holidays(2020)<block_start>print(holiday)<block_end><assert_stmt>n_holidays<eq>10<block_end><def_stmt>test_year_2018 self<block_start>holidays=self.cal.holidays_set(2018)<line_sep># Fixed days section:
# 1. New Year's Day
self.assertIn(date(2018 1 1) holidays)<line_sep># 2. Mozambican Heroes' Day
self.assertIn(date(2018 2 3) holidays)<line_sep># 3. Mozambican Women's Day
self.assertIn(date(2018 4 7) holidays)<line_sep># 4. Good Friday
self.assertIn(date(2018 3 30) holidays)<line_sep># 5. Labour Day
self.assertIn(date(2018 5 1) holidays)<line_sep># 6. Independence Day
self.assertIn(date(2018 6 25) holidays)<line_sep># 7. Victory Day
self.assertIn(date(2018 9 7) holidays)<line_sep># 8. Armed Forces Day
self.assertIn(date(2018 9 25) holidays)<line_sep># 9. Peace And Reconciliation Day
self.assertIn(date(2018 10 4) holidays)<line_sep># 10. Christmas day
self.assertIn(date(2018 12 25) holidays)<block_end><def_stmt>test_year_2019 self<block_start>holidays=self.cal.holidays_set(2019)<line_sep># Fixed days section:
# 1. New Year's Day
self.assertIn(date(2019 1 1) holidays)<line_sep># 2. Mozambican Heroes' Day
self.assertIn(date(2019 2 3) holidays)<line_sep># 3. Mozambican Women's Day
self.assertIn(date(2019 4 7) holidays)<line_sep># 4. Good Friday
self.assertIn(date(2019 4 19) holidays)<line_sep># 5. Labour Day
self.assertIn(date(2019 5 1) holidays)<line_sep># 6. Independence Day
self.assertIn(date(2019 6 25) holidays)<line_sep># 7. Victory Day
self.assertIn(date(2019 9 7) holidays)<line_sep># 8. Armed Forces Day
self.assertIn(date(2019 9 25) holidays)<line_sep># 9. Peace And Reconciliation Day
self.assertIn(date(2019 10 4) holidays)<line_sep># 10. Christmas day
self.assertIn(date(2019 12 25) holidays)<block_end><def_stmt>test_year_2020 self<block_start>holidays=self.cal.holidays_set(2020)<line_sep># Fixed days section:
# 1. New Year's Day
self.assertIn(date(2020 1 1) holidays)<line_sep># 2. Mozambican Heroes' Day
self.assertIn(date(2020 2 3) holidays)<line_sep># 3. Mozambican Women's Day
self.assertIn(date(2020 4 7) holidays)<line_sep># 4. Good Friday
self.assertIn(date(2020 4 10) holidays)<line_sep># 5. Labour Day
self.assertIn(date(2020 5 1) holidays)<line_sep># 6. Independence Day
self.assertIn(date(2020 6 25) holidays)<line_sep># 7. Victory Day
self.assertIn(date(2020 9 7) holidays)<line_sep># 8. Armed Forces Day
self.assertIn(date(2020 9 25) holidays)<line_sep># 9. Peace And Reconciliation Day
self.assertIn(date(2020 10 4) holidays)<line_sep># 10. Christmas day
self.assertIn(date(2020 12 25) holidays)<block_end><def_stmt>test_2020_new_years_day_label self<block_start>holidays=self.cal.holidays(2020)<line_sep>holidays=dict(holidays)<line_sep>self.assertEqual(holidays[date(2020 1 1)] "New year")<block_end><def_stmt>test_2020_heroes_day_label self<block_start>holidays=self.cal.holidays(2020)<line_sep>holidays=dict(holidays)<line_sep>self.assertEqual(holidays[date(2020 2 3)] "Mozambican Heroes' Day")<block_end><def_stmt>test_2020_women_day_label self<block_start>holidays=self.cal.holidays(2020)<line_sep>holidays=dict(holidays)<line_sep>self.assertEqual(holidays[date(2020 4 7)] "Mozambican Women's Day")<block_end><def_stmt>test_2020_good_friday_label self<block_start>holidays=self.cal.holidays(2020)<line_sep>holidays=dict(holidays)<line_sep>self.assertEqual(holidays[date(2020 4 10)] "Good Friday")<block_end><def_stmt>test_2020_labour_day_label self<block_start>holidays=self.cal.holidays(2020)<line_sep>holidays=dict(holidays)<line_sep>self.assertEqual(holidays[date(2020 5 1)] "Labour Day")<block_end><def_stmt>test_2020_independence_day_label self<block_start>holidays=self.cal.holidays(2020)<line_sep>holidays=dict(holidays)<line_sep>self.assertEqual(holidays[date(2020 6 25)] "Independence Day")<block_end><def_stmt>test_2020_victory_day_label self<block_start>holidays=self.cal.holidays(2020)<line_sep>holidays=dict(holidays)<line_sep>self.assertEqual(holidays[date(2020 9 7)] "Victory Day")<block_end><def_stmt>test_2020_armed_forces_day_label self<block_start>holidays=self.cal.holidays(2020)<line_sep>holidays=dict(holidays)<line_sep>self.assertEqual(holidays[date(2020 9 25)] "Armed Forces Day")<block_end><def_stmt>test_2020_peace_and_reconciliation_day_label self<block_start>holidays=self.cal.holidays(2020)<line_sep>holidays=dict(holidays)<line_sep>self.assertEqual(holidays[date(2020 10 4)] "Peace And Reconciliation Day")<block_end><def_stmt>test_2020_christmas_day_label self<block_start>holidays=self.cal.holidays(2020)<line_sep>holidays=dict(holidays)<line_sep>self.assertEqual(holidays[date(2020 12 25)] "Christmas Day")<block_end><block_end> |
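# Rough usage sketch of the calendar class exercised by the tests above. The
# import path (workalendar.africa) is an assumption based on the relative
# import `..africa.mozambique`; the dates mirror the 2020 fixtures.
from datetime import date
from workalendar.africa import Mozambique

cal = Mozambique()
holidays_2020 = dict(cal.holidays(2020))
print(holidays_2020[date(2020, 6, 25)])       # Independence Day
print(cal.is_working_day(date(2020, 6, 25)))  # False: public holiday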
"""
Code illustration: 4.01
@ Tkinter GUI Application Development Blueprints
"""<import_from_stmt>configurations *<class_stmt>Model()<block_start><def_stmt>__init__ self<block_start><pass><block_end><block_end> |
<import_stmt>os<import_stmt>sys<import_stmt>json<import_stmt>subprocess<import_stmt>logging<import_stmt>tarfile<import_from_stmt>.preprocess preprocess_infer<import_from_stmt>.postprocess postprocess<import_from_stmt>.get_alignments GetAlignments<import_from_stmt>..penman_utils to_graph_line<import_from_stmt>...defaults data_dir<line_sep>logger=logging.getLogger(__name__)<line_sep>this_dir=os.path.dirname(os.path.realpath(__file__))<class_stmt>FAA_Aligner(object)<block_start><def_stmt>__init__ self **kwargs<block_start>self.model_dir=kwargs.get('model_dir' os.path.join(data_dir 'model_aligner_faa'))<line_sep>self.model_tar_fn=kwargs.get('model_tar_fn' os.path.join(this_dir 'model_aligner_faa.tar.gz'))<line_sep>self.setup_model_dir()<line_sep>self.aligner=TrainedAligner(self.model_dir **kwargs)<try_stmt><block_start>self.aligner.check_for_binaries()# Will raise FileNotFoundError if binaries can't be found
<block_end><except_stmt>FileNotFoundError<block_start>logger.critical('No binaries for fast_align (https://github.com/clab/fast_align) found. '<concat>'These must be installed to use the faa_aligner. See the amrlib docs for details.')<line_sep><raise><block_end><block_end># Input space_tok_sents is a list of space-tokenized strings
# graph_strings is a list of AMR graph strings of the same length.
<def_stmt>align_sents self space_tok_sents graph_strings<block_start><assert_stmt>len(space_tok_sents)<eq>len(graph_strings)<line_sep>graph_strings=[to_graph_line(g)<for>g graph_strings]<line_sep>data=preprocess_infer(space_tok_sents graph_strings skip_empty_check=<true>)<line_sep># Filter lines for empty strings. The aligner doesn't return a value for blanks on either eng or amr
skips,eng_lines,amr_lines=set() [] []<for_stmt>i,(eng_l amr_l) enumerate(zip(data.eng_preproc_lines data.amr_preproc_lines))<block_start>eng_l,amr_l=eng_l.strip() amr_l.strip()<if_stmt><not>eng_l<or><not>amr_l<block_start>skips.add(i)<block_end><else_stmt><block_start>eng_lines.append(eng_l)<line_sep>amr_lines.append(amr_l)<block_end><block_end>model_out_lines=self.aligner.align(eng_lines amr_lines)<assert_stmt>len(model_out_lines)<eq>len(eng_lines)<line_sep># Add back in blanks for skipped lines
final_astrings=['']<times>len(data.eng_preproc_lines)<for_stmt>i range(len(final_astrings))<block_start><if_stmt>i<not><in>skips<block_start>final_astrings[i]=model_out_lines.pop(0)<block_end><block_end>data.model_out_lines=final_astrings<line_sep>amr_surface_aligns,alignment_strings=postprocess(data)<line_sep><return>amr_surface_aligns alignment_strings<block_end># check the model directory, if it doesn't have the metadata file try to create
# the directory from the tar.gz file
<def_stmt>setup_model_dir self# Check for the metadata and if so, consider the model ready to go
<block_start><if_stmt>os.path.isfile(os.path.join(self.model_dir 'amrlib_meta.json'))<block_start><return><true><block_end># if there's a local copy, extract it
<elif_stmt>os.path.isfile(self.model_tar_fn)<block_start>tar=tarfile.open(self.model_tar_fn)<line_sep>tar.extractall(path=data_dir)<line_sep>logger.info('Extracting a local copy of model')<if_stmt>os.path.isfile(os.path.join(self.model_dir 'amrlib_meta.json'))<block_start><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end><else_stmt><block_start>logger.critical('No model in model_dir and no local version available to extract')<line_sep><return><false><block_end><block_end><block_end># Code adapted from from https://github.com/clab/fast_align/blob/master/src/force_align.py
<class_stmt>TrainedAligner<block_start><def_stmt>__init__ self model_in_dir **kwargs# If the bin_dir is not provided, get it from the environment, but default
# to '' which means it must be in the path
<block_start>bin_dir=os.environ.get('FABIN_DIR' '')<line_sep>bin_dir=kwargs.get('bin_dir' bin_dir)<line_sep>self.fast_align=os.path.join(bin_dir 'fast_align')<line_sep>self.atools=os.path.join(bin_dir 'atools')<line_sep>fwd_params_fn=os.path.join(model_in_dir 'fwd_params')<line_sep>rev_params_fn=os.path.join(model_in_dir 'rev_params')<line_sep># Get the parameters from the metadata
<with_stmt>open(os.path.join(model_in_dir 'amrlib_meta.json'))<as>f<block_start>meta=json.load(f)<block_end>p=meta['train_params']<line_sep># timeout the exe to exit
self.timeout=kwargs.get('timeout' 1.0)<line_sep># Create the actual commands to execute
fwd_cmd='%s -i - -d -q %f -a %f -T %f -m %f -f %s'%(self.fast_align p['q'] p['a'] p['fwd_T'] p['fwd_m'] fwd_params_fn)<line_sep>rev_cmd='%s -i - -d -q %f -a %f -T %f -m %f -f %s -r'%(self.fast_align p['q'] p['a'] p['fwd_T'] p['fwd_m'] rev_params_fn)<line_sep>tools_cmd='%s -i - -j - -c %s'%(self.atools p['heuristic'])<line_sep>self.fwd_cmd=fwd_cmd.split()<line_sep>self.rev_cmd=rev_cmd.split()<line_sep>self.tools_cmd=tools_cmd.split()<block_end># Open a connection to the subprocess in text mode
@staticmethod<def_stmt>popen_io cmd<block_start><return>subprocess.Popen(cmd stdin=subprocess.PIPE stdout=subprocess.PIPE stderr=subprocess.PIPE text=<true>)<block_end><def_stmt>align self eng_td_lines amr_td_lines# Combine lines into fast align input format
<block_start>lines=['%s ||| %s'%(el al)<for>el,al zip(eng_td_lines amr_td_lines)]<line_sep># Open connections to the alignment binaries
self.fwd_align=self.popen_io(self.fwd_cmd)<line_sep>self.rev_align=self.popen_io(self.rev_cmd)<line_sep>self.tools=self.popen_io(self.tools_cmd)<line_sep># Input to fast_align
fa_in='\n'.join([l.strip()<for>l lines])<line_sep>fwd_out,fwd_err=self.fwd_align.communicate(fa_in timeout=self.timeout)<line_sep>rev_out,fwd_err=self.rev_align.communicate(fa_in timeout=self.timeout)<line_sep># output is f words ||| e words ||| links ||| score
fwd_lines=[l.split('|||')[2].strip()<for>l fwd_out.splitlines()<if>l]<line_sep>rev_lines=[l.split('|||')[2].strip()<for>l rev_out.splitlines()<if>l]<line_sep># Input to atools
# be sure to put a line-feed at the end or you'll get a duplicate line in the output
at_in='\n'.join(['%s\n%s'%(fl rl)<for>fl,rl zip(fwd_lines rev_lines)])+'\n'<line_sep>at_out,at_err=self.tools.communicate(at_in timeout=self.timeout)<line_sep>at_lines=[l.strip()<for>l at_out.splitlines()]<line_sep><return>at_lines<block_end># This will raise FileNotFoundError if either call fails
# Note that both commands trigger the help message and will produce a return-code of 1
# which is typically considered an error
<def_stmt>check_for_binaries self<block_start>ret_fa=subprocess.run(self.fast_align stderr=subprocess.PIPE stdout=subprocess.PIPE)<line_sep>ret_tool=subprocess.run(self.atools stderr=subprocess.PIPE stdout=subprocess.PIPE)<block_end><block_end> |
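# Rough usage sketch of the aligner defined above. The sentence/AMR pair is
# made up for illustration; fast_align and atools must be installed and
# reachable via PATH or the FABIN_DIR environment variable, as
# check_for_binaries() verifies.
space_tok_sents = ["the boy wants to go"]
graph_strings = ["(w / want-01 :ARG0 (b / boy) :ARG1 (g / go-01 :ARG0 b))"]

aligner = FAA_Aligner()   # extracts the bundled model on first use
surface_amrs, align_strs = aligner.align_sents(space_tok_sents, graph_strings)
# surface_amrs[0]: the graph string with surface alignments added
# align_strs[0]:   the corresponding token <-> node alignment string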
"""
Inferring a function from a reproducing kernel Hilbert space (RKHS) by taking
gradients of eval with respect to the function-valued argument
"""<import_from_future_stmt> print_function<import_stmt>autograd.numpy<as>np<import_stmt>autograd.numpy.random<as>npr<import_from_stmt>autograd.extend primitive defvjp defjvp VSpace Box<import_from_stmt>autograd.util func<import_from_stmt>autograd grad<class_stmt>RKHSFun(object)<block_start><def_stmt>__init__ self kernel alphas={}<block_start>self.alphas=alphas<line_sep>self.kernel=kernel<line_sep>self.vs=RKHSFunVSpace(self)<block_end>@primitive<def_stmt>__call__ self x<block_start><return>sum([a<times>self.kernel(x x_repr)<for>x_repr,a self.alphas.items()] 0.0)<block_end><def_stmt>__add__ self f<block_start><return>self.vs.add(self f)<block_end><def_stmt>__mul__ self a<block_start><return>self.vs.scalar_mul(self a)<block_end><block_end># TODO: add vjp of __call__ wrt x (and show it in action)
defvjp(func(RKHSFun.__call__) <lambda>ans f x:<lambda>g:RKHSFun(f.kernel {x:1})<times>g)<class_stmt>RKHSFunBox(Box RKHSFun)<block_start>@property<def_stmt>kernel self<block_start><return>self._value.kernel<block_end><block_end>RKHSFunBox.register(RKHSFun)<class_stmt>RKHSFunVSpace(VSpace)<block_start><def_stmt>__init__ self value<block_start>self.kernel=value.kernel<block_end><def_stmt>zeros self<block_start><return>RKHSFun(self.kernel)<block_end><def_stmt>randn self# These arbitrary vectors are not analogous to randn in any meaningful way
<block_start>N=npr.randint(1 3)<line_sep><return>RKHSFun(self.kernel dict(zip(npr.randn(N) npr.randn(N))))<block_end><def_stmt>_add self f g<block_start><assert_stmt>f.kernel<is>g.kernel<line_sep><return>RKHSFun(f.kernel add_dicts(f.alphas g.alphas))<block_end><def_stmt>_scalar_mul self f a<block_start><return>RKHSFun(f.kernel {x:a<times>a_cur<for>x,a_cur f.alphas.items()})<block_end><def_stmt>_inner_prod self f g<block_start><assert_stmt>f.kernel<is>g.kernel<line_sep><return>sum([a1<times>a2<times>f.kernel(x1 x2)<for>x1,a1 f.alphas.items()<for>x2,a2 g.alphas.items()] 0.0)<block_end><block_end>RKHSFunVSpace.register(RKHSFun)<def_stmt>add_dicts d1 d2<block_start>d={}<for_stmt>k,v d1.items()+d2.items()<block_start>d[k]=d[k]+v<if>k<in>d<else>v<block_end><return>d<block_end><if_stmt>__name__<eq>"__main__"<block_start><def_stmt>sq_exp_kernel x1 x2<block_start><return>np.exp(-(x1-x2)<power>2)<block_end>xs=range(5)<line_sep>ys=[1 2 3 2 1]<def_stmt>logprob f xs ys<block_start><return>-sum((f(x)-y)<power>2<for>x,y zip(xs ys))<block_end>f=RKHSFun(sq_exp_kernel)<for_stmt>i range(100)<block_start>f=f+grad(logprob)(f xs ys)<times>0.01<block_end><for_stmt>x,y zip(xs ys)<block_start>print('{}\t{}\t{}'.format(x y f(x)))<block_end><block_end> |
"""Tests for ais.nmea_queue."""<import_stmt>contextlib<import_stmt>unittest<import_stmt>pytest<import_stmt>six<import_from_stmt>six.moves StringIO<import_stmt>ais<import_from_stmt>ais nmea<import_from_stmt>ais nmea_queue<line_sep>BARE_NMEA="""
# pylint: disable=line-too-long
$GPZDA,203003.00,12,07,2009,00,00,*47
!AIVDM,1,1,,B,23?up2001gGRju>Ap:;R2APP08:c,0*0E
!BSVDM,1,1,,A,15Mj23`PB`o=Of>KjvnJg8PT0L2R,0*7E
!SAVDM,1,1,,B,35Mj2p001qo@5tVKLBWmIDJT01:@,0*33
!AIVDM,1,1,,A,B5NWV1P0<vSE=I3QdK4bGwoUoP06,0*4F
!SAVDM,1,1,,A,403Owi1utn1W0qMtr2AKStg020S:,0*4B
!SAVDM,2,1,4,A,55Mub7P00001L@;SO7TI8DDltqB222222222220O0000067<0620@jhQDTVG,0*43
!SAVDM,2,2,4,A,30H88888880,2*49
"""<line_sep>TAG_BLOCK=r"""
# pylint: disable=line-too-long
\n:440661,s:r3669963,c:1428537660*0F\$GPZDA,000253,09,04,2015,+00,00*6C
\g:1-2-4372,s:rORBCOMM109,c:1426032000,T:2015-03-11 00.00.00*32\!AIVDM,2,1,2,B,576u>F02>hOUI8AGR20tt<j104p4l62222222216H14@@Hoe0JPEDp1TQH88,0*16
\s:rORBCOMM999u,c:1426032000,T:2015-03-11 00.00.00*36\!AIVDM,1,1,,,;5Qu0v1utmGssvvkA`DRgm100000,0*46
\g:2-2-4372,s:rORBCOMM109,c:1426032000,T:2015-03-11 00.00.00*31\!AIVDM,2,2,2,B,88888888880,2*25
\g:1-2-27300,n:636994,s:b003669710,c:1428621738*5F\!SAVDM,2,1,2,B,55Mw@A7J1adAL@?;7WPl58F0U<h4pB222222220t1PN5553fN4g?`4iSp5Rc,0*26
\g:2-2-27300,n:636995*15\!SAVDM,2,2,2,B,iP`88888880,2*5E
\n:636996,s:b003669710,c:1428621738*19\!SAVDM,1,1,,B,35Mv4LPP@Go?FFtEbDDWQmlT20k@,0*04
\g:4-4-993623,n:577969*22\$ARVSI,r003669930,,233948.825272,1831,-97,0*24
\n:80677,s:b003669952,c:1428884269*2A\!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17
"""<line_sep>USCG=r"""
# pylint: disable=line-too-long
!SAVDM,1,1,,A,15N4OMPP01I<cGrA1v>Id?vF060l,0*22,b003669978,1429287189
!SAVDM,2,1,4,B,54h@7?02BAF=`L4wN21<eTH4hj2222222222220U4HG6553U06T0C3H0Q@@j,0*5D,d-86,S389,t161310.00,T10.377780,D07MN-MI-LAKBS1,1429287190
!SAVDM,2,2,4,B,88888888880,2*39,d-86,S389,t161310.00,T10.377780,D07MN-MI-LAKBS1,1429287190
!AIVDM,1,1,,B,3592u`iP03GWEflBRosm0Ov@0000,0*70,d-107,S0297,t161407.00,T07.92201452,r11CSDO1,1429287248
!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17,rMySat,1429287258
"""<line_sep>MIXED=r"""
!SAVDM,1,1,,A,15N4OMPP01I<cGrA1v>Id?vF060l,0*22,b003669978,1429287189
!SAVDM,1,1,,A,403Owi1utn1W0qMtr2AKStg020S:,0*4B
\n:80677,s:b003669952,c:1428884269*2A\!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17
random text
"""<class_stmt>NmeaQueueTest(unittest.TestCase)<block_start><def_stmt>testTextData self# These lines should all pass straight through.
<block_start>src_lines=('' 'a' '123' # Not quite NMEA strings.
'$GPZDA' '!AIVDM' '*FF' )<line_sep>queue=nmea_queue.NmeaQueue()<for_stmt>line src_lines<block_start>queue.put(line)<block_end>self.assertEqual(queue.qsize() len(src_lines))<for_stmt>i range(1 queue.qsize()+1)<block_start>msg=queue.get()<line_sep>self.assertEqual(msg['line_nums'] [i])<line_sep>self.assertEqual(msg['line_type'] nmea.TEXT)<line_sep>self.assertEqual(msg['lines'] list(src_lines[i-1:i]))<block_end>self.assertEqual(msg {'line_nums':[6] 'line_type':'TEXT' 'lines':['*FF']})<block_end><def_stmt>testBareSingleLineData self<block_start>queue=nmea_queue.NmeaQueue()<line_sep>lines=[line<for>line BARE_NMEA.split('\n')<if>','<in>line]<for_stmt>line lines<block_start>queue.put(line)<block_end>self.assertEqual(queue.qsize() 7)<line_sep>msgs=[]<while_stmt><not>queue.empty()<block_start>msgs.append(queue.get())<block_end>self.assertEqual(msgs[0] {'line_nums':[1] 'line_type':'BARE' 'lines':['$GPZDA,203003.00,12,07,2009,00,00,*47']})<line_sep>self.assertEqual(msgs[1] {'decoded':{'cog':52.099998474121094 'id':2 'md5':'99c8c2804fde0481e6143051930b66c4' 'mmsi':218069000 'nav_status':0 'position_accuracy':0 'raim':<false> 'repeat_indicator':0 'rot':0.0 'rot_over_range':<false> 'slot_number':683 'slot_timeout':2 'sog':11.100000381469727 'spare':0 'special_manoeuvre':0 'sync_state':0 'timestamp':16 'true_heading':48 'x':-118.227775 'y':31.24317} 'line_nums':[2] 'line_type':'BARE' 'lines':['!AIVDM,1,1,,B,23?up2001gGRju>Ap:;R2APP08:c,0*0E'] 'matches':[{'body':'23?up2001gGRju>Ap:;R2APP08:c' 'chan':'B' 'checksum':'0E' 'fill_bits':0 'sen_num':1 'sen_tot':1 'seq_id':<none> 'talker':'AI' 'vdm_type':'VDM' 'vdm':'!AIVDM,1,1,,B,23?up2001gGRju>Ap:;R2APP08:c,0*0E'}]})<block_end><def_stmt>testTagBlockLines self<block_start>queue=nmea_queue.NmeaQueue()<line_sep>lines=[line<for>line TAG_BLOCK.split('\n')<if>','<in>line]<for_stmt>line lines<block_start>queue.put(line)<block_end>self.assertEqual(queue.qsize() 6)<line_sep>msgs=[]<while_stmt><not>queue.empty()<block_start>msgs.append(queue.get())<block_end># self.assertNotIn('decoded', msgs[0])
# TODO(schwehr): Check the ZDA message decoding.
<for_stmt>msg_num range(1 5)<block_start>self.assertIn('decoded' msgs[msg_num])<block_end>ids=[msg['decoded']['id']<for>msg msgs[1:]<if>'decoded'<in>msg]<line_sep>self.assertEqual(ids [11 5 5 3 27])<line_sep>self.assertEqual(msgs[-1] {'decoded':{'cog':131 'gnss':<true> 'id':27 'md5':'50898a3435865cf76f1b502b2821672b' 'mmsi':577305000 'nav_status':5 'position_accuracy':1 'raim':<false> 'repeat_indicator':0 'sog':0 'spare':0 'x':-90.20666666666666 'y':29.145} 'line_nums':[9] 'line_type':'TAGB' 'lines':['\\n:80677,s:b003669952,c:1428884269*2A'<concat>'\\!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17'] 'matches':[{'dest':<none> 'group':<none> 'group_id':<none> 'line_num':80677 'metadata':'n:80677,s:b003669952,c:1428884269*2A' 'payload':'!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17' 'quality':<none> 'rcvr':'b003669952' 'rel_time':<none> 'sentence_num':<none> 'sentence_tot':<none> 'tag_checksum':'2A' 'text':<none> 'text_date':<none> 'time':1428884269}] 'times':[1428884269]})<block_end><def_stmt>testUscgLines self<block_start>queue=nmea_queue.NmeaQueue()<line_sep>lines=[line<for>line USCG.split('\n')<if>','<in>line]<for_stmt>line lines<block_start>queue.put(line)<block_end>self.assertEqual(queue.qsize() 4)<line_sep>msgs=[]<while_stmt><not>queue.empty()<block_start>msgs.append(queue.get())<block_end><for_stmt>msg msgs<block_start>self.assertIn('decoded' msg)<block_end>ids=[msg['decoded']['id']<for>msg msgs]<line_sep>self.assertEqual(ids [1 5 3 27])<line_sep>self.assertEqual(msgs[3] {'decoded':{'cog':131 'gnss':<true> 'id':27 'md5':'50898a3435865cf76f1b502b2821672b' 'mmsi':577305000 'nav_status':5 'position_accuracy':1 'raim':<false> 'repeat_indicator':0 'sog':0 'spare':0 'x':-90.20666666666666 'y':29.145} 'line_nums':[5] 'line_type':'USCG' 'lines':['!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17,rMySat,1429287258'] 'matches':[{'body':'K8VSqb9LdU28WP8<' 'chan':'B' 'checksum':'17' 'counter':<none> 'fill_bits':0 'hour':<none> 'minute':<none> 'payload':'!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17' 'receiver_time':<none> 'rssi':<none> 'second':<none> 'sen_num':1 'sen_tot':1 'seq_id':<none> 'signal_strength':<none> 'slot':<none> 'station':'rMySat' 'station_type':'r' 'talker':'SA' 'time':1429287258 'time_of_arrival':<none> 'uscg_metadata':',rMySat,1429287258' 'vdm':'!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17' 'vdm_type':'VDM'}]})<block_end><def_stmt>testMixedLines self<block_start>queue=nmea_queue.NmeaQueue()<line_sep>lines=[line<for>line MIXED.split('\n')<if>line.strip()]<for_stmt>line lines<block_start>queue.put(line)<block_end>self.assertEqual(queue.qsize() 4)<line_sep>msgs=[]<while_stmt><not>queue.empty()<block_start>msgs.append(queue.get())<block_end><for_stmt>msg msgs[:-1]<block_start>self.assertIn('decoded' msg)<block_end>ids=[msg['decoded']['id']<for>msg msgs[:-1]]<line_sep>self.assertEqual(ids [1 4 27])<line_sep>line_types=[msg['line_type']<for>msg msgs]<line_sep>self.assertEqual(line_types [nmea.USCG nmea.BARE nmea.TAGB nmea.TEXT])<block_end><block_end>@pytest.mark.parametrize("nmea" [six.text_type(BARE_NMEA.strip()) six.text_type(TAG_BLOCK.strip()) six.text_type(USCG.strip()) six.text_type(MIXED.strip())])<def_stmt>test_NmeaFile_against_queue nmea<block_start>queue=nmea_queue.NmeaQueue()<for_stmt>line nmea.splitlines()<block_start>queue.put(line)<block_end>expected=[]<line_sep>msg=queue.GetOrNone()<while_stmt>msg<block_start>expected.append(msg)<line_sep>msg=queue.GetOrNone()<block_end><with_stmt>contextlib.closing(StringIO(nmea))<as>f ais.open(f)<as>src<block_start>actual=list(src)<block_end><for_stmt>e,a zip(expected 
actual)<block_start><assert_stmt>e<eq>a<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end> |
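# Rough usage sketch of the queue interface exercised above, reduced to its
# core put()/GetOrNone() cycle; the sample sentence is one of the USCG test
# lines.
from ais import nmea_queue

queue = nmea_queue.NmeaQueue()
queue.put('!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17,rMySat,1429287258')
msg = queue.GetOrNone()
if msg and 'decoded' in msg:
    print(msg['line_type'], msg['decoded']['id'], msg['decoded']['mmsi'])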
"""
Tests of neo.io.elphyio
"""<import_stmt>unittest<import_from_stmt>neo.io ElphyIO<import_from_stmt>neo.test.iotest.common_io_test BaseTestIO<class_stmt>TestElphyIO(BaseTestIO unittest.TestCase)<block_start>ioclass=ElphyIO<line_sep>entities_to_download=['elphy']<line_sep>entities_to_test=['elphy/DATA1.DAT' 'elphy/ElphyExample.DAT' 'elphy/ElphyExample_Mode1.dat' 'elphy/ElphyExample_Mode2.dat' 'elphy/ElphyExample_Mode3.dat']<def_stmt>test_read_data self<block_start><for_stmt>filename self.entities_to_test<block_start>io=ElphyIO(self.get_local_path(filename))<line_sep>bl=io.read_block()<line_sep>self.assertTrue(len(bl.segments)<g>0)<line_sep># ensure that at least one data object is generated for each file
self.assertTrue(any(list(bl.segments[0].size.values())))<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end> |
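# Rough usage sketch of the reader exercised by the test above; the fixture
# file name is assumed to have been downloaded locally first.
from neo.io import ElphyIO

io = ElphyIO('ElphyExample.DAT')
block = io.read_block()
print(len(block.segments), 'segment(s) read')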
<import_stmt>xmlrpc.client<as>xmlrpclib<import_stmt>pytest<import_from_stmt>tests.factories ReleaseFactory<line_sep>@pytest.fixture(params=['/RPC2' '/pypi'])<def_stmt>rpc_endpoint request<block_start><return>request.param<block_end>@pytest.mark.django_db<def_stmt>test_search_package_name client admin_user live_server repository rpc_endpoint<block_start>ReleaseFactory(package__name='my-package' package__repository=repository summary='Test summary')<line_sep>client=xmlrpclib.ServerProxy(live_server+rpc_endpoint)<line_sep>response=client.search({'name':'my-package'})<assert_stmt>response<eq>[{'_pypi_ordering':0 'name':'my-package' 'summary':'Test summary' 'version':'1.0.0'}]<block_end>@pytest.mark.django_db<def_stmt>test_search_package_summary client admin_user live_server repository rpc_endpoint<block_start>ReleaseFactory(package__name='my-package' package__repository=repository summary='Test summary')<line_sep>client=xmlrpclib.ServerProxy(live_server+rpc_endpoint)<line_sep>response=client.search({'summary':['Test summary']})<assert_stmt>response<eq>[{'_pypi_ordering':0 'name':'my-package' 'summary':'Test summary' 'version':'1.0.0'}]<block_end>@pytest.mark.django_db<def_stmt>test_search_operator_and client admin_user live_server repository rpc_endpoint<block_start>ReleaseFactory(package__name='my-package-1' package__repository=repository summary='Test summary')<line_sep>ReleaseFactory(package__name='arcoiro' package__repository=repository summary='Test summary')<line_sep>ReleaseFactory(package__name='my-package-2' package__repository=repository summary='arcoiro')<line_sep>client=xmlrpclib.ServerProxy(live_server+rpc_endpoint)<line_sep>response=client.search({'name':['my-package'] 'summary':['Test summary']} 'and')<assert_stmt>response<eq>[{'_pypi_ordering':0 'name':'my-package-1' 'summary':'Test summary' 'version':'1.0.0'}]<block_end>@pytest.mark.django_db<def_stmt>test_search_operator_or client admin_user live_server repository rpc_endpoint<block_start>ReleaseFactory(package__name='my-package-1' package__repository=repository summary='Test summary')<line_sep>ReleaseFactory(package__name='arcoiro' package__repository=repository summary='Test summary')<line_sep>ReleaseFactory(package__name='my-package-2' package__repository=repository summary='arcoiro')<line_sep>client=xmlrpclib.ServerProxy(live_server+rpc_endpoint)<line_sep>response=client.search({'name':['my-package'] 'summary':['Test summary']} 'or')<assert_stmt>response<eq>[{'_pypi_ordering':0 'name':'arcoiro' 'summary':'Test summary' 'version':'1.0.0'} {'_pypi_ordering':0 'name':'my-package-1' 'summary':'Test summary' 'version':'1.0.0'} {'_pypi_ordering':0 'name':'my-package-2' 'summary':'arcoiro' 'version':'1.0.0'}]<block_end>@pytest.mark.django_db<def_stmt>test_search_invalid_fields_are_ignores client admin_user live_server repository rpc_endpoint<block_start>ReleaseFactory(package__name='my-package' package__repository=repository summary='Test summary')<line_sep>client=xmlrpclib.ServerProxy(live_server+rpc_endpoint)<line_sep>response=client.search({'name':['my-package'] 'invalid':['Ops']})<assert_stmt>response<eq>[{'_pypi_ordering':0 'name':'my-package' 'summary':'Test summary' 'version':'1.0.0'}]<block_end> |
<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_future_stmt> absolute_import<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>dotmap DotMap<import_stmt>gym<import_from_stmt>dmbrl.misc.DotmapUtils get_required_argument<import_from_stmt>dmbrl.modeling.layers FC<import_stmt>dmbrl.env<class_stmt>ReacherConfigModule<block_start>ENV_NAME="MBRLReacher3D-v0"<line_sep>TASK_HORIZON=150<line_sep>NTRAIN_ITERS=100<line_sep>NROLLOUTS_PER_ITER=1<line_sep>PLAN_HOR=25<line_sep>MODEL_IN,MODEL_OUT=24 17<line_sep>GP_NINDUCING_POINTS=200<def_stmt>__init__ self<block_start>self.ENV=gym.make(self.ENV_NAME)<line_sep>self.ENV.reset()<line_sep>cfg=tf.ConfigProto()<line_sep>cfg.gpu_options.allow_growth=<true><line_sep>self.SESS=tf.Session(config=cfg)<line_sep>self.NN_TRAIN_CFG={"epochs":5}<line_sep>self.OPT_CFG={"Random":{"popsize":2000} "CEM":{"popsize":400 "num_elites":40 "max_iters":5 "alpha":0.1}}<line_sep>self.UPDATE_FNS=[self.update_goal]<line_sep>self.goal=tf.Variable(self.ENV.goal dtype=tf.float32)<line_sep>self.SESS.run(self.goal.initializer)<block_end>@staticmethod<def_stmt>obs_postproc obs pred<block_start><return>obs+pred<block_end>@staticmethod<def_stmt>targ_proc obs next_obs<block_start><return>next_obs-obs<block_end><def_stmt>update_goal self sess=<none><block_start><if_stmt>sess<is><not><none><block_start>self.goal.load(self.ENV.goal sess)<block_end><block_end><def_stmt>obs_cost_fn self obs<block_start><if_stmt>isinstance(obs np.ndarray)<block_start><return>np.sum(np.square(ReacherConfigModule.get_ee_pos(obs are_tensors=<false>)-self.ENV.goal) axis=1)<block_end><else_stmt><block_start><return>tf.reduce_sum(tf.square(ReacherConfigModule.get_ee_pos(obs are_tensors=<true>)-self.goal) axis=1)<block_end><block_end>@staticmethod<def_stmt>ac_cost_fn acs<block_start><if_stmt>isinstance(acs np.ndarray)<block_start><return>0.01<times>np.sum(np.square(acs) axis=1)<block_end><else_stmt><block_start><return>0.01<times>tf.reduce_sum(tf.square(acs) axis=1)<block_end><block_end><def_stmt>nn_constructor self model_init_cfg<block_start>model=get_required_argument(model_init_cfg "model_class" "Must provide model class")(DotMap(name="model" num_networks=get_required_argument(model_init_cfg "num_nets" "Must provide ensemble size") sess=self.SESS load_model=model_init_cfg.get("load_model" <false>) model_dir=model_init_cfg.get("model_dir" <none>)))<if_stmt><not>model_init_cfg.get("load_model" <false>)<block_start>model.add(FC(200 input_dim=self.MODEL_IN activation="swish" weight_decay=0.00025))<line_sep>model.add(FC(200 activation="swish" weight_decay=0.0005))<line_sep>model.add(FC(200 activation="swish" weight_decay=0.0005))<line_sep>model.add(FC(200 activation="swish" weight_decay=0.0005))<line_sep>model.add(FC(self.MODEL_OUT weight_decay=0.00075))<block_end>model.finalize(tf.train.AdamOptimizer {"learning_rate":0.00075})<line_sep><return>model<block_end><def_stmt>gp_constructor self model_init_cfg<block_start>model=get_required_argument(model_init_cfg "model_class" "Must provide model class")(DotMap(name="model" kernel_class=get_required_argument(model_init_cfg "kernel_class" "Must provide kernel class") kernel_args=model_init_cfg.get("kernel_args" {}) num_inducing_points=get_required_argument(model_init_cfg "num_inducing_points" "Must provide number of inducing points.") sess=self.SESS))<line_sep><return>model<block_end>@staticmethod<def_stmt>get_ee_pos states are_tensors=<false><block_start>theta1,theta2,theta3,theta4,theta5,theta6,theta7=states[: :1] 
states[: 1:2] states[: 2:3] states[: 3:4] states[: 4:5] states[: 5:6] states[: 6:]<if_stmt>are_tensors<block_start>rot_axis=tf.concat([tf.cos(theta2)<times>tf.cos(theta1) tf.cos(theta2)<times>tf.sin(theta1) -tf.sin(theta2)] axis=1)<line_sep>rot_perp_axis=tf.concat([-tf.sin(theta1) tf.cos(theta1) tf.zeros(tf.shape(theta1))] axis=1)<line_sep>cur_end=tf.concat([0.1<times>tf.cos(theta1)+0.4<times>tf.cos(theta1)<times>tf.cos(theta2) 0.1<times>tf.sin(theta1)+0.4<times>tf.sin(theta1)<times>tf.cos(theta2)-0.188 -0.4<times>tf.sin(theta2)] axis=1)<for_stmt>length,hinge,roll [(0.321 theta4 theta3) (0.16828 theta6 theta5)]<block_start>perp_all_axis=tf.cross(rot_axis rot_perp_axis)<line_sep>x=tf.cos(hinge)<times>rot_axis<line_sep>y=tf.sin(hinge)<times>tf.sin(roll)<times>rot_perp_axis<line_sep>z=-tf.sin(hinge)<times>tf.cos(roll)<times>perp_all_axis<line_sep>new_rot_axis=x+y+z<line_sep>new_rot_perp_axis=tf.cross(new_rot_axis rot_axis)<line_sep>new_rot_perp_axis=tf.where(tf.less(tf.norm(new_rot_perp_axis axis=1) 1e-30) rot_perp_axis new_rot_perp_axis)<line_sep>new_rot_perp_axis<augdiv>tf.norm(new_rot_perp_axis axis=1 keepdims=<true>)<line_sep>rot_axis,rot_perp_axis,cur_end=new_rot_axis new_rot_perp_axis cur_end+length<times>new_rot_axis<block_end><block_end><else_stmt><block_start>rot_axis=np.concatenate([np.cos(theta2)<times>np.cos(theta1) np.cos(theta2)<times>np.sin(theta1) -np.sin(theta2)] axis=1)<line_sep>rot_perp_axis=np.concatenate([-np.sin(theta1) np.cos(theta1) np.zeros(theta1.shape)] axis=1)<line_sep>cur_end=np.concatenate([0.1<times>np.cos(theta1)+0.4<times>np.cos(theta1)<times>np.cos(theta2) 0.1<times>np.sin(theta1)+0.4<times>np.sin(theta1)<times>np.cos(theta2)-0.188 -0.4<times>np.sin(theta2)] axis=1)<for_stmt>length,hinge,roll [(0.321 theta4 theta3) (0.16828 theta6 theta5)]<block_start>perp_all_axis=np.cross(rot_axis rot_perp_axis)<line_sep>x=np.cos(hinge)<times>rot_axis<line_sep>y=np.sin(hinge)<times>np.sin(roll)<times>rot_perp_axis<line_sep>z=-np.sin(hinge)<times>np.cos(roll)<times>perp_all_axis<line_sep>new_rot_axis=x+y+z<line_sep>new_rot_perp_axis=np.cross(new_rot_axis rot_axis)<line_sep>new_rot_perp_axis[np.linalg.norm(new_rot_perp_axis axis=1)<l>1e-30]=rot_perp_axis[np.linalg.norm(new_rot_perp_axis axis=1)<l>1e-30]<line_sep>new_rot_perp_axis<augdiv>np.linalg.norm(new_rot_perp_axis axis=1 keepdims=<true>)<line_sep>rot_axis,rot_perp_axis,cur_end=new_rot_axis new_rot_perp_axis cur_end+length<times>new_rot_axis<block_end><block_end><return>cur_end<block_end><block_end>CONFIG_MODULE=ReacherConfigModule<line_sep> |
<import_from_stmt>django.conf.urls.defaults *<line_sep>urlpatterns=patterns('' url(r'test/$' 'captcha.tests.views.test' name='captcha-test') url(r'test2/$' 'captcha.tests.views.test_custom_error_message' name='captcha-test-custom-error-message') url(r'test3/$' 'captcha.tests.views.test_per_form_format' name='test_per_form_format') url(r'' include('captcha.urls')) )<line_sep> |
<import_stmt>os<import_stmt>numpy<as>np<import_stmt>joblib<import_from_stmt>skimage transform<import_stmt>deeppy<as>dp<import_from_stmt>.augment img_augment sample_img_augment_params AugmentedFeed SupervisedAugmentedFeed <import_from_stmt>.util img_transform<line_sep>cachedir=os.getenv('CACHE_HOME' './cache')<line_sep>mem=joblib.Memory(cachedir=os.path.join(cachedir 'lfw'))<line_sep>@mem.cache<def_stmt>lfw_imgs alignment<block_start><if_stmt>alignment<eq>'landmarks'<block_start>dataset=dp.dataset.LFW('original')<line_sep>imgs=dataset.imgs<line_sep>landmarks=dataset.landmarks('68')<line_sep>n_landmarks=68<line_sep>landmarks_mean=np.mean(landmarks axis=0)<line_sep>landmarks_mean=np.array([landmarks_mean[:n_landmarks] landmarks_mean[n_landmarks:]])<line_sep>aligned_imgs=[]<for_stmt>img,points zip(imgs landmarks)<block_start>points=np.array([points[:n_landmarks] points[n_landmarks:]])<line_sep>transf=transform.estimate_transform('similarity' landmarks_mean.T points.T)<line_sep>img=img/255.<line_sep>img=transform.warp(img transf order=3)<line_sep>img=np.round(img<times>255).astype(np.uint8)<line_sep>aligned_imgs.append(img)<block_end>imgs=np.array(aligned_imgs)<block_end><else_stmt><block_start>dataset=dp.dataset.LFW(alignment)<line_sep>imgs=dataset.imgs<block_end><return>imgs<block_end><def_stmt>lfw_imgs_split alignment split_name with_attributes=<true> test_fold=0<block_start>imgs=lfw_imgs(alignment)<line_sep>dataset=dp.dataset.LFW()<if_stmt>split_name<eq>'testtrain'<block_start>all_persons=list(dataset.index.keys())<line_sep>test_persons=dataset.people_splits['test'][test_fold]<line_sep>persons=[p<for>p all_persons<if>p<not><in>test_persons]<block_end><if_stmt>split_name<eq>'valtrain'<block_start>test_persons=dataset.people_splits['train']<block_end><elif_stmt>split_name<eq>'val'<block_start>persons=dataset.people_splits[split_name]<block_end><elif_stmt>split_name<eq>'test'<block_start>persons=dataset.people_splits[split_name][test_fold]<block_end><if_stmt><not>with_attributes<block_start>new_imgs=[]<for_stmt>person_id persons<block_start><for_stmt>img_idx dataset.index[person_id]<block_start>new_imgs.append(imgs[img_idx])<block_end><block_end>imgs=np.array(new_imgs)<line_sep><return>imgs<block_end># Extract attributes vectors and discard images without attributes
new_imgs=[]<line_sep>attrs=[]<for_stmt>person_id persons<block_start><if_stmt>person_id<in>dataset.attributes<block_start><for_stmt>img_no range(1 len(dataset.index[person_id])+1)<block_start><if_stmt>img_no<in>dataset.attributes[person_id]<block_start>new_imgs.append(imgs[dataset.index[person_id][img_no-1]])<line_sep>attrs.append(dataset.attributes[person_id][img_no])<block_end><block_end><block_end><block_end>imgs=np.array(new_imgs)<line_sep>attrs=np.array(attrs).astype(dp.float_)<line_sep><return>imgs attrs<block_end><def_stmt>_resize args<block_start>img,crop_size,rescale_size=args<line_sep>crop=(img.shape[0]-crop_size)<floordiv>2<line_sep>img=img[crop:-crop crop:-crop]<line_sep>img=transform.resize(img (rescale_size rescale_size 3) order=3)<line_sep>img=(img<times>255).astype(np.uint8)<line_sep><return>img<block_end><def_stmt>_resize_augment args<block_start>img,crop_size,rescale_size=args<line_sep>augment_params=sample_img_augment_params(translation_sigma=1.0 scale_sigma=0.01 rotation_sigma=0.01 gamma_sigma=0.07 contrast_sigma=0.07 hue_sigma=0.0125)<line_sep>img=img_augment(img *augment_params)<line_sep>img=_resize((img crop_size rescale_size))<line_sep><return>img<block_end>@mem.cache<def_stmt>resize_imgs imgs crop_size rescale_size n_augment=0<block_start><if_stmt>n_augment<eq>0<block_start>preprocess_fun=_resize<line_sep>n_imgs=len(imgs)<block_end><else_stmt><block_start>preprocess_fun=_resize_augment<line_sep>n_imgs=n_augment<block_end><def_stmt>img_iter <block_start><for_stmt>i range(n_imgs)<block_start><yield>imgs[i%len(imgs)]<block_end><block_end><with_stmt>joblib.Parallel(n_jobs=-2)<as>parallel<block_start>imgs=parallel(joblib.delayed(preprocess_fun)((img crop_size rescale_size))<for>img img_iter())<block_end>imgs=np.array(imgs)<line_sep><return>imgs<block_end>@mem.cache<def_stmt>feeds alignment crop_size rescale_size batch_size epoch_size n_augment=int(1e5) with_attributes=<false> split='val'<block_start><if_stmt>split<eq>'val'<block_start>train_split='valtrain'<line_sep>test_split='val'<block_end><elif_stmt>split<eq>'test'<block_start>train_split='testtrain'<line_sep>test_split='test'<block_end>x_train,y_train=lfw_imgs_split(alignment train_split)<line_sep># Shuffle training images
idxs=np.random.permutation(len(x_train))<line_sep>x_train=x_train[idxs]<line_sep>y_train=y_train[idxs]<if_stmt>n_augment<g>0<block_start>y_train=y_train[np.arange(n_augment)%len(x_train)]<block_end>x_train=resize_imgs(x_train crop_size rescale_size n_augment)<line_sep>x_train=np.transpose(x_train (0 3 1 2))<line_sep>x_test,y_test=lfw_imgs_split(alignment test_split)<line_sep>x_test=resize_imgs(x_test crop_size rescale_size)<line_sep>x_test=img_transform(x_test to_bc01=<true>)<if_stmt>with_attributes<block_start>train_feed=SupervisedAugmentedFeed(x_train y_train batch_size=batch_size epoch_size=epoch_size)<line_sep>test_feed=dp.SupervisedFeed(x_test y_test batch_size=batch_size)<block_end><else_stmt><block_start>train_feed=AugmentedFeed(x_train batch_size epoch_size)<line_sep>test_feed=dp.Feed(x_test batch_size)<block_end><return>train_feed test_feed<block_end> |
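# Rough usage sketch of the feed builders above. The alignment name and the
# size/batch parameters are guesses for illustration only.
train_feed, test_feed = feeds(alignment='deepfunneled', crop_size=150,
                              rescale_size=64, batch_size=64, epoch_size=250,
                              n_augment=int(1e5), with_attributes=True,
                              split='val')
# train_feed yields augmented (image, attribute) batches for training;
# test_feed serves the held-out split without augmentation.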
<import_stmt>ctypes<import_stmt>itertools<import_stmt>windows<import_stmt>windows.hooks<import_from_stmt>windows.generated_def.winstructs *<class_stmt>Ressource(object)<block_start><def_stmt>__init__ self filename lpName lpType<block_start>self.filename=filename<line_sep>self.lpName=lpName<line_sep>self.lpType=lpType<line_sep>self.driver_data=<none><line_sep>self.loaded_ressource=<none><block_end><def_stmt>match self hModule lpName lpType<block_start>x=<not>hModule<and>self.lpName<eq>lpName<and>self.lpType<eq>lpType<line_sep><return>x<block_end><def_stmt>get_driver_data self<block_start><if_stmt>self.driver_data<is><not><none><block_start><return>self.driver_data<block_end>self.driver_data=open(self.filename 'rb').read()<line_sep><return>self.driver_data<block_end><def_stmt>load_resource self<block_start>driver_data=self.get_driver_data()<line_sep>char_p=ctypes.c_char_p(driver_data)<line_sep>real_addr=ctypes.cast(char_p ctypes.c_void_p).value<line_sep><return>real_addr<block_end><def_stmt>resource_len self<block_start><return>len(self.get_driver_data())<block_end><block_end>resource_list=[]<line_sep>HRSRC_dict={}<line_sep>HRSRC_attibution=itertools.count(0x42424242)<line_sep>@windows.hooks.Callback(PVOID PVOID PVOID PVOID)<def_stmt>FindResourceWHook hModule lpName lpType real_function<block_start><for_stmt>res resource_list<block_start><if_stmt>res.match(hModule lpName lpType)<block_start>HRSRC=next(HRSRC_attibution)<line_sep>HRSRC_dict[HRSRC]=res<line_sep><return>HRSRC<block_end><block_end><return>real_function()<block_end>@windows.hooks.SizeofResourceCallback<def_stmt>SizeofResourceHook hModule hResInfo real_function<block_start><if_stmt>hResInfo<in>HRSRC_dict<block_start><return>HRSRC_dict[hResInfo].resource_len()<block_end><return>real_function()<block_end>@windows.hooks.LoadResourceCallback<def_stmt>LoadResourceHook hModule hResInfo real_function<block_start><if_stmt>hResInfo<in>HRSRC_dict<block_start><return>HRSRC_dict[hResInfo].load_resource()<block_end><return>real_function()<block_end>@windows.hooks.LockResourceCallback<def_stmt>LockResourceHook hResData real_function<block_start>x=real_function()<line_sep><return>x<block_end> |
<import_from_stmt>rest_framework.serializers ModelSerializer<import_from_stmt>api.models Category Post<class_stmt>CategorySerializer(ModelSerializer)<block_start><class_stmt>Meta<block_start>model=Category<line_sep>fields='__all__'<block_end><block_end><class_stmt>PostSerializer(ModelSerializer)<block_start><class_stmt>Meta<block_start>model=Post<line_sep>fields='__all__'<block_end><block_end> |
<import_stmt>math threading time<import_from_stmt>.. colors<import_from_stmt>..util deprecated log<import_from_stmt>. matrix_drawing<as>md<import_from_stmt>. font<import_from_stmt>.layout MultiLayout<import_from_stmt>.geometry make_matrix_coord_map_multi<import_from_stmt>.geometry.matrix make_matrix_coord_map make_matrix_coord_map_positions <line_sep>ROTATION_WARNING="""
Matrix.rotation must be a multiple of 90 degrees but was in fact %s degrees.
It was rounded to %s degrees."""<class_stmt>Matrix(MultiLayout)<block_start>CLONE_ATTRS=MultiLayout.CLONE_ATTRS+('width' 'height' 'rotation' 'vert_flip' 'y_flip' 'serpentine' 'pixelSize')<def_stmt>__init__ self drivers width=0 height=0 rotation=0 vert_flip=<false> y_flip=<false> serpentine=<true> threadedUpdate=<false> brightness=255 pixelSize=(1 1) **kwargs<block_start>"""Main class for matricies.
drivers -- a driver instance (or list of instances) inheriting from DriverBase
width -- X axis size of the matrix
height -- Y axis size of the matrix
coord_map -- a 2D matrix defining the X,Y to strip index mapping.
Not needed in most cases
rotation -- how to rotate when generating the map.
Not used if coord_map is specified
vert_flip -- flips the generated map along the Y axis.
This, along with rotation, can achieve any orientation
"""<line_sep>self.gen_multi=make_matrix_coord_map_multi<line_sep>super().__init__(drivers threadedUpdate brightness **kwargs)<line_sep>rot_mod=rotation%360<line_sep>self.rotation=90<times>round(rot_mod/90)<if_stmt>self.rotation<ne>rot_mod<block_start>log.warning(ROTATION_WARNING rotation self.rotation)<block_end>self.width=width<or>getattr(self.drivers[0] 'width')<or>0<line_sep>self.height=height<or>getattr(self.drivers[0] 'height')<or>0<line_sep>self.vert_flip=vert_flip<line_sep>self.y_flip=y_flip<line_sep>self.serpentine=serpentine<line_sep>self.pixelSize=pixelSize<line_sep>pw,ph=self.pixelSize<line_sep># If both are 0, try to assume it's a square display.
<if_stmt><not>(self.width<or>self.height)<block_start>square=int(math.sqrt(self.numLEDs))<if_stmt>(square<times>square)<eq>self.numLEDs<block_start>self.width=self.height=square<block_end><else_stmt><block_start><raise>TypeError('No width or height passed but '<concat>'the number of LEDs is not a perfect square')<block_end><block_end><if_stmt>self.width<times>self.height<g>self.numLEDs<block_start><raise>ValueError('width * height cannot exceed total pixel count! %s * %s > %s'%(self.width self.height self.numLEDs))<block_end><if_stmt><not>self.coord_map<block_start><if_stmt>len(self.drivers)<eq>1# TODO: this should really go into documentation
<block_start>log.debug('Auto generating coordinate map. Use make_matrix_coord_map '<concat>'directly if more control needed.')<line_sep># was switched to y_flip, but need to keep vert_flip available
y_flip=y_flip<or>vert_flip<line_sep>self.coord_map=make_matrix_coord_map(self.width self.height serpentine=serpentine rotation=rotation y_flip=vert_flip)<block_end><elif_stmt>self.drivers<block_start><raise>TypeError('Must provide coord_map if using multiple drivers!')<block_end><block_end>self.set_pixel_positions(make_matrix_coord_map_positions(self.coord_map))<line_sep># If rotation is 90 or 270 degrees, dimensions need to be swapped so
# they match the matrix rotation.
<if_stmt>rotation<in>(90 270)<block_start>w=self.width<line_sep>h=self.height<line_sep>self.width=h<line_sep>self.height=w<block_end>self.texture=<none><line_sep>self.set=self._setColor<if_stmt>pw<l>0<or>pw<g>self.width<or>ph<l>0<or>ph<g>self.height<block_start><raise>ValueError('pixelSize must be greater than 0 '<concat>'and not larger than total matrix')<block_end><if_stmt>self.width%pw<ne>0<or>self.height%ph<ne>0<block_start><raise>ValueError('pixelSize must evenly divide into matrix dimensions!')<block_end><if_stmt>pw<eq>1<and>ph<eq>1<block_start>self._set=self.__setNormal<block_end><else_stmt><block_start>self._set=self.__setScaled<line_sep>self.width=self.width/pw<line_sep>self.height=self.height/ph<line_sep>self.numLEDs=self.width<times>self.height<block_end>self.fonts=font.fonts<block_end>@property<def_stmt>shape self<block_start>"""Returns ``width, height``"""<line_sep><return>self.width self.height<block_end><def_stmt>get self x y<block_start>"""
Return the pixel color at position (x, y), or Colors.black if that
position is out-of-bounds.
"""<try_stmt><block_start>pixel=self.coord_map[y][x]<line_sep><return>self._get_base(pixel)<block_end><except_stmt>IndexError<block_start><return>colors.COLORS.Black<block_end><block_end><def_stmt>set self x y color<block_start>"""Set the pixel color at position x, y."""<line_sep># The actual implementation of this method is computed at construction
# time and monkey-patched in from one of self._setTexture,
# self.__setNormal or self.__setScaled
<raise>NotImplementedError<block_end><def_stmt>get_pixel_positions self<block_start><return>make_matrix_coord_map_positions(self.coord_map)<block_end><def_stmt>loadFont self name height width data<block_start>self.fonts[name]={'data':data 'height':height 'width':width}<block_end><def_stmt>setTexture self tex=<none><block_start><if_stmt>tex<is><none><block_start>self.texture=tex<line_sep>self.set=self._setColor<line_sep><return><block_end><if_stmt><not>isinstance(tex list)<block_start><raise>ValueError('Texture must be a list!')<block_end><if_stmt>len(tex)<ne>self.height<block_start><raise>ValueError('Given texture is must be {} high!'.format(self.height))<block_end><for_stmt>r tex<block_start><if_stmt><not>isinstance(r list)<block_start><raise>ValueError('Texture rows must be lists!')<block_end><if_stmt>len(r)<ne>self.width<block_start><raise>ValueError('Texture rows must be {} wide!'.format(self.width))<block_end><block_end>self.texture=tex<line_sep>self.set=self._setTexture<block_end><def_stmt>__setNormal self x y color<block_start><try_stmt><block_start>pixel=self.coord_map[y][x]<line_sep>self._set_base(pixel color)<block_end><except_stmt>IndexError<block_start><pass><block_end><block_end><def_stmt>__setScaled self x y color<block_start>sx=x<times>self.pixelSize[0]<line_sep>sy=y<times>self.pixelSize[1]<for_stmt>xs range(sx sx+self.pixelSize[0])<block_start><for_stmt>ys range(sy sy+self.pixelSize[1])<block_start>self.__setNormal(xs ys color)<block_end><block_end><block_end># Set single pixel to Color value
<def_stmt>_setColor self x y color=<none><block_start><try_stmt><block_start>self._set(x y color<or>(0 0 0))<block_end><except_stmt>IndexError<block_start><pass><block_end><block_end><def_stmt>_setTexture self x y color=<none><block_start><if_stmt>x<ge>0<and>y<ge>0<block_start><try_stmt><block_start>self._set(x y color<or>self.texture[y][x])<block_end><except_stmt>IndexError<block_start><pass><block_end><block_end><block_end><def_stmt>setHSV self x y hsv<block_start>color=colors.hsv2rgb(hsv)<line_sep>self._set(x y color)<block_end><def_stmt>setRGB self x y r g b<block_start>color=(r g b)<line_sep>self._set(x y color)<block_end>##########################################################################
# Drawing Functions
# Lovingly borrowed from Adafruit
# https://github.com/adafruit/Adafruit-GFX-Library/blob/master/Adafruit_GFX.cpp
##########################################################################
<def_stmt>drawCircle self x0 y0 r color=<none><block_start>"""
Draw a circle in an RGB color, with center x0, y0 and radius r.
"""<line_sep>md.draw_circle(self.set x0 y0 r color)<block_end><def_stmt>fillCircle self x0 y0 r color=<none><block_start>"""
Draw a filled circle in an RGB color, with center x0, y0 and radius r.
"""<line_sep>md.fill_circle(self.set x0 y0 r color)<block_end><def_stmt>drawLine self x0 y0 x1 y1 color=<none> colorFunc=<none> aa=<false><block_start>"""
Draw a line between (x0, y0) and (x1, y1) in an RGB color.
:param colorFunc: a function that takes an integer from x0 to x1 and
returns a color corresponding to that point
:param aa: if True, use Bresenham's algorithm for line drawing;
otherwise use Xiaolin Wu's algorithm
"""<line_sep>md.draw_line(self.set x0 y0 x1 y1 color colorFunc aa)<block_end># Bresenham's algorithm
<def_stmt>bresenham_line self x0 y0 x1 y1 color=<none> colorFunc=<none><block_start>"""
Draw line from point x0, y0 to x1, y1 using Bresenham's algorithm.
Will draw beyond matrix bounds.
"""<line_sep>md.bresenham_line(self.set x0 y0 x1 y1 color colorFunc)<block_end># Xiaolin Wu's Line Algorithm
<def_stmt>wu_line self x0 y0 x1 y1 color=<none> colorFunc=<none><block_start>"""
Draw an anti-aliased line between (x0, y0) and (x1, y1) in an RGB color
using Xiaolin Wu's algorithm.
:param colorFunc: a function that takes an integer from x0 to x1 and
returns a color corresponding to that point
"""<line_sep>md.wu_line(self.set x0 y0 x1 y1 color colorFunc)<block_end><def_stmt>drawRect self x y w h color=<none> aa=<false><block_start>"""
Draw a rectangle with top-left corner at (x, y), width w and height h
:param aa: if True, use Bresenham's algorithm for line drawing;
otherwise use Xiaolin Wu's algorithm
"""<line_sep>md.draw_rect(self.set x y w h color aa)<block_end><def_stmt>fillRect self x y w h color=<none> aa=<false><block_start>"""
Draw a solid rectangle with top-left corner at (x, y), width w and
height h.
:param aa: if True, use Bresenham's algorithm for line drawing;
otherwise use Xiaolin Wu's algorithm
"""<line_sep>md.fill_rect(self.set x y w h color aa)<block_end><def_stmt>fillScreen self color=<none><block_start>"""Fill the matrix with the given RGB color"""<line_sep>md.fill_rect(self.set 0 0 self.width self.height color)<block_end><def_stmt>drawRoundRect self x y w h r color=<none> aa=<false><block_start>"""
Draw a rounded rectangle with top-left corner at (x, y), width w,
height h, and corner radius r
:param aa: if True, use Bresenham's algorithm for line drawing;
otherwise use Xiaolin Wu's algorithm
"""<line_sep>md.draw_round_rect(self.set x y w h r color aa)<block_end><def_stmt>fillRoundRect self x y w h r color=<none> aa=<false><block_start>"""
Draw a rounded rectangle with top-left corner at (x, y), width w,
height h, and corner radius r
:param aa: if True, use Bresenham's algorithm for line drawing;
otherwise use Xiaolin Wu's algorithm
"""<line_sep>md.fill_round_rect(self.set x y w h r color aa)<block_end><def_stmt>drawTriangle self x0 y0 x1 y1 x2 y2 color=<none> aa=<false><block_start>"""
Draw triangle with vertices (x0, y0), (x1, y1) and (x2, y2)
:param aa: if True, use Bresenham's algorithm for line drawing;
Otherwise use Xiaolin Wu's algorithm
"""<line_sep>md.draw_triangle(self.set x0 y0 x1 y1 x2 y2 color aa)<block_end><def_stmt>fillTriangle self x0 y0 x1 y1 x2 y2 color=<none> aa=<false><block_start>"""
Draw filled triangle with points x0,y0 - x1,y1 - x2,y2
:param aa: if True, use Bresenham's algorithm for line drawing;
otherwise use Xiaolin Wu's algorithm
"""<line_sep>md.fill_triangle(self.set x0 y0 x1 y1 x2 y2 color aa)<block_end><if_stmt>deprecated.allowed()# pragma: no cover
<block_start>fillTrangle=fillTriangle<block_end><def_stmt>drawChar self x y c color bg aa=<false> font=font.default_font font_scale=1<block_start>"""
Draw a single character c at (x, y) in an RGB color.
"""<line_sep>md.draw_char(self.fonts self.set self.width self.height x y c color bg aa font font_scale)<block_end><def_stmt>drawText self text x=0 y=0 color=<none> bg=colors.COLORS.Off aa=<false> font=font.default_font font_scale=1<block_start>"""
Draw a line of text starting at (x, y) in an RGB color.
:param colorFunc: a function that takes an integer from x0 to x1 and
returns a color corresponding to that point
:param aa: if True, use Bresenham's algorithm for line drawing;
otherwise use Xiaolin Wu's algorithm
"""<line_sep>md.draw_text(self.fonts self.set text self.width self.height x y color bg aa font font_scale)<block_end><block_end><if_stmt>deprecated.allowed()# pragma: no cover
<block_start>LEDMatrix=Matrix<block_end> |
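# Rough usage sketch of the layout defined above. `driver` stands in for any
# DriverBase-compatible driver instance; colors are plain RGB tuples.
layout = Matrix([driver], width=16, height=16, serpentine=True)
layout.fillScreen((0, 0, 0))
layout.drawCircle(8, 8, r=5, color=(255, 0, 0))
layout.drawText('Hi', x=2, y=4, color=(0, 255, 0))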
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep># pixel cluster vertex finder
<import_from_stmt>RecoHI.HiTracking.HIPixelClusterVertex_cfi *<line_sep># pixel track producer
<import_from_stmt>RecoHI.HiTracking.HIPixel3ProtoTracks_cfi *<line_sep># fast vertex finding
<import_from_stmt>RecoHI.HiTracking.HIPixelMedianVertex_cfi *<line_sep># selected pixel tracks
<import_from_stmt>RecoHI.HiTracking.HISelectedProtoTracks_cfi *<line_sep># accurate vertex finding
<import_from_stmt>RecoHI.HiTracking.HIPixelAdaptiveVertex_cfi *<line_sep># selection of best primary vertex
<import_from_stmt>RecoHI.HiTracking.HIBestVertexSequences_cff *<line_sep>hiPixelVerticesTask=cms.Task(hiPixelClusterVertex PixelLayerTriplets hiPixel3ProtoTracksTask hiPixelMedianVertex hiSelectedProtoTracks hiPixelAdaptiveVertex bestHiVertexTask)<line_sep>hiPixelVertices=cms.Sequence(hiPixelVerticesTask)<line_sep> |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-15 13:16
<import_from_future_stmt> unicode_literals<import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('inventory' '0022_auto_20170530_0724') ]<line_sep>operations=[migrations.CreateModel(name='PuppetCertificateExtension' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('mt_hash' models.CharField(max_length=40 unique=<true>)) ('mt_created_at' models.DateTimeField(auto_now_add=<true>)) ('extension_key' models.TextField()) ('extension_value' models.TextField()) ] options={'abstract':<false> } ) migrations.CreateModel(name='PuppetDBInventory' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('mt_hash' models.CharField(max_length=40 unique=<true>)) ('mt_created_at' models.DateTimeField(auto_now_add=<true>)) ('certname_trusted' models.TextField()) ('authenticated' models.TextField()) ('aio_agent_version' models.TextField(blank=<true> null=<true>)) ('environment' models.TextField(blank=<true> null=<true>)) ('timestamp' models.DateTimeField()) ('agent_specified_environment' models.TextField(blank=<true> null=<true>)) ('clientversion' models.TextField(blank=<true> null=<true>)) ('extensions' models.ManyToManyField(to='inventory.PuppetCertificateExtension')) ] options={'abstract':<false> } ) migrations.CreateModel(name='PuppetFact' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('mt_hash' models.CharField(max_length=40 unique=<true>)) ('mt_created_at' models.DateTimeField(auto_now_add=<true>)) ('fact_key' models.TextField()) ('fact_key_display_name' models.TextField()) ('fact_value' models.TextField()) ] options={'abstract':<false> } ) migrations.AddField(model_name='puppetdbinventory' name='facts' field=models.ManyToManyField(to='inventory.PuppetFact') ) migrations.AddField(model_name='machinesnapshot' name='puppetdb_inventory' field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.CASCADE to='inventory.PuppetDBInventory') ) ]<block_end> |
<import_from_stmt>.._util loadfixture<line_sep>teamocil_yaml=loadfixture('config_teamocil/test1.yaml')<line_sep>teamocil_conf={'windows':[{'name':'sample-two-panes' 'root':'~/Code/sample/www' 'layout':'even-horizontal' 'panes':[{'cmd':['pwd' 'ls -la']} {'cmd':'rails server --port 3000'}] }]}<line_sep>expected={'session_name':<none> 'windows':[{'window_name':'sample-two-panes' 'layout':'even-horizontal' 'start_directory':'~/Code/sample/www' 'panes':[{'shell_command':['pwd' 'ls -la']} {'shell_command':'rails server --port 3000'} ] }] }<line_sep> |
<import_stmt>math<import_from_stmt>typing List Union Sequence<import_from_stmt>pyrep.backend sim<import_from_stmt>pyrep.objects.object Object object_type_to_class<import_stmt>numpy<as>np<import_from_stmt>pyrep.const ObjectType PerspectiveMode RenderMode<class_stmt>VisionSensor(Object)<block_start>"""A camera-type sensor, reacting to light, colors and images.
"""<def_stmt>__init__ self name_or_handle:Union[str int]<block_start>super().__init__(name_or_handle)<line_sep>self.resolution=sim.simGetVisionSensorResolution(self._handle)<block_end>@staticmethod<def_stmt>create resolution:List[int] explicit_handling=<false> perspective_mode=<true> show_volume_not_detecting=<true> show_volume_detecting=<true> passive=<false> use_local_lights=<false> show_fog=<true> near_clipping_plane=1e-2 far_clipping_plane=10.0 view_angle=60.0 ortho_size=1.0 sensor_size=<none> render_mode=RenderMode.OPENGL3 position=<none> orientation=<none><arrow>'VisionSensor'<block_start>""" Create a Vision Sensor
:param resolution: List of the [x, y] resolution.
:param explicit_handling: Sensor will be explicitly handled.
:param perspective_mode: Sensor will be operated in Perspective Mode.
Orthographic mode if False.
:param show_volume_not_detecting: Sensor volume will be shown when not
detecting anything.
:param show_volume_detecting: Sensor will be shown when detecting.
:param passive: Sensor will be passive (use an external image).
:param use_local_lights: Sensor will use local lights.
:param show_fog: Sensor will show fog (if enabled).
:param near_clipping_plane: Near clipping plane.
:param far_clipping_plane: Far clipping plane.
:param view_angle: Perspective angle (in degrees) if in Perspective Mode.
:param ortho_size: Orthographic projection size [m] if in Orthographic
Mode.
:param sensor_size: Size [x, y, z] of the Vision Sensor object.
:param render_mode: Sensor rendering mode, one of:
RenderMode.OPENGL
RenderMode.OPENGL_AUXILIARY
RenderMode.OPENGL_COLOR_CODED
RenderMode.POV_RAY
RenderMode.EXTERNAL
RenderMode.EXTERNAL_WINDOWED
RenderMode.OPENGL3
RenderMode.OPENGL3_WINDOWED
:param position: The [x, y, z] position, if specified.
:param orientation: The [x, y, z] orientation in radians, if specified.
:return: The created Vision Sensor.
"""<line_sep>options=0<if_stmt>explicit_handling<block_start>options<augor>1<block_end><if_stmt>perspective_mode<block_start>options<augor>2<block_end><if_stmt><not>show_volume_not_detecting<block_start>options<augor>4<block_end><if_stmt><not>show_volume_detecting<block_start>options<augor>8<block_end><if_stmt>passive<block_start>options<augor>16<block_end><if_stmt>use_local_lights<block_start>options<augor>32<block_end><if_stmt><not>show_fog<block_start>options<augor>64<block_end>int_params=[resolution[0] # 0
resolution[1] # 1
0 # 2
0# 3
]<if_stmt>sensor_size<is><none><block_start>sensor_size=[0.01 0.01 0.03]<block_end>float_params=[near_clipping_plane # 0
far_clipping_plane # 1
math.radians(view_angle)<if>perspective_mode<else>ortho_size # 2
sensor_size[0] # 3
sensor_size[1] # 4
sensor_size[2] # 5
0.0 # 6
0.0 # 7
0.0 # 8
0.0 # 9
0.0 # 10
]<line_sep>vs=VisionSensor(sim.simCreateVisionSensor(options int_params float_params <none>))<line_sep>vs.set_render_mode(render_mode)<if_stmt>position<is><not><none><block_start>vs.set_position(position)<block_end><if_stmt>orientation<is><not><none><block_start>vs.set_orientation(orientation)<block_end><return>vs<block_end><def_stmt>_get_requested_type self<arrow>ObjectType<block_start><return>ObjectType.VISION_SENSOR<block_end><def_stmt>handle_explicitly self<arrow><none><block_start>"""Handle sensor explicitly.
This enables capturing image (e.g., capture_rgb())
without PyRep.step().
"""<if_stmt><not>self.get_explicit_handling()<block_start><raise>RuntimeError('The explicit_handling is disabled. '<concat>'Call set_explicit_handling(value=1) to enable explicit_handling first.')<block_end>sim.simHandleVisionSensor(self._handle)<block_end><def_stmt>capture_rgb self<arrow>np.ndarray<block_start>"""Retrieves the rgb-image of a vision sensor.
:return: A numpy array of size (width, height, 3)
"""<line_sep><return>sim.simGetVisionSensorImage(self._handle self.resolution)<block_end><def_stmt>capture_depth self in_meters=<false><arrow>np.ndarray<block_start>"""Retrieves the depth-image of a vision sensor.
:param in_meters: Whether the depth should be returned in meters.
:return: A numpy array of size (width, height)
"""<line_sep><return>sim.simGetVisionSensorDepthBuffer(self._handle self.resolution in_meters)<block_end><def_stmt>capture_pointcloud self<arrow>np.ndarray<block_start>"""Retrieves point cloud in word frame.
:return: A numpy array of size (width, height, 3)
"""<line_sep>d=self.capture_depth(in_meters=<true>)<line_sep><return>self.pointcloud_from_depth(d)<block_end><def_stmt>pointcloud_from_depth self depth:np.ndarray<arrow>np.ndarray<block_start>"""Converts depth (in meters) to point cloud in word frame.
:return: A numpy array of size (width, height, 3)
"""<line_sep>intrinsics=self.get_intrinsic_matrix()<line_sep><return>VisionSensor.pointcloud_from_depth_and_camera_params(depth self.get_matrix() intrinsics)<block_end>@staticmethod<def_stmt>pointcloud_from_depth_and_camera_params depth:np.ndarray extrinsics:np.ndarray intrinsics:np.ndarray<arrow>np.ndarray<block_start>"""Converts depth (in meters) to point cloud in word frame.
:return: A numpy array of size (width, height, 3)
"""<line_sep>upc=_create_uniform_pixel_coords_image(depth.shape)<line_sep>pc=upc<times>np.expand_dims(depth -1)<line_sep>C=np.expand_dims(extrinsics[:3 3] 0).T<line_sep>R=extrinsics[:3 :3]<line_sep>R_inv=R.T# inverse of rot matrix is transpose
R_inv_C=np.matmul(R_inv C)<line_sep>extrinsics=np.concatenate((R_inv -R_inv_C) -1)<line_sep>cam_proj_mat=np.matmul(intrinsics extrinsics)<line_sep>cam_proj_mat_homo=np.concatenate([cam_proj_mat [np.array([0 0 0 1])]])<line_sep>cam_proj_mat_inv=np.linalg.inv(cam_proj_mat_homo)[0:3]<line_sep>world_coords_homo=np.expand_dims(_pixel_to_world_coords(pc cam_proj_mat_inv) 0)<line_sep>world_coords=world_coords_homo[<ellipsis> :-1][0]<line_sep><return>world_coords<block_end><def_stmt>get_intrinsic_matrix self<block_start>res=np.array(self.get_resolution())<line_sep>pp_offsets=res/2<line_sep>ratio=res[0]/res[1]<line_sep>pa_x=pa_y=math.radians(self.get_perspective_angle())<if_stmt>ratio<g>1<block_start>pa_y=2<times>np.arctan(np.tan(pa_y/2)/ratio)<block_end><elif_stmt>ratio<l>1<block_start>pa_x=2<times>np.arctan(np.tan(pa_x/2)<times>ratio)<block_end>persp_angles=np.array([pa_x pa_y])<line_sep>focal_lengths=-res/(2<times>np.tan(persp_angles/2))<line_sep><return>np.array([[focal_lengths[0] 0. pp_offsets[0]] [0. focal_lengths[1] pp_offsets[1]] [0. 0. 1.]])<block_end><def_stmt>get_resolution self<arrow>List[int]<block_start>""" Return the Sensor's resolution.
:return: Resolution [x, y]
"""<line_sep><return>sim.simGetVisionSensorResolution(self._handle)<block_end><def_stmt>set_resolution self resolution:List[int]<arrow><none><block_start>""" Set the Sensor's resolution.
:param resolution: New resolution [x, y]
"""<line_sep>sim.simSetObjectInt32Parameter(self._handle sim.sim_visionintparam_resolution_x resolution[0])<line_sep>sim.simSetObjectInt32Parameter(self._handle sim.sim_visionintparam_resolution_y resolution[1])<line_sep>self.resolution=resolution<block_end><def_stmt>get_perspective_mode self<arrow>PerspectiveMode<block_start>""" Retrieve the Sensor's perspective mode.
:return: The current PerspectiveMode.
"""<line_sep>perspective_mode=sim.simGetObjectInt32Parameter(self._handle sim.sim_visionintparam_perspective_operation )<line_sep><return>PerspectiveMode(perspective_mode)<block_end><def_stmt>set_perspective_mode self perspective_mode:PerspectiveMode<arrow><none><block_start>""" Set the Sensor's perspective mode.
:param perspective_mode: The new perspective mode, one of:
PerspectiveMode.ORTHOGRAPHIC
PerspectiveMode.PERSPECTIVE
"""<line_sep>sim.simSetObjectInt32Parameter(self._handle sim.sim_visionintparam_perspective_operation perspective_mode.value)<block_end><def_stmt>get_render_mode self<arrow>RenderMode<block_start>""" Retrieves the Sensor's rendering mode
:return: RenderMode for the current rendering mode.
"""<line_sep>render_mode=sim.simGetObjectInt32Parameter(self._handle sim.sim_visionintparam_render_mode)<line_sep><return>RenderMode(render_mode)<block_end><def_stmt>set_render_mode self render_mode:RenderMode<arrow><none><block_start>""" Set the Sensor's rendering mode
:param render_mode: The new sensor rendering mode, one of:
RenderMode.OPENGL
RenderMode.OPENGL_AUXILIARY
RenderMode.OPENGL_COLOR_CODED
RenderMode.POV_RAY
RenderMode.EXTERNAL
RenderMode.EXTERNAL_WINDOWED
RenderMode.OPENGL3
RenderMode.OPENGL3_WINDOWED
"""<line_sep>sim.simSetObjectInt32Parameter(self._handle sim.sim_visionintparam_render_mode render_mode.value)<block_end><def_stmt>get_windowed_size self<arrow>Sequence[int]<block_start>"""Get the size of windowed rendering.
:return: The (x, y) resolution of the window. 0 for full-screen.
"""<line_sep>size_x=sim.simGetObjectInt32Parameter(self._handle sim.sim_visionintparam_windowed_size_x)<line_sep>size_y=sim.simGetObjectInt32Parameter(self._handle sim.sim_visionintparam_windowed_size_y)<line_sep><return>size_x size_y<block_end><def_stmt>set_windowed_size self resolution:Sequence[int]=(0 0)<arrow><none><block_start>"""Set the size of windowed rendering.
:param resolution: The (x, y) resolution of the window.
0 for full-screen.
"""<line_sep>sim.simSetObjectInt32Parameter(self._handle sim.sim_visionintparam_windowed_size_x resolution[0])<line_sep>sim.simSetObjectInt32Parameter(self._handle sim.sim_visionintparam_windowed_size_y resolution[1])<block_end><def_stmt>get_perspective_angle self<arrow>float<block_start>""" Get the Sensor's perspective angle.
:return: The sensor's perspective angle (in degrees).
"""<line_sep><return>math.degrees(sim.simGetObjectFloatParameter(self._handle sim.sim_visionfloatparam_perspective_angle))<block_end><def_stmt>set_perspective_angle self angle:float<arrow><none><block_start>""" Set the Sensor's perspective angle.
:param angle: New perspective angle (in degrees)
"""<line_sep>sim.simSetObjectFloatParameter(self._handle sim.sim_visionfloatparam_perspective_angle math.radians(angle))<block_end><def_stmt>get_orthographic_size self<arrow>float<block_start>""" Get the Sensor's orthographic size.
:return: The sensor's orthographic size (in metres).
"""<line_sep><return>sim.simGetObjectFloatParameter(self._handle sim.sim_visionfloatparam_ortho_size)<block_end><def_stmt>set_orthographic_size self ortho_size:float<arrow><none><block_start>""" Set the Sensor's orthographic size.
:param ortho_size: New orthographic size (in metres)
"""<line_sep>sim.simSetObjectFloatParameter(self._handle sim.sim_visionfloatparam_ortho_size ortho_size)<block_end><def_stmt>get_near_clipping_plane self<arrow>float<block_start>""" Get the Sensor's near clipping plane.
:return: Near clipping plane (metres)
"""<line_sep><return>sim.simGetObjectFloatParameter(self._handle sim.sim_visionfloatparam_near_clipping)<block_end><def_stmt>set_near_clipping_plane self near_clipping:float<arrow><none><block_start>""" Set the Sensor's near clipping plane.
:param near_clipping: New near clipping plane (in metres)
"""<line_sep>sim.simSetObjectFloatParameter(self._handle sim.sim_visionfloatparam_near_clipping near_clipping)<block_end><def_stmt>get_far_clipping_plane self<arrow>float<block_start>""" Get the Sensor's far clipping plane.
:return: Far clipping plane (metres)
"""<line_sep><return>sim.simGetObjectFloatParameter(self._handle sim.sim_visionfloatparam_far_clipping)<block_end><def_stmt>set_far_clipping_plane self far_clipping:float<arrow><none><block_start>""" Set the Sensor's far clipping plane.
:param far_clipping: New far clipping plane (in metres)
"""<line_sep>sim.simSetObjectFloatParameter(self._handle sim.sim_visionfloatparam_far_clipping far_clipping)<block_end><def_stmt>set_entity_to_render self entity_to_render:int<arrow><none><block_start>""" Set the entity to render to the Sensor, this can be an object or more usefully a collection.
-1 to render all objects in scene.
:param entity_to_render: Handle of the entity to render
"""<line_sep>sim.simSetObjectInt32Parameter(self._handle sim.sim_visionintparam_entity_to_render entity_to_render)<block_end><def_stmt>get_entity_to_render self<arrow><none><block_start>""" Get the entity to render to the Sensor, this can be an object or more usefully a collection.
-1 if all objects in scene are rendered.
:return: Handle of the entity to render
"""<line_sep><return>sim.simGetObjectInt32Parameter(self._handle sim.sim_visionintparam_entity_to_render)<block_end><block_end><def_stmt>_create_uniform_pixel_coords_image resolution:np.ndarray<block_start>pixel_x_coords=np.reshape(np.tile(np.arange(resolution[1]) [resolution[0]]) (resolution[0] resolution[1] 1)).astype(np.float32)<line_sep>pixel_y_coords=np.reshape(np.tile(np.arange(resolution[0]) [resolution[1]]) (resolution[1] resolution[0] 1)).astype(np.float32)<line_sep>pixel_y_coords=np.transpose(pixel_y_coords (1 0 2))<line_sep>uniform_pixel_coords=np.concatenate((pixel_x_coords pixel_y_coords np.ones_like(pixel_x_coords)) -1)<line_sep><return>uniform_pixel_coords<block_end><def_stmt>_transform coords trans<block_start>h,w=coords.shape[:2]<line_sep>coords=np.reshape(coords (h<times>w -1))<line_sep>coords=np.transpose(coords (1 0))<line_sep>transformed_coords_vector=np.matmul(trans coords)<line_sep>transformed_coords_vector=np.transpose(transformed_coords_vector (1 0))<line_sep><return>np.reshape(transformed_coords_vector (h w -1))<block_end><def_stmt>_pixel_to_world_coords pixel_coords cam_proj_mat_inv<block_start>h,w=pixel_coords.shape[:2]<line_sep>pixel_coords=np.concatenate([pixel_coords np.ones((h w 1))] -1)<line_sep>world_coords=_transform(pixel_coords cam_proj_mat_inv)<line_sep>world_coords_homo=np.concatenate([world_coords np.ones((h w 1))] axis=-1)<line_sep><return>world_coords_homo<block_end>object_type_to_class[ObjectType.VISION_SENSOR]=VisionSensor<line_sep> |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 4 16:09:56 2016
@author: <EMAIL>
"""<import_stmt>numpy<as>np<import_stmt>scipy<import_stmt>matplotlib.pyplot<as>plt<import_stmt>seaborn<as>sns<line_sep>#%matplotlib inline
'''
Mahalanobis distance
====================
'''<import_from_stmt>matplotlib.patches Ellipse<def_stmt>plot_cov_ellipse cov pos nstd=2 ax=<none> **kwargs<block_start>"""
Plots an `nstd` sigma error ellipse based on the specified covariance
matrix (`cov`). Additional keyword arguments are passed on to the
ellipse patch artist.
Parameters
----------
cov : The 2x2 covariance matrix to base the ellipse on
pos : The location of the center of the ellipse. Expects a 2-element
sequence of [x0, y0].
nstd : The radius of the ellipse in numbers of standard deviations.
Defaults to 2 standard deviations.
ax : The axis that the ellipse will be plotted on. Defaults to the
current axis.
Additional keyword arguments are passed on to the ellipse patch.
Returns
-------
A matplotlib ellipse artist
"""<def_stmt>eigsorted cov<block_start>vals,vecs=np.linalg.eigh(cov)<line_sep>order=vals.argsort()[::-1]<line_sep><return>vals[order] vecs[: order]<block_end><if_stmt>ax<is><none><block_start>ax=plt.gca()<block_end>vals,vecs=eigsorted(cov)<line_sep>theta=np.degrees(np.arctan2(*vecs[: 0][::-1]))<line_sep># Width and height are "full" widths, not radius
width,height=2<times>nstd<times>np.sqrt(vals)<line_sep>ellip=Ellipse(xy=pos width=width height=height angle=theta **kwargs)<line_sep>ax.add_artist(ellip)<line_sep><return>ellip<block_end>n_samples,n_features=100 2<line_sep>mean0,mean1=np.array([0 0]) np.array([0 2])<line_sep>Cov=np.array([[1 .8] [.8 1]])<line_sep>np.random.seed(42)<line_sep>X0=np.random.multivariate_normal(mean0 Cov n_samples)<line_sep>X1=np.random.multivariate_normal(mean1 Cov n_samples)<line_sep>x=np.array([2 2])<line_sep>plt.scatter(X0[: 0] X0[: 1] color='b')<line_sep>plt.scatter(X1[: 0] X1[: 1] color='r')<line_sep>plt.scatter(mean0[0] mean0[1] color='b' s=200 label="m0")<line_sep>plt.scatter(mean1[0] mean1[1] color='r' s=200 label="m2")<line_sep>plt.scatter(x[0] x[1] color='k' s=200 label="x")<line_sep>plot_cov_ellipse(Cov pos=mean0 facecolor='none' linewidth=2 edgecolor='b')<line_sep>plot_cov_ellipse(Cov pos=mean1 facecolor='none' linewidth=2 edgecolor='r')<line_sep>plt.legend(loc='upper left')<line_sep>#
d2_m0x=scipy.spatial.distance.euclidean(mean0 x)<line_sep>d2_m0m2=scipy.spatial.distance.euclidean(mean0 mean1)<line_sep>Covi=scipy.linalg.inv(Cov)<line_sep>dm_m0x=scipy.spatial.distance.mahalanobis(mean0 x Covi)<line_sep>dm_m0m2=scipy.spatial.distance.mahalanobis(mean0 mean1 Covi)<line_sep>print('Euclidean dist(m0, x)=%.2f > dist(m0, m2)=%.2f'%(d2_m0x d2_m0m2))<line_sep>print('Mahalanobis dist(m0, x)=%.2f < dist(m0, m2)=%.2f'%(dm_m0x dm_m0m2))<line_sep>'''
## Exercise
- Write a function `euclidean(a, b)` that computes the Euclidean distance
- Write a function `mahalanobis(a, b, Covi)` that computes the Mahalanobis
distance, i.e. the Euclidean distance weighted by the inverse of the covariance
matrix. Use `scipy.linalg.inv(Cov)` to invert your matrix.
'''<def_stmt>euclidian a b<block_start><return>np.sqrt(np.sum((a-b)<power>2))<block_end><def_stmt>mahalanobis a b cov_inv<block_start><return>np.sqrt(np.dot(np.dot((a-b) cov_inv) (a-b).T))<block_end><assert_stmt>mahalanobis(mean0 mean1 Covi)<eq>dm_m0m2<assert_stmt>euclidian(mean0 mean1)<eq>d2_m0m2<line_sep>mahalanobis(X0 mean0 Covi)<line_sep>X=X0<line_sep>mean=mean0<line_sep>covi=Covi<line_sep>np.sqrt(np.dot(np.dot((X-mean) covi) (X-mean).T))<def_stmt>mahalanobis X mean covi<block_start>"""
from scipy.spatial.distance import mahalanobis
d2= np.array([mahalanobis(X[i], mean, covi) for i in range(X.shape[0])])
np.all(mahalanobis(X, mean, covi) == d2)
"""<line_sep><return>np.sqrt(np.sum(np.dot((X-mean) covi)<times>(X-mean) axis=1))<block_end> |
# ==================================================================================================
# Copyright 2014 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
<import_from_stmt>collections defaultdict<import_stmt>tokenize<import_stmt>sys<import_from_stmt>..common CheckstylePlugin<class_stmt>TrailingWhitespace(CheckstylePlugin)<block_start>"""Warn on invalid trailing whitespace."""<line_sep>@classmethod<def_stmt>build_exception_map cls tokens<block_start>"""Generates a set of ranges where we accept trailing slashes, specifically within comments
and strings.
"""<line_sep>exception_ranges=defaultdict(list)<for_stmt>token tokens<block_start>token_type,_,token_start,token_end=token[0:4]<if_stmt>token_type<in>(tokenize.COMMENT tokenize.STRING)<block_start><if_stmt>token_start[0]<eq>token_end[0]<block_start>exception_ranges[token_start[0]].append((token_start[1] token_end[1]))<block_end><else_stmt><block_start>exception_ranges[token_start[0]].append((token_start[1] sys.maxint))<for_stmt>line range(token_start[0]+1 token_end[0])<block_start>exception_ranges[line].append((0 sys.maxint))<block_end>exception_ranges[token_end[0]].append((0 token_end[1]))<block_end><block_end><block_end><return>exception_ranges<block_end><def_stmt>__init__ self *args **kw<block_start>super(TrailingWhitespace self).__init__(*args **kw)<line_sep>self._exception_map=self.build_exception_map(self.python_file.tokens)<block_end><def_stmt>has_exception self line_number exception_start exception_end=<none><block_start>exception_end=exception_end<or>exception_start<for_stmt>start,end self._exception_map.get(line_number ())<block_start><if_stmt>start<le>exception_start<and>exception_end<le>end<block_start><return><true><block_end><block_end><return><false><block_end><def_stmt>nits self<block_start><for_stmt>line_number,line self.python_file.enumerate()<block_start>stripped_line=line.rstrip()<if_stmt>stripped_line<ne>line<and><not>self.has_exception(line_number len(stripped_line) len(line))<block_start><yield>self.error('T200' 'Line has trailing whitespace.' line_number)<block_end><if_stmt>line.rstrip().endswith('\\')<block_start><if_stmt><not>self.has_exception(line_number len(line.rstrip())-1)<block_start><yield>self.error('T201' 'Line has trailing slashes.' line_number)<block_end><block_end><block_end><block_end><block_end> |
<import_stmt>numpy<as>np<import_from_stmt>copy deepcopy<import_from_stmt>pathlib Path<import_from_stmt>._backend WARN NOTE TF_KERAS Layer<try_stmt><block_start><import_stmt>tensorflow<as>tf<block_end><except_stmt><block_start><pass><block_end># handled in __init__ via _backend.py
TF24plus=bool(float(tf.__version__[:3])<g>2.3)<def_stmt>_kw_from_configs configs defaults<block_start><def_stmt>_fill_absent_defaults kw defaults# override `defaults`, but keep those not in `configs`
<block_start><for_stmt>name,_dict defaults.items()<block_start><if_stmt>name<not><in>kw<block_start>kw[name]=_dict<block_end><else_stmt><block_start><for_stmt>k,v _dict.items()<block_start><if_stmt>k<not><in>kw[name]<block_start>kw[name][k]=v<block_end><block_end><block_end><block_end><return>kw<block_end>configs=configs<or>{}<line_sep>configs=deepcopy(configs)# ensure external dict unchanged
<for_stmt>key configs<block_start><if_stmt>key<not><in>defaults<block_start><raise>ValueError(f"unexpected `configs` key: {key}; "<concat>"supported are: %s"%', '.join(list(defaults)))<block_end><block_end>kw=deepcopy(configs)# ensure external dict unchanged
# override `defaults`, but keep those not in `configs`
kw=_fill_absent_defaults(configs defaults)<line_sep><return>kw<block_end><def_stmt>_validate_args _id layer=<none><block_start><def_stmt>_ensure_list _id layer# if None, leave as-is
<block_start>_ids,layer=[[x]<if><not>isinstance(x (list type(<none>)))<else>x<for>x (_id layer)]<line_sep># ensure external lists unaffected
_ids,layer=[x.copy()<if>isinstance(x list)<else>x<for>x (_ids layer)]<line_sep><return>_ids layer<block_end><def_stmt>_ids_to_names_and_idxs _ids<block_start>names,idxs=[] []<for_stmt>_id _ids<block_start><if_stmt><not>isinstance(_id (str int tuple))<block_start>tp=type(_id).__name__<line_sep><raise>ValueError("unsupported _id list element type: %s"%tp+"; supported are: str, int, tuple")<block_end><if_stmt>isinstance(_id str)<block_start>names.append(_id)<block_end><else_stmt><block_start><if_stmt>isinstance(_id int)<block_start>idxs.append(_id)<block_end><else_stmt><block_start><assert_stmt>all(isinstance(x int)<for>x _id)<line_sep>idxs.append(_id)<block_end><block_end><block_end><return>names<or><none> idxs<or><none><block_end><def_stmt>_one_requested _ids layer<block_start><return>len(layer<or>_ids)<eq>1<block_end># give `layer` precedence
<if_stmt>_id<and>layer<block_start>print(WARN "`layer` will override `_id`")<block_end>_ids,layer=_ensure_list(_id layer)<if_stmt>_ids<is><none><block_start>names,idxs=<none> <none><block_end><else_stmt><block_start>names,idxs=_ids_to_names_and_idxs(_ids)<block_end><return>names idxs layer _one_requested(_ids layer)<block_end><def_stmt>_process_rnn_args model _id layer input_data labels mode data=<none> norm=<none><block_start>"""Helper method to validate `input_data` & `labels` dims, layer info args,
`mode` arg, and fetch various pertinent RNN attributes.
"""<import_from_stmt>.inspect_gen get_layer get_gradients<import_from_stmt>.inspect_rnn get_rnn_weights<def_stmt>_validate_args_ _id layer input_data labels mode norm data<block_start>_validate_args(_id layer)<if_stmt>data<is><not><none><block_start>got_inputs=(input_data<is><not><none>)<or>(labels<is><not><none>)<if_stmt>got_inputs<block_start>print(NOTE "`data` will override `input_data`, `labels`, "<concat>"and `mode`")<block_end><if_stmt><not>isinstance(data list)<block_start><raise>Exception("`data` must be a list of kernel & gate matrices")<block_end><if_stmt><not>(isinstance(data[0] np.ndarray)<or>isinstance(data[0] list))<block_start><raise>Exception("`data` list elements must be numpy arrays "+"or lists")<block_end><elif_stmt>isinstance(data[0] list)<block_start><if_stmt><not>isinstance(data[0][0] np.ndarray)<block_start><raise>Exception("`data` list elements' elements must be "+"numpy arrays")<block_end><block_end><block_end><if_stmt>mode<not><in>['weights' 'grads']<block_start><raise>Exception("`mode` must be one of: 'weights', 'grads'")<block_end><if_stmt>mode<eq>'grads'<and>(input_data<is><none><or>labels<is><none>)<block_start><raise>Exception("must supply input_data and labels for mode=='grads'")<block_end><if_stmt>mode<eq>'weights'<and>(input_data<is><not><none><or>labels<is><not><none>)<block_start>print(NOTE "`input_data` and `labels will` be ignored for "<concat>"`mode`=='weights'")<block_end>is_iter=(isinstance(norm list)<or>isinstance(norm tuple)<or>isinstance(norm np.ndarray))<line_sep>is_iter_len2=is_iter<and>len(norm)<eq>2<if_stmt>(norm<is><not><none>)<and>(norm<ne>'auto')<and><not>is_iter_len2<block_start><raise>Exception("`norm` must be None, 'auto' or iterable ( "+"list, tuple, np.ndarray) of length 2")<block_end><block_end>_validate_args_(_id layer input_data labels mode norm data)<if_stmt>layer<is><none><block_start>layer=get_layer(model _id)<block_end>rnn_type=_validate_rnn_type(layer return_value=<true>)<line_sep>gate_names=_rnn_gate_names(rnn_type)<line_sep>n_gates=len(gate_names)<line_sep>is_bidir=hasattr(layer 'backward_layer')<line_sep>rnn_dim=layer.layer.units<if>is_bidir<else>layer.units<line_sep>direction_names=['FORWARD' 'BACKWARD']<if>is_bidir<else>[[]]<if_stmt>'CuDNN'<in>rnn_type<block_start>uses_bias=<true><block_end><else_stmt><block_start>uses_bias=layer.layer.use_bias<if>is_bidir<else>layer.use_bias<block_end><if_stmt>data<is><none><block_start><if_stmt>mode<eq>'weights'<block_start>data=get_rnn_weights(model _id as_tensors=<false> concat_gates=<true>)<block_end><else_stmt><block_start>data=get_gradients(model <none> input_data labels layer=layer mode='weights')<block_end><block_end>rnn_info=dict(rnn_type=rnn_type gate_names=gate_names n_gates=n_gates is_bidir=is_bidir rnn_dim=rnn_dim uses_bias=uses_bias direction_names=direction_names)<line_sep><return>data rnn_info<block_end><def_stmt>_validate_rnn_type rnn_layer return_value=<false><block_start><if_stmt>hasattr(rnn_layer 'backward_layer')<block_start>rnn_type=type(rnn_layer.layer).__name__<block_end><else_stmt><block_start>rnn_type=type(rnn_layer).__name__<block_end>supported_rnns=['LSTM' 'GRU' 'CuDNNLSTM' 'CuDNNGRU' 'SimpleRNN' 'IndRNN']<if_stmt>rnn_type<not><in>supported_rnns<block_start><raise>Exception("unsupported RNN type `%s` - must be one of: %s"%(rnn_type ', '.join(supported_rnns)))<block_end><if_stmt>return_value<block_start><return>rnn_type<block_end><block_end><def_stmt>_rnn_gate_names rnn_type<block_start><return>{'LSTM':['INPUT' 'FORGET' 'CELL' 'OUTPUT'] 'GRU':['UPDATE' 'RESET' 
'NEW'] 'CuDNNLSTM':['INPUT' 'FORGET' 'CELL' 'OUTPUT'] 'CuDNNGRU':['UPDATE' 'RESET' 'NEW'] 'SimpleRNN':[''] 'IndRNN':[''] }[rnn_type]<block_end><def_stmt>_filter_duplicates_by_keys keys *data<block_start><def_stmt>_second_index ls k<block_start><return>[i<for>i,x enumerate(ls)<if>x<eq>k][1]<block_end>collected=[]<for_stmt>k keys<block_start><if_stmt>k<in>collected<block_start><for_stmt>i range(len(data))<block_start>data[i].pop(_second_index(keys k))<block_end>keys.pop(keys.index(k))<block_end>collected.append(k)<block_end><if_stmt>isinstance(data tuple)<and>len(data)<eq>1<block_start>data=data[0]<block_end><return>keys data<block_end><def_stmt>_save_rnn_fig figs savepath kwargs<block_start><if_stmt>len(figs)<eq>1<block_start>figs[0].savefig(savepath)<line_sep><return><block_end>_dir=str(Path(savepath).parent)<line_sep>ext=Path(savepath).suffix<line_sep>basename=Path(savepath).stem<line_sep>names=[basename+'_0' basename+'_1']<for_stmt>fig,name zip(figs names)<block_start>fig.savefig(Path(_dir).joinpath(name ext) **kwargs)<block_end><block_end><def_stmt>_layer_of_output output<block_start>h=output._keras_history<if_stmt>isinstance(h tuple)<block_start><for_stmt>x h<block_start><if_stmt>isinstance(x Layer)<block_start><return>x<block_end><block_end><block_end><return>h.layer<block_end><def_stmt>clipnums nums<block_start><if_stmt><not>isinstance(nums (list tuple))<block_start>nums=[nums]<block_end>clipped=[]<for_stmt>num nums<block_start><if_stmt>isinstance(num int)<or>(isinstance(num float)<and>num.is_integer())<block_start>clipped.append(str(int(num)))<block_end><elif_stmt>abs(num)<g>1e-3<and>abs(num)<l>1e3<block_start>clipped.append("%.3f"%num)<block_end><else_stmt><block_start>clipped.append(("%.2e"%num).replace("+0" "+").replace("-0" "-"))<block_end><block_end><return>clipped<if>len(clipped)<g>1<else>clipped[0]<block_end><def_stmt>_get_params model layers=<none> params=<none> mode='outputs' verbose=1<block_start><def_stmt>_validate_args layers params mode<block_start>got_both=(layers<is><not><none><and>params<is><not><none>)<line_sep>got_neither=(layers<is><none><and>params<is><none>)<if_stmt>got_both<or>got_neither<block_start><raise>ValueError("one (and only one) of `layers` or `params` "<concat>"must be supplied")<block_end><if_stmt>mode<not><in>('outputs' 'weights')<block_start><raise>ValueError("`mode` must be one of: 'outputs', 'weights'")<block_end><if_stmt>layers<is><not><none><and><not>isinstance(layers list)<block_start>layers=[layers]<block_end><if_stmt>params<is><not><none><and><not>isinstance(params list)<block_start>params=[params]<block_end><return>layers params<block_end><def_stmt>_filter_params params verbose<block_start><def_stmt>_to_omit p<block_start><if_stmt>isinstance(p tf.Variable)# param is layer weight
<block_start><return><false><block_end><elif_stmt>tf.is_tensor(p)# param is layer output
<block_start>layer=_layer_of_output(p)<if_stmt>(TF_KERAS<or>tf.__version__[0]<eq>'2')<and>hasattr(layer 'activation')# these activations don't have gradients defined (or ==0),
# and tf.keras doesn't re-route output gradients
# to the pre-activation weights transform
<block_start>value=getattr(layer.activation '__name__' '').lower()<in>('softmax' )<if_stmt>value<and>verbose<block_start>print(WARN ("{} has {} activation, which has a None "<concat>"gradient in tf.keras; will skip".format(layer layer.activation.__name__)))<block_end><return>value<block_end><elif_stmt>'Input'<in>getattr(layer.__class__ '__name__')# omit input layer(s)
<block_start><if_stmt>verbose<block_start>print(WARN layer "is an Input layer; getting input "<concat>"gradients is unsupported - will skip")<block_end><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end><else_stmt><block_start><raise>ValueError(("unsupported param type: {} ({}); must be"<concat>"tf.Variable or tf.Tensor".format(type(p) p)))<block_end><block_end>_params=[]<for_stmt>p params<block_start><if_stmt><not>_to_omit(p)<block_start>_params.append(p)<block_end><block_end><return>_params<block_end># run check even if `params` is not None to couple `_get_params` with
# `_validate_args` for other methods
layers,params=_validate_args(layers params mode)<if_stmt><not>params<block_start><if_stmt>mode<eq>'outputs'<block_start>params=[l.output<for>l layers]<block_end><else_stmt><block_start>params=[w<for>l layers<for>w l.trainable_weights]<block_end><block_end>params=_filter_params(params verbose)<line_sep><return>params<block_end><def_stmt>is_tensor x<block_start><return>(tf.is_tensor(x)<if>TF24plus<else>isinstance(x tf.Tensor))<block_end> |
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>setuptools setup find_packages<import_from_stmt>distutils.core Extension<import_stmt>glob<import_stmt>numpy<import_stmt>os<import_stmt>re<line_sep>here=os.path.abspath(os.path.dirname(__file__))<line_sep>README=open(os.path.join(here 'README.rst')).read()<line_sep>NEWS=open(os.path.join(here 'NEWS.txt')).read()<def_stmt>read_package_version <block_start>version_file='pyfora/_version.py'<with_stmt>open(version_file 'rt')<as>version_file<block_start>version_line=version_file.read()<block_end>match=re.search(r"^__version__ = ['\"]([^'\"]*)['\"]" version_line re.M)<if_stmt>match<block_start><return>match.group(1)<block_end><raise>RuntimeError("Can't read version string from '%s'."%(version_file ))<block_end>version=read_package_version()<line_sep>install_requires=['futures' 'socketIO-client>=0.6.5' 'numpy' 'wsaccel' 'websocket-client==0.37.0']<line_sep>ext_modules=[]<line_sep>extra_compile_args=['-std=c++11']<line_sep>pythonObjectRehydratorModule=Extension('pyfora.PythonObjectRehydrator' language='c++' extra_compile_args=extra_compile_args sources=['pyfora/src/pythonObjectRehydratorModule.cpp' 'pyfora/src/BinaryObjectRegistry.cpp' 'pyfora/src/StringBuilder.cpp' 'pyfora/src/PureImplementationMappings.cpp' 'pyfora/src/PyObjectUtils.cpp' 'pyfora/src/ObjectRegistry.cpp' 'pyfora/src/IRToPythonConverter.cpp' 'pyfora/src/NamedSingletons.cpp' 'pyfora/src/BinaryObjectRegistryHelpers.cpp' 'pyfora/src/FreeVariableMemberAccessChain.cpp' 'pyfora/src/Json.cpp' 'pyfora/src/PyAbortSingletons.cpp' 'pyfora/src/ModuleLevelObjectIndex.cpp' 'pyfora/src/ScopedPyThreads.cpp' 'pyfora/src/PythonObjectRehydrator.cpp']+glob.glob('pyfora/src/TypeDescriptions/*.cpp')+glob.glob('pyfora/src/serialization/*.cpp') include_dirs=[numpy.get_include()])<line_sep>ext_modules.append(pythonObjectRehydratorModule)<line_sep>stringbuildermodule=Extension('pyfora.StringBuilder' language='c++' extra_compile_args=['-std=c++11'] sources=['pyfora/src/StringBuilder.cpp' 'pyfora/src/stringbuildermodule.cpp'])<line_sep>ext_modules.append(stringbuildermodule)<line_sep>binaryObjectRegistryModule=Extension('pyfora.BinaryObjectRegistry' language='c++' extra_compile_args=extra_compile_args sources=['pyfora/src/BinaryObjectRegistry.cpp' 'pyfora/src/PyObjectWalker.cpp' 'pyfora/src/PureImplementationMappings.cpp' 'pyfora/src/binaryobjectregistrymodule.cpp' 'pyfora/src/StringBuilder.cpp' 'pyfora/src/FileDescription.cpp' 'pyfora/src/PyObjectUtils.cpp' 'pyfora/src/Exceptions.cpp' 'pyfora/src/PyAstUtil.cpp' 'pyfora/src/FreeVariableMemberAccessChain.cpp' 'pyfora/src/PyAstFreeVariableAnalyses.cpp' 'pyfora/src/PyforaInspect.cpp' 'pyfora/src/FreeVariableResolver.cpp' 'pyfora/src/Ast.cpp' 'pyfora/src/UnresolvedFreeVariableExceptions.cpp' 'pyfora/src/BinaryObjectRegistryHelpers.cpp' 'pyfora/src/Json.cpp' 'pyfora/src/ModuleLevelObjectIndex.cpp'])<line_sep>ext_modules.append(binaryObjectRegistryModule)<line_sep>pyObjectWalkerModule=Extension('pyfora.PyObjectWalker' language='c++' extra_compile_args=extra_compile_args sources=['pyfora/src/pyobjectwalkermodule.cpp' 'pyfora/src/PyObjectWalker.cpp' 'pyfora/src/PureImplementationMappings.cpp' 'pyfora/src/BinaryObjectRegistry.cpp' 'pyfora/src/FileDescription.cpp' 'pyfora/src/StringBuilder.cpp' 'pyfora/src/PyObjectUtils.cpp' 'pyfora/src/FreeVariableResolver.cpp' 'pyfora/src/Exceptions.cpp' 'pyfora/src/PyAstUtil.cpp' 'pyfora/src/FreeVariableMemberAccessChain.cpp' 'pyfora/src/PyAstFreeVariableAnalyses.cpp' 'pyfora/src/PyforaInspect.cpp' 'pyfora/src/Ast.cpp' 
'pyfora/src/UnresolvedFreeVariableExceptions.cpp' 'pyfora/src/BinaryObjectRegistryHelpers.cpp' 'pyfora/src/Json.cpp' 'pyfora/src/ModuleLevelObjectIndex.cpp'])<line_sep>ext_modules.append(pyObjectWalkerModule)<line_sep>setup(name='pyfora' version=version description="A library for parallel execution of Python code in the Ufora runtime" long_description=README+'\n\n'+NEWS classifiers=[# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 3 - Alpha' 'Environment :: Console' 'Intended Audience :: Science/Research' 'License :: OSI Approved :: Apache Software License' 'Operating System :: OS Independent' 'Programming Language :: Python :: 2.7' 'Topic :: Scientific/Engineering'] keywords='ufora fora parallel remote data-science machine-learning' author='<NAME>.' author_email='<EMAIL>' url='http://www.ufora.com/' license='Apache' packages=find_packages('.') package_dir={'':'.'} package_data={'':['*.txt' '*.rst'] 'pyfora':['fora/**/*.fora']} zip_safe=<false> install_requires=install_requires entry_points={'console_scripts':['pyfora_aws=pyfora.aws.pyfora_aws:main']} ext_modules=ext_modules)<line_sep> |
<import_stmt>sys<line_sep>sys.path.append("../../")<import_from_stmt>numpy sin pi arange<import_from_stmt>appJar gui<import_from_stmt>matplotlib.backends.backend_tkagg NavigationToolbar2TkAgg<as>addToolbar<import_stmt>random<import_from_stmt>mpl_toolkits.mplot3d Axes3D<with_stmt>gui()<as>app<block_start>fig=app.addPlotFig("p1" showNav=<true>)<line_sep>ax=fig.add_subplot(111 projection='3d')<line_sep>ax.scatter([1 2] [1 2] [1 2])<block_end> |
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_stmt>SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi<as>digiparam<import_from_stmt>L1Trigger.L1THGCal.hgcalConcentratorProducer_cfi threshold_conc_proc best_conc_proc supertc_conc_proc coarsetc_onebitfraction_proc coarsetc_equalshare_proc bestchoice_ndata_decentralized custom_conc_proc autoEncoder_conc_proc<def_stmt>custom_triggercellselect_supertriggercell process stcSize=supertc_conc_proc.stcSize type_energy_division=supertc_conc_proc.type_energy_division fixedDataSizePerHGCROC=supertc_conc_proc.fixedDataSizePerHGCROC<block_start>parameters=supertc_conc_proc.clone(stcSize=stcSize type_energy_division=type_energy_division fixedDataSizePerHGCROC=fixedDataSizePerHGCROC)<line_sep>process.hgcalConcentratorProducer.ProcessorParameters=parameters<line_sep><return>process<block_end><def_stmt>custom_triggercellselect_threshold process threshold_silicon=threshold_conc_proc.threshold_silicon # in mipT
threshold_scintillator=threshold_conc_proc.threshold_scintillator # in mipT
coarsenTriggerCells=threshold_conc_proc.coarsenTriggerCells<block_start>parameters=threshold_conc_proc.clone(threshold_silicon=threshold_silicon threshold_scintillator=threshold_scintillator coarsenTriggerCells=coarsenTriggerCells)<line_sep>process.hgcalConcentratorProducer.ProcessorParameters=parameters<line_sep><return>process<block_end><def_stmt>custom_triggercellselect_bestchoice process triggercells=best_conc_proc.NData<block_start>parameters=best_conc_proc.clone(NData=triggercells)<line_sep>process.hgcalConcentratorProducer.ProcessorParameters=parameters<line_sep><return>process<block_end><def_stmt>custom_triggercellselect_bestchoice_decentralized process<block_start><return>custom_triggercellselect_bestchoice(process triggercells=bestchoice_ndata_decentralized)<block_end><def_stmt>custom_coarsetc_onebitfraction process stcSize=coarsetc_onebitfraction_proc.stcSize fixedDataSizePerHGCROC=coarsetc_onebitfraction_proc.fixedDataSizePerHGCROC oneBitFractionThreshold=coarsetc_onebitfraction_proc.oneBitFractionThreshold oneBitFractionLowValue=coarsetc_onebitfraction_proc.oneBitFractionLowValue oneBitFractionHighValue=coarsetc_onebitfraction_proc.oneBitFractionHighValue <block_start>parameters=coarsetc_onebitfraction_proc.clone(stcSize=stcSize fixedDataSizePerHGCROC=fixedDataSizePerHGCROC oneBitFractionThreshold=oneBitFractionThreshold oneBitFractionLowValue=oneBitFractionLowValue oneBitFractionHighValue=oneBitFractionHighValue )<line_sep>process.hgcalConcentratorProducer.ProcessorParameters=parameters<line_sep><return>process<block_end><def_stmt>custom_coarsetc_equalshare process stcSize=coarsetc_equalshare_proc.stcSize fixedDataSizePerHGCROC=coarsetc_equalshare_proc.fixedDataSizePerHGCROC <block_start>parameters=coarsetc_equalshare_proc.clone(stcSize=stcSize fixedDataSizePerHGCROC=fixedDataSizePerHGCROC )<line_sep>process.hgcalConcentratorProducer.ProcessorParameters=parameters<line_sep><return>process<block_end><def_stmt>custom_triggercellselect_mixedBestChoiceSuperTriggerCell process stcSize=custom_conc_proc.stcSize type_energy_division=custom_conc_proc.type_energy_division fixedDataSizePerHGCROC=custom_conc_proc.fixedDataSizePerHGCROC triggercells=custom_conc_proc.NData<block_start>parameters=custom_conc_proc.clone(stcSize=stcSize type_energy_division=type_energy_division fixedDataSizePerHGCROC=fixedDataSizePerHGCROC NData=triggercells Method=cms.vstring('bestChoiceSelect' 'superTriggerCellSelect' 'superTriggerCellSelect') )<line_sep>process.hgcalConcentratorProducer.ProcessorParameters=parameters<line_sep><return>process<block_end><def_stmt>custom_triggercellselect_mixedBestChoiceSuperTriggerCell_decentralized process<block_start><return>custom_triggercellselect_mixedBestChoiceSuperTriggerCell(process triggercells=bestchoice_ndata_decentralized)<block_end><def_stmt>custom_triggercellselect_autoencoder process cellRemap=autoEncoder_conc_proc.cellRemap nBitsPerInput=autoEncoder_conc_proc.nBitsPerInput maxBitsPerOutput=autoEncoder_conc_proc.maxBitsPerOutput bitsPerLink=autoEncoder_conc_proc.bitsPerLink modelFiles=autoEncoder_conc_proc.modelFiles linkToGraphMap=autoEncoder_conc_proc.linkToGraphMap zeroSuppresionThreshold=autoEncoder_conc_proc.zeroSuppresionThreshold saveEncodedValues=autoEncoder_conc_proc.saveEncodedValues preserveModuleSum=autoEncoder_conc_proc.preserveModuleSum scintillatorMethod='thresholdSelect' <block_start>parameters=autoEncoder_conc_proc.clone(cellRemap=cellRemap nBitsPerInput=nBitsPerInput maxBitsPerOutput=maxBitsPerOutput bitsPerLink=bitsPerLink 
modelFiles=modelFiles linkToGraphMap=linkToGraphMap zeroSuppresionThreshold=zeroSuppresionThreshold saveEncodedValues=saveEncodedValues preserveModuleSum=preserveModuleSum Method=cms.vstring(['autoEncoder' 'autoEncoder' scintillatorMethod]) )<line_sep>process.hgcalConcentratorProducer.ProcessorParameters=parameters<line_sep><return>process<block_end> |
<import_stmt>numpy<as>np<import_from_stmt>deepchem.utils.typing RDKitMol<import_from_stmt>deepchem.feat.base_classes MolecularFeaturizer<class_stmt>MACCSKeysFingerprint(MolecularFeaturizer)<block_start>"""MACCS Keys Fingerprint.
The MACCS (Molecular ACCess System) keys are one of the most commonly used structural keys.
Please confirm the details in [1]_, [2]_.
Examples
--------
>>> import deepchem as dc
>>> smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O'
>>> featurizer = dc.feat.MACCSKeysFingerprint()
>>> features = featurizer.featurize([smiles])
>>> type(features[0])
<class 'numpy.ndarray'>
>>> features[0].shape
(167,)
References
----------
.. [1] <NAME>., et al. "Reoptimization of MDL keys for use in drug discovery."
Journal of chemical information and computer sciences 42.6 (2002): 1273-1280.
.. [2] https://github.com/rdkit/rdkit/blob/master/rdkit/Chem/MACCSkeys.py
Note
----
This class requires RDKit to be installed.
"""<def_stmt>__init__ self<block_start>"""Initialize this featurizer."""<line_sep>self.calculator=<none><block_end><def_stmt>_featurize self datapoint:RDKitMol **kwargs<arrow>np.ndarray<block_start>"""
Calculate MACCS keys fingerprint.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
1D array of RDKit descriptors for `mol`. The length is 167.
"""<if_stmt>'mol'<in>kwargs<block_start>datapoint=kwargs.get("mol")<line_sep><raise>DeprecationWarning('Mol is being phased out as a parameter, please pass "datapoint" instead.')<block_end><if_stmt>self.calculator<is><none><block_start><try_stmt><block_start><import_from_stmt>rdkit.Chem.AllChem GetMACCSKeysFingerprint<line_sep>self.calculator=GetMACCSKeysFingerprint<block_end><except_stmt>ModuleNotFoundError<block_start><raise>ImportError("This class requires RDKit to be installed.")<block_end><block_end><return>self.calculator(datapoint)<block_end><block_end> |
<import_stmt>os<import_stmt>typing<import_from_stmt>sqlalchemy.orm Session<import_stmt>const<import_from_stmt>database models<import_from_stmt>database.database SessionLocal<import_from_stmt>db.api_key add_initial_api_key_for_admin<import_from_stmt>db.wireguard server_add_on_init<import_from_stmt>script.wireguard is_installed start_interface is_running load_environment_clients<def_stmt>setup_on_start <block_start>_db:Session=SessionLocal()<line_sep>servers:typing.List[models.WGServer]=_db.query(models.WGServer).all()<for_stmt>s servers<block_start><try_stmt><block_start>last_state=s.is_running<if_stmt>is_installed()<and>last_state<and>is_running(s)<block_start>start_interface(s)<block_end><block_end><except_stmt>Exception<as>e<block_start>print(e)<block_end><block_end><if_stmt>const.CLIENT<block_start>load_environment_clients(_db)<block_end><if_stmt>const.SERVER_INIT_INTERFACE<is><not><none><block_start>server_add_on_init(_db)<block_end><if_stmt>const.SERVER_STARTUP_API_KEY<is><not><none><block_start>ADMIN_USERNAME=os.getenv("ADMIN_USERNAME")<line_sep>add_initial_api_key_for_admin(_db const.SERVER_STARTUP_API_KEY ADMIN_USERNAME)<block_end>_db.close()<block_end> |
"""
This module takes an adapter as data supplier, pack data and provide data for data iterators
"""<class_stmt>ProviderBaseclass(object)<block_start>"""
This is the baseclass of packer. Any other detailed packer must inherit this class.
"""<def_stmt>__init__ self<block_start><pass><block_end><def_stmt>__str__ self<block_start><return>self.__class__.__name__<block_end><def_stmt>__del__ self<block_start><pass><block_end><def_stmt>write self<block_start>"""
Write a single sample to the files
:return:
"""<line_sep><raise>NotImplementedError()<block_end><def_stmt>read_by_index self index<block_start>"""
Read a single sample
:return:
"""<line_sep><raise>NotImplementedError()<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>provider=ProviderBaseclass()<line_sep>print(provider)<block_end> |
<import_stmt>sys<import_stmt>requests<import_from_stmt>urllib.parse urljoin<line_sep>JFROG_API_KEY_HEADER_NAME='X-JFrog-Art-Api'<class_stmt>DockerRegistryPagination<block_start><def_stmt>__init__ self concatenating_key<block_start>self.concatenating_key=concatenating_key<block_end><def_stmt>__call__ self url *args **kwargs<block_start>response=requests.get(url *args **kwargs)<line_sep>response.raise_for_status()<line_sep>concatenated_list=response.json().get(self.concatenating_key [])<while_stmt>'next'<in>response.links.keys()<block_start>url=urljoin(url response.links['next']['url'])<line_sep>response=requests.get(url *args **kwargs)<line_sep>response.raise_for_status()<line_sep>concatenated_list.extend(response.json().get(self.concatenating_key []))<block_end><return>concatenated_list<block_end><block_end><class_stmt>ArtifactoryIntegrationLogic<block_start><def_stmt>__init__ self base_url api_key default_repo=<none> username=<none><block_start>self.username=username<line_sep>self.base_url=base_url<if_stmt><not>self.base_url.startswith('https://')<block_start>self.base_url='https://'+base_url<block_end><if_stmt>self.base_url.endswith('/')<block_start>self.base_url=self.base_url[:-1]<block_end>self.api_key=api_key<line_sep>self.default_repo=default_repo<block_end><def_stmt>get_artifactory_headers self<block_start><return>{JFROG_API_KEY_HEADER_NAME:self.api_key }<block_end><def_stmt>_get_all_repos_data self<block_start>res=requests.get(self.base_url+'/artifactory/api/repositories' headers=self.get_artifactory_headers() )<if_stmt>res.status_code<ne>200<block_start><if_stmt>res.status_code<eq>403<block_start><raise>Exception('Artifactory token is not valid or has been revoked.')<block_end><raise>Exception(f'Failed to get repositories. '<concat>f'Error: {res.text}. Code {res.status_code}')<block_end><return>res.json()<block_end><def_stmt>list_repos self search=''<block_start>all_repos_data=self._get_all_repos_data()<line_sep><return>sorted([i['key']<for>i all_repos_data<if>search.lower()<in>i['key'].lower()])<block_end><def_stmt>get_repo_type self repo_name<block_start>all_repos_data=self._get_all_repos_data()<for_stmt>i all_repos_data<block_start><if_stmt>i['key']<eq>repo_name<block_start><return>i['packageType']<block_end><block_end><raise>Exception(f'Repository {repo_name} does not exist or user does not have permissions for it.')<block_end><def_stmt>_list_docker_folders self repo search=''<block_start>request_func=DockerRegistryPagination('repositories')<try_stmt><block_start>repos=request_func(self.base_url+'/artifactory/api/docker/%s/v2/_catalog'%repo headers=self.get_artifactory_headers() )<line_sep><return>[i<for>i repos<if>search.lower()<in>i.lower()]<block_end><except_stmt>requests.exceptions.HTTPError<as>exc<block_start><raise>Exception(f'Failed to get images list using docker catalog. '<concat>f'Error: {exc.response.text}. 
Code {exc.response.status_code}')<from>exc<block_end><block_end><def_stmt>list_folders self repo=<none> search=''<block_start><if_stmt><not>repo<block_start>repo=self.default_repo<block_end><if_stmt><not>repo<block_start><raise>ValueError('Either send a repo or set the default repo for this to work.')<block_end>folders=self._list_docker_folders(repo search)<line_sep><return>sorted(folders)<block_end><def_stmt>_list_docker_images self folder repo search=''<block_start>request_func=DockerRegistryPagination('tags')<try_stmt><block_start>tags=request_func(self.base_url+'/artifactory/api/docker/%s/v2/%s/tags/list'%(repo folder) headers=self.get_artifactory_headers())<line_sep><return>[i<for>i tags<if>search.lower()<in>i.lower()]<block_end><except_stmt>requests.exceptions.HTTPError<as>exc<block_start><raise>Exception(f'Failed to get tag list using docker catalog. '<concat>f'Error: {exc.response.text}. Code {exc.response.status_code}')<from>exc<block_end><block_end><def_stmt>list_images self folder='' repo=<none> search=''<block_start><if_stmt><not>repo<block_start>repo=self.default_repo<block_end><if_stmt><not>repo<block_start><raise>ValueError('Either send a repo or set the default repo for this to work.')<block_end>images=self._list_docker_images(folder repo search)<line_sep><return>sorted(images)<block_end><block_end>rt_domain=sys.argv[1]<line_sep>api_key=sys.argv[2]<line_sep>user=sys.argv[3]<with_stmt>open("images.csv" "w")<as>outfile<block_start>rt=ArtifactoryIntegrationLogic(f"https://{rt_domain}" api_key username=user)<line_sep>repositories=rt.list_repos()<for_stmt>repository repositories<block_start>repo_type=rt.get_repo_type(repository).lower()<if_stmt>repo_type<eq>"docker"<block_start>repo_folders=rt.list_folders(repo=repository)<for_stmt>repo_folder repo_folders<block_start>folder_images=rt.list_images(repo=repository folder=repo_folder)<for_stmt>folder_image folder_images<block_start>outfile.write(f"{repository}, {repo_folder}, {folder_image}\r\n")<block_end><block_end><block_end><block_end><block_end> |
# This file is part of the Python aiocoap library project.
#
# Copyright (c) 2012-2014 <NAME> <http://sixpinetrees.blogspot.com/>,
# 2013-2014 <NAME> <<EMAIL>>
#
# aiocoap is free software, this file is published under the MIT license as
# described in the accompanying LICENSE file.
"""Confront a CoAP over TCP server with a client that speaks so bad protocol it
is easier to mock with sending byte sequences than with aiocoap"""<import_stmt>asyncio<import_stmt>unittest<import_stmt>aiocoap<import_from_stmt>.test_server WithTestServer precise_warnings no_warnings asynctest<import_from_stmt>.common tcp_disabled<line_sep>@unittest.skipIf(tcp_disabled "TCP disabled in environment")<class_stmt>TestNoncoapTCPClient(WithTestServer)<block_start><def_stmt>setUp self<block_start>super().setUp()<line_sep>self.mock_r,self.mock_w=self.loop.run_until_complete(asyncio.open_connection(self.serveraddress aiocoap.COAP_PORT))<block_end><def_stmt>tearDown self<block_start>self.mock_w.close()<line_sep>super().tearDown()<block_end>@staticmethod<def_stmt>_read_as_messages encoded:bytes<block_start>"""Process the encoded data into CoAP-over-TCP messages, return them as
a list and trailing (unrecognized / incomplete) data."""<line_sep>messages=[]<while_stmt><true><block_start>size=aiocoap.transports.tcp._extract_message_size(encoded)<if_stmt>size<is><not><none><block_start>size=sum(size)<block_end><if_stmt>size<is><none><or>size<g>len(encoded)<block_start><return>messages encoded<block_end>messages.append(aiocoap.transports.tcp._decode_message(encoded[:size]))<line_sep>encoded=encoded[size:]<block_end><block_end><async_keyword><def_stmt>should_abort_early self request:bytes<block_start>"""Send request bytes, expect that the server closes the connection
after having sent possibly a CSM and an abort"""<line_sep>self.mock_w.write(request)<line_sep>r=<await>self.mock_r.read()# timing out would be a typical failure case here too
parsed,trail=self._read_as_messages(r)<line_sep>self.assertEqual(trail b"" "Leftover data after closing message")<if_stmt>parsed[0].code<eq>aiocoap.CSM# don't discard the CSM unconditionallly: the server might have
# read the request data before sending its own initial CSM.
<block_start>parsed.pop(0)<block_end>self.assertEqual(len(parsed) 1 "Not exactly one (presumably abort) message received")<line_sep>self.assertEqual(parsed[0].code aiocoap.ABORT "Received message is not an abort message")<block_end><async_keyword><def_stmt>should_idle self request:bytes timeout=0.1<block_start>"""Send request bytes, expect that the server sends CSM and does not
close the connection, awaiting more from the client.
Returns all messages received until the timeout."""<line_sep>self.mock_w.write(request)<line_sep>triggered_eof=<false><async_keyword><def_stmt>kill_read <block_start>"""After a timeout, synthesize an end-of-file condition into the
reader, hoping this doesn't beak too much."""<line_sep><nonlocal>triggered_eof<line_sep><await>asyncio.sleep(timeout)<line_sep>triggered_eof=<true><line_sep>self.mock_r.feed_eof()<block_end>self.loop.create_task(kill_read())<line_sep>r=<await>self.mock_r.read()# timing out would be a typical failure case here too
self.assertEqual(triggered_eof <true> "Server closed connection prematurely")<line_sep>parsed,trail=self._read_as_messages(r)<line_sep># if this happens, the server is either sending garbage (announcing
# something long and not following up), or the timeout should be
# increased
self.assertEqual(trail b"" "Leftover data after reading timeout")<if_stmt>parsed[0].code<eq>aiocoap.CSM# don't discard the CSM unconditionallly: the server might have
# read the request data before sending its own initial CSM.
<block_start>parsed.pop(0)<block_end><return>parsed<block_end><async_keyword><def_stmt>should_idle_quietly self request:bytes timeout=0.1<block_start>"""should_idle, but assert that no messages were returned"""<line_sep>messages=<await>self.should_idle(request timeout)<line_sep># it's not a per-spec wrong thing to do, but highly unusual
self.assertEqual(messages [] "Server sent messages on its own")<block_end>@precise_warnings(["Aborting connection: Failed to parse message"])@asynctest<async_keyword><def_stmt>test_http_get self<block_start><await>self.should_abort_early(b'GET /.well-known/core HTTP/1.0')<block_end>@precise_warnings(["Aborting connection: No CSM received"])@asynctest<async_keyword><def_stmt>test_early_get self<block_start><await>self.should_abort_early(b'\0\x01')<block_end>@no_warnings@asynctest<async_keyword><def_stmt>test_incomplete_small self<block_start><await>self.should_idle_quietly(b'\0')<block_end>@no_warnings@asynctest<async_keyword><def_stmt>test_incomplete_large1 self# announcing but not sending 1 bytes extlen
<block_start><await>self.should_idle_quietly(b'\xd0')<block_end>@no_warnings@asynctest<async_keyword><def_stmt>test_incomplete_large2 self# sending one out of four bytes extlen
# a server could in theory reject this on grounds of "no matter what
# you say next, my buffer ain't large enough"
<block_start><await>self.should_idle_quietly(b'\xf0\0')<block_end>@no_warnings@asynctest<async_keyword><def_stmt>test_incomplete_large3 self# announcing a 269 byte long message, but not even sendin the code
<block_start><await>self.should_idle_quietly(b'\xe0\0\0')<block_end>@precise_warnings(['Aborting connection: Overly large message announced'])@asynctest<async_keyword><def_stmt>test_incomplete_large4 self# announcing the longest possible message, this should excede
# everyone's max-message-size.
#
# blocking to read more would be acceptable behavior as well.
<block_start><await>self.should_abort_early(b'\xf0\xff\xff\xff\xff')<block_end>@precise_warnings(['Aborting connection: Failed to parse message'])@asynctest<async_keyword><def_stmt>test_wrong_tkl self# send an unspecified token length of 15.
# the rest of the message is an empty CSM, so if the server were to
# extrapolate from the meaning of tkl 0..8, it'd read it as OK.
<block_start><await>self.should_abort_early(b'\x0fxxxxxxxxxxxxxxx\xe1')<block_end># Fun inside the CSM
@no_warnings@asynctest<async_keyword><def_stmt>test_exotic_elective_csm_option self# send option number something-even (something-odd plus 269) as an empty option
<block_start><await>self.should_idle_quietly(b'\x30\xe1\xe0\xf1\xf1')<block_end>@precise_warnings(['Aborting connection: Option not supported'])@asynctest<async_keyword><def_stmt>test_exotic_compulsory_csm_option self# send option number something-odd (something-even plus 269) as an empty option
<block_start><await>self.should_abort_early(b'\x30\xe1\xe0\xf2\xf2')<block_end>@precise_warnings(['Aborting connection: Option not supported'])@asynctest<async_keyword><def_stmt>test_exotic_compulsory_csm_option_late self# send an empty CSM, and after that the one from compulsory_csm_option
<block_start><await>self.should_abort_early(b'\0\xe1\x30\xe1\xe0\xf2\xf2')<block_end><block_end> |
<class_stmt>CouldNotConfigureException(BaseException)<block_start><def_stmt>__str__ self<block_start><return>"Could not configure the repository."<block_end><block_end><class_stmt>NotABinaryExecutableException(BaseException)<block_start><def_stmt>__str__ self<block_start><return>"The file given is not a binary executable"<block_end><block_end><class_stmt>ParametersNotAcceptedException(BaseException)<block_start><def_stmt>__str__ self<block_start><return>"The search parameters given were not accepted by the github api"<block_end><block_end><class_stmt>NoCoverageInformation(BaseException)<block_start><def_stmt>__init__ self binary_path<block_start>self.binary_path=binary_path<block_end><def_stmt>__str__ self<block_start><return>"Could not get any coverage information for "+str(self.binary_path)<block_end><block_end> |
<import_stmt>argparse<import_from_stmt>summarizer Summarizer<def_stmt>text_summarize text **kwargs<block_start>"""
Summarize the given text. Returns the summary.
"""<line_sep>model=Summarizer()<line_sep><return>model(text **kwargs)<block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser(description='Summarize the given text')<line_sep>parser.add_argument('-t' '--text' help="Text to summarize" type=str)<line_sep>parser.add_argument('-f' '--file' help="Filename to read text from" type=str)<line_sep>parser.add_argument('-r' '--ratio' help="Given the ratio of the summarized text "<concat>"(default: 0.2)" type=float default=0.2)<line_sep>parser.add_argument('-o' '--output' help="Given the path to an output file. "<concat>"Otherwise stdout will be used" type=str default=<none>)<line_sep>args=parser.parse_args()<if_stmt><not>(args.text<or>args.file)<block_start>parser.error("Either --text or --file is required")<block_end><if_stmt>args.text<and>args.file<block_start>parser.error("The arguments --text and --file are not "<concat>"allowed together")<block_end><if_stmt>args.file<block_start><with_stmt>open(args.file 'r')<as>infile<block_start>text=infile.readlines()<line_sep>text="\n".join(text)<block_end><block_end><if_stmt>args.text<block_start>text=args.text<block_end>summary=text_summarize(text ratio=args.ratio)<if_stmt>args.output<block_start><with_stmt>open(args.output 'w')<as>outfile<block_start>outfile.write(summary)<line_sep>outfile.write("\n")<block_end><block_end><else_stmt><block_start>print(summary)<block_end><block_end> |
<import_from_future_stmt> absolute_import<import_stmt>httpretty<import_stmt>pygerduty<import_stmt>pygerduty.v2<line_sep>###################
# Version 1 Tests #
###################
@httpretty.activate<def_stmt>test_get_user_v1 <block_start>body=open('tests/fixtures/user_v1.json').read()<line_sep>httpretty.register_uri(httpretty.GET "https://contosso.pagerduty.com/api/v1/users/PIJ90N7" body=body status=200)<line_sep>p=pygerduty.PagerDuty("contosso" "password")<line_sep>user=p.users.show("PIJ90N7")<assert_stmt>user.id<eq>"PIJ90N7"<assert_stmt>user.name<eq>"<NAME>"<assert_stmt>user.role<eq>"admin"<block_end>@httpretty.activate<def_stmt>test_list_user_contact_methods_v1 <block_start>user_body=open('tests/fixtures/user_v1.json').read()<line_sep>contact_body=open('tests/fixtures/contacts_v1.json').read()<line_sep>httpretty.register_uri(httpretty.GET "https://contosso.pagerduty.com/api/v1/users/PIJ90N7" body=user_body status=200) <line_sep>httpretty.register_uri(httpretty.GET "https://contosso.pagerduty.com/api/v1/users/PIJ90N7/contact_methods" body=contact_body status=200)<line_sep>p=pygerduty.PagerDuty("contosso" "password")<line_sep>user=p.users.show("PIJ90N7")<line_sep>contact_methods=[c<for>c user.contact_methods.list()]<assert_stmt>len(contact_methods)<eq>3<assert_stmt>len([c<for>c contact_methods<if>c.type<eq>"email"])<eq>1<assert_stmt>len([c<for>c contact_methods<if>c.type<eq>"phone"])<eq>1<assert_stmt>len([c<for>c contact_methods<if>c.type<eq>"SMS"])<eq>1<block_end>###################
# Version 2 Tests #
###################
@httpretty.activate<def_stmt>test_get_user_v2 <block_start>body=open('tests/fixtures/user_v2.json').read()<line_sep>httpretty.register_uri(httpretty.GET "https://api.pagerduty.com/users/PXPGF42" body=body status=200)<line_sep>p=pygerduty.v2.PagerDuty("password")<line_sep>user=p.users.show("PXPGF42")<assert_stmt>user.id<eq>"PXPGF42"<assert_stmt>user.name<eq>"<NAME>"<assert_stmt>user.role<eq>"admin"<assert_stmt>user.self_<eq>'https://api.pagerduty.com/users/PXPGF42'<block_end>@httpretty.activate<def_stmt>test_list_user_contact_methods_v2 <block_start>user_body=open('tests/fixtures/user_v2.json').read()<line_sep>contact_body=open('tests/fixtures/contacts_v2.json').read()<line_sep>httpretty.register_uri(httpretty.GET "https://api.pagerduty.com/users/PXPGF42" body=user_body status=200)<line_sep>httpretty.register_uri(httpretty.GET "https://api.pagerduty.com/users/PXPGF42/contact_methods" body=contact_body status=200)<line_sep>p=pygerduty.v2.PagerDuty("password")<line_sep>user=p.users.show("PXPGF42")<line_sep>contact_methods=[c<for>c user.contact_methods.list()]<assert_stmt>len(contact_methods)<eq>3<assert_stmt>len([c<for>c contact_methods<if>c.type<eq>"email"])<eq>1<assert_stmt>len([c<for>c contact_methods<if>c.type<eq>"phone"])<eq>1<assert_stmt>len([c<for>c contact_methods<if>c.type<eq>"SMS"])<eq>1<assert_stmt>user.self_<eq>'https://api.pagerduty.com/users/PXPGF42'<block_end>@httpretty.activate<def_stmt>test_user_notification_rules_v2 <block_start>user_body=open('tests/fixtures/user_v2.json').read()<line_sep>notification_body=open('tests/fixtures/notification_v2.json').read()<line_sep>httpretty.register_uri(httpretty.GET "https://api.pagerduty.com/users/PXPGF42" body=user_body status=200)<line_sep>httpretty.register_uri(httpretty.GET "https://api.pagerduty.com/users/PXPGF42/notification_rules" body=notification_body status=200)<line_sep>p=pygerduty.v2.PagerDuty("password")<line_sep>user=p.users.show("PXPGF42")<line_sep>notification_rules=[n<for>n user.notification_rules.list()]<assert_stmt>len(notification_rules)<eq>1<assert_stmt>len([n<for>n notification_rules<if>n.type<eq>"assignment_notification_rule"])<eq>1<assert_stmt>user.self_<eq>"https://api.pagerduty.com/users/PXPGF42"<block_end><def_stmt>test_clean_response <block_start>mock_response={"user":{"id":"PHDGK84" "type":"user" "self":"https://api.pagerduty.com/users/PHDGK84" "name":"Snoopy" "contact_methods":[{"address":"<EMAIL>" "id":"PZMO0JF" "self":"https://api.pagerduty.com/users/PHDGK84/contact_method/PZMO0JF" "label":"Default"} {"address":"8928393498" "id":"PZMN843" "self":"https://api.pagerduty.com/users/PHDGK84/contact_method/PZMN843" "label":"Default"}] "notification_rules":[{"id":"P8WETWW" "contact_method":{"id":"PZMO0JF" "self":"https://api.pagerduty.com/users/PHDGK84/contact_method/PZMO0JF" }}]}}<line_sep>clean_response=pygerduty.common.clean_response(mock_response)<assert_stmt>clean_response<eq>{"user":{"id":"PHDGK84" "type":"user" "self_":"https://api.pagerduty.com/users/PHDGK84" "name":"Snoopy" "contact_methods":[{"address":"<EMAIL>" "id":"PZMO0JF" "self_":"https://api.pagerduty.com/users/PHDGK84/contact_method/PZMO0JF" "label":"Default"} {"address":"8928393498" "id":"PZMN843" "self_":"https://api.pagerduty.com/users/PHDGK84/contact_method/PZMN843" "label":"Default"}] "notification_rules":[{"id":"P8WETWW" "contact_method":{"id":"PZMO0JF" "self_":"https://api.pagerduty.com/users/PHDGK84/contact_method/PZMO0JF" }}]}}<block_end> |
# config.py
# encoding:utf-8
DEBUG=<true><line_sep>JSON_AS_ASCII=<false><line_sep> |
<import_from_stmt>mmcv.utils Registry<line_sep>OPTIMIZERS=Registry('optimizers')<line_sep> |
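A minimal sketch of how such a registry is typically used with mmcv; the `MyOptimizer` class and the config dict are hypothetical and not part of this module.

```python
# Sketch only: register a class into OPTIMIZERS and build it from a config dict.
import torch
from mmcv.utils import build_from_cfg


@OPTIMIZERS.register_module()
class MyOptimizer(torch.optim.SGD):
    """Custom optimizer, now buildable via dict(type='MyOptimizer', ...)."""


# optimizer = build_from_cfg(
#     dict(type='MyOptimizer', lr=0.01),
#     OPTIMIZERS,
#     default_args=dict(params=model.parameters()),
# )
```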
"""Transformers and utilities for training-related operations."""<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_stmt>sleap<import_from_stmt>sleap.nn.data.providers LabelsReader<import_from_stmt>sleap.nn.data.utils expand_to_rank ensure_list<import_stmt>attr<import_from_stmt>typing List Text Optional Any Union Dict Tuple Sequence<import_from_stmt>sklearn.model_selection train_test_split<def_stmt>split_labels_train_val labels:sleap.Labels validation_fraction:float<arrow>Tuple[sleap.Labels List[int] sleap.Labels List[int]]<block_start>"""Make a train/validation split from a labels dataset.
Args:
labels: A `sleap.Labels` dataset with labeled frames.
validation_fraction: Fraction of frames to use for validation.
Returns:
A tuple of `(labels_train, idx_train, labels_val, idx_val)`.
`labels_train` and `labels_val` are `sleap.Label` objects containing the
selected frames for each split. Their `videos`, `tracks` and `provenance`
attributes are identical to `labels` even if the split does not contain
instances with a particular video or track.
`idx_train` and `idx_val` are list indices of the labeled frames within the
input labels that were assigned to each split, i.e.:
`labels[idx_train] == labels_train[:]`
If there is only one labeled frame in `labels`, both of the labels will contain
the same frame.
If `validation_fraction` would result in fewer than one label for either split,
it will be rounded to ensure there is at least one label in each.
"""<if_stmt>len(labels)<eq>1<block_start><return>labels [0] labels [0]<block_end># Split indices.
n_val=round(len(labels)<times>validation_fraction)<line_sep>n_val=max(min(n_val len(labels)-1) 1)<line_sep>idx_train,idx_val=train_test_split(list(range(len(labels))) test_size=n_val)<line_sep># Create labels and keep original metadata.
labels_train=sleap.Labels(labels[idx_train])<line_sep>labels_train.videos=labels.videos<line_sep>labels_train.tracks=labels.tracks<line_sep>labels_train.provenance=labels.provenance<line_sep>labels_val=sleap.Labels(labels[idx_val])<line_sep>labels_val.videos=labels.videos<line_sep>labels_val.tracks=labels.tracks<line_sep>labels_val.provenance=labels.provenance<line_sep><return>labels_train idx_train labels_val idx_val<block_end><def_stmt>split_labels labels:sleap.Labels split_fractions:Sequence[float]<arrow>Tuple[sleap.Labels]<block_start>"""Split a `sleap.Labels` into multiple new ones with random subsets of the data.
Args:
labels: An instance of `sleap.Labels`.
split_fractions: One or more floats between 0 and 1 that specify the fraction of
examples that should be in each dataset. These should add up to <= 1.0.
Fractions of less than 1 element will be rounded up to ensure that there is at
least 1 element in each split. One of the fractions may be -1 to indicate
that it should contain all elements left over from the other splits.
Returns:
A tuple of new `sleap.Labels` instances of the same length as `split_fractions`.
Raises:
ValueError: If more than one split fraction is specified as -1.
ValueError: If the splits add up to more than the total available examples.
Note:
Sampling is done without replacement.
"""<line_sep># Get indices for labeled frames.
labels_indices=np.arange(len(labels)).astype("int64")<line_sep># Compute split sizes.
n_examples=len(labels_indices)<line_sep>n_examples_per_split=np.array(split_fractions).astype("float64")<if_stmt>(n_examples_per_split<eq>-1).sum()<g>1<block_start><raise>ValueError("Only one split fraction can be specified as -1.")<block_end>n_examples_per_split[n_examples_per_split<eq>-1]=np.NaN<line_sep>n_examples_per_split=np.ceil(n_examples_per_split<times>n_examples)<line_sep>n_examples_per_split[np.isnan(n_examples_per_split)]=np.maximum(n_examples-np.nansum(n_examples_per_split) 1)<line_sep>n_examples_per_split=n_examples_per_split.astype("int64")<if_stmt>n_examples_per_split.sum()<g>n_examples<block_start><raise>ValueError("Splits cannot sum to more than the total input labels.")<block_end># Sample and create new Labels instances.
split_labels=[]<for_stmt>n_samples n_examples_per_split# Sample.
<block_start>sampled_indices=np.random.default_rng().choice(labels_indices size=n_samples replace=<false>)<line_sep># Create new instance.
split_labels.append(sleap.Labels([labels[int(ind)]<for>ind sampled_indices]))<line_sep># Exclude the sampled indices from the available indices.
labels_indices=np.setdiff1d(labels_indices sampled_indices)<block_end><return>tuple(split_labels)<block_end><def_stmt>split_labels_reader labels_reader:LabelsReader split_fractions:Sequence[float]<arrow>Tuple[LabelsReader]<block_start>"""Split a `LabelsReader` into multiple new ones with random subsets of the data.
Args:
labels_reader: An instance of `sleap.nn.data.providers.LabelsReader`. This is a
provider that generates datasets that contain elements read from a
`sleap.Labels` instance.
split_fractions: One or more floats between 0 and 1 that specify the fraction of
examples that should be in each dataset. These should add up to <= 1.0.
Fractions of less than 1 element will be rounded up to ensure that there is at
least 1 element in each split. One of the fractions may be -1 to indicate
that it should contain all elements left over from the other splits.
Returns:
A tuple of `LabelsReader` instances of the same length as `split_fractions`. The
indices will be stored in the `example_indices` attribute of each `LabelsReader` instance.
The actual `sleap.Labels` instance will be the same for each instance, only the
`example_indices` that are iterated over will change across splits.
If the input `labels_reader` already has `example_indices`, a subset of these
will be sampled to generate the splits.
Raises:
ValueError: If more than one split fraction is specified as -1.
ValueError: If the splits add up to more than the total available examples.
Note:
Sampling is done without replacement.
"""<line_sep># Get available indices.
labels_indices=labels_reader.example_indices<if_stmt>labels_indices<is><none><block_start>labels_indices=np.arange(len(labels_reader))<block_end>labels_indices=np.array(labels_indices).astype("int64")<line_sep># Compute split sizes.
n_examples=len(labels_indices)<line_sep>n_examples_per_split=np.array(split_fractions).astype("float64")<if_stmt>(n_examples_per_split<eq>-1).sum()<g>1<block_start><raise>ValueError("Only one split fraction can be specified as -1.")<block_end>n_examples_per_split[n_examples_per_split<eq>-1]=np.NaN<line_sep>n_examples_per_split=np.ceil(n_examples_per_split<times>n_examples)<line_sep>n_examples_per_split[np.isnan(n_examples_per_split)]=np.maximum(n_examples-np.nansum(n_examples_per_split) 1)<line_sep>n_examples_per_split=n_examples_per_split.astype("int64")<if_stmt>n_examples_per_split.sum()<g>n_examples<block_start><raise>ValueError("Splits cannot sum to more than the total input labels.")<block_end># Sample and create new LabelsReader instances.
split_readers=[]<for_stmt>n_samples n_examples_per_split# Sample.
<block_start>sampled_indices=np.random.default_rng().choice(labels_indices size=n_samples replace=<false>)<line_sep># Create new instance.
split_readers.append(LabelsReader(labels_reader.labels example_indices=sampled_indices))<line_sep># Exclude the sampled indices from the available indices.
labels_indices=np.setdiff1d(labels_indices sampled_indices)<block_end><return>tuple(split_readers)<block_end>@attr.s(auto_attribs=<true>)<class_stmt>KeyMapper<block_start>"""Maps example keys to specified outputs.
This is useful for transforming examples into tuples that map onto specific layer
names for training.
Attributes:
key_maps: Dictionary or list of dictionaries with string keys and values of
the form: {input_key: output_key}. If a list, the examples will be in tuples
in the same order.
"""<line_sep>key_maps:List[Dict[Text Text]]=attr.ib(converter=attr.converters.optional(ensure_list))<line_sep>@property<def_stmt>input_keys self<arrow>List[Text]<block_start>"""Return the keys that incoming elements are expected to have."""<line_sep>input_keys=[]<for_stmt>key_map self.key_maps<block_start>input_keys.extend(list(key_map.keys()))<block_end><return>input_keys<block_end>@property<def_stmt>output_keys self<arrow>List[Text]<block_start>"""Return the keys that outgoing elements will have. These may be nested."""<line_sep>output_keys=[]<for_stmt>key_map self.key_maps<block_start>output_keys.extend(list(key_map.values()))<block_end><return>output_keys<block_end><def_stmt>transform_dataset self ds_input:tf.data.Dataset<arrow>tf.data.Dataset<block_start>"""Create a dataset with input keys mapped to new key names.
Args:
ds_input: Any `tf.data.Dataset` that generates examples as a dictionary of
tensors with the keys in `input_keys`.
Return:
A dataset that generates examples with the tensors in `input_keys` mapped to
keys in `output_keys` according to the structure in `key_maps`.
"""<def_stmt>map_keys example<block_start>"""Local processing function for dataset mapping."""<line_sep>output_keys=[]<for_stmt>key_map self.key_maps<block_start>output_keys.append({key_out:example[key_in]<for>key_in,key_out key_map.items()})<block_end><return>tuple(output_keys)<block_end>ds_output=ds_input.map(map_keys)<line_sep><return>ds_output<block_end><block_end> |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"""<line_sep># noqa
<import_from_stmt>django forms<import_from_stmt>common.forms BaseComponentForm ListField<import_from_stmt>common.constants API_TYPE_OP<import_from_stmt>components.component Component<import_from_stmt>.toolkit tools configs<class_stmt>AddApp(Component)<block_start>"""
apiLabel {{ _("新建业务") }}
apiMethod POST
### {{ _("功能描述") }}
{{ _("新建业务") }}
### {{ _("请求参数") }}
{{ common_args_desc }}
#### {{ _("接口参数") }}
| {{ _("字段") }} | {{ _("类型") }} | {{ _("必选") }} | {{ _("描述") }} |
|-----------|------------|--------|------------|
| app_name | string | {{ _("是") }} | {{ _("业务名") }} |
| maintainers | string | {{ _("是") }} | {{ _("运维人员, 多个人之间用逗号分隔") }} |
| product_pm | string | {{ _("否") }} | {{ _("产品人员,多个人之间用逗号分隔") }} |
| developer | string | {{ _("否") }} | {{ _("开发人员,多个人之间用逗号分隔") }} |
| tester | string | {{ _("否") }} | {{ _("测试人员,多个人之间用逗号分隔") }} |
| operator | string | {{ _("否") }} | {{ _("操作者,多个人之间用逗号分隔") }} |
| company_name | string | {{ _("是") }} | {{ _("公司名,cmdb配置文件中定义的constants.php中的 COMPANY_NAME") }} |
| level | int | {{ _("是") }} | {{ _("业务拓扑级别,2或者3") }} |
| life_cycle | string | {{ _("是") }} | {{ _("生成周期,1: 测试中, 2: 已上线, 3: 停运其中的一个值") }} |
### {{ _("请求参数示例") }}
```python
{
"app_code": "esb_test",
"app_secret": "xxx",
"bk_token": "xxx",
"app_name": "Test",
"maintainers": "admin",
"product_pm": "admin",
"company_name": "CompanyName",
"level": 3,
"life_cycle": "1"
}
```
### 返回结果示例
```python
{
"result": true,
"code": "00",
"message": "",
"data": {
"appId": 25
}
}
```
"""<line_sep>sys_name=configs.SYSTEM_NAME<line_sep>api_type=API_TYPE_OP<line_sep>host=configs.host<class_stmt>Form(BaseComponentForm)<block_start>app_name=forms.CharField(label='business name' required=<true>)<line_sep>maintainers=ListField(label='OPS' required=<true>)<line_sep>product_pm=ListField(label='PM' required=<false>)<line_sep>developer=ListField(label='developer' required=<false>)<line_sep>tester=ListField(label='test staff' required=<false>)<line_sep>operator=ListField(label='operator' required=<false>)<line_sep>company_name=forms.CharField(label='company name' required=<true>)<line_sep>level=forms.IntegerField(label='business topology level' required=<true>)<line_sep>life_cycle=forms.CharField(label='life cycle' required=<true>)<def_stmt>clean self<block_start>data=self.cleaned_data<line_sep><return>{'ApplicationName':data['app_name'] 'Maintainers':','.join(data['maintainers']) 'ProductPm':','.join(data['product_pm']) 'Developer':','.join(data['developer']) 'Tester':','.join(data['tester']) 'Operator':','.join(data['operator']) 'CompanyName':data['company_name'] 'Level':data['level'] 'LifeCycle':data['life_cycle'] }<block_end><block_end><def_stmt>handle self<block_start>self.form_data['Creator']=self.current_user.username<line_sep>client=tools.CCClient(self)<line_sep>self.response.payload=client.post_request(self.host '/api/app/addApp' data=self.form_data )<block_end><block_end> |
<import_stmt>os<line_sep>os.environ.setdefault("DJANGO_SETTINGS_MODULE" "quora.settings")<import_from_stmt>django.core.wsgi get_wsgi_application<import_from_stmt>dj_static Cling<import_from_stmt>whitenoise.django DjangoWhiteNoise<line_sep>application=Cling(get_wsgi_application())<line_sep>application=DjangoWhiteNoise(application)<line_sep> |
# -*- coding: utf-8 -*-
<import_from_stmt>functools partial<import_from_stmt>tqdm tqdm<line_sep>pages_progress=partial(tqdm unit=' pages' smoothing=<false> leave=<true>)<line_sep> |
<import_from_stmt>django.core.management.base BaseCommand<import_from_stmt>django_irods.storage IrodsStorage<import_from_stmt>django.conf settings<class_stmt>Command(BaseCommand)<block_start>help="Reset quota by forcing quota iRODS microservices to recalculate quota for all users."<def_stmt>handle self *args **options<block_start>istorage=IrodsStorage()<line_sep># reset quota for data zone
root_path='/{}/home/{}'.format(settings.IRODS_ZONE settings.IRODS_USERNAME)<line_sep>istorage.setAVU(root_path 'resetQuotaDir' 1)<line_sep># reset quota for user zone
user_root_path='/{}/home/{}'.format(settings.HS_USER_IRODS_ZONE settings.HS_IRODS_PROXY_USER_IN_USER_ZONE)<line_sep>istorage.setAVU(user_root_path 'resetQuotaDir' 1)<block_end><block_end> |
<import_from_stmt>django.test TestCase<import_from_stmt>django_dynamic_fixture get<import_from_stmt>readthedocs.builds.models Build Version<import_from_stmt>readthedocs.projects.models Project<class_stmt>VersionConfigTests(TestCase)<block_start><def_stmt>setUp self<block_start>self.project=get(Project)<line_sep>self.version=get(Version project=self.project)<block_end><def_stmt>test_get_correct_config self<block_start>build_old=Build.objects.create(project=self.project version=self.version _config={'version':1} )<line_sep>build_new=Build.objects.create(project=self.project version=self.version _config={'version':2} )<line_sep>build_new_error=Build.objects.create(project=self.project version=self.version _config={'version':3} success=<false> )<line_sep>build_new_unfinish=Build.objects.create(project=self.project version=self.version _config={'version':4} state='building' )<line_sep>self.assertEqual(self.version.config {'version':2})<block_end><def_stmt>test_get_correct_config_when_same_config self<block_start>build_old=get(Build project=self.project version=self.version _config={} )<line_sep>build_old.config={'version':1}<line_sep>build_old.save()<line_sep>build_new=get(Build project=self.project version=self.version _config={} )<line_sep>build_new.config={'version':1}<line_sep>build_new.save()<line_sep>build_new_error=get(Build project=self.project version=self.version _config={} success=<false> )<line_sep>build_new_error.config={'version':3}<line_sep>build_new_error.save()<line_sep>build_new_unfinish=get(Build project=self.project version=self.version _config={} state='building' )<line_sep>build_new_unfinish.config={'version':1}<line_sep>build_new_unfinish.save()<line_sep>config=self.version.config<line_sep>self.assertEqual(config {'version':1})<block_end><block_end> |
batch_size=192<times>4<line_sep>config={}<line_sep># set the parameters related to the training and testing set
data_train_opt={}<line_sep>data_train_opt['batch_size']=batch_size<line_sep>data_train_opt['unsupervised']=<true><line_sep>data_train_opt['epoch_size']=<none><line_sep>data_train_opt['random_sized_crop']=<false><line_sep>data_train_opt['dataset_name']='imagenet'<line_sep>data_train_opt['split']='train'<line_sep>data_test_opt={}<line_sep>data_test_opt['batch_size']=batch_size<line_sep>data_test_opt['unsupervised']=<true><line_sep>data_test_opt['epoch_size']=<none><line_sep>data_test_opt['random_sized_crop']=<false><line_sep>data_test_opt['dataset_name']='imagenet'<line_sep>data_test_opt['split']='val'<line_sep>config['data_train_opt']=data_train_opt<line_sep>config['data_test_opt']=data_test_opt<line_sep>config['max_num_epochs']=200<line_sep>net_opt={}<line_sep>net_opt['num_classes']=8<line_sep>net_opt['num_stages']=4<line_sep>networks={}<line_sep>net_optim_params={'optim_type':'sgd' 'lr':0.01 'momentum':0.9 'weight_decay':5e-4 'nesterov':<true> 'LUT_lr':[(100 0.01) (150 0.001) (200 0.0001)]}<line_sep>networks['model']={'def_file':'architectures/AlexNet.py' 'pretrained':<none> 'opt':net_opt 'optim_params':net_optim_params}<line_sep>config['networks']=networks<line_sep>criterions={}<line_sep>criterions['loss']={'ctype':'MSELoss' 'opt':<true>}<line_sep>config['criterions']=criterions<line_sep>config['algorithm_type']='UnsupervisedModel'<line_sep> |
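The `LUT_lr` entry above is a lookup-table learning-rate schedule. A small sketch of how a consuming trainer commonly interprets it follows; the `lut_lr` helper is an assumption about that trainer, not code from this config.

```python
# Sketch only: pick the learning rate for a given epoch from (max_epoch, lr) pairs.
def lut_lr(lut, epoch):
    for max_epoch, lr in lut:
        if epoch < max_epoch:
            return lr
    return lut[-1][1]

# lut_lr([(100, 0.01), (150, 0.001), (200, 0.0001)], epoch=120) -> 0.001
```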
<import_from_stmt>.resnet resnet50<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_stmt>torch<import_stmt>numpy<as>np<class_stmt>fpn_module_global(nn.Module)<block_start><def_stmt>__init__ self numClass<block_start>super(fpn_module_global self).__init__()<line_sep>self._up_kwargs={'mode':'bilinear'}<line_sep># Top layer
self.toplayer=nn.Conv2d(2048 256 kernel_size=1 stride=1 padding=0)# Reduce channels
# Lateral layers
self.latlayer1=nn.Conv2d(1024 256 kernel_size=1 stride=1 padding=0)<line_sep>self.latlayer2=nn.Conv2d(512 256 kernel_size=1 stride=1 padding=0)<line_sep>self.latlayer3=nn.Conv2d(256 256 kernel_size=1 stride=1 padding=0)<line_sep># Smooth layers
self.smooth1_1=nn.Conv2d(256 256 kernel_size=3 stride=1 padding=1)<line_sep>self.smooth2_1=nn.Conv2d(256 256 kernel_size=3 stride=1 padding=1)<line_sep>self.smooth3_1=nn.Conv2d(256 256 kernel_size=3 stride=1 padding=1)<line_sep>self.smooth4_1=nn.Conv2d(256 256 kernel_size=3 stride=1 padding=1)<line_sep>self.smooth1_2=nn.Conv2d(256 128 kernel_size=3 stride=1 padding=1)<line_sep>self.smooth2_2=nn.Conv2d(256 128 kernel_size=3 stride=1 padding=1)<line_sep>self.smooth3_2=nn.Conv2d(256 128 kernel_size=3 stride=1 padding=1)<line_sep>self.smooth4_2=nn.Conv2d(256 128 kernel_size=3 stride=1 padding=1)<line_sep># Classify layers
self.classify=nn.Conv2d(128<times>4 numClass kernel_size=3 stride=1 padding=1)<line_sep># Local2Global: double #channels ####################################
# Top layer
self.toplayer_ext=nn.Conv2d(2048<times>2 256 kernel_size=1 stride=1 padding=0)# Reduce channels
# Lateral layers
self.latlayer1_ext=nn.Conv2d(1024<times>2 256 kernel_size=1 stride=1 padding=0)<line_sep>self.latlayer2_ext=nn.Conv2d(512<times>2 256 kernel_size=1 stride=1 padding=0)<line_sep>self.latlayer3_ext=nn.Conv2d(256<times>2 256 kernel_size=1 stride=1 padding=0)<line_sep># Smooth layers
self.smooth1_1_ext=nn.Conv2d(256<times>2 256 kernel_size=3 stride=1 padding=1)<line_sep>self.smooth2_1_ext=nn.Conv2d(256<times>2 256 kernel_size=3 stride=1 padding=1)<line_sep>self.smooth3_1_ext=nn.Conv2d(256<times>2 256 kernel_size=3 stride=1 padding=1)<line_sep>self.smooth4_1_ext=nn.Conv2d(256<times>2 256 kernel_size=3 stride=1 padding=1)<line_sep>self.smooth1_2_ext=nn.Conv2d(256<times>2 128 kernel_size=3 stride=1 padding=1)<line_sep>self.smooth2_2_ext=nn.Conv2d(256<times>2 128 kernel_size=3 stride=1 padding=1)<line_sep>self.smooth3_2_ext=nn.Conv2d(256<times>2 128 kernel_size=3 stride=1 padding=1)<line_sep>self.smooth4_2_ext=nn.Conv2d(256<times>2 128 kernel_size=3 stride=1 padding=1)<line_sep>self.smooth=nn.Conv2d(128<times>4<times>2 128<times>4 kernel_size=3 stride=1 padding=1)<block_end><def_stmt>_concatenate self p5 p4 p3 p2<block_start>_,_,H,W=p2.size()<line_sep>p5=F.interpolate(p5 size=(H W) **self._up_kwargs)<line_sep>p4=F.interpolate(p4 size=(H W) **self._up_kwargs)<line_sep>p3=F.interpolate(p3 size=(H W) **self._up_kwargs)<line_sep><return>torch.cat([p5 p4 p3 p2] dim=1)<block_end><def_stmt>_upsample_add self x y<block_start>'''Upsample and add two feature maps.
Args:
x: (Variable) top feature map to be upsampled.
y: (Variable) lateral feature map.
Returns:
(Variable) added feature map.
Note in PyTorch, when input size is odd, the upsampled feature map
with `F.interpolate(..., scale_factor=2, mode='nearest')`
may not be equal to the lateral feature map size.
e.g.
original input size: [N,_,15,15] ->
conv2d feature map size: [N,_,8,8] ->
upsampled feature map size: [N,_,16,16]
So we choose bilinear upsample which supports arbitrary output sizes.
'''<line_sep>_,_,H,W=y.size()<line_sep><return>F.interpolate(x size=(H W) **self._up_kwargs)+y<block_end><def_stmt>forward self c2 c3 c4 c5 c2_ext=<none> c3_ext=<none> c4_ext=<none> c5_ext=<none> ps0_ext=<none> ps1_ext=<none> ps2_ext=<none># Top-down
<block_start><if_stmt>c5_ext<is><none><block_start>p5=self.toplayer(c5)<line_sep>p4=self._upsample_add(p5 self.latlayer1(c4))<line_sep>p3=self._upsample_add(p4 self.latlayer2(c3))<line_sep>p2=self._upsample_add(p3 self.latlayer3(c2))<block_end><else_stmt><block_start>p5=self.toplayer_ext(torch.cat((c5 c5_ext) dim=1))<line_sep>p4=self._upsample_add(p5 self.latlayer1_ext(torch.cat((c4 c4_ext) dim=1)))<line_sep>p3=self._upsample_add(p4 self.latlayer2_ext(torch.cat((c3 c3_ext) dim=1)))<line_sep>p2=self._upsample_add(p3 self.latlayer3_ext(torch.cat((c2 c2_ext) dim=1)))<block_end>ps0=[p5 p4 p3 p2]<line_sep># Smooth
<if_stmt>ps0_ext<is><none><block_start>p5=self.smooth1_1(p5)<line_sep>p4=self.smooth2_1(p4)<line_sep>p3=self.smooth3_1(p3)<line_sep>p2=self.smooth4_1(p2)<block_end><else_stmt><block_start>p5=self.smooth1_1_ext(torch.cat((p5 ps0_ext[0]) dim=1))<line_sep>p4=self.smooth2_1_ext(torch.cat((p4 ps0_ext[1]) dim=1))<line_sep>p3=self.smooth3_1_ext(torch.cat((p3 ps0_ext[2]) dim=1))<line_sep>p2=self.smooth4_1_ext(torch.cat((p2 ps0_ext[3]) dim=1))<block_end>ps1=[p5 p4 p3 p2]<if_stmt>ps1_ext<is><none><block_start>p5=self.smooth1_2(p5)<line_sep>p4=self.smooth2_2(p4)<line_sep>p3=self.smooth3_2(p3)<line_sep>p2=self.smooth4_2(p2)<block_end><else_stmt><block_start>p5=self.smooth1_2_ext(torch.cat((p5 ps1_ext[0]) dim=1))<line_sep>p4=self.smooth2_2_ext(torch.cat((p4 ps1_ext[1]) dim=1))<line_sep>p3=self.smooth3_2_ext(torch.cat((p3 ps1_ext[2]) dim=1))<line_sep>p2=self.smooth4_2_ext(torch.cat((p2 ps1_ext[3]) dim=1))<block_end>ps2=[p5 p4 p3 p2]<line_sep># Classify
<if_stmt>ps2_ext<is><none><block_start>ps3=self._concatenate(p5 p4 p3 p2)<line_sep>output=self.classify(ps3)<block_end><else_stmt><block_start>p=self._concatenate(torch.cat((p5 ps2_ext[0]) dim=1) torch.cat((p4 ps2_ext[1]) dim=1) torch.cat((p3 ps2_ext[2]) dim=1) torch.cat((p2 ps2_ext[3]) dim=1))<line_sep>ps3=self.smooth(p)<line_sep>output=self.classify(ps3)<block_end><return>output ps0 ps1 ps2 ps3<block_end><block_end><class_stmt>fpn_module_local(nn.Module)<block_start><def_stmt>__init__ self numClass<block_start>super(fpn_module_local self).__init__()<line_sep>self._up_kwargs={'mode':'bilinear'}<line_sep># Top layer
fold=2<line_sep>self.toplayer=nn.Conv2d(2048<times>fold 256 kernel_size=1 stride=1 padding=0)# Reduce channels
# Lateral layers [C]
self.latlayer1=nn.Conv2d(1024<times>fold 256 kernel_size=1 stride=1 padding=0)<line_sep>self.latlayer2=nn.Conv2d(512<times>fold 256 kernel_size=1 stride=1 padding=0)<line_sep>self.latlayer3=nn.Conv2d(256<times>fold 256 kernel_size=1 stride=1 padding=0)<line_sep># Smooth layers
# ps0
self.smooth1_1=nn.Conv2d(256<times>fold 256 kernel_size=3 stride=1 padding=1)<line_sep>self.smooth2_1=nn.Conv2d(256<times>fold 256 kernel_size=3 stride=1 padding=1)<line_sep>self.smooth3_1=nn.Conv2d(256<times>fold 256 kernel_size=3 stride=1 padding=1)<line_sep>self.smooth4_1=nn.Conv2d(256<times>fold 256 kernel_size=3 stride=1 padding=1)<line_sep># ps1
self.smooth1_2=nn.Conv2d(256<times>fold 128 kernel_size=3 stride=1 padding=1)<line_sep>self.smooth2_2=nn.Conv2d(256<times>fold 128 kernel_size=3 stride=1 padding=1)<line_sep>self.smooth3_2=nn.Conv2d(256<times>fold 128 kernel_size=3 stride=1 padding=1)<line_sep>self.smooth4_2=nn.Conv2d(256<times>fold 128 kernel_size=3 stride=1 padding=1)<line_sep># ps2 is concatenation
# Classify layers
self.smooth=nn.Conv2d(128<times>4<times>fold 128<times>4 kernel_size=3 stride=1 padding=1)<line_sep>self.classify=nn.Conv2d(128<times>4 numClass kernel_size=3 stride=1 padding=1)<block_end><def_stmt>_concatenate self p5 p4 p3 p2<block_start>_,_,H,W=p2.size()<line_sep>p5=F.interpolate(p5 size=(H W) **self._up_kwargs)<line_sep>p4=F.interpolate(p4 size=(H W) **self._up_kwargs)<line_sep>p3=F.interpolate(p3 size=(H W) **self._up_kwargs)<line_sep><return>torch.cat([p5 p4 p3 p2] dim=1)<block_end><def_stmt>_upsample_add self x y<block_start>'''Upsample and add two feature maps.
Args:
x: (Variable) top feature map to be upsampled.
y: (Variable) lateral feature map.
Returns:
(Variable) added feature map.
Note in PyTorch, when input size is odd, the upsampled feature map
with `F.interpolate(..., scale_factor=2, mode='nearest')`
may not be equal to the lateral feature map size.
e.g.
original input size: [N,_,15,15] ->
conv2d feature map size: [N,_,8,8] ->
upsampled feature map size: [N,_,16,16]
So we choose bilinear upsample which supports arbitrary output sizes.
'''<line_sep>_,_,H,W=y.size()<line_sep><return>F.interpolate(x size=(H W) **self._up_kwargs)+y<block_end><def_stmt>forward self c2 c3 c4 c5 c2_ext c3_ext c4_ext c5_ext ps0_ext ps1_ext ps2_ext# Top-down
<block_start>p5=self.toplayer(torch.cat([c5]+[F.interpolate(c5_ext[0] size=c5.size()[2:] **self._up_kwargs)] dim=1))<line_sep>p4=self._upsample_add(p5 self.latlayer1(torch.cat([c4]+[F.interpolate(c4_ext[0] size=c4.size()[2:] **self._up_kwargs)] dim=1)))<line_sep>p3=self._upsample_add(p4 self.latlayer2(torch.cat([c3]+[F.interpolate(c3_ext[0] size=c3.size()[2:] **self._up_kwargs)] dim=1)))<line_sep>p2=self._upsample_add(p3 self.latlayer3(torch.cat([c2]+[F.interpolate(c2_ext[0] size=c2.size()[2:] **self._up_kwargs)] dim=1)))<line_sep>ps0=[p5 p4 p3 p2]<line_sep># Smooth
p5=self.smooth1_1(torch.cat([p5]+[F.interpolate(ps0_ext[0][0] size=p5.size()[2:] **self._up_kwargs)] dim=1))<line_sep>p4=self.smooth2_1(torch.cat([p4]+[F.interpolate(ps0_ext[1][0] size=p4.size()[2:] **self._up_kwargs)] dim=1))<line_sep>p3=self.smooth3_1(torch.cat([p3]+[F.interpolate(ps0_ext[2][0] size=p3.size()[2:] **self._up_kwargs)] dim=1))<line_sep>p2=self.smooth4_1(torch.cat([p2]+[F.interpolate(ps0_ext[3][0] size=p2.size()[2:] **self._up_kwargs)] dim=1))<line_sep>ps1=[p5 p4 p3 p2]<line_sep>p5=self.smooth1_2(torch.cat([p5]+[F.interpolate(ps1_ext[0][0] size=p5.size()[2:] **self._up_kwargs)] dim=1))<line_sep>p4=self.smooth2_2(torch.cat([p4]+[F.interpolate(ps1_ext[1][0] size=p4.size()[2:] **self._up_kwargs)] dim=1))<line_sep>p3=self.smooth3_2(torch.cat([p3]+[F.interpolate(ps1_ext[2][0] size=p3.size()[2:] **self._up_kwargs)] dim=1))<line_sep>p2=self.smooth4_2(torch.cat([p2]+[F.interpolate(ps1_ext[3][0] size=p2.size()[2:] **self._up_kwargs)] dim=1))<line_sep>ps2=[p5 p4 p3 p2]<line_sep># Classify
# use ps2_ext
ps3=self._concatenate(torch.cat([p5]+[F.interpolate(ps2_ext[0][0] size=p5.size()[2:] **self._up_kwargs)] dim=1) torch.cat([p4]+[F.interpolate(ps2_ext[1][0] size=p4.size()[2:] **self._up_kwargs)] dim=1) torch.cat([p3]+[F.interpolate(ps2_ext[2][0] size=p3.size()[2:] **self._up_kwargs)] dim=1) torch.cat([p2]+[F.interpolate(ps2_ext[3][0] size=p2.size()[2:] **self._up_kwargs)] dim=1))<line_sep>ps3=self.smooth(ps3)<line_sep>output=self.classify(ps3)<line_sep><return>output ps0 ps1 ps2 ps3<block_end><block_end><class_stmt>fpn(nn.Module)<block_start><def_stmt>__init__ self numClass<block_start>super(fpn self).__init__()<line_sep>self._up_kwargs={'mode':'bilinear'}<line_sep># Res net
self.resnet_global=resnet50(<true>)<line_sep>self.resnet_local=resnet50(<true>)<line_sep># fpn module
self.fpn_global=fpn_module_global(numClass)<line_sep>self.fpn_local=fpn_module_local(numClass)<line_sep>self.c2_g=<none><line_sep>self.c3_g=<none><line_sep>self.c4_g=<none><line_sep>self.c5_g=<none><line_sep>self.output_g=<none><line_sep>self.ps0_g=<none><line_sep>self.ps1_g=<none><line_sep>self.ps2_g=<none><line_sep>self.ps3_g=<none><line_sep>self.c2_l=[]<line_sep>self.c3_l=[]<line_sep>self.c4_l=[]<line_sep>self.c5_l=[]<line_sep>self.ps00_l=[]<line_sep>self.ps01_l=[]<line_sep>self.ps02_l=[]<line_sep>self.ps03_l=[]<line_sep>self.ps10_l=[]<line_sep>self.ps11_l=[]<line_sep>self.ps12_l=[]<line_sep>self.ps13_l=[]<line_sep>self.ps20_l=[]<line_sep>self.ps21_l=[]<line_sep>self.ps22_l=[]<line_sep>self.ps23_l=[]<line_sep>self.ps0_l=<none><line_sep>self.ps1_l=<none><line_sep>self.ps2_l=<none><line_sep>self.ps3_l=[]#; self.output_l = []
self.c2_b=<none><line_sep>self.c3_b=<none><line_sep>self.c4_b=<none><line_sep>self.c5_b=<none><line_sep>self.ps00_b=<none><line_sep>self.ps01_b=<none><line_sep>self.ps02_b=<none><line_sep>self.ps03_b=<none><line_sep>self.ps10_b=<none><line_sep>self.ps11_b=<none><line_sep>self.ps12_b=<none><line_sep>self.ps13_b=<none><line_sep>self.ps20_b=<none><line_sep>self.ps21_b=<none><line_sep>self.ps22_b=<none><line_sep>self.ps23_b=<none><line_sep>self.ps3_b=[]#; self.output_b = []
self.patch_n=0<line_sep>self.mse=nn.MSELoss()<line_sep>self.ensemble_conv=nn.Conv2d(128<times>4<times>2 numClass kernel_size=3 stride=1 padding=1)<line_sep>nn.init.normal_(self.ensemble_conv.weight mean=0 std=0.01)<line_sep># init fpn
<for_stmt>m self.fpn_global.children()<block_start><if_stmt>hasattr(m 'weight')<block_start>nn.init.normal_(m.weight mean=0 std=0.01)<block_end><if_stmt>hasattr(m 'bias')<block_start>nn.init.constant_(m.bias 0)<block_end><block_end><for_stmt>m self.fpn_local.children()<block_start><if_stmt>hasattr(m 'weight')<block_start>nn.init.normal_(m.weight mean=0 std=0.01)<block_end><if_stmt>hasattr(m 'bias')<block_start>nn.init.constant_(m.bias 0)<block_end><block_end><block_end><def_stmt>clear_cache self<block_start>self.c2_g=<none><line_sep>self.c3_g=<none><line_sep>self.c4_g=<none><line_sep>self.c5_g=<none><line_sep>self.output_g=<none><line_sep>self.ps0_g=<none><line_sep>self.ps1_g=<none><line_sep>self.ps2_g=<none><line_sep>self.ps3_g=<none><line_sep>self.c2_l=[]<line_sep>self.c3_l=[]<line_sep>self.c4_l=[]<line_sep>self.c5_l=[]<line_sep>self.ps00_l=[]<line_sep>self.ps01_l=[]<line_sep>self.ps02_l=[]<line_sep>self.ps03_l=[]<line_sep>self.ps10_l=[]<line_sep>self.ps11_l=[]<line_sep>self.ps12_l=[]<line_sep>self.ps13_l=[]<line_sep>self.ps20_l=[]<line_sep>self.ps21_l=[]<line_sep>self.ps22_l=[]<line_sep>self.ps23_l=[]<line_sep>self.ps0_l=<none><line_sep>self.ps1_l=<none><line_sep>self.ps2_l=<none><line_sep>self.ps3_l=[]<line_sep>self.output_l=[]<line_sep>self.c2_b=<none><line_sep>self.c3_b=<none><line_sep>self.c4_b=<none><line_sep>self.c5_b=<none><line_sep>self.ps00_b=<none><line_sep>self.ps01_b=<none><line_sep>self.ps02_b=<none><line_sep>self.ps03_b=<none><line_sep>self.ps10_b=<none><line_sep>self.ps11_b=<none><line_sep>self.ps12_b=<none><line_sep>self.ps13_b=<none><line_sep>self.ps20_b=<none><line_sep>self.ps21_b=<none><line_sep>self.ps22_b=<none><line_sep>self.ps23_b=<none><line_sep>self.ps3_b=[]<line_sep>self.output_b=[]<line_sep>self.patch_n=0<block_end><def_stmt>_sample_grid self fm bbox sampleSize<block_start>"""
:param fm: tensor(b,c,h,w) the global feature map
:param bbox: list [b * nparray(x1, y1, x2, y2)]; (x1, y1) is the top-left of the bbox and (x2, y2) is the bottom-right.
They are in the range [0, 1]; x corresponds to the width dimension and y to the height dimension.
:param sampleSize: (oH, oW) the number of points to sample in the height and width dimensions
:return: tensor(b, c, oH, oW) sampled tensor
"""<line_sep>b,c,h,w=fm.shape<line_sep>b_bbox=len(bbox)<line_sep>bbox=[x<times>2-1<for>x bbox]# range transform
<if_stmt>b<ne>b_bbox<and>b<eq>1<block_start>fm=torch.cat([fm ]<times>b_bbox dim=0)<block_end>grid=np.zeros((b_bbox )+sampleSize+(2 ) dtype=np.float32)<line_sep>gridMap=np.array([[(cnt_w/(sampleSize[1]-1) cnt_h/(sampleSize[0]-1))<for>cnt_w range(sampleSize[1])]<for>cnt_h range(sampleSize[0])])<for_stmt>cnt_b range(b_bbox)<block_start>grid[cnt_b : : 0]=bbox[cnt_b][0]+(bbox[cnt_b][2]-bbox[cnt_b][0])<times>gridMap[: : 0]<line_sep>grid[cnt_b : : 1]=bbox[cnt_b][1]+(bbox[cnt_b][3]-bbox[cnt_b][1])<times>gridMap[: : 1]<block_end>grid=torch.from_numpy(grid).cuda()<line_sep><return>F.grid_sample(fm grid)<block_end><def_stmt>_crop_global self f_global top_lefts ratio<block_start>'''
top_lefts: [(top, left)] * b
'''<line_sep>_,c,H,W=f_global.size()<line_sep>b=len(top_lefts)<line_sep>h,w=int(np.round(H<times>ratio[0])) int(np.round(W<times>ratio[1]))<line_sep># bbox = [ np.array([left, top, left + ratio, top + ratio]) for (top, left) in top_lefts ]
# crop = self._sample_grid(f_global, bbox, (H, W))
crop=[]<for_stmt>i range(b)<block_start>top,left=int(np.round(top_lefts[i][0]<times>H)) int(np.round(top_lefts[i][1]<times>W))<line_sep># # global's sub-region & upsample
# f_global_patch = F.interpolate(f_global[0:1, :, top:top+h, left:left+w], size=(h, w), mode='bilinear')
f_global_patch=f_global[0:1 : top:top+h left:left+w]<line_sep>crop.append(f_global_patch[0])<block_end>crop=torch.stack(crop dim=0)# stack into mini-batch
<return>[crop]<block_end># return as a list to make torch.cat easy
<def_stmt>_merge_local self f_local merge f_global top_lefts oped ratio template<block_start>'''
Merge feature maps from local patches into a whole image's feature map (on CUDA).
f_local: feature maps of a sub-batch of patches
oped: [start, end) range of patch indices within top_lefts
'''<line_sep>b,_,_,_=f_local.size()<line_sep>_,c,H,W=f_global.size()# match global feature size
<if_stmt>merge<is><none><block_start>merge=torch.zeros((1 c H W)).cuda()<block_end>h,w=int(np.round(H<times>ratio[0])) int(np.round(W<times>ratio[1]))<for_stmt>i range(b)<block_start>index=oped[0]+i<line_sep>top,left=int(np.round(H<times>top_lefts[index][0])) int(np.round(W<times>top_lefts[index][1]))<line_sep>merge[: : top:top+h left:left+w]<augadd>F.interpolate(f_local[i:i+1] size=(h w) **self._up_kwargs)<block_end><if_stmt>oped[1]<ge>len(top_lefts)<block_start>template=F.interpolate(template size=(H W) **self._up_kwargs)<line_sep>template=template.expand_as(merge)<line_sep># template = Variable(template).cuda()
merge<augdiv>template<block_end><return>merge<block_end><def_stmt>ensemble self f_local f_global<block_start><return>self.ensemble_conv(torch.cat((f_local f_global) dim=1))<block_end><def_stmt>collect_local_fm self image_global patches ratio top_lefts oped batch_size global_model=<none> template=<none> n_patch_all=<none><block_start>'''
patches: 1 patch
top_lefts: all top-left
oped: [start, end)
'''<with_stmt>torch.no_grad()<block_start><if_stmt>self.patch_n<eq>0<block_start>self.c2_g,self.c3_g,self.c4_g,self.c5_g=global_model.module.resnet_global.forward(image_global)<line_sep>self.output_g,self.ps0_g,self.ps1_g,self.ps2_g,self.ps3_g=global_model.module.fpn_global.forward(self.c2_g self.c3_g self.c4_g self.c5_g)<line_sep># self.output_g = F.interpolate(self.output_g, image_global.size()[2:], mode='nearest')
<block_end>self.patch_n<augadd>patches.size()[0]<line_sep>self.patch_n<augmod>n_patch_all<line_sep>self.resnet_local.eval()<line_sep>self.fpn_local.eval()<line_sep>c2,c3,c4,c5=self.resnet_local.forward(patches)<line_sep># global's 1x patch cat
output,ps0,ps1,ps2,ps3=self.fpn_local.forward(c2 c3 c4 c5 self._crop_global(self.c2_g top_lefts[oped[0]:oped[1]] ratio) c3_ext=self._crop_global(self.c3_g top_lefts[oped[0]:oped[1]] ratio) c4_ext=self._crop_global(self.c4_g top_lefts[oped[0]:oped[1]] ratio) c5_ext=self._crop_global(self.c5_g top_lefts[oped[0]:oped[1]] ratio) ps0_ext=[self._crop_global(f top_lefts[oped[0]:oped[1]] ratio)<for>f self.ps0_g] ps1_ext=[self._crop_global(f top_lefts[oped[0]:oped[1]] ratio)<for>f self.ps1_g] ps2_ext=[self._crop_global(f top_lefts[oped[0]:oped[1]] ratio)<for>f self.ps2_g])<line_sep># output = F.interpolate(output, patches.size()[2:], mode='nearest')
self.c2_b=self._merge_local(c2 self.c2_b self.c2_g top_lefts oped ratio template)<line_sep>self.c3_b=self._merge_local(c3 self.c3_b self.c3_g top_lefts oped ratio template)<line_sep>self.c4_b=self._merge_local(c4 self.c4_b self.c4_g top_lefts oped ratio template)<line_sep>self.c5_b=self._merge_local(c5 self.c5_b self.c5_g top_lefts oped ratio template)<line_sep>self.ps00_b=self._merge_local(ps0[0] self.ps00_b self.ps0_g[0] top_lefts oped ratio template)<line_sep>self.ps01_b=self._merge_local(ps0[1] self.ps01_b self.ps0_g[1] top_lefts oped ratio template)<line_sep>self.ps02_b=self._merge_local(ps0[2] self.ps02_b self.ps0_g[2] top_lefts oped ratio template)<line_sep>self.ps03_b=self._merge_local(ps0[3] self.ps03_b self.ps0_g[3] top_lefts oped ratio template)<line_sep>self.ps10_b=self._merge_local(ps1[0] self.ps10_b self.ps1_g[0] top_lefts oped ratio template)<line_sep>self.ps11_b=self._merge_local(ps1[1] self.ps11_b self.ps1_g[1] top_lefts oped ratio template)<line_sep>self.ps12_b=self._merge_local(ps1[2] self.ps12_b self.ps1_g[2] top_lefts oped ratio template)<line_sep>self.ps13_b=self._merge_local(ps1[3] self.ps13_b self.ps1_g[3] top_lefts oped ratio template)<line_sep>self.ps20_b=self._merge_local(ps2[0] self.ps20_b self.ps2_g[0] top_lefts oped ratio template)<line_sep>self.ps21_b=self._merge_local(ps2[1] self.ps21_b self.ps2_g[1] top_lefts oped ratio template)<line_sep>self.ps22_b=self._merge_local(ps2[2] self.ps22_b self.ps2_g[2] top_lefts oped ratio template)<line_sep>self.ps23_b=self._merge_local(ps2[3] self.ps23_b self.ps2_g[3] top_lefts oped ratio template)<line_sep>self.ps3_b.append(ps3.cpu())<line_sep># self.output_b.append(output.cpu()) # each output is 1, 7, h, w
<if_stmt>self.patch_n<eq>0# merged all patches into an image
<block_start>self.c2_l.append(self.c2_b)<line_sep>self.c3_l.append(self.c3_b)<line_sep>self.c4_l.append(self.c4_b)<line_sep>self.c5_l.append(self.c5_b)<line_sep>self.ps00_l.append(self.ps00_b)<line_sep>self.ps01_l.append(self.ps01_b)<line_sep>self.ps02_l.append(self.ps02_b)<line_sep>self.ps03_l.append(self.ps03_b)<line_sep>self.ps10_l.append(self.ps10_b)<line_sep>self.ps11_l.append(self.ps11_b)<line_sep>self.ps12_l.append(self.ps12_b)<line_sep>self.ps13_l.append(self.ps13_b)<line_sep>self.ps20_l.append(self.ps20_b)<line_sep>self.ps21_l.append(self.ps21_b)<line_sep>self.ps22_l.append(self.ps22_b)<line_sep>self.ps23_l.append(self.ps23_b)<line_sep># collected all ps3 and output of patches as a (b) tensor, append into list
self.ps3_l.append(torch.cat(self.ps3_b dim=0))<line_sep># a list of tensors
# self.output_l.append(torch.cat(self.output_b, dim=0)) # a list of 36, 7, h, w tensors
self.c2_b=<none><line_sep>self.c3_b=<none><line_sep>self.c4_b=<none><line_sep>self.c5_b=<none><line_sep>self.ps00_b=<none><line_sep>self.ps01_b=<none><line_sep>self.ps02_b=<none><line_sep>self.ps03_b=<none><line_sep>self.ps10_b=<none><line_sep>self.ps11_b=<none><line_sep>self.ps12_b=<none><line_sep>self.ps13_b=<none><line_sep>self.ps20_b=<none><line_sep>self.ps21_b=<none><line_sep>self.ps22_b=<none><line_sep>self.ps23_b=<none><line_sep>self.ps3_b=[]# ; self.output_b = []
<block_end><if_stmt>len(self.c2_l)<eq>batch_size<block_start>self.c2_l=torch.cat(self.c2_l dim=0)# .cuda()
self.c3_l=torch.cat(self.c3_l dim=0)# .cuda()
self.c4_l=torch.cat(self.c4_l dim=0)# .cuda()
self.c5_l=torch.cat(self.c5_l dim=0)# .cuda()
self.ps00_l=torch.cat(self.ps00_l dim=0)# .cuda()
self.ps01_l=torch.cat(self.ps01_l dim=0)# .cuda()
self.ps02_l=torch.cat(self.ps02_l dim=0)# .cuda()
self.ps03_l=torch.cat(self.ps03_l dim=0)# .cuda()
self.ps10_l=torch.cat(self.ps10_l dim=0)# .cuda()
self.ps11_l=torch.cat(self.ps11_l dim=0)# .cuda()
self.ps12_l=torch.cat(self.ps12_l dim=0)# .cuda()
self.ps13_l=torch.cat(self.ps13_l dim=0)# .cuda()
self.ps20_l=torch.cat(self.ps20_l dim=0)# .cuda()
self.ps21_l=torch.cat(self.ps21_l dim=0)# .cuda()
self.ps22_l=torch.cat(self.ps22_l dim=0)# .cuda()
self.ps23_l=torch.cat(self.ps23_l dim=0)# .cuda()
self.ps0_l=[self.ps00_l self.ps01_l self.ps02_l self.ps03_l]<line_sep>self.ps1_l=[self.ps10_l self.ps11_l self.ps12_l self.ps13_l]<line_sep>self.ps2_l=[self.ps20_l self.ps21_l self.ps22_l self.ps23_l]<line_sep># self.ps3_l = torch.cat(self.ps3_l, dim=0)# .cuda()
<block_end><return>self.ps3_l output<block_end><block_end># self.output_l
<def_stmt>forward self image_global patches top_lefts ratio mode=1 global_model=<none> n_patch=<none><block_start><if_stmt>mode<eq>1# train global model
<block_start>c2_g,c3_g,c4_g,c5_g=self.resnet_global.forward(image_global)<line_sep>output_g,ps0_g,ps1_g,ps2_g,ps3_g=self.fpn_global.forward(c2_g c3_g c4_g c5_g)<line_sep># imsize = image_global.size()[2:]
# output_g = F.interpolate(output_g, imsize, mode='nearest')
<return>output_g <none><block_end><elif_stmt>mode<eq>2# train global2local model
<block_start><with_stmt>torch.no_grad()<block_start><if_stmt>self.patch_n<eq>0# calculate global images only if patches belong to a new set of global images (when self.patch_n % n_patch == 0)
<block_start>self.c2_g,self.c3_g,self.c4_g,self.c5_g=self.resnet_global.forward(image_global)<line_sep>self.output_g,self.ps0_g,self.ps1_g,self.ps2_g,self.ps3_g=self.fpn_global.forward(self.c2_g self.c3_g self.c4_g self.c5_g)<line_sep># imsize_glb = image_global.size()[2:]
# self.output_g = F.interpolate(self.output_g, imsize_glb, mode='nearest')
<block_end>self.patch_n<augadd>patches.size()[0]<line_sep>self.patch_n<augmod>n_patch<block_end># train local model #######################################
c2_l,c3_l,c4_l,c5_l=self.resnet_local.forward(patches)<line_sep># global's 1x patch cat
output_l,ps0_l,ps1_l,ps2_l,ps3_l=self.fpn_local.forward(c2_l c3_l c4_l c5_l self._crop_global(self.c2_g top_lefts ratio) self._crop_global(self.c3_g top_lefts ratio) self._crop_global(self.c4_g top_lefts ratio) self._crop_global(self.c5_g top_lefts ratio) [self._crop_global(f top_lefts ratio)<for>f self.ps0_g] [self._crop_global(f top_lefts ratio)<for>f self.ps1_g] [self._crop_global(f top_lefts ratio)<for>f self.ps2_g])<line_sep># imsize = patches.size()[2:]
# output_l = F.interpolate(output_l, imsize, mode='nearest')
ps3_g2l=self._crop_global(self.ps3_g top_lefts ratio)[0]# only calculate loss on 1x
ps3_g2l=F.interpolate(ps3_g2l size=ps3_l.size()[2:] **self._up_kwargs)<line_sep>output=self.ensemble(ps3_l ps3_g2l)<line_sep># output = F.interpolate(output, imsize, mode='nearest')
<return>output self.output_g output_l self.mse(ps3_l ps3_g2l)<block_end><else_stmt># train local2global model
<block_start>c2_g,c3_g,c4_g,c5_g=self.resnet_global.forward(image_global)<line_sep># local patch cat into global
output_g,ps0_g,ps1_g,ps2_g,ps3_g=self.fpn_global.forward(c2_g c3_g c4_g c5_g c2_ext=self.c2_l c3_ext=self.c3_l c4_ext=self.c4_l c5_ext=self.c5_l ps0_ext=self.ps0_l ps1_ext=self.ps1_l ps2_ext=self.ps2_l)<line_sep># imsize = image_global.size()[2:]
# output_g = F.interpolate(output_g, imsize, mode='nearest')
self.clear_cache()<line_sep><return>output_g ps3_g<block_end><block_end><block_end> |
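A minimal smoke-test sketch for the model above; the input shape and class count are illustrative only. In mode=1 only the global branch runs, so the patch arguments can be placeholders.

```python
# Sketch only: run the global branch (mode=1) on a dummy image.
import torch

model = fpn(numClass=7)
image_global = torch.randn(1, 3, 508, 508)
output_g, _ = model(image_global, patches=None, top_lefts=None, ratio=None, mode=1)
print(output_g.shape)  # roughly (1, 7, H/4, W/4), the FPN p2 resolution
```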
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 30 21:24:36 2019
@author: wmy
"""<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>PIL Image<import_from_stmt>keras backend<as>K<import_from_stmt>keras.losses mean_absolute_error mean_squared_error<import_from_stmt>keras.models load_model<import_from_stmt>keras.optimizers Adam<import_stmt>random<import_stmt>os<import_from_stmt>model wdsr_a wdsr_b<import_from_stmt>utils DataLoader<line_sep>model=wdsr_b(scale=4 num_res_blocks=32)<line_sep>model.load_weights('./weights/wdsr-b-32-x4.h5')<line_sep>data_loader=DataLoader(scale=4)<def_stmt>evaluate_test model setpath='datasets/train' difficulty='easy' name='evaluate'<block_start>images=data_loader.search(setpath)<line_sep>image=random.choice(images)<line_sep>hr=data_loader.imread(image)<line_sep>resize=(hr.size[0]<floordiv>data_loader.scale hr.size[1]<floordiv>data_loader.scale)<line_sep>hidden_scale=random.uniform(1 3)<line_sep>radius=random.uniform(1 3)<if_stmt>difficulty<eq>'easy'<block_start>hidden_scale=random.uniform(1 1.5)<line_sep>radius=random.uniform(1 1.5)<line_sep><pass><block_end><elif_stmt>difficulty<eq>'normal'<block_start>hidden_scale=random.uniform(1.5 2)<line_sep>radius=random.uniform(1.5 2)<line_sep><pass><block_end><elif_stmt>difficulty<eq>'hard'<block_start>hidden_scale=random.uniform(2 2.5)<line_sep>radius=random.uniform(2 2.5)<line_sep><pass><block_end><elif_stmt>difficulty<eq>'lunatic'<block_start>hidden_scale=random.uniform(2.5 3)<line_sep>radius=random.uniform(2.5 3)<line_sep><pass><block_end><else_stmt><block_start><raise>ValueError("unknown difficulty")<block_end>hidden_resize=(int(resize[0]/hidden_scale) int(resize[1]/hidden_scale))<line_sep>lr=data_loader.gaussianblur(hr radius)<line_sep>lr=lr.resize(hidden_resize)<line_sep>lr=lr.resize(resize)<line_sep>lr_resize=lr.resize(hr.size)<line_sep>lr=np.asarray(lr)<line_sep>sr=model.predict(np.array([lr]))[0]<line_sep>sr=np.clip(sr 0 255)<line_sep>sr=sr.astype('uint8')<line_sep>lr=Image.fromarray(lr)<line_sep>sr=Image.fromarray(sr)<line_sep>lr_resize.save("images/"+name+"_lr.jpg")<line_sep>sr.save("images/"+name+"_sr.jpg")<line_sep>hr.save("images/"+name+"_hr.jpg")<line_sep><pass><block_end>evaluate_test(model difficulty='easy' name='easy')<line_sep>evaluate_test(model difficulty='normal' name='normal')<line_sep>evaluate_test(model difficulty='hard' name='hard')<line_sep>evaluate_test(model difficulty='lunatic' name='lunatic')<line_sep> |
# Copyright (c) 2010-2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
<import_stmt>sys<import_stmt>codecs<def_stmt>do_next context<block_start><return>0<block_end><def_stmt>do_skip context<block_start><return>1<block_end><def_stmt>do_fail context message='No matching statement found'<block_start>context._error(message)<block_end><def_stmt>do_say context message<block_start>context._msg(message)<line_sep><return>0<block_end><def_stmt>do_warn context message<block_start>context._warn(message)<line_sep><return>0<block_end><def_stmt>do_return context levels=1# print "do.return():", -levels
<block_start><return>-levels<block_end><def_stmt>out_create context path data=<none># print "out.create():", path, data
<block_start>context.builder.create(path data)<line_sep>context.builder.enter(path)<line_sep>context._trigger(context.on_add context.re_stack[-1])<line_sep>context.builder.leave()<line_sep><return>0<block_end><def_stmt>out_replace context path data=<none># print "out.replace():", path, data
<block_start>context.builder.add(path data replace=<true>)<line_sep>context.builder.enter(path)<line_sep>context._trigger(context.on_add context.re_stack[-1])<line_sep>context.builder.leave()<line_sep><return>0<block_end><def_stmt>out_add context path data=<none># print "out.add():", path, data
<block_start>context.builder.add(path data)<line_sep>context.builder.enter(path)<line_sep>context._trigger(context.on_add context.re_stack[-1])<line_sep>context.builder.leave()<line_sep><return>0<block_end><def_stmt>out_add_attribute context path name value# print "out.add_attribute():", path, name, value
<block_start>context.builder.add_attribute(path name value)<line_sep>context.builder.enter(path)<line_sep>context._trigger(context.on_add context.re_stack[-1])<line_sep>context.builder.leave()<line_sep><return>0<block_end><def_stmt>out_open context path# print "out.open():", path
<block_start>context.builder.open(path)<line_sep>context._trigger(context.on_add context.re_stack[-1])<line_sep>context.stack[-1].on_leave.append((context.builder.leave ()))<line_sep><return>0<block_end><def_stmt>out_enter context path# print "out.enter():", path
<block_start>context.builder.enter(path)<line_sep>context._trigger(context.on_add context.re_stack[-1])<line_sep>context.stack[-1].on_leave.append((context.builder.leave ()))<line_sep><return>0<block_end><def_stmt>out_enqueue_before context regex path data=<none># print "ENQ BEFORE", regex.pattern, path, data
<block_start>context.on_match_before.append((regex out_add (context path data)))<line_sep><return>0<block_end><def_stmt>out_enqueue_after context regex path data=<none># print "ENQ AFTER", regex.pattern, path, data
<block_start>context.on_match_after.append((regex out_add (context path data)))<line_sep><return>0<block_end><def_stmt>out_enqueue_on_add context regex path data=<none># print "ENQ ON ADD", regex.pattern, path, data
<block_start>context.on_add.append((regex out_add (context path data)))<line_sep><return>0<block_end><def_stmt>out_clear_queue context<block_start>context._clear_triggers()<line_sep><return>1<block_end><def_stmt>out_set_root_name context name<block_start>context.builder.set_root_name(name)<line_sep><return>0<block_end><class_stmt>Context(object)<block_start><def_stmt>__init__ self<block_start>self.functions={'do.fail':do_fail 'do.return':do_return 'do.next':do_next 'do.skip':do_skip 'do.say':do_say 'do.warn':do_warn 'out.create':out_create 'out.replace':out_replace 'out.add':out_add 'out.add_attribute':out_add_attribute 'out.open':out_open 'out.enter':out_enter 'out.enqueue_before':out_enqueue_before 'out.enqueue_after':out_enqueue_after 'out.enqueue_on_add':out_enqueue_on_add 'out.clear_queue':out_clear_queue 'out.set_root_name':out_set_root_name}<line_sep>self.lexicon={}<line_sep>self.grammars={}<line_sep>self.input=<none><line_sep>self.builder=<none><line_sep>self.end=0<line_sep>self._init()<block_end><def_stmt>_init self<block_start>self.start=0<line_sep>self.re_stack=[]<line_sep>self.stack=[]<line_sep>self._clear_triggers()<block_end><def_stmt>_clear_triggers self<block_start>self.on_match_before=[]<line_sep>self.on_match_after=[]<line_sep>self.on_add=[]<block_end><def_stmt>_trigger self triggers match<block_start>matching=[]<for_stmt>trigger triggers<block_start>regex,func,args=trigger<if_stmt>regex.search(match.group(0))<is><not><none><block_start>matching.append(trigger)<block_end><block_end><for_stmt>trigger matching<block_start>triggers.remove(trigger)<block_end><for_stmt>trigger matching<block_start>regex,func,args=trigger<line_sep>func(*args)<block_end><block_end><def_stmt>_match_before_notify self match<block_start>self.re_stack.append(match)<line_sep>self._trigger(self.on_match_before match)<block_end><def_stmt>_match_after_notify self match<block_start>self._trigger(self.on_match_after match)<line_sep>self.re_stack.pop()<block_end><def_stmt>_get_lineno self<block_start><return>self.input.count('\n' 0 self.start)+1<block_end><def_stmt>_get_line self number=<none><block_start><if_stmt>number<is><none><block_start>number=self._get_lineno()<block_end><return>self.input.split('\n')[number-1]<block_end><def_stmt>_get_line_position_from_char self char<block_start>line_start=char<while_stmt>line_start<ne>0<block_start><if_stmt>self.input[line_start-1]<eq>'\n'<block_start><break><block_end>line_start<augsub>1<block_end>line_end=self.input.find('\n' char)<line_sep><return>line_start line_end<block_end><def_stmt>_format self error<block_start>start,end=self._get_line_position_from_char(self.start)<line_sep>line_number=self._get_lineno()<line_sep>line=self._get_line()<line_sep>offset=self.start-start<line_sep>token_len=1<line_sep>output=line+'\n'<if_stmt>token_len<le>1<block_start>output<augadd>(' '<times>offset)+'^\n'<block_end><else_stmt><block_start>output<augadd>(' '<times>offset)+"'"+('-'<times>(token_len-2))+"'\n"<block_end>output<augadd>'%s in line %s'%(error line_number)<line_sep><return>output<block_end><def_stmt>_msg self error<block_start>print(self._format(error))<block_end><def_stmt>_warn self error<block_start>sys.stderr.write(self._format(error)+'\n')<block_end><def_stmt>_error self error<block_start><raise>Exception(self._format(error))<block_end><def_stmt>_eof self<block_start><return>self.start<ge>self.end<block_end><def_stmt>parse_string self input builder 
debug=0<block_start>self._init()<line_sep>self.input=input<line_sep>self.builder=builder<line_sep>self.end=len(input)<line_sep>self.grammars['input'].parse(self debug)<if_stmt>self.start<l>self.end<block_start>self._error('parser returned, but did not complete')<block_end><block_end><def_stmt>parse self filename builder encoding='utf8' debug=0<block_start><with_stmt>codecs.open(filename 'r' encoding=encoding)<as>input_file<block_start><return>self.parse_string(input_file.read() builder debug)<block_end><block_end><def_stmt>dump self<block_start><for_stmt>grammar self.grammars.values()<block_start>print(grammar)<block_end><block_end><block_end> |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
<import_stmt>logging<import_stmt>textwrap<import_from_stmt>pants.backend.terraform.lint.fmt TerraformFmtRequest<import_from_stmt>pants.backend.terraform.style StyleSetup StyleSetupRequest<import_from_stmt>pants.backend.terraform.tool TerraformProcess<import_from_stmt>pants.backend.terraform.tool rules<as>tool_rules<import_from_stmt>pants.core.goals.fmt FmtResult<import_from_stmt>pants.core.goals.lint LintRequest LintResult LintResults<import_from_stmt>pants.core.util_rules external_tool<import_from_stmt>pants.engine.fs Digest MergeDigests<import_from_stmt>pants.engine.internals.selectors Get MultiGet<import_from_stmt>pants.engine.process FallibleProcessResult ProcessResult<import_from_stmt>pants.engine.rules collect_rules rule<import_from_stmt>pants.engine.unions UnionRule<import_from_stmt>pants.option.subsystem Subsystem<import_from_stmt>pants.util.logging LogLevel<line_sep>logger=logging.getLogger(__name__)<class_stmt>TfFmtSubsystem(Subsystem)<block_start>options_scope="terraform-fmt"<line_sep>help="""Terraform fmt options."""<line_sep>@classmethod<def_stmt>register_options cls register<block_start>super().register_options(register)<line_sep>register("--skip" type=bool default=<false> help=(f"Don't use `terraform fmt` when running `{register.bootstrap.pants_bin_name} fmt` and "<concat>f"`{register.bootstrap.pants_bin_name} lint`.") )<block_end><block_end><class_stmt>TffmtRequest(TerraformFmtRequest)<block_start><pass><block_end>@rule(desc="Format with `terraform fmt`")<async_keyword><def_stmt>tffmt_fmt request:TffmtRequest tffmt:TfFmtSubsystem<arrow>FmtResult<block_start><if_stmt>tffmt.options.skip<block_start><return>FmtResult.skip(formatter_name="tffmt")<block_end>setup=<await>Get(StyleSetup StyleSetupRequest(request ("fmt" )))<line_sep>results=<await>MultiGet(Get(ProcessResult TerraformProcess process)<for>_,(process _) setup.directory_to_process.items())<def_stmt>format directory output<block_start><if_stmt>len(output.strip())<eq>0<block_start><return>""<block_end><return>textwrap.dedent(f"""\
Output from `terraform fmt` on files in {directory}:
{output.decode("utf-8")}
""")<block_end>stdout_content=""<line_sep>stderr_content=""<for_stmt>directory,result zip(setup.directory_to_process.keys() results)<block_start>stdout_content<augadd>format(directory result.stdout)<line_sep>stderr_content<augadd>format(directory result.stderr)<block_end># Merge all of the outputs into a single output.
output_digest=<await>Get(Digest MergeDigests(r.output_digest<for>r results))<line_sep>fmt_result=FmtResult(input=setup.original_digest output=output_digest stdout=stdout_content stderr=stderr_content formatter_name="tffmt" )<line_sep><return>fmt_result<block_end>@rule(desc="Lint with `terraform fmt`" level=LogLevel.DEBUG)<async_keyword><def_stmt>tffmt_lint request:TffmtRequest tffmt:TfFmtSubsystem<arrow>LintResults<block_start><if_stmt>tffmt.options.skip<block_start><return>LintResults([] linter_name="tffmt")<block_end>setup=<await>Get(StyleSetup StyleSetupRequest(request ("fmt" "-check")))<line_sep>results=<await>MultiGet(Get(FallibleProcessResult TerraformProcess process)<for>_,(process _) setup.directory_to_process.items())<line_sep>lint_results=[LintResult.from_fallible_process_result(result)<for>result results]<line_sep><return>LintResults(lint_results linter_name="tffmt")<block_end><def_stmt>rules <block_start><return>[*collect_rules() *external_tool.rules() *tool_rules() UnionRule(LintRequest TffmtRequest) UnionRule(TerraformFmtRequest TffmtRequest) ]<block_end> |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""TFDS for only claims."""<import_stmt>json<import_from_stmt>language.serene constants<import_from_stmt>language.serene util<import_stmt>tensorflow.compat.v2<as>tf<import_stmt>tensorflow_datasets.public_api<as>tfds<class_stmt>ClaimDataset(tfds.core.GeneratorBasedBuilder)<block_start>"""Claim only datasets for fever, useful for embedding only claims."""<line_sep>VERSION=tfds.core.Version("0.1.0")<def_stmt>__init__ self * fever_train_path=<none> fever_dev_path=<none> data_dir=<none> config=<none><block_start>super().__init__(data_dir=data_dir config=config)<line_sep>self._fever_train_path=fever_train_path<line_sep>self._fever_dev_path=fever_dev_path<block_end><def_stmt>_info self<block_start><return>tfds.core.DatasetInfo(builder=self features=tfds.features.FeaturesDict({"example_id":tf.string "metadata":tf.string "claim_text":tfds.features.Text() "evidence_text":tfds.features.Text() "wikipedia_url":tfds.features.Text() "sentence_id":tfds.features.Text() "scrape_type":tfds.features.Text() "evidence_label":tfds.features.ClassLabel(names=constants.EVIDENCE_MATCHING_CLASSES) "claim_label":tfds.features.ClassLabel(names=constants.FEVER_CLASSES)}))<block_end><def_stmt>_split_generators self dl_manager<block_start><return>[tfds.core.SplitGenerator(name=tfds.Split.TRAIN gen_kwargs={"filepath":self._fever_train_path}) tfds.core.SplitGenerator(name=tfds.Split.VALIDATION gen_kwargs={"filepath":self._fever_dev_path})]<block_end><def_stmt>_generate_examples self filepath **kwargs<block_start>fever_claims=util.read_jsonlines(filepath)<for_stmt>claim fever_claims<block_start>claim_id=claim["id"]<line_sep>claim_text=claim["claim"]<line_sep>claim_label=claim["label"]<line_sep>example_id=f"{claim_id}"<line_sep><yield>claim_id {"example_id":example_id "claim_text":claim_text "evidence_text":"" "wikipedia_url":"" # Ordinarily, this would (possibly) be concatenated to the evidence
# but since this is claim only, I'm using a null integer value
"sentence_id":"-1" # This label doesn't matter here since its claim only
"evidence_label":constants.NOT_MATCHING "claim_label":claim_label "scrape_type":"" "metadata":json.dumps({"claim_id":claim_id })}<block_end><block_end><block_end> |
<import_from_stmt>urllib.request urlopen<line_sep># if the page contains Chinese, decode it as UTF-8
html=urlopen("https://mofanpy.com/static/scraping/basic-structure.html").read().decode('utf-8')<line_sep>print(html)<import_stmt>re<line_sep>res=re.findall(r"<title>(.+?)</title>" html)<line_sep>print("\nPage title is: " res[0])<line_sep># Page title is: Scraping tutorial 1 | 莫烦Python
res=re.findall(r"<p>(.*?)</p>" html flags=re.DOTALL)# re.DOTALL if multi line
print("\nPage paragraph is: " res[0])<line_sep># Page paragraph is:
# 这是一个在 <a href="https://mofanpy.com/">莫烦Python</a>
# <a href="https://mofanpy.com/tutorials/scraping">爬虫教程</a> 中的简单测试.
res=re.findall(r'href="(.*?)"' html)<line_sep>print("\nAll links: " res)<line_sep># All links: ['https://mofanpy.com/static/img/description/tab_icon.png', 'https://mofanpy.com/', 'https://mofanpy.com/tutorials/scraping']
|
<import_stmt>os<import_stmt>shutil<import_stmt>tempfile<import_from_stmt>unittest TestCase<import_from_stmt>aws_lambda_builders.builder LambdaBuilder<import_from_stmt>aws_lambda_builders.exceptions WorkflowFailedError<import_from_stmt>aws_lambda_builders.workflows.nodejs_npm.npm SubprocessNpm<import_from_stmt>aws_lambda_builders.workflows.nodejs_npm.utils OSUtils<import_from_stmt>aws_lambda_builders.workflows.nodejs_npm_esbuild.esbuild EsbuildExecutionError<import_from_stmt>aws_lambda_builders.workflows.nodejs_npm_esbuild.utils EXPERIMENTAL_FLAG_ESBUILD<import_from_stmt>parameterized parameterized<class_stmt>TestNodejsNpmWorkflowWithEsbuild(TestCase)<block_start>"""
Verifies that the `nodejs_npm_esbuild` workflow works by building a Lambda using NPM and esbuild
"""<line_sep>TEST_DATA_FOLDER=os.path.join(os.path.dirname(__file__) "testdata")<def_stmt>setUp self<block_start>self.artifacts_dir=tempfile.mkdtemp()<line_sep>self.scratch_dir=tempfile.mkdtemp()<line_sep>self.dependencies_dir=tempfile.mkdtemp()<line_sep>self.no_deps=os.path.join(self.TEST_DATA_FOLDER "no-deps-esbuild")<line_sep>self.builder=LambdaBuilder(language="nodejs" dependency_manager="npm-esbuild" application_framework=<none>)<block_end><def_stmt>tearDown self<block_start>shutil.rmtree(self.artifacts_dir)<line_sep>shutil.rmtree(self.scratch_dir)<block_end>@parameterized.expand([("nodejs12.x" ) ("nodejs14.x" ) ("nodejs16.x" )])<def_stmt>test_doesnt_build_without_feature_flag self runtime<block_start>source_dir=os.path.join(self.TEST_DATA_FOLDER "with-deps-esbuild")<with_stmt>self.assertRaises(EsbuildExecutionError)<as>context<block_start>self.builder.build(source_dir self.artifacts_dir self.scratch_dir os.path.join(source_dir "package.json") runtime=runtime )<block_end>self.assertEqual(str(context.exception) "Esbuild Failed: Feature flag must be enabled to use this workflow")<block_end>@parameterized.expand([("nodejs12.x" ) ("nodejs14.x" ) ("nodejs16.x" )])<def_stmt>test_builds_javascript_project_with_dependencies self runtime<block_start>source_dir=os.path.join(self.TEST_DATA_FOLDER "with-deps-esbuild")<line_sep>options={"entry_points":["included.js"]}<line_sep>self.builder.build(source_dir self.artifacts_dir self.scratch_dir os.path.join(source_dir "package.json") runtime=runtime options=options experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD] )<line_sep>expected_files={"included.js" "included.js.map"}<line_sep>output_files=set(os.listdir(self.artifacts_dir))<line_sep>self.assertEqual(expected_files output_files)<block_end>@parameterized.expand([("nodejs12.x" ) ("nodejs14.x" ) ("nodejs16.x" )])<def_stmt>test_builds_javascript_project_with_multiple_entrypoints self runtime<block_start>source_dir=os.path.join(self.TEST_DATA_FOLDER "with-deps-esbuild-multiple-entrypoints")<line_sep>options={"entry_points":["included.js" "included2.js"]}<line_sep>self.builder.build(source_dir self.artifacts_dir self.scratch_dir os.path.join(source_dir "package.json") runtime=runtime options=options experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD] )<line_sep>expected_files={"included.js" "included.js.map" "included2.js" "included2.js.map"}<line_sep>output_files=set(os.listdir(self.artifacts_dir))<line_sep>self.assertEqual(expected_files output_files)<block_end>@parameterized.expand([("nodejs12.x" ) ("nodejs14.x" ) ("nodejs16.x" )])<def_stmt>test_builds_typescript_projects self runtime<block_start>source_dir=os.path.join(self.TEST_DATA_FOLDER "with-deps-esbuild-typescript")<line_sep>options={"entry_points":["included.ts"]}<line_sep>self.builder.build(source_dir self.artifacts_dir self.scratch_dir os.path.join(source_dir "package.json") runtime=runtime options=options experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD] )<line_sep>expected_files={"included.js" "included.js.map"}<line_sep>output_files=set(os.listdir(self.artifacts_dir))<line_sep>self.assertEqual(expected_files output_files)<block_end>@parameterized.expand([("nodejs12.x" ) ("nodejs14.x" ) ("nodejs16.x" )])<def_stmt>test_builds_with_external_esbuild self runtime<block_start>osutils=OSUtils()<line_sep>npm=SubprocessNpm(osutils)<line_sep>source_dir=os.path.join(self.TEST_DATA_FOLDER "no-deps-esbuild")<line_sep>esbuild_dir=os.path.join(self.TEST_DATA_FOLDER "esbuild-binary")<line_sep>npm.run(["ci"] cwd=esbuild_dir)<line_sep>binpath=npm.run(["bin"] 
cwd=esbuild_dir)<line_sep>options={"entry_points":["included.js"]}<line_sep>self.builder.build(source_dir self.artifacts_dir self.scratch_dir os.path.join(source_dir "package.json") runtime=runtime options=options executable_search_paths=[binpath] experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD] )<line_sep>expected_files={"included.js" "included.js.map"}<line_sep>output_files=set(os.listdir(self.artifacts_dir))<line_sep>self.assertEqual(expected_files output_files)<block_end>@parameterized.expand([("nodejs12.x" ) ("nodejs14.x" ) ("nodejs16.x" )])<def_stmt>test_no_options_passed_to_esbuild self runtime<block_start>source_dir=os.path.join(self.TEST_DATA_FOLDER "with-deps-esbuild")<with_stmt>self.assertRaises(WorkflowFailedError)<as>context<block_start>self.builder.build(source_dir self.artifacts_dir self.scratch_dir os.path.join(source_dir "package.json") runtime=runtime experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD] )<block_end>self.assertEqual(str(context.exception) "NodejsNpmEsbuildBuilder:EsbuildBundle - entry_points not set ({})")<block_end>@parameterized.expand([("nodejs12.x" ) ("nodejs14.x" ) ("nodejs16.x" )])<def_stmt>test_bundle_with_implicit_file_types self runtime<block_start>source_dir=os.path.join(self.TEST_DATA_FOLDER "implicit-file-types")<line_sep>options={"entry_points":["included" "implicit"]}<line_sep>self.builder.build(source_dir self.artifacts_dir self.scratch_dir os.path.join(source_dir "package.json") runtime=runtime options=options experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD] )<line_sep>expected_files={"included.js.map" "implicit.js.map" "implicit.js" "included.js"}<line_sep>output_files=set(os.listdir(self.artifacts_dir))<line_sep>self.assertEqual(expected_files output_files)<block_end>@parameterized.expand([("nodejs12.x" ) ("nodejs14.x" ) ("nodejs16.x" )])<def_stmt>test_bundles_project_without_dependencies self runtime<block_start>source_dir=os.path.join(self.TEST_DATA_FOLDER "no-package-esbuild")<line_sep>options={"entry_points":["included"]}<line_sep>osutils=OSUtils()<line_sep>npm=SubprocessNpm(osutils)<line_sep>esbuild_dir=os.path.join(self.TEST_DATA_FOLDER "esbuild-binary")<line_sep>npm.run(["ci"] cwd=esbuild_dir)<line_sep>binpath=npm.run(["bin"] cwd=esbuild_dir)<line_sep>self.builder.build(source_dir self.artifacts_dir self.scratch_dir os.path.join(source_dir "package.json") runtime=runtime options=options experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD] executable_search_paths=[binpath] )<line_sep>expected_files={"included.js.map" "included.js"}<line_sep>output_files=set(os.listdir(self.artifacts_dir))<line_sep>self.assertEqual(expected_files output_files)<block_end>@parameterized.expand([("nodejs12.x" ) ("nodejs14.x" ) ("nodejs16.x" )])<def_stmt>test_builds_project_with_remote_dependencies_without_download_dependencies_with_dependencies_dir self runtime<block_start>source_dir=os.path.join(self.TEST_DATA_FOLDER "with-deps-no-node_modules")<line_sep>options={"entry_points":["included.js"]}<line_sep>osutils=OSUtils()<line_sep>npm=SubprocessNpm(osutils)<line_sep>esbuild_dir=os.path.join(self.TEST_DATA_FOLDER "esbuild-binary")<line_sep>npm.run(["ci"] cwd=esbuild_dir)<line_sep>binpath=npm.run(["bin"] cwd=esbuild_dir)<line_sep>self.builder.build(source_dir self.artifacts_dir self.scratch_dir os.path.join(source_dir "package.json") options=options runtime=runtime dependencies_dir=self.dependencies_dir download_dependencies=<false> experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD] executable_search_paths=[binpath] )<line_sep>expected_files={"included.js.map" 
"included.js"}<line_sep>output_files=set(os.listdir(self.artifacts_dir))<line_sep>self.assertEqual(expected_files output_files)<block_end>@parameterized.expand([("nodejs12.x" ) ("nodejs14.x" ) ("nodejs16.x" )])<def_stmt>test_builds_project_with_remote_dependencies_with_download_dependencies_and_dependencies_dir self runtime<block_start>source_dir=os.path.join(self.TEST_DATA_FOLDER "with-deps-no-node_modules")<line_sep>options={"entry_points":["included.js"]}<line_sep>self.builder.build(source_dir self.artifacts_dir self.scratch_dir os.path.join(source_dir "package.json") runtime=runtime options=options dependencies_dir=self.dependencies_dir download_dependencies=<true> experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD] )<line_sep>expected_files={"included.js.map" "included.js"}<line_sep>output_files=set(os.listdir(self.artifacts_dir))<line_sep>self.assertEqual(expected_files output_files)<line_sep>expected_modules="minimal-request-promise"<line_sep>output_modules=set(os.listdir(os.path.join(self.dependencies_dir "node_modules")))<line_sep>self.assertIn(expected_modules output_modules)<line_sep>expected_dependencies_files={"node_modules"}<line_sep>output_dependencies_files=set(os.listdir(os.path.join(self.dependencies_dir)))<line_sep>self.assertNotIn(expected_dependencies_files output_dependencies_files)<block_end>@parameterized.expand([("nodejs12.x" ) ("nodejs14.x" ) ("nodejs16.x" )])<def_stmt>test_builds_project_with_remote_dependencies_without_download_dependencies_without_dependencies_dir self runtime<block_start>source_dir=os.path.join(self.TEST_DATA_FOLDER "with-deps-no-node_modules")<with_stmt>self.assertRaises(EsbuildExecutionError)<as>context<block_start>self.builder.build(source_dir self.artifacts_dir self.scratch_dir os.path.join(source_dir "package.json") runtime=runtime dependencies_dir=<none> download_dependencies=<false> experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD] )<block_end>self.assertEqual(str(context.exception) "Esbuild Failed: Lambda Builders encountered and invalid workflow")<block_end>@parameterized.expand([("nodejs12.x" ) ("nodejs14.x" ) ("nodejs16.x" )])<def_stmt>test_builds_project_without_combine_dependencies self runtime<block_start>source_dir=os.path.join(self.TEST_DATA_FOLDER "with-deps-no-node_modules")<line_sep>options={"entry_points":["included.js"]}<line_sep>self.builder.build(source_dir self.artifacts_dir self.scratch_dir os.path.join(source_dir "package.json") runtime=runtime options=options dependencies_dir=self.dependencies_dir download_dependencies=<true> combine_dependencies=<false> experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD] )<line_sep>expected_files={"included.js.map" "included.js"}<line_sep>output_files=set(os.listdir(self.artifacts_dir))<line_sep>self.assertEqual(expected_files output_files)<line_sep>expected_modules="minimal-request-promise"<line_sep>output_modules=set(os.listdir(os.path.join(self.dependencies_dir "node_modules")))<line_sep>self.assertIn(expected_modules output_modules)<line_sep>expected_dependencies_files={"node_modules"}<line_sep>output_dependencies_files=set(os.listdir(os.path.join(self.dependencies_dir)))<line_sep>self.assertNotIn(expected_dependencies_files output_dependencies_files)<block_end>@parameterized.expand([("nodejs12.x" ) ("nodejs14.x" ) ("nodejs16.x" )])<def_stmt>test_builds_javascript_project_with_external self runtime<block_start>source_dir=os.path.join(self.TEST_DATA_FOLDER "with-deps-esbuild-externals")<line_sep>options={"entry_points":["included.js"] 
"external":["minimal-request-promise"]}<line_sep>self.builder.build(source_dir self.artifacts_dir self.scratch_dir os.path.join(source_dir "package.json") runtime=runtime options=options experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD] )<line_sep>expected_files={"included.js" "included.js.map"}<line_sep>output_files=set(os.listdir(self.artifacts_dir))<line_sep>self.assertEqual(expected_files output_files)<with_stmt>open(str(os.path.join(self.artifacts_dir "included.js")))<as>f<block_start>js_file=f.read()<line_sep># Check that the module has been require() instead of bundled
self.assertIn('require("minimal-request-promise")' js_file)<block_end><block_end>@parameterized.expand([("nodejs12.x" ) ("nodejs14.x" ) ("nodejs16.x" )])<def_stmt>test_builds_javascript_project_with_loader self runtime<block_start>osutils=OSUtils()<line_sep>source_dir=os.path.join(self.TEST_DATA_FOLDER "no-deps-esbuild-loader")<line_sep>options={"entry_points":["included.js"] "loader":[".reference=json"]}<line_sep>self.builder.build(source_dir self.artifacts_dir self.scratch_dir os.path.join(source_dir "package.json") runtime=runtime options=options experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD] )<line_sep>expected_files={"included.js" "included.js.map"}<line_sep>output_files=set(os.listdir(self.artifacts_dir))<line_sep>self.assertEqual(expected_files output_files)<line_sep>included_js_path=os.path.join(self.artifacts_dir "included.js")<line_sep># check that the .reference file is correctly bundled as code by running the result
self.assertEqual(osutils.check_output(included_js_path) str.encode("===\n"<concat>"The Muses\n"<concat>"===\n"<concat>"\n"<concat>"\tcalliope: eloquence and heroic poetry\n"<concat>"\terato: lyric or erotic poetry\n"<concat>"\tmelpomene: tragedy\n"<concat>"\tpolymnia: sacred poetry\n"<concat>"\tterpsichore: dance\n"<concat>"\tthalia: comedy\n"<concat>"\turania: astronomy and astrology") )<block_end><block_end> |
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_stmt>pysc2.maps lib<import_from_stmt>smac.env.starcraft2.maps smac_maps<line_sep>map_param_registry={"1o_10b_vs_1r":{"n_agents":11 "n_enemies":1 "limit":50 "a_race":"Z" "b_race":"Z" "unit_type_bits":2 "map_type":"overload_bane"} "1o_2r_vs_4r":{"n_agents":3 "n_enemies":4 "limit":50 "a_race":"Z" "b_race":"Z" "unit_type_bits":2 "map_type":"overload_roach"} "bane_vs_hM":{"n_agents":3 "n_enemies":2 "limit":30 "a_race":"Z" "b_race":"T" "unit_type_bits":2 "map_type":"bZ_hM"}}<line_sep>smac_maps.map_param_registry.update(map_param_registry)<def_stmt>get_map_params map_name<block_start>map_param_registry=smac_maps.get_smac_map_registry()<line_sep><return>map_param_registry[map_name]<block_end><for_stmt>name map_param_registry.keys()<block_start>globals()[name]=type(name (smac_maps.SMACMap ) dict(filename=name))<block_end> |
# Copyright: See the LICENSE file.
"""Helper to test circular factory dependencies."""<import_stmt>factory<class_stmt>Bar<block_start><def_stmt>__init__ self foo y<block_start>self.foo=foo<line_sep>self.y=y<block_end><block_end><class_stmt>BarFactory(factory.Factory)<block_start><class_stmt>Meta<block_start>model=Bar<block_end>y=13<line_sep>foo=factory.SubFactory('cyclic.foo.FooFactory')<block_end> |
"""
==================================
Plotting two simple sine functions
==================================
A simple example plotting a fit of two sine functions.
"""<import_stmt>numpy<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>pyearth Earth<line_sep># Create some fake data
numpy.random.seed(2)<line_sep>m=10000<line_sep>n=10<line_sep>X=80<times>numpy.random.uniform(size=(m n))-40<line_sep>y1=100<times>numpy.abs(numpy.sin((X[: 6])/10)-4.0)+10<times>numpy.random.normal(size=m)<line_sep>y2=100<times>numpy.abs(numpy.sin((X[: 6])/2)-8.0)+5<times>numpy.random.normal(size=m)<line_sep># Fit an Earth model
model=Earth(max_degree=3 minspan_alpha=.5)<line_sep>y_mix=numpy.concatenate((y1[: numpy.newaxis] y2[: numpy.newaxis]) axis=1)<line_sep>model.fit(X y_mix)<line_sep># Print the model
print(model.trace())<line_sep>print(model.summary())<line_sep># Plot the model
y_hat=model.predict(X)<line_sep>fig=plt.figure()<line_sep>ax=fig.add_subplot(1 2 1)<line_sep>ax.plot(X[: 6] y_mix[: 0] 'r.')<line_sep>ax.plot(X[: 6] model.predict(X)[: 0] 'b.')<line_sep>ax=fig.add_subplot(1 2 2)<line_sep>ax.plot(X[: 6] y_mix[: 1] 'r.')<line_sep>ax.plot(X[: 6] model.predict(X)[: 1] 'b.')<line_sep>plt.show()<line_sep> |
<import_stmt>atexit<import_stmt>multiprocessing<import_stmt>pprint<import_stmt>signal<import_stmt>time<import_from_stmt>contextlib suppress<import_from_stmt>typing Callable Union Any List Mapping Sequence Tuple cast<import_from_stmt>. util<import_from_stmt>.consts DEFAULT_NAMESPACE<import_from_stmt>.process Process<import_from_stmt>.server tools<import_from_stmt>.state.state State<import_from_stmt>.task.map_plus map_plus<import_from_stmt>.task.swarm Swarm<class_stmt>ProcessList(list)<block_start><def_stmt>__str__ self<block_start><return>ProcessList.__qualname__+": "+pprint.pformat(list(self))<block_end><def_stmt>__repr__ self<block_start><return>"<"+self.__str__()+">"<block_end>@staticmethod<def_stmt>_wait_or_catch_exc process:Process timeout:Union[int float]=<none><arrow>Union[Exception Any]<block_start><try_stmt><block_start><return>process.wait(timeout)<block_end><except_stmt>Exception<as>e<block_start><return>e<block_end><block_end><def_stmt>wait self timeout:Union[int float]=<none> safe:bool=<false><arrow>List[Union[Any Exception]]<block_start>"""
Call :py:meth:`~Process.wait()` on all the Processes in this list.
:param timeout:
Same as :py:meth:`~Process.wait()`.
This parameter controls the timeout for all the Processes combined,
not a single :py:meth:`~Process.wait()` call.
:param safe:
Suppress any errors that occur while waiting for a Process.
The return values of failed :py:meth:`~Process.wait()` calls are substituted with the ``Exception`` that occurred.
:return:
A ``list`` containing the values returned by child Processes of this Context.
"""<if_stmt>safe<block_start>_wait=self._wait_or_catch_exc<block_end><else_stmt><block_start>_wait=Process.wait<block_end><if_stmt>timeout<is><none><block_start><return>[_wait(process)<for>process self]<block_end><else_stmt><block_start>final=time.time()+timeout<line_sep><return>[_wait(process final-time.time())<for>process self]<block_end><block_end><def_stmt>start self<block_start>"""
Call :py:meth:`~Process.start()` on all the Processes in this list.
Processes that are already started are silently ignored, unlike
:py:meth:`~Process.start()`, which raises an ``AssertionError``.
"""<with_stmt>suppress(AssertionError)<block_start><for_stmt>process self<block_start>process.start()<block_end><block_end><block_end><def_stmt>stop self<block_start>"""
Call :py:meth:`~Process.stop()` on all the Processes in this list.
Retains the same order as ``Context.process_list``.
:return:
A ``list`` containing the exitcodes of the child Processes of this Context.
"""<line_sep><return>[proc.stop()<for>proc self]<block_end><block_end><class_stmt>Context#: The :py:class:`multiprocessing.Process` object for the server.
<block_start>server_process:multiprocessing.Process<def_stmt>__init__ self server_address:str=<none> * start_server:bool=<true> backend:Callable=multiprocessing.Process wait:bool=<false> cleanup:bool=<true> namespace:str=DEFAULT_NAMESPACE **process_kwargs<arrow><none><block_start>r"""
Provides a high level interface to :py:class:`State` and :py:class:`Process`.
Primarily used to manage and launch processes.
All processes launched using a Context share the same state.
Don't share a Context object between Processes / Threads.
A Context object is not thread-safe.
:param server_address:
The address of the server.
If this is set to ``None``, a random address will be generated.
:param start_server:
Whether to start the ZProc server.
It is started automatically by default.
If this is set to ``None``, then you must either -
- Start a server using a different Context object.
- Start one manually, using :py:func:`start_server`.
In both cases,
it is the user's responsibility to make sure that the ``server_address`` argument
is satisfied.
.. note::
If the server is not started before-hand,
the Context object will block infinitely, waiting for the server to respond.
In case you want to play around,
the :py:func:`ping` function is handy,
since it lets you *detect* the presence of a server at a given address.
:param backend:
.. include:: /api/snippets/backend.rst
:param wait:
Wait for all running process to finish their work before exiting.
Alternative to manually calling :py:meth:`~Context.wait` at exit.
:param cleanup:
Whether to clean up the process tree before exiting.
Registers a signal handler for ``SIGTERM``, and an ``atexit`` handler.
:param \*\*process_kwargs:
Keyword arguments that :py:class:`~Process` takes,
except ``server_address`` and ``target``.
If provided,
these will be used while creating processes using this Context.
"""<line_sep>#: A :py:class:`ProcessList` object containing all Processes created under this Context.
self.process_list=ProcessList()<line_sep>#: Passed on from the constructor. This is read-only.
self.backend=backend<line_sep>#: Passed on from the constructor. This is read-only.
self.namespace=namespace<line_sep>#: Passed on from the constructor.
self.process_kwargs=process_kwargs<line_sep>self.process_kwargs.setdefault("namespace" self.namespace)<line_sep>self.process_kwargs.setdefault("backend" self.backend)<line_sep>self.server_address=cast(str server_address)<line_sep>"""The server's address.
This holds the address this Context is connected to,
not necessarily the value provided in the constructor.
This is read-only."""<if_stmt>start_server<block_start>self.start_server()<block_end><assert_stmt>self.server_address<is><not><none> ("Couldn't determine the server address. "<concat>"Hint: Either provide the `server_address` parameter, "<concat>"or pass `start_server=True`.")<line_sep># register cleanup before wait, so that wait runs before cleanup.
# (order of execution is reversed)
<if_stmt>cleanup<block_start>atexit.register(util.clean_process_tree)<if_stmt>util.is_main_thread()<block_start>signal.signal(signal.SIGTERM util.clean_process_tree)<block_end><block_end><if_stmt>wait<block_start>atexit.register(self.wait)<block_end><block_end><def_stmt>__str__ self<block_start><return>"%s - server: %r at %#x"%(self.__class__.__qualname__ self.server_address id(self) )<block_end><def_stmt>__repr__ self<block_start><return>util.enclose_in_brackets(self.__str__())<block_end><def_stmt>create_state self value:dict=<none> * namespace:str=<none><block_start>"""
Creates a new :py:class:`State` object, sharing the same zproc server as this Context.
:param value:
If provided, call ``state.update(value)``.
:param namespace:
Use this as the namespace for the :py:class:`State` object,
instead of this :py:class:`Context`\ 's namespace.
:return:
A :py:class:`State` object.
"""<if_stmt>namespace<is><none><block_start>namespace=self.namespace<block_end>state=State(self.server_address namespace=namespace)<if_stmt>value<is><not><none><block_start>state.update(value)<block_end><return>state<block_end><def_stmt>create_swarm self count:int=<none><block_start>swarm=Swarm(self.server_address namespace=self.namespace)<line_sep>swarm.start(count)<line_sep><return>swarm<block_end><def_stmt>start_server self<arrow>Tuple[multiprocessing.Process str]<block_start>out=tools.start_server(self.server_address backend=self.backend)<line_sep>self.server_process,self.server_address=out<line_sep><return>out<block_end><def_stmt>_process self target:Callable=<none> **process_kwargs<arrow>Union[Process Callable]<block_start>r"""
Produce a child process bound to this context.
Can be used both as a function and decorator:
.. code-block:: python
:caption: Usage
@zproc.process(pass_context=True) # you may pass some arguments here
def p1(ctx):
print('hello', ctx)
@zproc.process # or not...
def p2(state):
print('hello', state)
def p3(state):
print('hello', state)
zproc.process(p3) # or just use as a good ol' function
:param target:
Passed on to the :py:class:`Process` constructor.
*Must be omitted when using this as a decorator.*
:param \*\*process_kwargs:
.. include:: /api/context/params/process_kwargs.rst
:return: The :py:class:`Process` instance produced.
"""<line_sep>process=Process(self.server_address target **{**self.process_kwargs **process_kwargs})<line_sep>self.process_list.append(process)<line_sep><return>process<block_end><def_stmt>spawn self *targets:Callable count:int=1 **process_kwargs<block_start>r"""
Produce one or more child process(es) bound to this context.
:param \*targets:
Passed on to the :py:class:`Process` constructor, one at a time.
:param count:
The number of processes to spawn for each item in ``targets``.
:param \*\*process_kwargs:
.. include:: /api/context/params/process_kwargs.rst
:return:
A ``ProcessList`` of the :py:class:`Process` instance(s) produced.
"""<if_stmt><not>targets<block_start><def_stmt>wrapper target:Callable<block_start><return>self.spawn(target count=count **process_kwargs)<block_end><return>wrapper<block_end><if_stmt>len(targets)<times>count<eq>1<block_start><return>self._process(targets[0] **process_kwargs)<block_end><return>ProcessList(self._process(target **process_kwargs)<for>_ range(count)<for>target targets)<block_end><def_stmt>spawn_map self target:Callable map_iter:Sequence[Any]=<none> * map_args:Sequence[Sequence[Any]]=<none> args:Sequence=<none> map_kwargs:Sequence[Mapping[str Any]]=<none> kwargs:Mapping=<none> **process_kwargs<block_start><return>ProcessList(map_plus(<lambda>*args **kwargs:self._process(target args=args kwargs=kwargs **process_kwargs) map_iter map_args args map_kwargs kwargs ))<block_end><def_stmt>wait self timeout:Union[int float]=<none> safe:bool=<false><arrow>List[Union[Any Exception]]<block_start>"""
alias for :py:meth:`ProcessList.wait()`
"""<line_sep><return>self.process_list.wait(timeout safe)<block_end><def_stmt>start_all self<block_start>"""
alias for :py:meth:`ProcessList.start()`
"""<line_sep><return>self.process_list.start()<block_end><def_stmt>stop_all self<block_start>"""
alias for :py:meth:`ProcessList.stop()`
"""<line_sep><return>self.process_list.stop()<block_end><def_stmt>ping self **kwargs<block_start>r"""
Ping the zproc server.
:param \*\*kwargs: Keyword arguments that :py:func:`ping` takes, except ``server_address``.
:return: Same as :py:func:`ping`
"""<line_sep><return>tools.ping(self.server_address **kwargs)<block_end><block_end> |
<import_from_stmt>aiopg.sa SAConnection<as>SAConn<import_from_stmt>aiopg.sa.result RowProxy<import_from_stmt>graph.types RowsProxy<import_from_stmt>graph.constants OBJECT_NOT_FOUND_ERROR<import_from_stmt>graph.chat.tables rooms messages <line_sep>__all__=['select_rooms' 'select_messages_by_room_id' 'select_room' 'create_message' 'delete_message' ]<line_sep># selects
<async_keyword><def_stmt>select_rooms conn:SAConn<arrow>RowsProxy<block_start>cursor=<await>conn.execute(rooms.select().order_by(rooms.c.id))<line_sep><return><await>cursor.fetchall()<block_end><async_keyword><def_stmt>select_room conn:SAConn id:int<arrow>RowProxy<block_start>cursor=<await>conn.execute(rooms.select().where(rooms.c.id<eq>id))<line_sep>item=<await>cursor.fetchone()<assert_stmt>item OBJECT_NOT_FOUND_ERROR<line_sep><return>item<block_end><async_keyword><def_stmt>select_messages_by_room_id conn:SAConn room_id:int<arrow>RowsProxy<block_start>query=messages.select().where(messages.c.room_id<eq>room_id).order_by(messages.c.id)<line_sep>cursor=<await>conn.execute(query)<line_sep><return><await>cursor.fetchall()<block_end># create
<async_keyword><def_stmt>create_message conn:SAConn room_id:int owner_id:int body:str <arrow>RowProxy<block_start>query=messages.insert().values(body=body owner_id=owner_id room_id=room_id).returning(messages.c.id messages.c.owner_id)<line_sep>res=<await>conn.execute(query)<line_sep><return><await>res.fetchone()<block_end># delete
<async_keyword><def_stmt>delete_message conn:SAConn id:int<arrow><none><block_start><await>conn.execute(messages.delete().where(messages.c.id<eq>id))<block_end> |
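A hedged sketch of how these query helpers are meant to be driven; the DSN values are placeholders, and only engine.acquire() from aiopg.sa is assumed beyond what the module itself imports.

import asyncio
from aiopg.sa import create_engine

async def main():
    async with create_engine(user="chat", database="chat",
                             host="127.0.0.1", password="secret") as engine:
        async with engine.acquire() as conn:
            room = await select_room(conn, id=1)
            await create_message(conn, room_id=room.id, owner_id=1, body="hi there")
            for message in await select_messages_by_room_id(conn, room.id):
                print(message.id, message.body)

asyncio.run(main())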
"""Defines hooks that can run during training."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>lasagne<import_stmt>numpy<as>np<import_from_stmt>sklearn metrics<class_stmt>LoggingHook(object)<block_start>"""This hook writes information to a log file."""<def_stmt>__init__ self logger<block_start>"""Initializes a new instance of the LoggingHook class.
Args:
logger: A logger instance.
"""<line_sep>self._logger=logger<block_end><def_stmt>update self **kwargs<block_start>"""Executes the hook.
Args:
**kwargs: Optimizer state dictionary.
"""<line_sep>self._logger.log(key="status" message="Log at iteration %d"%kwargs["update_counter"])<line_sep>self._logger.log(key="update_counter" message=kwargs["update_counter"])<line_sep>self._logger.log(key="update_runtime" message=kwargs["runtime"])<line_sep>self._logger.log(key="losses" message=np.asarray(kwargs["losses"]))<block_end><block_end><class_stmt>SnapshotHook(object)<block_start>"""Hook for storing snapshots of the network's weights."""<def_stmt>__init__ self filename network interval<block_start>"""Initializes a new instance of the SnapshotHook class.
Args:
filename: The base filename of the model.
network: The network instance to store.
interval: The snapshot interval.
"""<line_sep>self._filename=filename<line_sep>self._network=network<line_sep>self._interval=interval<block_end><def_stmt>update self **kwargs<block_start>"""Executed the hook.
Args:
**kwargs: The optimizer dictionary.
"""<line_sep># Run the hook now?
<if_stmt>kwargs["update_counter"]%self._interval<eq>0# Yes
<block_start>np.savez("%s_snapshot_%d.npz"%(self._filename kwargs["update_counter"]) *lasagne.layers.get_all_param_values(self._network))<block_end><block_end><block_end><class_stmt>SegmentationValidationHook(object)<block_start>"""Performs a validation run for semantic segmentation."""<def_stmt>__init__ self val_fn data_provider logger interval=300 num_classes=19<block_start>"""Initializes a new instance of the SegmentationValidationHook class.
Args:
val_fn: A function that returns the predictions for each image and
a list of losses.
data_provider: A chianti data provider.
logger: A logger instance.
interval: The validation interval.
num_classes: The number of segmentation classes (defaults to 19).
"""<line_sep>self._val_fn=val_fn<line_sep>self._data_provider=data_provider<line_sep>self._logger=logger<line_sep>self._interval=interval<line_sep>self._num_classes=num_classes<block_end><def_stmt>update self **kwargs<block_start>"""Runs the validation hook."""<line_sep>update_now=kwargs["update_counter"]%self._interval<eq>0<if_stmt>update_now<and>kwargs["update_counter"]<g>0<block_start>self._logger.log(key="validation_checkpoint" message=kwargs["update_counter"])<line_sep>self._logger.log(key="status" message="-> Start validation run")<line_sep># Initialize the confusion matrix
conf_matrix=np.zeros((self._num_classes self._num_classes)).astype('int64')<line_sep>accumulated_loss=0<line_sep>self._data_provider.reset()<for_stmt>batch_counter range(self._data_provider.get_num_batches())<block_start>self._logger.log(key="status" message="--> Validate batch %d/%d"%(batch_counter+1 self._data_provider.get_num_batches()))<line_sep>batch=self._data_provider.next()<line_sep>images=batch[0]<line_sep>targets=batch[1]<line_sep>predictions,loss=self._val_fn(images targets)<line_sep>accumulated_loss<augadd>loss<line_sep># Mark the don't care predictions
# Flatten the predictions and targets
flat_predictions=predictions.flatten()<line_sep>non_void_pixels=(np.max(targets axis=1)<ne>0.0).flatten()<line_sep>flat_targets=np.argmax(targets axis=1).flatten()<line_sep># Select the non-don't cares
flat_targets=flat_targets[non_void_pixels]<line_sep>flat_predictions=flat_predictions[non_void_pixels]<line_sep>conf_matrix<augadd>metrics.confusion_matrix(flat_targets flat_predictions labels=np.arange(self._num_classes dtype='int64'))<block_end>accumulated_loss<augdiv>self._data_provider.get_num_batches()<line_sep>self._logger.log(key="conf_matrix" message=conf_matrix)<line_sep>self._logger.log(key="validation_loss" message=accumulated_loss)<block_end><block_end><block_end> |
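A small driver sketch showing the keyword protocol the hooks above read from the optimizer state dictionary; PrintLogger is a hypothetical stand-in for the project's logger, and the losses are fabricated just to exercise LoggingHook.

import time
import numpy as np

class PrintLogger:
    # stand-in logger: LoggingHook only needs a .log(key=..., message=...) method
    def log(self, key, message):
        print(key, "->", message)

hook = LoggingHook(PrintLogger())
start = time.time()
for update_counter in range(3):
    losses = [float(np.random.rand())]              # pretend per-update losses
    hook.update(update_counter=update_counter,
                runtime=time.time() - start,
                losses=losses)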
# Copyright (c) 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>sahara.service.edp hdfs_helper<import_from_stmt>sahara.service.edp job_utils<import_from_stmt>sahara.service.edp.oozie engine<as>oozie_engine<import_from_stmt>sahara.service.edp.oozie.workflow_creator workflow_factory<import_from_stmt>sahara.service.edp.spark engine<as>spark_engine<import_from_stmt>sahara.service.edp.storm engine<as>storm_engine<import_from_stmt>sahara.utils edp<line_sep>JOB_TYPE_HIVE=edp.JOB_TYPE_HIVE<line_sep>JOB_TYPE_SPARK=edp.JOB_TYPE_SPARK<line_sep>JOB_TYPE_JAVA=edp.JOB_TYPE_JAVA<line_sep>JOB_TYPE_SHELL=edp.JOB_TYPE_SHELL<line_sep>JOB_TYPE_PIG=edp.JOB_TYPE_PIG<line_sep>JOB_TYPE_STORM=edp.JOB_TYPE_STORM<line_sep>JOB_TYPE_PYLEUS=edp.JOB_TYPE_PYLEUS<line_sep>JOB_TYPE_MAPREDUCE=edp.JOB_TYPE_MAPREDUCE<line_sep>JOB_TYPE_MAPREDUCE_STREAMING=edp.JOB_TYPE_MAPREDUCE_STREAMING<line_sep>JOB_TYPES_ALL=edp.JOB_TYPES_ALL<line_sep>JOB_STATUS_SUCCEEDED=edp.JOB_STATUS_SUCCEEDED<class_stmt>PluginsStormJobEngine(storm_engine.StormJobEngine)<block_start><def_stmt>__init__ self cluster **kwargs<block_start>super(PluginsStormJobEngine self).__init__(cluster)<block_end><block_end><class_stmt>PluginsStormPyleusJobEngine(storm_engine.StormPyleusJobEngine)<block_start><def_stmt>__init__ self cluster **kwargs<block_start>super(PluginsStormPyleusJobEngine self).__init__(cluster)<block_end><block_end><class_stmt>PluginsSparkJobEngine(spark_engine.SparkJobEngine)<block_start><def_stmt>__init__ self cluster **kwargs<block_start>super(PluginsSparkJobEngine self).__init__(cluster)<block_end><block_end><class_stmt>PluginsSparkShellJobEngine(spark_engine.SparkShellJobEngine)<block_start><def_stmt>__init__ self cluster **kwargs<block_start>super(PluginsSparkShellJobEngine self).__init__(cluster)<block_end><block_end><class_stmt>PluginsOozieJobEngine(oozie_engine.OozieJobEngine)<block_start><def_stmt>__init__ self cluster **kwargs<block_start>super(PluginsOozieJobEngine self).__init__(cluster)<block_end><block_end><def_stmt>get_hive_shared_conf_path hdfs_user **kwargs<block_start><return>edp.get_hive_shared_conf_path(hdfs_user)<block_end><def_stmt>compare_job_type job_type *args **kwargs<block_start><return>edp.compare_job_type(job_type *args **kwargs)<block_end><def_stmt>get_builtin_binaries job configs **kwargs<block_start><return>edp.get_builtin_binaries(job configs)<block_end><def_stmt>create_dir_hadoop2 r dir_name hdfs_user **kwargs<block_start>hdfs_helper.create_dir_hadoop2(r dir_name hdfs_user)<block_end><def_stmt>create_hbase_common_lib r **kwargs<block_start>hdfs_helper.create_hbase_common_lib(r)<block_end><def_stmt>get_plugin cluster **kwargs<block_start><return>job_utils.get_plugin(cluster)<block_end><def_stmt>get_possible_job_config job_type **kwargs<block_start><return>workflow_factory.get_possible_job_config(job_type)<block_end><def_stmt>get_possible_mapreduce_configs **kwargs<block_start><return>workflow_factory.get_possible_mapreduce_configs()<block_end> |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>os<import_stmt>tensorflow<as>tf# Requires Tensorflow >=2.1
<import_from_stmt>tensorboard.plugins projector<import_stmt>tensorflow_datasets<as>tfds<line_sep># This demo expands upon the word embeddings tutorial found
# here: https://www.tensorflow.org/tutorials/text/word_embeddings
# and is intended to demonstrate the use of the embedding projector.
LOG_DIR=os.getenv("LOGDIR")<or>"/tmp/projector_demo"# Tensorboard log dir
METADATA_FNAME="meta.tsv"# Labels will be stored here
STEP=0<line_sep># Load imdb reviews dataset
(train_data test_data),info=tfds.load("imdb_reviews/subwords8k" split=(tfds.Split.TRAIN tfds.Split.TEST) with_info=<true> as_supervised=<true> )<line_sep>encoder=info.features["text"].encoder<line_sep># shuffle, pad, and train the data.
train_batches=train_data.shuffle(1000).padded_batch(10 padded_shapes=((<none> ) ()))<line_sep>test_batches=test_data.shuffle(1000).padded_batch(10 padded_shapes=((<none> ) ()))<line_sep>train_batch,train_labels=next(iter(train_batches))<line_sep>embedding_dim=16<line_sep># Create a basic embedding layer
embedding=tf.keras.layers.Embedding(encoder.vocab_size embedding_dim)<line_sep>model=tf.keras.Sequential([embedding tf.keras.layers.GlobalAveragePooling1D() tf.keras.layers.Dense(16 activation="relu") tf.keras.layers.Dense(1) ])<line_sep># Compile model
model.compile(optimizer="adam" loss=tf.keras.losses.BinaryCrossentropy(from_logits=<true>) metrics=["accuracy"] )<line_sep># Train model
history=model.fit(train_batches epochs=1 validation_data=test_batches validation_steps=20)<line_sep># Fetch the embedding layer and get the weights.
# Make sure to remove the first element, as it is padding.
weights=tf.Variable(model.layers[0].get_weights()[0][1:])<def_stmt>register_embedding weights labels log_dir<arrow><none><block_start>"""Saves a metadata file (labels) and a checkpoint (derived from weights)
and configures the Embedding Projector to read from the appropriate locations.
Args:
weights: tf.Variable with the weights of the embedding layer to be displayed.
labels: list of labels corresponding to the weights.
log_dir: Directory into which to store the config file, as a `str`.
"""<line_sep># Create a checkpoint from embedding, the filename and key are
# name of the tensor.
checkpoint=tf.train.Checkpoint(embedding=weights)<line_sep>checkpoint.save(os.path.join(LOG_DIR "embedding.ckpt"))<line_sep># Save labels separately, one label per line.
<with_stmt>open(os.path.join(log_dir METADATA_FNAME) "w")<as>f<block_start><for_stmt>label labels<block_start>f.write("{}\n".format(label))<block_end><block_end># Set up config
config=projector.ProjectorConfig()<line_sep>embedding=config.embeddings.add()<line_sep># The name of the tensor will be suffixed by `/.ATTRIBUTES/VARIABLE_VALUE`
embedding.tensor_name="embedding/.ATTRIBUTES/VARIABLE_VALUE"<line_sep>embedding.metadata_path=METADATA_FNAME<line_sep>projector.visualize_embeddings(log_dir config)<block_end># Save Files
register_embedding(weights encoder.subwords LOG_DIR)<line_sep> |
"""
The Federated EMNIST dataset.
The Federated EMNIST dataset originates from the EMNIST dataset, which contains
817851 images, each of which is a 28x28 greyscale image in 1 out of 62 classes.
The difference between the Federated EMNIST dataset and its original counterpart
is that this dataset is already partitioned by the client ID, using the data
provider IDs included in the original EMNIST dataset. As a result of this
partitioning, there are 3597 clients in total, each of which has 227.37 images
on average (std is 88.84). For each client, 90% data samples are used for
training, while the remaining samples are used for testing.
Reference:
<NAME>, <NAME>, <NAME>, and <NAME>, "EMNIST: Extending MNIST to
handwritten letters," in the 2017 International Joint Conference on Neural
Networks (IJCNN).
"""<import_stmt>json<import_stmt>logging<import_stmt>os<import_stmt>numpy<as>np<import_from_stmt>torch.utils.data Dataset<import_from_stmt>torchvision transforms<import_from_stmt>plato.config Config<import_from_stmt>plato.datasources base<class_stmt>CustomDictDataset(Dataset)<block_start>""" Custom dataset from a dictionary with support of transforms. """<def_stmt>__init__ self loaded_data transform=<none><block_start>""" Initializing the custom dataset. """<line_sep>super().__init__()<line_sep>self.loaded_data=loaded_data<line_sep>self.transform=transform<block_end><def_stmt>__getitem__ self index<block_start>sample=self.loaded_data['x'][index]<line_sep>target=self.loaded_data['y'][index]<if_stmt>self.transform<block_start>sample=self.transform(sample)<block_end><return>sample target<block_end><def_stmt>__len__ self<block_start><return>len(self.loaded_data['y'])<block_end><block_end><class_stmt>ReshapeListTransform<block_start>""" The transform that reshapes an image. """<def_stmt>__init__ self new_shape<block_start>self.new_shape=new_shape<block_end><def_stmt>__call__ self img<block_start><return>np.array(img dtype=np.float32).reshape(self.new_shape)<block_end><block_end><class_stmt>DataSource(base.DataSource)<block_start>"""The FEMNIST dataset."""<def_stmt>__init__ self client_id=0<block_start>super().__init__()<line_sep>self.trainset=<none><line_sep>self.testset=<none><line_sep>root_path=os.path.join(Config().data.data_path 'FEMNIST' 'packaged_data')<if_stmt>client_id<eq>0# If we are on the federated learning server
<block_start>data_dir=os.path.join(root_path 'test')<line_sep>data_url="https://jiangzhifeng.s3.us-east-2.amazonaws.com/FEMNIST/test/"+str(client_id)+".zip"<block_end><else_stmt><block_start>data_dir=os.path.join(root_path 'train')<line_sep>data_url="https://jiangzhifeng.s3.us-east-2.amazonaws.com/FEMNIST/train/"+str(client_id)+".zip"<block_end><if_stmt><not>os.path.exists(os.path.join(data_dir str(client_id)))<block_start>logging.info("Downloading the Federated EMNIST dataset "<concat>"with the client datasets pre-partitioned. This may take a while." )<line_sep>self.download(url=data_url data_path=data_dir)<block_end>loaded_data=DataSource.read_data(file_path=os.path.join(data_dir str(client_id) 'data.json'))<line_sep>_transform=transforms.Compose([ReshapeListTransform((28 28 1)) transforms.ToPILImage() transforms.RandomCrop(28 padding=2 padding_mode="constant" fill=1.0) transforms.RandomResizedCrop(28 scale=(0.8 1.2) ratio=(4./5. 5./4.)) transforms.RandomRotation(5 fill=1.0) transforms.ToTensor() transforms.Normalize(0.9637 0.1597) ])<line_sep>dataset=CustomDictDataset(loaded_data=loaded_data transform=_transform)<if_stmt>client_id<eq>0# testing dataset on the server
<block_start>self.testset=dataset<block_end><else_stmt># training dataset on one of the clients
<block_start>self.trainset=dataset<block_end><block_end>@staticmethod<def_stmt>read_data file_path<block_start>""" Reading the dataset specific to a client_id. """<with_stmt>open(file_path 'r')<as>fin<block_start>loaded_data=json.load(fin)<block_end><return>loaded_data<block_end><def_stmt>num_train_examples self<block_start><return>len(self.trainset)<block_end><def_stmt>num_test_examples self<block_start><return>len(self.testset)<block_end><block_end> |
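A usage sketch; plato reads paths from its Config singleton, so this assumes a configuration has been loaded with Config().data.data_path pointing at the dataset root, and the client id is arbitrary.

server_source = DataSource(client_id=0)     # server side: the test split
client_source = DataSource(client_id=7)     # client 7: its own training partition
print("client 7 train samples:", client_source.num_train_examples())
print("server test samples:", server_source.num_test_examples())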
<import_from_future_stmt> absolute_import division print_function<def_stmt>kin_vec start_key start_xyz end_key end_xyz width=<none><block_start>start_altloc=start_key[0:1]<if_stmt>start_altloc<eq>' '<block_start>start_altloc_txt=""<block_end><else_stmt><block_start>start_altloc_txt=" '%s'"%start_altloc.lower()<block_end>end_altloc=end_key[0:1]<if_stmt>end_altloc<eq>' '<block_start>end_altloc_txt=""<block_end><else_stmt><block_start>end_altloc_txt=" '%s'"%end_altloc.lower()<block_end><if_stmt>width<is><none><block_start><return>"{%s} P%s %.3f %.3f %.3f {%s} L%s %.3f %.3f %.3f\n"%(start_key start_altloc_txt start_xyz[0] start_xyz[1] start_xyz[2] end_key end_altloc_txt end_xyz[0] end_xyz[1] end_xyz[2])<block_end><else_stmt><block_start><return>"{%s} P%s %.3f %.3f %.3f {%s} L%s width%d %.3f %.3f %.3f\n"%(start_key start_altloc_txt start_xyz[0] start_xyz[1] start_xyz[2] end_key end_altloc_txt width end_xyz[0] end_xyz[1] end_xyz[2])<block_end><block_end> |
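An illustrative call with made-up atom keys: the first character of each key is treated as the altloc, so the blank start key produces no altloc tag while the 'a' on the end key is emitted as " 'a'".

print(kin_vec(" CA  ALA 12", (1.0, 2.0, 3.0), "aCB  ALA 12", (1.5, 2.5, 3.5), width=2))
# -> { CA  ALA 12} P 1.000 2.000 3.000 {aCB  ALA 12} L 'a' width2 1.500 2.500 3.500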
<import_stmt>unittest<import_stmt>tempfile<import_stmt>json<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>supervised.preprocessing.datetime_transformer DateTimeTransformer<class_stmt>DateTimeTransformerTest(unittest.TestCase)<block_start><def_stmt>test_transformer self<block_start>d={"col1":["2020/06/01" "2020/06/02" "2020/06/03" "2021/06/01" "2022/06/01" ]}<line_sep>df=pd.DataFrame(data=d)<line_sep>df["col1"]=pd.to_datetime(df["col1"])<line_sep>df_org=df.copy()<line_sep>transf=DateTimeTransformer()<line_sep>transf.fit(df "col1")<line_sep>df=transf.transform(df)<line_sep>self.assertTrue(df.shape[0]<eq>5)<line_sep>self.assertTrue("col1"<not><in>df.columns)<line_sep>self.assertTrue("col1_Year"<in>df.columns)<line_sep>transf2=DateTimeTransformer()<line_sep>transf2.from_json(transf.to_json())<line_sep>df2=transf2.transform(df_org)<line_sep>self.assertTrue("col1"<not><in>df2.columns)<line_sep>self.assertTrue("col1_Year"<in>df2.columns)<block_end><block_end> |
"""Urls for the Zinnia comments"""<import_from_stmt>django.conf.urls url<import_from_stmt>zinnia.urls _<import_from_stmt>zinnia.views.comments CommentSuccess<line_sep>urlpatterns=[url(_(r'^success/$') CommentSuccess.as_view() name='comment_success') ]<line_sep> |
"""Unit test for keytabs
"""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<import_stmt>io<import_stmt>os<import_stmt>shutil<import_stmt>tempfile<import_stmt>unittest<import_stmt>mock<import_from_stmt>treadmill keytabs<class_stmt>KeytabsTest(unittest.TestCase)<block_start>"""test keytabs function
"""<def_stmt>setUp self<block_start>self.spool_dir=tempfile.mkdtemp()<block_end><def_stmt>tearDown self<block_start>shutil.rmtree(self.spool_dir)<block_end><def_stmt>_touch_file self name<block_start><with_stmt>io.open(os.path.join(self.spool_dir name) 'w')<block_start><pass><block_end><block_end>@mock.patch('treadmill.subproc.check_call')<def_stmt>test_add_keytabs_to_file self mock_check_call<block_start>"""test add keytabs princ files into dest file
"""<line_sep>self._touch_file('HTTP#foo@realm')<line_sep>self._touch_file('HTTP#bar@realm')<line_sep>self._touch_file('host#foo@realm')<line_sep>self._touch_file('host#bar@realm')<line_sep>keytabs.add_keytabs_to_file(self.spool_dir 'host' 'krb5.keytab')<try_stmt><block_start>mock_check_call.assert_called_once_with(['kt_add' 'krb5.keytab' os.path.join(self.spool_dir 'host#foo@realm') os.path.join(self.spool_dir 'host#bar@realm') ])<block_end><except_stmt>AssertionError# then should called with files in other order
<block_start>mock_check_call.assert_called_once_with(['kt_add' 'krb5.keytab' os.path.join(self.spool_dir 'host#bar@realm') os.path.join(self.spool_dir 'host#foo@realm') ])<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end> |
<import_from_stmt>django.contrib.flatpages.sitemaps FlatPageSitemap<import_from_stmt>django.contrib.sitemaps views<import_from_stmt>django.urls include path<line_sep>urlpatterns=[path('flatpages/sitemap.xml' views.sitemap {'sitemaps':{'flatpages':FlatPageSitemap}} name='django.contrib.sitemaps.views.sitemap') path('flatpage_root/' include('django.contrib.flatpages.urls')) path('accounts/' include('django.contrib.auth.urls')) ]<line_sep> |
<import_stmt>dataclasses<import_stmt>pathlib<import_stmt>subprocess<import_from_stmt>typing DefaultDict List Sequence<import_stmt>dacite<import_from_stmt>pysen.command CommandBase<import_from_stmt>pysen.component ComponentBase RunOptions<import_from_stmt>pysen.path change_dir<import_from_stmt>pysen.plugin PluginBase<import_from_stmt>pysen.pyproject_model Config PluginConfig<import_from_stmt>pysen.reporter Reporter<import_from_stmt>pysen.runner_options PathContext<import_from_stmt>pysen.setting SettingFile<class_stmt>ShellCommand(CommandBase)<block_start><def_stmt>__init__ self name:str base_dir:pathlib.Path cmd:Sequence[str]<arrow><none><block_start>self._name=name<line_sep>self._base_dir=base_dir<line_sep>self._cmd=cmd<block_end>@property<def_stmt>name self<arrow>str<block_start><return>self._name<block_end><def_stmt>__call__ self reporter:Reporter<arrow>int<block_start><with_stmt>change_dir(self._base_dir)<block_start><try_stmt><block_start>ret=subprocess.run(self._cmd)<line_sep>reporter.logger.info(f"{self._cmd} returns {ret.returncode}")<line_sep><return>ret.returncode<block_end><except_stmt>BaseException<as>e<block_start>reporter.logger.info(f"an error occured while executing: {self._cmd}\n{e}")<line_sep><return>255<block_end><block_end><block_end><block_end><class_stmt>ShellComponent(ComponentBase)<block_start><def_stmt>__init__ self name:str cmd:Sequence[str] targets:Sequence[str]<arrow><none><block_start>self._name=name<line_sep>self._cmd=cmd<line_sep>self._targets=targets<block_end>@property<def_stmt>name self<arrow>str<block_start><return>self._name<block_end><def_stmt>export_settings self paths:PathContext files:DefaultDict[str SettingFile] <arrow><none><block_start>print(f"Called export_settings at {self._name}: do nothing")<block_end>@property<def_stmt>targets self<arrow>Sequence[str]<block_start><return>self._targets<block_end><def_stmt>create_command self target:str paths:PathContext options:RunOptions<arrow>CommandBase<block_start><assert_stmt>target<in>self._targets<line_sep><return>ShellCommand(self._name paths.base_dir self._cmd)<block_end><block_end>@dataclasses.dataclass<class_stmt>ShellPluginConfig<block_start>name:str<line_sep>command:List[str]<line_sep>targets:List[str]<block_end><class_stmt>ShellPlugin(PluginBase)<block_start><def_stmt>load self file_path:pathlib.Path config_data:PluginConfig root:Config<arrow>Sequence[ComponentBase]<block_start><assert_stmt>(config_data.config<is><not><none>) f"{config_data.location}.config must be not None"<line_sep>config=dacite.from_dict(ShellPluginConfig config_data.config dacite.Config(strict=<true>))<line_sep><return>[ShellComponent(config.name config.command config.targets)]<block_end><block_end># NOTE(igarashi): This is the entry point of a plugin method
<def_stmt>plugin <arrow>PluginBase<block_start><return>ShellPlugin()<block_end> |
"""Transforms the XML module definitions parsed from the PDF into a verilog representation"""<import_from_stmt>lxml etree<import_from_stmt>datetime datetime<def_stmt>format_port name width type **kwargs<block_start>wstr=''<if>int(width)<eq>1<else>'[%s:0]\t'%width<line_sep><return>'\t%s\t%s%s;\n'%(type wstr name)<block_end><def_stmt>format_attrib name type default **kwargs<block_start><if_stmt>type<eq>'STRING'<block_start>default='"%s"'%default# need to ensure strings are quoted
<block_end><return>'\tparameter %s = %s;\n'%(name default)<block_end><def_stmt>process infile outfile<block_start>tree=etree.parse(infile)<line_sep>root=tree.getroot()<with_stmt>open(outfile "w")<as>output<block_start>output.write('// Automatically generated from %s on %s\n\n'%(infile datetime.now().isoformat()))<for_stmt>module root.getchildren()<block_start>ports=module.xpath('port')<line_sep>attrs=module.xpath('attribute')<line_sep>output.write('module %s (%s);\n'%(module.attrib['name'] ', '.join([port.attrib['name']<for>port ports])))<for_stmt>port ports<block_start>output.write(format_port(**dict(port.attrib)))<block_end><if_stmt>len(attrs)<block_start>output.write('\n')<block_end><for_stmt>attr attrs<block_start>output.write(format_attrib(**dict(attr.attrib)))<block_end>output.write('endmodule\n\n')<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>argparse<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--input' '-i' nargs='?' default='cells_xtra.xml')<line_sep>parser.add_argument('--output' '-o' nargs='?' default='cells_xtra.v')<line_sep>args=parser.parse_args()<line_sep>process(args.input args.output)<block_end> |
<import_from_future_stmt> division<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>os<import_stmt>logging<import_from_stmt>torch.utils.data DataLoader Dataset Sampler<line_sep>logger=logging.getLogger('DeepAR.Data')<class_stmt>TrainDataset(Dataset)<block_start><def_stmt>__init__ self data_path data_name num_class<block_start>self.data=np.load(os.path.join(data_path f'train_data_{data_name}.npy'))<line_sep>self.label=np.load(os.path.join(data_path f'train_label_{data_name}.npy'))<line_sep>self.train_len=self.data.shape[0]<line_sep>logger.info(f'train_len: {self.train_len}')<line_sep>logger.info(f'building datasets from {data_path}...')<block_end><def_stmt>__len__ self<block_start><return>self.train_len<block_end><def_stmt>__getitem__ self index<block_start><return>(self.data[index : :-1] int(self.data[index 0 -1]) self.label[index])<block_end><block_end><class_stmt>TestDataset(Dataset)<block_start><def_stmt>__init__ self data_path data_name num_class<block_start>self.data=np.load(os.path.join(data_path f'test_data_{data_name}.npy'))<line_sep>self.v=np.load(os.path.join(data_path f'test_v_{data_name}.npy'))<line_sep>self.label=np.load(os.path.join(data_path f'test_label_{data_name}.npy'))<line_sep>self.test_len=self.data.shape[0]<line_sep>logger.info(f'test_len: {self.test_len}')<line_sep>logger.info(f'building datasets from {data_path}...')<block_end><def_stmt>__len__ self<block_start><return>self.test_len<block_end><def_stmt>__getitem__ self index<block_start><return>(self.data[index : :-1] int(self.data[index 0 -1]) self.v[index] self.label[index])<block_end><block_end><class_stmt>WeightedSampler(Sampler)<block_start><def_stmt>__init__ self data_path data_name replacement=<true><block_start>v=np.load(os.path.join(data_path f'train_v_{data_name}.npy'))<line_sep>self.weights=torch.as_tensor(np.abs(v[: 0])/np.sum(np.abs(v[: 0])) dtype=torch.double)<line_sep>logger.info(f'weights: {self.weights}')<line_sep>self.num_samples=self.weights.shape[0]<line_sep>logger.info(f'num samples: {self.num_samples}')<line_sep>self.replacement=replacement<block_end><def_stmt>__iter__ self<block_start><return>iter(torch.multinomial(self.weights self.num_samples self.replacement).tolist())<block_end><def_stmt>__len__ self<block_start><return>self.num_samples<block_end><block_end> |
<import_stmt>torch<import_stmt>os<def_stmt>download_process_data path="colab_demo"<block_start>os.makedirs(path exist_ok=<true>)<line_sep>print("Downloading data")<line_sep>torch.hub.download_url_to_file('https://image-editing-test-12345.s3-us-west-2.amazonaws.com/colab_examples/lsun_bedroom1.pth' os.path.join(path 'lsun_bedroom1.pth'))<line_sep>torch.hub.download_url_to_file('https://image-editing-test-12345.s3-us-west-2.amazonaws.com/colab_examples/lsun_bedroom2.pth' os.path.join(path 'lsun_bedroom2.pth'))<line_sep>torch.hub.download_url_to_file('https://image-editing-test-12345.s3-us-west-2.amazonaws.com/colab_examples/lsun_bedroom3.pth' os.path.join(path 'lsun_bedroom3.pth'))<line_sep>torch.hub.download_url_to_file('https://image-editing-test-12345.s3-us-west-2.amazonaws.com/colab_examples/lsun_edit.pth' os.path.join(path 'lsun_edit.pth'))<line_sep>torch.hub.download_url_to_file('https://image-editing-test-12345.s3-us-west-2.amazonaws.com/colab_examples/lsun_church.pth' os.path.join(path 'lsun_church.pth'))<line_sep>print("Data downloaded")<block_end> |
"""Fix setting analysis_info_id_seq
Revision ID: f8291ab1d6be
Revises: <PASSWORD>
Create Date: 2021-07-15 16:49:05.354455
"""<line_sep># revision identifiers, used by Alembic.
revision='f8291ab1d6be'<line_sep>down_revision='<PASSWORD>'<line_sep>branch_labels=<none><line_sep>depends_on=<none><import_from_stmt>alembic op<def_stmt>upgrade <block_start>ctx=op.get_context()<line_sep>dialect=ctx.dialect.name<if_stmt>dialect<eq>'postgresql'<block_start>op.execute("""
SELECT SETVAL(
'analysis_info_id_seq',
(SELECT MAX(id) + 1 FROM analysis_info)
)
""")<block_end><block_end> |
# Copyright 2019 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
"""Tests the Path class."""<import_stmt>copy<import_stmt>gc<import_stmt>pytest<import_stmt>random<import_stmt>torch<import_stmt>warnings<import_stmt>weakref<import_from_stmt>helpers helpers<as>h<import_from_stmt>helpers validation<as>v<line_sep>tests=['Path']<line_sep>depends=['signature' 'logsignature']<line_sep>signatory=v.validate_tests(tests depends)<def_stmt>_update_lengths_update_grads maxlength<block_start>update_lengths=[]<line_sep>update_grads=[]<line_sep>num=int(torch.randint(low=0 high=3 size=(1 )))<for_stmt>_ range(num)<block_start>update_lengths.append(int(torch.randint(low=1 high=maxlength size=(1 ))))<line_sep>update_grads.append(random.choice([<true> <false>]))<block_end><return>update_lengths update_grads<block_end><def_stmt>test_path <block_start>"""Tests that Path behaves correctly."""<line_sep># Test small edge cases thoroughly
<for_stmt>device h.get_devices()<block_start><for_stmt>batch_size (1 2)<block_start><for_stmt>input_stream,basepoints zip((1 2) ((<true> h.without_grad h.with_grad) (<false> <true> h.without_grad h.with_grad)))<block_start><for_stmt>input_channels (1 2)<block_start><for_stmt>depth (1 2)<block_start><for_stmt>scalar_term (<true> <false>)<block_start><for_stmt>path_grad (<false> <true>)<block_start>basepoint=random.choice(basepoints)<line_sep>update_lengths,update_grads=_update_lengths_update_grads(3)<line_sep>_test_path(device path_grad batch_size input_stream input_channels depth basepoint update_lengths update_grads scalar_term extrarandom=<false> which='all')<block_end><block_end><block_end><block_end><block_end><block_end><block_end># Randomly test larger cases
<for_stmt>_ range(50)<block_start>device=random.choice(h.get_devices())<line_sep>batch_size=random.choice((1 2 5))<line_sep>input_stream=random.choice([3 6 10])<line_sep>input_channels=random.choice([1 2 6])<line_sep>depth=random.choice([1 2 4 6])<line_sep>basepoint=random.choice([<false> <true> h.without_grad h.with_grad])<line_sep>path_grad=random.choice([<false> <true>])<line_sep>update_lengths,update_grads=_update_lengths_update_grads(10)<line_sep>scalar_term=random.choice([<false> <true>])<line_sep>_test_path(device path_grad batch_size input_stream input_channels depth basepoint update_lengths update_grads scalar_term extrarandom=<true> which='random')<block_end># Do at least one large test
<for_stmt>device h.get_devices()<block_start>_test_path(device path_grad=<true> batch_size=5 input_stream=10 input_channels=6 depth=6 basepoint=<true> update_lengths=[5 6] update_grads=[<false> <true>] scalar_term=<false> extrarandom=<false> which='none')<block_end><block_end><def_stmt>_randint value<block_start><return>torch.randint(low=0 high=value size=(1 )).item()<block_end><def_stmt>_test_path device path_grad batch_size input_stream input_channels depth basepoint update_lengths update_grads scalar_term extrarandom which<block_start>path=h.get_path(batch_size input_stream input_channels device path_grad)<line_sep>basepoint=h.get_basepoint(batch_size input_channels device basepoint)<line_sep>path_obj=signatory.Path(path depth basepoint=basepoint scalar_term=scalar_term)<if_stmt>isinstance(basepoint torch.Tensor)<block_start>full_path=torch.cat([basepoint.unsqueeze(1) path] dim=1)<block_end><elif_stmt>basepoint<is><true><block_start>full_path=torch.cat([torch.zeros(batch_size 1 input_channels device=device dtype=torch.double) path] dim=1)<block_end><else_stmt><block_start>full_path=path<block_end><if_stmt><not>path_grad<and><not>(isinstance(basepoint torch.Tensor)<and>basepoint.requires_grad)<block_start>backup_path_obj=copy.deepcopy(path_obj)<line_sep># derived objects to test
copy_path_obj=copy.copy(path_obj)<line_sep>shuffle_path_obj1,perm1=path_obj.shuffle()<line_sep>shuffle_path_obj2,perm2=copy.deepcopy(path_obj).shuffle_()<line_sep>getitem1=_randint(batch_size)<line_sep>getitem_path_obj1=path_obj[getitem1]# integer
all_derived=[(copy_path_obj slice(<none>)) (shuffle_path_obj1 perm1) (shuffle_path_obj2 perm2) (getitem_path_obj1 getitem1)]<line_sep>start=_randint(batch_size)<line_sep>end=_randint(batch_size)<line_sep>getitem2=slice(start end)<line_sep>getitem3=torch.randint(low=0 high=batch_size size=(_randint(int(1.5<times>batch_size)) ))<line_sep>getitem4=torch.randint(low=0 high=batch_size size=(_randint(int(1.5<times>batch_size)) )).numpy()<line_sep>getitem5=torch.randint(low=0 high=batch_size size=(_randint(int(1.5<times>batch_size)) )).tolist()<try_stmt><block_start>getitem_path_obj2=path_obj[getitem2]# slice, perhaps a 'null' slice
<block_end><except_stmt>IndexError<as>e<block_start><if_stmt>start<ge>end<block_start><pass><block_end><else_stmt><block_start>pytest.fail(str(e))<block_end><block_end><else_stmt><block_start>all_derived.append((getitem_path_obj2 getitem2))<block_end><try_stmt><block_start>getitem_path_obj3=path_obj[getitem3]# 1D tensor
<block_end><except_stmt>IndexError<as>e<block_start><if_stmt>len(getitem3)<eq>0<block_start><pass><block_end><else_stmt><block_start>pytest.fail(str(e))<block_end><block_end><else_stmt><block_start>all_derived.append((getitem_path_obj3 getitem3))<block_end><try_stmt><block_start>getitem_path_obj4=path_obj[getitem4]# array
<block_end><except_stmt>IndexError<as>e<block_start><if_stmt>len(getitem4)<eq>0<block_start><pass><block_end><else_stmt><block_start>pytest.fail(str(e))<block_end><block_end><else_stmt><block_start>all_derived.append((getitem_path_obj4 getitem4))<block_end><try_stmt><block_start>getitem_path_obj5=path_obj[getitem5]# list
<block_end><except_stmt>IndexError<as>e<block_start><if_stmt>len(getitem5)<eq>0<block_start><pass><block_end><else_stmt><block_start>pytest.fail(str(e))<block_end><block_end><else_stmt><block_start>all_derived.append((getitem_path_obj5 getitem5))<block_end><if_stmt>which<eq>'random'<block_start>all_derived=[random.choice(all_derived)]<block_end><elif_stmt>which<eq>'none'<block_start>all_derived=[]<block_end><for_stmt>derived_path_obj,derived_index all_derived# tests that the derived objects do what they claim to do
<block_start>_test_derived(path_obj derived_path_obj derived_index extrarandom)<line_sep># tests that the derived objects are consistent wrt themselves
full_path_=full_path[derived_index]<if_stmt>isinstance(derived_index int)<block_start>full_path_=full_path_.unsqueeze(0)<block_end>_test_path_obj(full_path_.size(0) input_channels device derived_path_obj full_path_ depth update_lengths update_grads scalar_term extrarandom)<block_end># tests that the changes to the derived objects have not affected the original
<assert_stmt>path_obj<eq>backup_path_obj<block_end># finally test the original object
_test_path_obj(batch_size input_channels device path_obj full_path depth update_lengths update_grads scalar_term extrarandom)<block_end><def_stmt>_test_path_obj batch_size input_channels device path_obj full_path depth update_lengths update_grads scalar_term extrarandom# First of all test a Path with no updates
<block_start>_test_signature(path_obj full_path depth scalar_term extrarandom)<line_sep>_test_logsignature(path_obj full_path depth extrarandom)<line_sep>_test_equality(path_obj)<assert_stmt>path_obj.depth<eq>depth<if_stmt>len(update_lengths)<g>1# Then test Path with variable amounts of updates
<block_start><for_stmt>length,grad zip(update_lengths update_grads)<block_start>new_path=torch.rand(batch_size length input_channels dtype=torch.double device=device requires_grad=grad)<line_sep>path_obj.update(new_path)<line_sep>full_path=torch.cat([full_path new_path] dim=1)<block_end>_test_signature(path_obj full_path depth scalar_term extrarandom)<line_sep>_test_logsignature(path_obj full_path depth extrarandom)<line_sep>_test_equality(path_obj)<assert_stmt>path_obj.depth<eq>depth<block_end><block_end><def_stmt>_test_signature path_obj full_path depth scalar_term extrarandom<block_start><def_stmt>candidate start=<none> end=<none><block_start><return>path_obj.signature(start end)<block_end><def_stmt>true start end<block_start><return>signatory.signature(full_path[: start:end] depth scalar_term=scalar_term)<block_end><def_stmt>extra true_signature<block_start><assert_stmt>(path_obj.signature_size(-3) path_obj.signature_size(-1))<eq>true_signature.shape<assert_stmt>path_obj.signature_channels()<eq>true_signature.size(-1)<assert_stmt>path_obj.shape<eq>full_path.shape<assert_stmt>path_obj.channels()<eq>full_path.size(-1)<block_end>_test_operation(path_obj candidate true extra '_BackwardShortcutBackward' extrarandom)<block_end><def_stmt>_test_logsignature path_obj full_path depth extrarandom<block_start><if_stmt>extrarandom<block_start><if_stmt>random.choice([<true> <false> <false>])<block_start>modes=h.all_modes<block_end><else_stmt><block_start>modes=(h.expand_mode h.words_mode)<block_end><block_end><else_stmt><block_start>modes=h.all_modes<block_end><for_stmt>mode modes<block_start><def_stmt>candidate start=<none> end=<none><block_start><with_stmt>warnings.catch_warnings()<block_start>warnings.filterwarnings('ignore' message="The logsignature with mode='brackets' has been requested on "<concat>"the GPU." category=UserWarning)<line_sep><return>path_obj.logsignature(start end mode=mode)<block_end><block_end><def_stmt>true start end<block_start><with_stmt>warnings.catch_warnings()<block_start>warnings.filterwarnings('ignore' message="The logsignature with mode='brackets' has been requested on "<concat>"the GPU." 
category=UserWarning)<line_sep><return>signatory.logsignature(full_path[: start:end] depth mode=mode)<block_end><block_end><def_stmt>extra true_logsignature<block_start><if_stmt>mode<ne>h.expand_mode<block_start><assert_stmt>(path_obj.logsignature_size(-3) path_obj.logsignature_size(-1))<eq>true_logsignature.shape<assert_stmt>path_obj.logsignature_channels()<eq>true_logsignature.size(-1)<block_end><block_end>_test_operation(path_obj candidate true extra '_SignatureToLogsignatureFunctionBackward' extrarandom)<block_end><block_end><def_stmt>_test_equality path_obj<block_start><assert_stmt>path_obj<eq>path_obj<assert_stmt><not>(path_obj<ne>path_obj)<line_sep>shuffled_path_obj,perm=path_obj.shuffle()<assert_stmt>shuffled_path_obj<eq>path_obj[perm]<assert_stmt><not>(shuffled_path_obj<ne>path_obj[perm])<block_end><def_stmt>_test_derived path_obj derived_path_obj derived_index extrarandom<block_start><def_stmt>candidate start=<none> end=<none><block_start><return>torch.cat(derived_path_obj.path dim=-2)<block_end><def_stmt>true start end<block_start><return>torch.cat(path_obj.path dim=-2)[derived_index]<block_end><def_stmt>extra true_path<block_start><pass><block_end>_test_operation(path_obj candidate true extra <none> extrarandom)<def_stmt>candidate start=<none> end=<none><block_start><return>derived_path_obj.signature(start end)<block_end><def_stmt>true start end<block_start><return>path_obj.signature(start end)[derived_index]<block_end><def_stmt>extra true_signature<block_start><pass><block_end>_test_operation(path_obj candidate true extra '_BackwardShortcutBackward' extrarandom)<if_stmt>extrarandom<block_start><if_stmt>random.choice([<true> <false> <false>])<block_start>modes=h.all_modes<block_end><else_stmt><block_start>modes=(h.expand_mode h.words_mode)<block_end><block_end><else_stmt><block_start>modes=h.all_modes<block_end><for_stmt>mode modes<block_start><def_stmt>candidate start=<none> end=<none><block_start><with_stmt>warnings.catch_warnings()<block_start>warnings.filterwarnings('ignore' message="The logsignature with mode='brackets' has been requested on "<concat>"the GPU." category=UserWarning)<line_sep><return>derived_path_obj.logsignature(start end mode=mode)<block_end><block_end><def_stmt>true start end<block_start><with_stmt>warnings.catch_warnings()<block_start>warnings.filterwarnings('ignore' message="The logsignature with mode='brackets' has been requested on "<concat>"the GPU." category=UserWarning)<line_sep><return>path_obj.logsignature(start end mode=mode)[derived_index]<block_end><block_end><def_stmt>extra true_logsignature<block_start><pass><block_end>_test_operation(path_obj candidate true extra '_SignatureToLogsignatureFunctionBackward' extrarandom)<block_end><block_end><def_stmt>_boundaries length<block_start><yield>-length-1<line_sep><yield>-length<line_sep><yield>-1<line_sep><yield>0<line_sep><yield>1<line_sep><yield>length-1<line_sep><yield>length<line_sep><yield><none><block_end><def_stmt>_start_end length extrarandom<block_start><for_stmt>start _boundaries(length)<block_start><for_stmt>end _boundaries(length)<block_start><if_stmt>(<not>extrarandom)<or>random.choice([<true> <false>])<block_start><yield>start end<block_end><block_end><block_end><for_stmt>_ range(5)<block_start>start=int(torch.randint(low=-length high=length size=(1 )))<line_sep>end=int(torch.randint(low=-length high=length size=(1 )))<line_sep><yield>start end<block_end><block_end><def_stmt>_test_operation path_obj candidate true extra backward_name extrarandom# We perform multiple tests here.
# Test #1: That the memory usage is consistent
# Test #2: That the backward 'ctx' is correctly garbage collected
# Test #3: The forward accuracy of a particular operation
# Test #4: The backward accuracy of the same operation
<block_start><def_stmt>one_iteration start end<block_start>gc.collect()<if_stmt>torch.cuda.is_available()<block_start>torch.cuda.synchronize()<line_sep>torch.cuda.reset_max_memory_allocated()<block_end><try_stmt><block_start>tensor=candidate(start end)<block_end><except_stmt>ValueError<as>e<block_start><try_stmt><block_start>true(start end)<block_end><except_stmt>ValueError<block_start><return>0<block_end><else_stmt><block_start>pytest.fail(str(e))<block_end><block_end><try_stmt><block_start>true_tensor=true(start end)<block_end><except_stmt>ValueError<as>e<block_start>pytest.fail(str(e))<block_end>h.diff(tensor true_tensor)# Test #3
extra(true_tensor)# Any extra tests
<if_stmt>tensor.requires_grad<block_start>grad=torch.rand_like(tensor)<line_sep>tensor.backward(grad)<line_sep>path_grads=[]<for_stmt>path path_obj.path<block_start><if_stmt>path.grad<is><none><block_start>path_grads.append(<none>)<block_end><else_stmt><block_start>path_grads.append(path.grad.clone())<line_sep>path.grad.zero_()<block_end><block_end>true_tensor.backward(grad)<for_stmt>path,path_grad zip(path_obj.path path_grads)<block_start><if_stmt>path_grad<is><none><block_start><assert_stmt>(path.grad<is><none>)<or>(path.grad.nonzero().numel()<eq>0)<block_end><else_stmt><block_start>h.diff(path.grad path_grad)# Test #4
path.grad.zero_()<block_end><block_end>ctx=tensor.grad_fn<assert_stmt>type(ctx).__name__<eq>backward_name<line_sep>ref=weakref.ref(ctx)<del_stmt>ctx<del_stmt>tensor<line_sep>gc.collect()<assert_stmt>ref()<is><none># Test #2
<block_end><if_stmt>torch.cuda.is_available()<block_start>torch.cuda.synchronize()<line_sep><return>torch.cuda.max_memory_allocated()<block_end><else_stmt><block_start><return>0<block_end><block_end># Computations involving the start or not operate differently, so we take the max over both
memory_used=max(one_iteration(0 <none>) one_iteration(1 <none>))<for_stmt>start,end _start_end(path_obj.size(1) extrarandom)# This one seems to be a bit inconsistent with how much memory is used on each run, so we give some
# leeway by doubling
<block_start><assert_stmt>one_iteration(start end)<le>2<times>memory_used<block_end><block_end> |