Unnamed: 0 (int64, 0 – 389k) | code (stringlengths 26 – 79.6k) | docstring (stringlengths 1 – 46.9k)
---|---|---|
388,300 | def fixpointmethod(self, cfg_node):
JOIN = self.join(cfg_node)
if isinstance(cfg_node, AssignmentNode):
arrow_result = JOIN
if cfg_node.left_hand_side not in cfg_node.right_hand_side_variables:
arrow_result = self.arrow(JOIN, cfg_node.left_hand_side)
arrow_result = arrow_result | self.lattice.el2bv[cfg_node]
constraint_table[cfg_node] = arrow_result
else:
constraint_table[cfg_node] = JOIN | The most important part of PyT, where we perform
the variant of reaching definitions to find where sources reach. |
388,301 | def animate(self, animation, static, score, best, appear):
surface = pygame.Surface((self.game_width, self.game_height), 0)
surface.fill(self.BACKGROUND)
for y in range(self.COUNT_Y):
for x in range(self.COUNT_X):
x1, y1 = self.get_tile_location(x, y)
x1 -= self.origin[0]
y1 -= self.origin[1]
surface.blit(self.tiles[static.get((x, y), 0)], (x1, y1))
clock = pygame.time.Clock()
if score:
score_label = self.label_font.render( % score, True, (119, 110, 101))
w1, h1 = score_label.get_size()
if best:
best_label = self.label_font.render( % best, True, (119, 110, 101))
w2, h2 = best_label.get_size()
for frame in range(self.ANIMATION_FRAMES):
clock.tick(60)
pygame.event.pump()
self.screen.blit(surface, self.origin)
dt = (frame + 0.) / self.ANIMATION_FRAMES
for tile in animation:
self.screen.blit(self.tiles[tile.value], tile.get_position(dt))
scale = dt ** 0.5
w, h = int(self.cell_width * scale) & ~1, int(self.cell_height * scale) & ~1
for x, y, value in appear:
self.screen.blit(self._scale_tile(value, w, h),
self._center_tile(self.get_tile_location(x, y), (w, h)))
if best or score:
(x1, y1), (x2, y2), w, h = self.draw_scores()
if score:
self.screen.blit(score_label, (x1 + (w - w1) / 2, y1 + (h - h1) / 2 - dt * h))
if best:
self.screen.blit(best_label, (x2 + (w - w2) / 2, y2 + (h - h2) / 2 - dt * h))
pygame.display.flip() | Handle animation. |
388,302 | def compute(self, inputVector, learn, activeArray, applyLateralInhibition=True):
if not isinstance(inputVector, np.ndarray):
raise TypeError("Input vector must be a numpy array, not %s" %
str(type(inputVector)))
if inputVector.size != self._numInputs:
raise ValueError(
"Input vector dimensions don't match. Expecting %s but got %s" % (
inputVector.size, self._numInputs))
self._updateBookeepingVars(learn)
inputVector = np.array(inputVector, dtype=realDType)
inputVector.reshape(-1)
self._overlaps = self._calculateOverlap(inputVector)
if learn:
self._boostedOverlaps = self._boostFactors * self._overlaps
else:
self._boostedOverlaps = self._overlaps
if applyLateralInhibition == True:
activeColumns = self._inhibitColumnsWithLateral(self._boostedOverlaps, self.lateralConnections)
else:
activeColumns = self._inhibitColumns(self._boostedOverlaps)
activeArray.fill(0)
activeArray[activeColumns] = 1.0
if learn:
self._adaptSynapses(inputVector, activeColumns, self._boostedOverlaps)
self._updateDutyCycles(self._overlaps, activeColumns)
self._bumpUpWeakColumns()
self._updateBoostFactors()
self._updateAvgActivityPairs(activeArray)
epsilon = self.lateralLearningRate
if epsilon > 0:
self._updateLateralConnections(epsilon, self.avgActivityPairs)
if self._isUpdateRound():
self._updateInhibitionRadius()
self._updateMinDutyCycles()
return activeArray | This is the primary public method of the LateralPooler class. This
function takes an input vector and outputs the indices of the active columns.
If 'learn' is set to True, this method also updates the permanences of the
columns and their lateral inhibitory connection weights. |
388,303 | def get_page(self, index=None):
if index is None:
widget = self.pages_widget.currentWidget()
else:
widget = self.pages_widget.widget(index)
return widget.widget() | Return page widget |
388,304 | def _adaptSynapses(self, inputVector, activeColumns, synPermActiveInc, synPermInactiveDec):
inputIndices = numpy.where(inputVector > 0)[0]
permChanges = numpy.zeros(self.getNumInputs(), dtype=REAL_DTYPE)
permChanges.fill(-1 * synPermInactiveDec)
permChanges[inputIndices] = synPermActiveInc
perm = numpy.zeros(self.getNumInputs(), dtype=REAL_DTYPE)
potential = numpy.zeros(self.getNumInputs(), dtype=REAL_DTYPE)
for i in activeColumns:
self.getPermanence(i, perm)
self.getPotential(i, potential)
maskPotential = numpy.where(potential > 0)[0]
perm[maskPotential] += permChanges[maskPotential]
self._updatePermanencesForColumn(perm, i, raisePerm=False) | The primary method in charge of learning. Adapts the permanence values of
the synapses based on the input vector, and the chosen columns after
inhibition round. Permanence values are increased for synapses connected to
input bits that are turned on, and decreased for synapses connected to
input bits that are turned off.
Parameters:
----------------------------
@param inputVector:
A numpy array of 0's and 1's that comprises the input to
the spatial pooler. There exists an entry in the array
for every input bit.
@param activeColumns:
An array containing the indices of the columns that
survived inhibition.
@param synPermActiveInc:
Permanence increment for active inputs
@param synPermInactiveDec:
Permanence decrement for inactive inputs |
388,305 | def to_array(tensor):
if tensor.HasField("segment"):
raise ValueError(
"Currently not supporting loading segments.")
if tensor.data_type == TensorProto.UNDEFINED:
raise ValueError("The data type is not defined.")
tensor_dtype = tensor.data_type
np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[tensor_dtype]
storage_type = mapping.TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE[tensor_dtype]
storage_np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[storage_type]
storage_field = mapping.STORAGE_TENSOR_TYPE_TO_FIELD[storage_type]
dims = tensor.dims
if tensor.data_type == TensorProto.STRING:
utf8_strings = getattr(tensor, storage_field)
ss = list(s.decode() for s in utf8_strings)
return np.asarray(ss).astype(np_dtype).reshape(dims)
if tensor.HasField("raw_data"):
return np.frombuffer(
tensor.raw_data,
dtype=np_dtype).reshape(dims)
else:
data = getattr(tensor, storage_field),
if (tensor_dtype == TensorProto.COMPLEX64
or tensor_dtype == TensorProto.COMPLEX128):
data = combine_pairs_to_complex(data)
return (
np.asarray(
data,
dtype=storage_np_dtype)
.astype(np_dtype)
.reshape(dims)
) | Converts a tensor def object to a numpy array.
Inputs:
tensor: a TensorProto object.
Returns:
arr: the converted array. |
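A short usage sketch for the converter above, assuming the `onnx` package is installed and `to_array` is defined as shown; the tensor name and values are made up for illustration.

```python
# Hypothetical usage sketch (assumes the onnx package; names and values are illustrative).
from onnx import helper, TensorProto

t = helper.make_tensor("t", TensorProto.FLOAT, dims=[2, 2], vals=[1.0, 2.0, 3.0, 4.0])
arr = to_array(t)          # -> numpy float32 array of shape (2, 2)
print(arr)
```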
388,306 | def calculate_incorrect_name_dict(graph: BELGraph) -> Mapping[str, str]:
missing = defaultdict(list)
for _, e, ctx in graph.warnings:
if not isinstance(e, (MissingNamespaceNameWarning, MissingNamespaceRegexWarning)):
continue
missing[e.namespace].append(e.name)
return dict(missing) | Group all of the incorrect identifiers in a dict of {namespace: list of erroneous names}.
:return: A dictionary of {namespace: list of erroneous names} |
388,307 | def delta(feat, N):
if N < 1:
raise ValueError()
NUMFRAMES = len(feat)
denominator = 2 * sum([i**2 for i in range(1, N+1)])
delta_feat = numpy.empty_like(feat)
padded = numpy.pad(feat, ((N, N), (0, 0)), mode=)
for t in range(NUMFRAMES):
delta_feat[t] = numpy.dot(numpy.arange(-N, N+1), padded[t : t+2*N+1]) / denominator
return delta_feat | Compute delta features from a feature vector sequence.
:param feat: A numpy array of size (NUMFRAMES by number of features) containing features. Each row holds 1 feature vector.
:param N: For each frame, calculate delta features based on preceding and following N frames
:returns: A numpy array of size (NUMFRAMES by number of features) containing delta features. Each row holds 1 delta feature vector. |
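A minimal usage sketch for the delta computation above, assuming `delta` is defined as shown (with its elided `mode` argument restored); the feature matrix is random and purely illustrative.

```python
# Hypothetical usage sketch: 100 frames of 13 MFCC-like features (random, illustrative).
import numpy

feat = numpy.random.rand(100, 13)
d = delta(feat, N=2)        # delta features computed over +/- 2 neighbouring frames
assert d.shape == feat.shape
```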
388,308 | def log_level_from_string(str_level):
levels = {
: logging.CRITICAL,
: logging.ERROR,
: logging.WARNING,
: logging.INFO,
: logging.DEBUG,
}
try:
return levels[str_level.upper()]
except KeyError:
pass
except AttributeError:
if str_level in [logging.DEBUG, logging.INFO, logging.WARNING,
logging.ERROR, logging.CRITICAL]:
return str_level
return logging.NOTSET | Returns the proper log level core based on a given string
:param str_level: Log level string
:return: The log level code |
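A small usage sketch of the lookup above; the dictionary keys are elided in the listing, so the first line assumes the usual level names implied by the docstring.

```python
# Hypothetical usage sketch, assuming the levels dict maps "DEBUG", "INFO", ... as usual.
import logging

assert log_level_from_string("debug") == logging.DEBUG          # assumes the elided keys
assert log_level_from_string(logging.WARNING) == logging.WARNING  # already a level code
assert log_level_from_string("nonsense") == logging.NOTSET        # unknown names fall through
```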
388,309 | def extend_schema(schema, documentAST=None):
assert isinstance(schema, GraphQLSchema), "Must provide valid GraphQLSchema"
assert documentAST and isinstance(
documentAST, ast.Document
), "Must provide valid Document AST"
type_definition_map = {}
type_extensions_map = defaultdict(list)
for _def in documentAST.definitions:
if isinstance(
_def,
(
ast.ObjectTypeDefinition,
ast.InterfaceTypeDefinition,
ast.EnumTypeDefinition,
ast.UnionTypeDefinition,
ast.ScalarTypeDefinition,
ast.InputObjectTypeDefinition,
),
):
extended_type_name = _def.definition.name.value
existing_type = schema.get_type(extended_type_name)
if not existing_type:
raise GraphQLError(
(
+ "exist in the existing schema."
).format(extended_type_name),
[_def.definition],
)
if not isinstance(existing_type, GraphQLObjectType):
raise GraphQLError(
.format(extended_type_name),
[_def.definition],
)
type_extensions_map[extended_type_name].append(_def)
def get_type_from_def(type_def):
type = _get_named_type(type_def.name)
assert type, "Invalid schema"
return type
def get_type_from_AST(astNode):
type = _get_named_type(astNode.name.value)
if not type:
raise GraphQLError(
(
+ "either in the original schema, or is added in a type definition."
).format(astNode.name.value),
[astNode],
)
return type
def _get_named_type(typeName):
cached_type_def = type_def_cache.get(typeName)
if cached_type_def:
return cached_type_def
existing_type = schema.get_type(typeName)
if existing_type:
type_def = extend_type(existing_type)
type_def_cache[typeName] = type_def
return type_def
type_ast = type_definition_map.get(typeName)
if type_ast:
type_def = build_type(type_ast)
type_def_cache[typeName] = type_def
return type_def
+ "schema. It cannot also be defined in this type extension."
).format(type.name, field_name),
[field],
)
new_field_map[field_name] = GraphQLField(
build_field_type(field.type),
args=build_input_values(field.arguments),
resolver=cannot_execute_client_schema,
)
return new_field_map
def extend_field_type(type):
if isinstance(type, GraphQLList):
return GraphQLList(extend_field_type(type.of_type))
if isinstance(type, GraphQLNonNull):
return GraphQLNonNull(extend_field_type(type.of_type))
return get_type_from_def(type)
def build_type(type_ast):
_type_build = {
ast.ObjectTypeDefinition: build_object_type,
ast.InterfaceTypeDefinition: build_interface_type,
ast.UnionTypeDefinition: build_union_type,
ast.ScalarTypeDefinition: build_scalar_type,
ast.EnumTypeDefinition: build_enum_type,
ast.InputObjectTypeDefinition: build_input_object_type,
}
func = _type_build.get(type(type_ast))
if func:
return func(type_ast)
def build_object_type(type_ast):
return GraphQLObjectType(
type_ast.name.value,
interfaces=lambda: build_implemented_interfaces(type_ast),
fields=lambda: build_field_map(type_ast),
)
def build_interface_type(type_ast):
return GraphQLInterfaceType(
type_ast.name.value,
fields=lambda: build_field_map(type_ast),
resolve_type=cannot_execute_client_schema,
)
def build_union_type(type_ast):
return GraphQLUnionType(
type_ast.name.value,
types=list(map(get_type_from_AST, type_ast.types)),
resolve_type=cannot_execute_client_schema,
)
def build_scalar_type(type_ast):
return GraphQLScalarType(
type_ast.name.value,
serialize=lambda *args, **kwargs: None,
parse_value=lambda *args, **kwargs: False,
parse_literal=lambda *args, **kwargs: False,
)
def build_enum_type(type_ast):
return GraphQLEnumType(
type_ast.name.value,
values={v.name.value: GraphQLEnumValue() for v in type_ast.values},
)
def build_input_object_type(type_ast):
return GraphQLInputObjectType(
type_ast.name.value,
fields=lambda: build_input_values(type_ast.fields, GraphQLInputObjectField),
)
def build_implemented_interfaces(type_ast):
return list(map(get_type_from_AST, type_ast.interfaces))
def build_field_map(type_ast):
return {
field.name.value: GraphQLField(
build_field_type(field.type),
args=build_input_values(field.arguments),
resolver=cannot_execute_client_schema,
)
for field in type_ast.fields
}
def build_input_values(values, input_type=GraphQLArgument):
input_values = OrderedDict()
for value in values:
type = build_field_type(value.type)
input_values[value.name.value] = input_type(
type, default_value=value_from_ast(value.default_value, type)
)
return input_values
def build_field_type(type_ast):
if isinstance(type_ast, ast.ListType):
return GraphQLList(build_field_type(type_ast.type))
if isinstance(type_ast, ast.NonNullType):
return GraphQLNonNull(build_field_type(type_ast.type))
return get_type_from_AST(type_ast)
if not type_extensions_map and not type_definition_map:
return schema
type_def_cache = {
"String": GraphQLString,
"Int": GraphQLInt,
"Float": GraphQLFloat,
"Boolean": GraphQLBoolean,
"ID": GraphQLID,
"__Schema": __Schema,
"__Directive": __Directive,
"__DirectiveLocation": __DirectiveLocation,
"__Type": __Type,
"__Field": __Field,
"__InputValue": __InputValue,
"__EnumValue": __EnumValue,
"__TypeKind": __TypeKind,
}
query_type = get_type_from_def(schema.get_query_type())
existing_mutation_type = schema.get_mutation_type()
mutationType = (
existing_mutation_type and get_type_from_def(existing_mutation_type) or None
)
existing_subscription_type = schema.get_subscription_type()
subscription_type = (
existing_subscription_type
and get_type_from_def(existing_subscription_type)
or None
)
types = [get_type_from_def(_def) for _def in schema.get_type_map().values()]
types += [get_type_from_AST(_def) for _def in type_definition_map.values()]
return GraphQLSchema(
query=query_type,
mutation=mutationType,
subscription=subscription_type,
directives=schema.get_directives(),
types=types,
) | Produces a new schema given an existing schema and a document which may
contain GraphQL type extensions and definitions. The original schema will
remain unaltered.
Because a schema represents a graph of references, a schema cannot be
extended without effectively making an entire copy. We do not know until it's
too late if subgraphs remain unchanged.
This algorithm copies the provided schema, applying extensions while
producing the copy. The original schema remains unaltered. |
388,310 | def _get_normalized_args(parser):
env = os.environ
if in env and env[] != sys.argv[0] and len(sys.argv) >= 1 and " " in sys.argv[1]:
return parser.parse_args(shlex.split(sys.argv[1]) + sys.argv[2:])
else:
return parser.parse_args() | Return the parsed command line arguments.
Support the case when executed from a shebang, where all the
parameters come in sys.argv[1] in a single string separated
by spaces (in this case, the third parameter is what is being
executed) |
388,311 | def reload_config(self, async=True, verbose=False):
api_version = float(self.api_version()[])
if api_version < 4.5:
async = False
url = .format(
self.rest_url, , if async else
)
return self.__auth_req_post(url, verbose=verbose) | Initiate a config reload. This may take a while on large installations. |
388,312 | def get_user_contact_lists_contacts(self, id, contact_list_id, **data):
return self.get("/users/{0}/contact_lists/{1}/contacts/".format(id, contact_list_id), data=data) | GET /users/:id/contact_lists/:contact_list_id/contacts/
Returns the :format:`contacts <contact>` on the contact list
as ``contacts``. |
388,313 | def empty(cls: Type[BoardT], *, chess960: bool = False) -> BoardT:
return cls(None, chess960=chess960) | Creates a new empty board. Also see :func:`~chess.Board.clear()`. |
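A usage sketch, assuming the `python-chess` package is installed and this is its `Board.empty()` classmethod.

```python
# Hypothetical usage sketch, assuming the python-chess package is installed.
import chess

board = chess.Board.empty()
print(board.fen())          # 8/8/8/8/8/8/8/8 w - - 0 1  (no pieces on the board)
```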
388,314 | def coordinate(self, panes=[], index=0):
y = 0
for i, element in enumerate(self.panes):
x = 0
if isinstance(element, list):
current_height = 0
for j, pane in enumerate(element):
if pane.hidden: continue
current_width = pane.width
current_height = pane.height
upper = ((y, x), (y, x+current_width))
lower = ((y+(current_height if current_height > 1 else 0), x),
(y+(current_height if current_height > 1 else 0), x+current_width))
pane.coords = [upper, lower]
x += current_width
y += (current_height+1 if current_height > 1 else 1)
else:
if element.hidden: continue
current_width = element.width
current_height = element.height
upper = ((y, x), (y, x+current_width))
lower = ((y+(current_height if current_height > 1 else 0), x),
(y+(current_height if current_height > 1 else 0), x+current_width))
element.coords = [upper, lower]
y += (current_height+1 if current_height > 1 else 1)
if self.debug:
coordinates = "Coordinates: " + str([p.coords for p in self])
if len(coordinates) > self.width:
coordinates = coordinates[:self.width - 3]
coordinates +=
self.addstr(self.height-3, 0, coordinates) | Update pane coordinate tuples based on their height and width relative to other panes
within the dimensions of the current window.
We account for panes with a height of 1 where the bottom coordinates are the same as the top.
Account for floating panes and self-coordinating panes adjacent to panes set to EXPAND.
Coordinates are of the form:
[
((top-left-from-top, top-left-from-left),
(top-right-from-top, top-right-from-left)),
((bottom-left-from-top, bottom-left-from-left),
(bottom-right-from-top, bottom-right-from-left))
]
We can then use these to determine things such as whether corners are inverted and how
many characters may be drawn |
388,315 | def _is_dynamic(v: Var) -> bool:
return (
Maybe(v.meta)
.map(lambda m: m.get(SYM_DYNAMIC_META_KEY, None))
.or_else_get(False)
) | Return True if the Var holds a value which should be compiled to a dynamic
Var access. |
388,316 | def as_create_table(self, table_name, overwrite=False):
exec_sql =
sql = self.stripped()
if overwrite:
exec_sql = f
exec_sql += f
return exec_sql | Reformats the query into the create table as query.
Works only for the single select SQL statements, in all other cases
the sql query is not modified.
:param superset_query: string, sql query that will be executed
:param table_name: string, will contain the results of the
query execution
:param overwrite: boolean, the table `table_name` will be dropped if true
:return: string, create table as query |
388,317 | def _format_vector(self, vecs, form=):
if form == :
return np.meshgrid(*vecs, indexing=)
elif form == :
vecs = np.meshgrid(*vecs, indexing=)
return np.rollaxis(np.array(np.broadcast_arrays(*vecs)),0,self.dim+1)
elif form == :
return vecs
else:
return [v[self._coord_slicers[i]] for i,v in enumerate(vecs)] | Format a 3d vector field in certain ways, see `coords` for a description
of each formatting method. |
388,318 | def focus_window(winhandle, path=None, name=None, sleeptime=.01):
import utool as ut
import time
print( + winhandle)
args = [, , winhandle]
ut.cmd(*args, verbose=False, quiet=True)
time.sleep(sleeptime) | sudo apt-get install xautomation
apt-get install autokey-gtk
wmctrl -xa gnome-terminal.Gnome-terminal
wmctrl -xl |
388,319 | def _next_state(index, event_time, transition_set, population_view):
if len(transition_set) == 0 or index.empty:
return
outputs, decisions = transition_set.choose_new_state(index)
groups = _groupby_new_state(index, outputs, decisions)
if groups:
for output, affected_index in sorted(groups, key=lambda x: str(x[0])):
if output == :
pass
elif isinstance(output, Transient):
if not isinstance(output, State):
raise ValueError(.format(output))
output.transition_effect(affected_index, event_time, population_view)
output.next_state(affected_index, event_time, population_view)
elif isinstance(output, State):
output.transition_effect(affected_index, event_time, population_view)
else:
raise ValueError(.format(output)) | Moves a population between different states using information from a `TransitionSet`.
Parameters
----------
index : iterable of ints
An iterable of integer labels for the simulants.
event_time : pandas.Timestamp
When this transition is occurring.
transition_set : TransitionSet
A set of potential transitions available to the simulants.
population_view : vivarium.framework.population.PopulationView
A view of the internal state of the simulation. |
388,320 | def message(self, value):
if value is not None:
assert type(value) in (unicode, QString), \
" attribute: type is not or !".format("message", value)
self.__message = value | Setter for **self.__message** attribute.
:param value: Attribute value.
:type value: unicode |
388,321 | def guess_message_type(message):
if isinstance(message, APPConfigMessage):
return MsgType.CONFIG
elif isinstance(message, APPJoinMessage):
return MsgType.JOIN
elif isinstance(message, APPDataMessage):
return MsgType.DATA
elif isinstance(message, APPUpdateMessage):
return MsgType.UPDATE
elif isinstance(message, APPUnjoinMessage):
return MsgType.UNJOIN
return None | Guess the message type based on the class of message
:param message: Message to guess the type for
:type message: APPMessage
:return: The corresponding message type (MsgType) or None if not found
:rtype: None | int |
388,322 | def pp_prep(self, mlt_df):
if len(self.pp_props) == 0:
return
if self.pp_space is None:
self.logger.warn("pp_space is None, using 10...\n")
self.pp_space=10
if self.pp_geostruct is None:
self.logger.warn("pp_geostruct is None,"\
" using ExpVario with contribution=1 and a=(pp_space*max(delr,delc))")
pp_dist = self.pp_space * float(max(self.m.dis.delr.array.max(),
self.m.dis.delc.array.max()))
v = pyemu.geostats.ExpVario(contribution=1.0,a=pp_dist)
self.pp_geostruct = pyemu.geostats.GeoStruct(variograms=v)
pp_df = mlt_df.loc[mlt_df.suffix==self.pp_suffix,:]
layers = pp_df.layer.unique()
pp_dict = {l:list(pp_df.loc[pp_df.layer==l,"prefix"].unique()) for l in layers}
for i,l in enumerate(layers):
p = set(pp_dict[l])
for ll in layers[i+1:]:
pp = set(pp_dict[ll])
d = pp - p
pp_dict[ll] = list(d)
pp_array_file = {p:m for p,m in zip(pp_df.prefix,pp_df.mlt_file)}
self.logger.statement("pp_dict: {0}".format(str(pp_dict)))
self.log("calling setup_pilot_point_grid()")
if self.use_pp_zones:
if np.all([isinstance(v, dict) for v in self.k_zone_dict.values()]):
ib = {p.split()[-1]: k_dict for p, k_dict in self.k_zone_dict.items()}
for attr in pp_df.attr_name.unique():
if attr not in [p.split()[-1] for p in ib.keys()]:
if not in ib.keys():
warnings.warn("Dictionary of dictionaries passed as zones, {0} not in keys: {1}. "
"Will use ibound for zones".format(attr, ib.keys()), PyemuWarning)
else:
self.logger.statement(
"Dictionary of dictionaries passed as pp zones, "
"using for {0}".format(attr))
if not in ib.keys():
ib[] = {k: self.m.bas6.ibound[k].array for k in range(self.m.nlay)}
else:
ib = {: self.k_zone_dict}
else:
ib = {}
for k in range(self.m.nlay):
a = self.m.bas6.ibound[k].array.copy()
a[a>0] = 1
ib[k] = a
for k,i in ib.items():
if np.any(i<0):
u,c = np.unique(i[i>0], return_counts=True)
counts = dict(zip(u,c))
mx = -1.0e+10
imx = None
for u,c in counts.items():
if c > mx:
mx = c
imx = u
self.logger.warn("resetting negative ibound values for PP zone"+ \
"array in layer {0} : {1}".format(k+1,u))
i[i<0] = u
ib = {: ib}
pp_df = pyemu.pp_utils.setup_pilotpoints_grid(self.m,
ibound=ib,
use_ibound_zones=self.use_pp_zones,
prefix_dict=pp_dict,
every_n_cell=self.pp_space,
pp_dir=self.m.model_ws,
tpl_dir=self.m.model_ws,
shapename=os.path.join(
self.m.model_ws,"pp.shp"))
self.logger.statement("{0} pilot point parameters created".
format(pp_df.shape[0]))
self.logger.statement("pilot point :{0}".
format(.join(pp_df.pargp.unique())))
self.log("calling setup_pilot_point_grid()")
pargp = pp_df.pargp.unique()
pp_dfs_k = {}
fac_files = {}
pp_processed = set()
pp_df.loc[:,"fac_file"] = np.NaN
for pg in pargp:
ks = pp_df.loc[pp_df.pargp==pg,"k"].unique()
if len(ks) == 0:
self.logger.lraise("something is wrong in fac calcs for par group {0}".format(pg))
if len(ks) == 1:
if np.all([isinstance(v, dict) for v in ib.values()]):
if np.any([pg.startswith(p) for p in ib.keys()]):
p = next(p for p in ib.keys() if pg.startswith(p))
ib_k = ib[p][ks[0]]
else:
p =
ib_k = ib[p][ks[0]]
else:
ib_k = ib[ks[0]]
if len(ks) != 1:
self.logger.warn("multiple k values for {0},forming composite zone array...".format(pg))
ib_k = np.zeros((self.m.nrow,self.m.ncol))
for k in ks:
t = ib[k].copy()
t[t<1] = 0
ib_k[t>0] = t[t>0]
k = int(ks[0])
kattr_id = "{}_{}".format(k, p)
kp_id = "{}_{}".format(k, pg)
if kp_id not in pp_dfs_k.keys():
self.log("calculating factors for p={0}, k={1}".format(pg, k))
fac_file = os.path.join(self.m.model_ws, "pp_k{0}.fac".format(kattr_id))
var_file = fac_file.replace("{0}.fac", ".var.dat")
pp_df_k = pp_df.loc[pp_df.pargp == pg]
if kattr_id not in pp_processed:
self.logger.statement("saving krige variance file:{0}"
.format(var_file))
self.logger.statement("saving krige factors file:{0}"
.format(fac_file))
ok_pp = pyemu.geostats.OrdinaryKrige(self.pp_geostruct, pp_df_k)
ok_pp.calc_factors_grid(self.m.sr, var_filename=var_file, zone_array=ib_k)
ok_pp.to_grid_factors_file(fac_file)
pp_processed.add(kattr_id)
fac_files[kp_id] = fac_file
self.log("calculating factors for p={0}, k={1}".format(pg, k))
pp_dfs_k[kp_id] = pp_df_k
for kp_id, fac_file in fac_files.items():
k = int(kp_id.split()[0])
pp_prefix = kp_id.split(, 1)[-1]
fac_file = os.path.split(fac_file)[-1]
self.log("processing pp_prefix:{0}".format(pp_prefix))
if pp_prefix not in pp_array_file.keys():
self.logger.lraise("{0} not in self.pp_array_file.keys()".
format(pp_prefix,.
join(pp_array_file.keys())))
out_file = os.path.join(self.arr_mlt,os.path.split(pp_array_file[pp_prefix])[-1])
pp_files = pp_df.loc[pp_df.pp_filename.apply(lambda x: pp_prefix in x),"pp_filename"]
if pp_files.unique().shape[0] != 1:
self.logger.lraise("wrong number of pp_files found:{0}".format(.join(pp_files)))
pp_file = os.path.split(pp_files.iloc[0])[-1]
pp_df.loc[pp_df.pargp==pp_prefix,"fac_file"] = fac_file
pp_df.loc[pp_df.pargp==pp_prefix,"pp_file"] = pp_file
pp_df.loc[pp_df.pargp==pp_prefix,"out_file"] = out_file
pp_df.loc[:,"pargp"] = pp_df.pargp.apply(lambda x: "pp_{0}".format(x))
out_files = mlt_df.loc[mlt_df.mlt_file.
apply(lambda x: x.endswith(self.pp_suffix)),"mlt_file"]
for out_file in out_files:
pp_df_pf = pp_df.loc[pp_df.out_file==out_file,:]
fac_files = pp_df_pf.fac_file
if fac_files.unique().shape[0] != 1:
self.logger.lraise("wrong number of fac files:{0}".format(str(fac_files.unique())))
fac_file = fac_files.iloc[0]
pp_files = pp_df_pf.pp_file
if pp_files.unique().shape[0] != 1:
self.logger.lraise("wrong number of pp files:{0}".format(str(pp_files.unique())))
pp_file = pp_files.iloc[0]
mlt_df.loc[mlt_df.mlt_file==out_file,"fac_file"] = fac_file
mlt_df.loc[mlt_df.mlt_file==out_file,"pp_file"] = pp_file
self.par_dfs[self.pp_suffix] = pp_df
mlt_df.loc[mlt_df.suffix==self.pp_suffix,"tpl_file"] = np.NaN | prepare pilot point based parameterizations
Parameters
----------
mlt_df : pandas.DataFrame
a dataframe with multiplier array information
Note
----
calls pyemu.pp_utils.setup_pilot_points_grid() |
388,323 | def filter_minreads_samples_from_table(table, minreads=1, inplace=True):
logger = logging.getLogger(__name__)
logger.debug( % minreads)
samp_sum = table.sum(axis=)
samp_ids = table.ids(axis=)
bad_samples = samp_ids[samp_sum < minreads]
if len(bad_samples) > 0:
logger.warn(
% (len(bad_samples), minreads))
table = table.filter(bad_samples, axis=,
inplace=inplace, invert=True)
else:
logger.debug( % minreads)
return table | Filter samples from biom table that have less than
minreads reads total
Parameters
----------
table : biom.Table
the biom table to filter
minreads : int (optional)
the minimal number of reads in a sample in order to keep it
inplace : bool (optional)
if True, filter the biom table in place, if false create a new copy
Returns
-------
table : biom.Table
the filtered biom table |
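A small usage sketch, assuming the `biom-format` package is installed and that the elided string arguments in the listing above are restored (sample axis, etc.); the table contents are made up.

```python
# Hypothetical usage sketch, assuming the biom-format package; data is illustrative.
import numpy as np
from biom.table import Table

data = np.array([[5, 0],
                 [3, 1]])                              # 2 observations x 2 samples
table = Table(data, ['OTU1', 'OTU2'], ['S1', 'S2'])
filtered = filter_minreads_samples_from_table(table, minreads=2, inplace=False)
print(filtered.ids(axis='sample'))                     # ['S1'] -- S2 has only 1 read in total
```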
388,324 | def get(self, sid):
return ReservationContext(
self._version,
workspace_sid=self._solution[],
worker_sid=self._solution[],
sid=sid,
) | Constructs a ReservationContext
:param sid: The sid
:returns: twilio.rest.taskrouter.v1.workspace.worker.reservation.ReservationContext
:rtype: twilio.rest.taskrouter.v1.workspace.worker.reservation.ReservationContext |
388,325 | def solveAndNotify(proto, exercise):
exercise.solvedBy(proto.user)
proto.callRemote(ce.NotifySolved,
identifier=exercise.identifier,
title=exercise.title) | The user at the given AMP protocol has solved the given exercise.
This will log the solution and notify the user. |
388,326 | def make_type(typename, lineno, implicit=False):
assert isinstance(typename, str)
if not SYMBOL_TABLE.check_is_declared(typename, lineno, ):
return None
type_ = symbols.TYPEREF(SYMBOL_TABLE.get_entry(typename), lineno, implicit)
return type_ | Converts a typename identifier (e.g. 'float') to
its internal symbol table entry representation.
Creates a type usage symbol stored in an AST
E.g. DIM a As Integer
will access Integer type |
388,327 | def to_import(self):
properties = self.get_properties()
fw_uid = self.get_framework_uuid()
try:
name = properties[pelix.remote.PROP_ENDPOINT_NAME]
except KeyError:
name = "{0}.{1}".format(fw_uid, self.get_service_id())
configurations = self.get_configuration_types()
specifications = self.get_interfaces()
return ImportEndpoint(
self.get_id(),
fw_uid,
configurations,
name,
specifications,
properties,
) | Converts an EndpointDescription bean to an ImportEndpoint
:return: An ImportEndpoint bean |
388,328 | def revnet_cifar_base():
hparams = revnet_base()
hparams.num_channels_init_block = 32
hparams.first_batch_norm = [False, True, True]
hparams.init_stride = 1
hparams.init_kernel_size = 3
hparams.init_maxpool = False
hparams.strides = [1, 2, 2]
hparams.batch_size = 128
hparams.weight_decay = 1e-4
hparams.learning_rate = 0.1
hparams.learning_rate_cosine_cycle_steps = 5000
return hparams | Tiny hparams suitable for CIFAR/etc. |
388,329 | def api_representation(self, content_type):
payload = dict(Subject=self.subject, Body=dict(ContentType=content_type, Content=self.body))
if self.sender is not None:
payload.update(From=self.sender.api_representation())
if any(isinstance(item, str) for item in self.to):
self.to = [Contact(email=email) for email in self.to]
recipients = [contact.api_representation() for contact in self.to]
payload.update(ToRecipients=recipients)
if self.cc:
if any(isinstance(email, str) for email in self.cc):
self.cc = [Contact(email) for email in self.cc]
cc_recipients = [contact.api_representation() for contact in self.cc]
payload.update(CcRecipients=cc_recipients)
if self.bcc:
if any(isinstance(email, str) for email in self.bcc):
self.bcc = [Contact(email) for email in self.bcc]
bcc_recipients = [contact.api_representation() for contact in self.bcc]
payload.update(BccRecipients=bcc_recipients)
if self._attachments:
payload.update(Attachments=[attachment.api_representation() for attachment in self._attachments])
payload.update(Importance=str(self.importance))
return dict(Message=payload) | Returns the JSON representation of this message required for making requests to the API.
Args:
content_type (str): Either 'HTML' or 'Text' |
388,330 | def get_albums_for_artist(self, artist, full_album_art_uri=False):
subcategories = [artist]
result = self.get_album_artists(
full_album_art_uri=full_album_art_uri,
subcategories=subcategories,
complete_result=True)
reduced = [item for item in result if item.__class__ == DidlMusicAlbum]
result[:] = reduced
result._metadata.update({
: reduced,
: ,
: len(reduced),
: len(reduced)
})
return result | Get an artist's albums.
Args:
artist (str): an artist's name.
full_album_art_uri: whether the album art URI should be
absolute (i.e. including the IP address). Default `False`.
Returns:
A `SearchResult` instance. |
388,331 | def disable_hostgroup_svc_checks(self, hostgroup):
for host_id in hostgroup.get_hosts():
if host_id in self.daemon.hosts:
for service_id in self.daemon.hosts[host_id].services:
if service_id in self.daemon.services:
self.disable_svc_check(self.daemon.services[service_id]) | Disable service checks for a hostgroup
Format of the line that triggers function call::
DISABLE_HOSTGROUP_SVC_CHECKS;<hostgroup_name>
:param hostgroup: hostgroup to disable
:type hostgroup: alignak.objects.hostgroup.Hostgroup
:return: None |
388,332 | def discover_engines(self, executor=None):
if executor is None:
executor = getattr(settings, , {}).get(, )
self.executor = self.load_executor(executor)
logger.info(
__("Loaded executor.", str(self.executor.__class__.__module__).replace(, ))
)
expression_engines = getattr(settings, , [])
self.expression_engines = self.load_expression_engines(expression_engines)
logger.info(__(
"Found {} expression engines: {}", len(self.expression_engines), .join(self.expression_engines.keys())
))
execution_engines = getattr(settings, , [])
self.execution_engines = self.load_execution_engines(execution_engines)
logger.info(__(
"Found {} execution engines: {}", len(self.execution_engines), .join(self.execution_engines.keys())
)) | Discover configured engines.
:param executor: Optional executor module override |
388,333 | def _sign(private_key, data, hash_algorithm, rsa_pss_padding=False):
if not isinstance(private_key, PrivateKey):
raise TypeError(pretty_message(
,
type_name(private_key)
))
if not isinstance(data, byte_cls):
raise TypeError(pretty_message(
,
type_name(data)
))
valid_hash_algorithms = set([, , , , , ])
if private_key.algorithm == and not rsa_pss_padding:
valid_hash_algorithms |= set([])
if hash_algorithm not in valid_hash_algorithms:
valid_hash_algorithms_error =
if private_key.algorithm == and not rsa_pss_padding:
valid_hash_algorithms_error +=
raise ValueError(pretty_message(
,
valid_hash_algorithms_error,
repr(hash_algorithm)
))
if private_key.algorithm != and rsa_pss_padding:
raise ValueError(pretty_message(
,
private_key.algorithm.upper()
))
if private_key.algorithm == and hash_algorithm == :
if len(data) > private_key.byte_size - 11:
raise ValueError(pretty_message(
,
private_key.byte_size,
len(data)
))
rsa = None
try:
rsa = libcrypto.EVP_PKEY_get1_RSA(private_key.evp_pkey)
if is_null(rsa):
handle_openssl_error(0)
buffer_size = libcrypto.EVP_PKEY_size(private_key.evp_pkey)
signature_buffer = buffer_from_bytes(buffer_size)
signature_length = libcrypto.RSA_private_encrypt(
len(data),
data,
signature_buffer,
rsa,
LibcryptoConst.RSA_PKCS1_PADDING
)
handle_openssl_error(signature_length)
return bytes_from_buffer(signature_buffer, signature_length)
finally:
if rsa:
libcrypto.RSA_free(rsa)
evp_md_ctx = None
rsa = None
dsa = None
dsa_sig = None
ec_key = None
ecdsa_sig = None
try:
if libcrypto_version_info < (1, 1):
evp_md_ctx = libcrypto.EVP_MD_CTX_create()
else:
evp_md_ctx = libcrypto.EVP_MD_CTX_new()
evp_md = {
: libcrypto.EVP_md5,
: libcrypto.EVP_sha1,
: libcrypto.EVP_sha224,
: libcrypto.EVP_sha256,
: libcrypto.EVP_sha384,
: libcrypto.EVP_sha512
}[hash_algorithm]()
if libcrypto_version_info < (1,):
if private_key.algorithm == and rsa_pss_padding:
digest = getattr(hashlib, hash_algorithm)(data).digest()
rsa = libcrypto.EVP_PKEY_get1_RSA(private_key.evp_pkey)
if is_null(rsa):
handle_openssl_error(0)
buffer_size = libcrypto.EVP_PKEY_size(private_key.evp_pkey)
em_buffer = buffer_from_bytes(buffer_size)
res = libcrypto.RSA_padding_add_PKCS1_PSS(
rsa,
em_buffer,
digest,
evp_md,
LibcryptoConst.EVP_MD_CTX_FLAG_PSS_MDLEN
)
handle_openssl_error(res)
signature_buffer = buffer_from_bytes(buffer_size)
signature_length = libcrypto.RSA_private_encrypt(
buffer_size,
em_buffer,
signature_buffer,
rsa,
LibcryptoConst.RSA_NO_PADDING
)
handle_openssl_error(signature_length)
elif private_key.algorithm == :
buffer_size = libcrypto.EVP_PKEY_size(private_key.evp_pkey)
signature_buffer = buffer_from_bytes(buffer_size)
signature_length = new(libcrypto, )
res = libcrypto.EVP_DigestInit_ex(evp_md_ctx, evp_md, null())
handle_openssl_error(res)
res = libcrypto.EVP_DigestUpdate(evp_md_ctx, data, len(data))
handle_openssl_error(res)
res = libcrypto.EVP_SignFinal(
evp_md_ctx,
signature_buffer,
signature_length,
private_key.evp_pkey
)
handle_openssl_error(res)
signature_length = deref(signature_length)
elif private_key.algorithm == :
digest = getattr(hashlib, hash_algorithm)(data).digest()
dsa = libcrypto.EVP_PKEY_get1_DSA(private_key.evp_pkey)
if is_null(dsa):
handle_openssl_error(0)
dsa_sig = libcrypto.DSA_do_sign(digest, len(digest), dsa)
if is_null(dsa_sig):
handle_openssl_error(0)
buffer_size = libcrypto.i2d_DSA_SIG(dsa_sig, null())
signature_buffer = buffer_from_bytes(buffer_size)
signature_pointer = buffer_pointer(signature_buffer)
signature_length = libcrypto.i2d_DSA_SIG(dsa_sig, signature_pointer)
handle_openssl_error(signature_length)
elif private_key.algorithm == :
digest = getattr(hashlib, hash_algorithm)(data).digest()
ec_key = libcrypto.EVP_PKEY_get1_EC_KEY(private_key.evp_pkey)
if is_null(ec_key):
handle_openssl_error(0)
ecdsa_sig = libcrypto.ECDSA_do_sign(digest, len(digest), ec_key)
if is_null(ecdsa_sig):
handle_openssl_error(0)
buffer_size = libcrypto.i2d_ECDSA_SIG(ecdsa_sig, null())
signature_buffer = buffer_from_bytes(buffer_size)
signature_pointer = buffer_pointer(signature_buffer)
signature_length = libcrypto.i2d_ECDSA_SIG(ecdsa_sig, signature_pointer)
handle_openssl_error(signature_length)
else:
buffer_size = libcrypto.EVP_PKEY_size(private_key.evp_pkey)
signature_buffer = buffer_from_bytes(buffer_size)
signature_length = new(libcrypto, , buffer_size)
evp_pkey_ctx_pointer_pointer = new(libcrypto, )
res = libcrypto.EVP_DigestSignInit(
evp_md_ctx,
evp_pkey_ctx_pointer_pointer,
evp_md,
null(),
private_key.evp_pkey
)
handle_openssl_error(res)
evp_pkey_ctx_pointer = unwrap(evp_pkey_ctx_pointer_pointer)
if rsa_pss_padding:
res = libcrypto.EVP_PKEY_CTX_ctrl(
evp_pkey_ctx_pointer,
LibcryptoConst.EVP_PKEY_RSA,
-1,
LibcryptoConst.EVP_PKEY_CTRL_RSA_PADDING,
LibcryptoConst.RSA_PKCS1_PSS_PADDING,
null()
)
handle_openssl_error(res)
res = libcrypto.EVP_PKEY_CTX_ctrl(
evp_pkey_ctx_pointer,
LibcryptoConst.EVP_PKEY_RSA,
LibcryptoConst.EVP_PKEY_OP_SIGN | LibcryptoConst.EVP_PKEY_OP_VERIFY,
LibcryptoConst.EVP_PKEY_CTRL_RSA_PSS_SALTLEN,
-1,
null()
)
handle_openssl_error(res)
res = libcrypto.EVP_DigestUpdate(evp_md_ctx, data, len(data))
handle_openssl_error(res)
res = libcrypto.EVP_DigestSignFinal(evp_md_ctx, signature_buffer, signature_length)
handle_openssl_error(res)
signature_length = deref(signature_length)
return bytes_from_buffer(signature_buffer, signature_length)
finally:
if evp_md_ctx:
if libcrypto_version_info < (1, 1):
libcrypto.EVP_MD_CTX_destroy(evp_md_ctx)
else:
libcrypto.EVP_MD_CTX_free(evp_md_ctx)
if rsa:
libcrypto.RSA_free(rsa)
if dsa:
libcrypto.DSA_free(dsa)
if dsa_sig:
libcrypto.DSA_SIG_free(dsa_sig)
if ec_key:
libcrypto.EC_KEY_free(ec_key)
if ecdsa_sig:
libcrypto.ECDSA_SIG_free(ecdsa_sig) | Generates an RSA, DSA or ECDSA signature
:param private_key:
The PrivateKey to generate the signature with
:param data:
A byte string of the data the signature is for
:param hash_algorithm:
A unicode string of "md5", "sha1", "sha224", "sha256", "sha384" or "sha512"
:param rsa_pss_padding:
If the private_key is an RSA key, this enables PSS padding
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the signature |
388,334 | def get_data_home(data_home=None):
data_home_default = Path(__file__).ancestor(3).child(,
)
if data_home is None:
data_home = os.environ.get(, data_home_default)
if not os.path.exists(data_home):
os.makedirs(data_home)
return data_home | Return the path of the revrand data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'revrand_data'
in the user home folder.
Alternatively, it can be set by the 'REVRAND_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created. |
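A usage sketch of the environment-variable override described in the docstring, assuming the elided key in the listing is `REVRAND_DATA` as stated; the path is illustrative.

```python
# Hypothetical usage sketch; the override path is illustrative.
import os

os.environ['REVRAND_DATA'] = '/tmp/revrand_data'
print(get_data_home())      # /tmp/revrand_data, created on first call if missing
```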
388,335 | def insert_many(objects, using="default"):
if not objects:
return
import django.db.models
from django.db import connections
from django.db import transaction
con = connections[using]
model = objects[0].__class__
fields = [f for f in model._meta.fields
if not isinstance(f, django.db.models.AutoField)]
parameters = []
for o in objects:
params = tuple(f.get_db_prep_save(f.pre_save(o, True), connection=con)
for f in fields)
parameters.append(params)
table = model._meta.db_table
column_names = ",".join(con.ops.quote_name(f.column) for f in fields)
placeholders = ",".join(("%s",) * len(fields))
con.cursor().executemany("insert into %s (%s) values (%s)"
% (table, column_names, placeholders), parameters)
transaction.commit_unless_managed(using=using) | Insert list of Django objects in one SQL query. Objects must be
of the same Django model. Note that save is not called and signals
on the model are not raised.
Mostly from: http://people.iola.dk/olau/python/bulkops.py |
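A usage sketch; `Measurement` is a hypothetical Django model introduced only for illustration, and, as the docstring notes, `save()` and model signals are bypassed.

```python
# Hypothetical usage sketch; Measurement is an illustrative Django model, not from the source.
objs = [Measurement(value=i) for i in range(1000)]
insert_many(objs, using="default")   # one executemany() INSERT, no save() or signals raised
```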
388,336 | def exists(self, filename):
if is_package(filename):
filepath = os.path.join(self.connection["mount_point"],
"Packages", filename)
else:
filepath = os.path.join(self.connection["mount_point"],
"Scripts", filename)
return os.path.exists(filepath) | Report whether a file exists on the distribution point.
Determines file type by extension.
Args:
filename: Filename you wish to check. (No path! e.g.:
"AdobeFlashPlayer-14.0.0.176.pkg") |
388,337 | def create(self, validated_data):
courses = validated_data.pop("courses", [])
encoded_videos = validated_data.pop("encoded_videos", [])
video = Video.objects.create(**validated_data)
EncodedVideo.objects.bulk_create(
EncodedVideo(video=video, **video_data)
for video_data in encoded_videos
)
for course_video, image_name in courses:
course_video.video = video
course_video.save()
if image_name:
VideoImage.create_or_update(course_video, image_name)
return video | Create the video and its nested resources. |
388,338 | def array(self):
return np.geomspace(self.start, self.stop, self.num, self.endpoint) | return the underlying numpy array |
388,339 | def cal_k_vinet_from_v(v, v0, k0, k0p):
x = v / v0
y = np.power(x, 1. / 3.)
eta = 1.5 * (k0p - 1.)
k = k0 * np.power(y, -2.) * (1. + (eta * y + 1.) * (1. - y)) * \
unp.exp((1. - y) * eta)
return k | calculate bulk modulus in GPa
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at reference conditions
:return: bulk modulus at high pressure in GPa |
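A usage sketch with made-up, MgO-like numbers (not from the source); `unp` in the listing refers to `uncertainties.unumpy`, which is assumed here to accept plain floats as well as values with uncertainties.

```python
# Hypothetical usage sketch; the numbers are illustrative, not from the source.
k = cal_k_vinet_from_v(v=70.0, v0=74.7, k0=160.0, k0p=4.0)
print(k)    # bulk modulus in GPa at the compressed unit-cell volume
```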
388,340 | def _CreatePlacemark(self, parent, name, style_id=None, visible=True,
description=None):
placemark = ET.SubElement(parent, )
placemark_name = ET.SubElement(placemark, )
placemark_name.text = name
if description is not None:
desc_tag = ET.SubElement(placemark, )
desc_tag.text = description
if style_id is not None:
styleurl = ET.SubElement(placemark, )
styleurl.text = % style_id
if not visible:
visibility = ET.SubElement(placemark, )
visibility.text =
return placemark | Create a KML Placemark element.
Args:
parent: The parent ElementTree.Element instance.
name: The placemark name as a string.
style_id: If not None, the id of a style to use for the placemark.
visible: Whether the placemark is initially visible or not.
description: A description string or None.
Returns:
The placemark ElementTree.Element instance. |
388,341 | def optimize(lattice,
positions,
numbers,
displacements,
forces,
alm_options=None,
p2s_map=None,
p2p_map=None,
log_level=0):
from alm import ALM
with ALM(lattice, positions, numbers) as alm:
natom = len(numbers)
alm.set_verbosity(log_level)
nkd = len(np.unique(numbers))
if not in alm_options:
rcs = -np.ones((2, nkd, nkd), dtype=)
elif type(alm_options[]) is float:
rcs = np.ones((2, nkd, nkd), dtype=)
rcs[0] *= -1
rcs[1] *= alm_options[]
alm.define(2, rcs)
alm.set_displacement_and_force(displacements, forces)
if in alm_options:
solver = alm_options[]
else:
solver =
info = alm.optimize(solver=solver)
fc2 = extract_fc2_from_alm(alm,
natom,
atom_list=p2s_map,
p2s_map=p2s_map,
p2p_map=p2p_map)
fc3 = _extract_fc3_from_alm(alm,
natom,
p2s_map=p2s_map,
p2p_map=p2p_map)
return fc2, fc3 | Calculate force constants
lattice : array_like
Basis vectors. a, b, c are given as column vectors.
shape=(3, 3), dtype='double'
positions : array_like
Fractional coordinates of atomic points.
shape=(num_atoms, 3), dtype='double'
numbers : array_like
Atomic numbers.
shape=(num_atoms,), dtype='intc'
displacements : array_like
Atomic displacement patterns in supercells in Cartesian.
dtype='double', shape=(supercells, num_atoms, 3)
forces : array_like
Forces in supercells.
dtype='double', shape=(supercells, num_atoms, 3)
alm_options : dict, optional
Default is None.
List of keys
cutoff_distance : float
solver : str
Either 'SimplicialLDLT' or 'dense'. Default is
'SimplicialLDLT'. |
388,342 | def get_or_create_hosted_zone(client, zone_name):
zone_id = get_hosted_zone_by_name(client, zone_name)
if zone_id:
return zone_id
logger.debug("Zone %s does not exist, creating.", zone_name)
reference = uuid.uuid4().hex
response = client.create_hosted_zone(Name=zone_name,
CallerReference=reference)
return parse_zone_id(response["HostedZone"]["Id"]) | Get the Id of an existing zone, or create it.
Args:
client (:class:`botocore.client.Route53`): The connection used to
interact with Route53's API.
zone_name (string): The name of the DNS hosted zone to create.
Returns:
string: The Id of the Hosted Zone. |
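A usage sketch, assuming configured AWS credentials with Route53 permissions; the zone name is illustrative.

```python
# Hypothetical usage sketch; requires valid AWS credentials and Route53 permissions.
import boto3

client = boto3.client("route53")
zone_id = get_or_create_hosted_zone(client, "example.com.")
print(zone_id)
```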
388,343 | def modify_server(self, UUID, **kwargs):
body = dict()
body[] = {}
for arg in kwargs:
if arg not in Server.updateable_fields:
Exception(.format(arg))
body[][arg] = kwargs[arg]
res = self.request(, .format(UUID), body)
server = res[]
IPAddresses = IPAddress._create_ip_address_objs(server.pop(),
cloud_manager=self)
storages = Storage._create_storage_objs(server.pop(),
cloud_manager=self)
return Server(
server,
ip_addresses=IPAddresses,
storage_devices=storages,
populated=True,
cloud_manager=self
) | modify_server allows updating the server's updateable_fields.
Note: Server's IP-addresses and Storages are managed by their own add/remove methods. |
388,344 | def _get_generator(parser, extract, keep, check_maf):
if extract is not None:
parser = Extractor(parser, names=extract)
for data in parser.iter_genotypes():
data.genotypes = data.genotypes[keep]
if check_maf:
data.code_minor()
yield data | Generates the data (with markers extracted and samples kept, if required). |
388,345 | def _process_reservations(self, reservations):
reservations = reservations[]
private_ip_addresses = []
private_hostnames = []
public_ips = []
public_hostnames = []
for reservation in reservations:
for instance in reservation[]:
private_ip_addresses.append(instance[])
private_hostnames.append(instance[])
if in instance:
public_ips.append(instance[])
elif not self.remove_nones:
public_ips.append(None)
if ( in instance) & (not self.remove_nones):
public_hostnames.append(instance[])
elif not self.remove_nones:
public_hostnames.append(None)
return {
: {
: private_ip_addresses,
: private_hostnames
},
: {
: public_ips,
: public_hostnames
},
: reservations
} | Given a dict with the structure of a response from boto3.ec2.describe_instances(...),
find the public/private ips.
:param reservations:
:return: |
388,346 | def xread(self, streams, count=None, block=None):
pieces = []
if block is not None:
if not isinstance(block, (int, long)) or block < 0:
raise DataError()
pieces.append(Token.get_token())
pieces.append(str(block))
if count is not None:
if not isinstance(count, (int, long)) or count < 1:
raise DataError()
pieces.append(Token.get_token())
pieces.append(str(count))
if not isinstance(streams, dict) or len(streams) == 0:
raise DataError()
pieces.append(Token.get_token())
keys, values = izip(*iteritems(streams))
pieces.extend(keys)
pieces.extend(values)
return self.execute_command(, *pieces) | Block and monitor multiple streams for new data.
streams: a dict of stream names to stream IDs, where
IDs indicate the last ID already seen.
count: if set, only return this many items, beginning with the
earliest available.
block: number of milliseconds to wait, if nothing already present. |
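A usage sketch of the client method above, assuming a running Redis server with stream support (Redis >= 5.0) and the redis-py client; the stream name is illustrative.

```python
# Hypothetical usage sketch; assumes a running Redis >= 5.0 server and redis-py installed.
import redis

r = redis.Redis()
# Wait up to 5 seconds for up to 10 new entries appended after the current last ID ('$').
entries = r.xread({"mystream": "$"}, count=10, block=5000)
print(entries)
```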
388,347 | def get_nn_shell_info(self, structure, site_idx, shell):
all_nn_info = self.get_all_nn_info(structure)
sites = self._get_nn_shell_info(structure, all_nn_info, site_idx, shell)
output = []
for info in sites:
orig_site = structure[info[]]
info[] = Site(orig_site.species,
orig_site._coords,
properties=orig_site.properties)
output.append(info)
return output | Get a certain nearest neighbor shell for a certain site.
Determines all non-backtracking paths through the neighbor network
computed by `get_nn_info`. The weight is determined by multiplying
the weight of the neighbor at each hop through the network. For
example, a 2nd-nearest-neighbor that has a weight of 1 from its
1st-nearest-neighbor and weight 0.5 from the original site will
be assigned a weight of 0.5.
As this calculation may involve computing the nearest neighbors of
atoms multiple times, the calculation starts by computing all of the
neighbor info and then calling `_get_nn_shell_info`. If you are likely
to call this method for more than one site, consider calling `get_all_nn`
first and then calling this protected method yourself.
Args:
structure (Molecule): Input structure
site_idx (int): index of site for which to determine neighbor
information.
shell (int): Which neighbor shell to retrieve (1 == 1st NN shell)
Returns:
list of dictionaries. Each entry in the list is information about
a certain neighbor in the structure, in the same format as
`get_nn_info`. |
388,348 | def update(self, time):
total_acceleration = Vector.null()
max_jerk = self.max_acceleration
for behavior in self.behaviors:
acceleration, importance = behavior.update()
weighted_acceleration = acceleration * importance
total_acceleration += weighted_acceleration
self.acceleration = total_acceleration
Sprite.update(self, time)
if self.velocity.magnitude > 0.0:
self.facing = self.velocity.normal | Update acceleration. Accounts for the importance and
priority (order) of multiple behaviors. |
388,349 | def subscribe(self, handler, topic=None, options=None):
def proxy_handler(*args, **kwargs):
return self._callbacks_runner.put(partial(handler, *args, **kwargs))
return self._async_session.subscribe(proxy_handler, topic=topic, options=options) | Subscribe to a topic for receiving events.
Replace :meth:`autobahn.wamp.interface.IApplicationSession.subscribe` |
388,350 | def join_room(self, room_name):
logging.debug(.format(ro=room_name))
for room in self.rooms:
if room.name == room_name:
room.add_user(self)
self._rooms[room_name] = room
room.welcome(self)
break
else:
room = Room(room_name)
self.rooms.append(room)
self._rooms[room_name] = room
room.add_user(self) | Connects to a given room
If it does not exist it is created |
388,351 | def get_template_dirs():
temp_glob = rel_to_cwd(, , , )
temp_groups = glob(temp_glob)
temp_groups = [get_parent_dir(path, 2) for path in temp_groups]
return set(temp_groups) | Return a set of all template directories. |
388,352 | def pass_outflow_v1(self):
flu = self.sequences.fluxes.fastaccess
out = self.sequences.outlets.fastaccess
out.q[0] += flu.outflow | Update the outlet link sequence |dam_outlets.Q|. |
388,353 | def replace_namespaced_horizontal_pod_autoscaler_status(self, name, namespace, body, **kwargs):
kwargs[] = True
if kwargs.get():
return self.replace_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, body, **kwargs)
return data | replace status of the specified HorizontalPodAutoscaler
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_horizontal_pod_autoscaler_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the HorizontalPodAutoscaler (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V2beta1HorizontalPodAutoscaler body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V2beta1HorizontalPodAutoscaler
If the method is called asynchronously,
returns the request thread. |
388,354 | def get_project(self, project_id, include_capabilities=None, include_history=None):
route_values = {}
if project_id is not None:
route_values[] = self._serialize.url(, project_id, )
query_parameters = {}
if include_capabilities is not None:
query_parameters[] = self._serialize.query(, include_capabilities, )
if include_history is not None:
query_parameters[] = self._serialize.query(, include_history, )
response = self._send(http_method=,
location_id=,
version=,
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize(, response) | GetProject.
Get project with the specified id or name, optionally including capabilities.
:param str project_id:
:param bool include_capabilities: Include capabilities (such as source control) in the team project result (default: false).
:param bool include_history: Search within renamed projects (that had such name in the past).
:rtype: :class:`<TeamProject> <azure.devops.v5_0.core.models.TeamProject>` |
388,355 | def strip_cdata(text):
if not is_cdata(text):
return text
xml = "<e>{0}</e>".format(text)
node = etree.fromstring(xml)
return node.text | Removes all CDATA blocks from `text` if it contains them.
Note:
If the text contains escaped XML characters outside of a
CDATA block, they will be unescaped.
Args:
A string containing one or more CDATA blocks.
Returns:
An XML unescaped string with CDATA block qualifiers removed. |
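A small usage sketch, assuming the helper `is_cdata` from the same module behaves as its name suggests.

```python
# Hypothetical usage sketch.
print(strip_cdata("<![CDATA[<b>bold</b>]]>"))   # -> <b>bold</b>
print(strip_cdata("plain text"))                # returned unchanged: not a CDATA block
```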
388,356 | def _writeData(self, command, device, params=()):
sequence = []
if self._compact:
sequence.append(command | 0x80)
else:
sequence.append(self._BAUD_DETECT)
sequence.append(device)
sequence.append(command)
for param in params:
sequence.append(param)
if self._crc:
sequence.append(crc7(sequence))
self._serial.write(bytearray(sequence))
self._log and self._log.debug("Wrote byte sequence: %s",
[hex(num) for num in sequence]) | Write the data to the device.
:Parameters:
command : `int`
The command to write to the device.
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol.
params : `tuple`
Sequence of bytes to write.
:Exceptions:
* `SerialTimeoutException`
If the low level serial package times out.
* `SerialException`
IO error when the port is not open. |
388,357 | def list(self, end_date=values.unset, friendly_name=values.unset,
minutes=values.unset, start_date=values.unset,
task_channel=values.unset, split_by_wait_time=values.unset, limit=None,
page_size=None):
return list(self.stream(
end_date=end_date,
friendly_name=friendly_name,
minutes=minutes,
start_date=start_date,
task_channel=task_channel,
split_by_wait_time=split_by_wait_time,
limit=limit,
page_size=page_size,
)) | Lists TaskQueuesStatisticsInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param datetime end_date: Filter cumulative statistics by an end date.
:param unicode friendly_name: Filter the TaskQueue stats based on a TaskQueue's name
:param unicode minutes: Filter cumulative statistics by up to 'x' minutes in the past.
:param datetime start_date: Filter cumulative statistics by a start date.
:param unicode task_channel: Filter real-time and cumulative statistics by TaskChannel.
:param unicode split_by_wait_time: A comma separated values for viewing splits of tasks canceled and accepted above the given threshold in seconds.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.taskrouter.v1.workspace.task_queue.task_queues_statistics.TaskQueuesStatisticsInstance] |
388,358 | def dispatch(self, tree):
for omp in metadata.get(tree, openmp.OMPDirective):
deps = list()
for dep in omp.deps:
old_file = self.f
self.f = io.StringIO()
self.dispatch(dep)
deps.append(self.f.getvalue())
self.f = old_file
directive = omp.s.format(*deps)
self._Expr(ast.Expr(ast.Str(s=directive)))
if isinstance(tree, list):
for t in tree:
self.dispatch(t)
return
meth = getattr(self, "_" + tree.__class__.__name__)
meth(tree) | Dispatcher function, dispatching tree type T to method _T. |
388,359 | def add_trits(left, right):
target_len = max(len(left), len(right))
res = [0] * target_len
left += [0] * (target_len - len(left))
right += [0] * (target_len - len(right))
carry = 0
for i in range(len(res)):
res[i], carry = _full_add_trits(left[i], right[i], carry)
return res | Adds two sequences of trits together.
The result is a list of trits equal in length to the longer of the
two sequences.
.. note::
Overflow is possible.
For example, ``add_trits([1], [1])`` returns ``[-1]``. |
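A short worked example, assuming balanced trits in {-1, 0, 1} stored least-significant first:

add_trits([1, 1], [1, -1])   # -> [-1, 1]: (1 + 3) + (1 - 3) = 2 = -1 + 1*3
add_trits([1], [1])          # -> [-1]: the final carry overflows, as noted above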
388,360 | def npartitions(self):
if self.mode == 'spark':
return self.tordd().getNumPartitions()
else:
notsupported(self.mode) | Get number of partitions (Spark only). |
388,361 | def select_best_url(self):
best_url = self.parsed_urls[0]
try:
yield best_url
except Exception:
self.unsuccessful_calls[best_url] += 1
if self.unsuccessful_calls[best_url] > self.max_failures:
self.parsed_urls.rotate(-1)
self.unsuccessful_calls[best_url] = 0
raise
else:
self.unsuccessful_calls[best_url] = 0 | Select `best` url.
Since urls are pre-sorted w.r.t. their ping times, we simply return the first element
from the list. And we always return the same url unless we observe greater than max
allowed number of consecutive failures. In this case, we would return the next `best`
url, and append the previous best one to the end of list (essentially rotate to the left
by one element). |
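A usage sketch, assuming the method is wrapped with contextlib.contextmanager and the owning object (here called `client`) was configured with max_failures=2:

for _ in range(3):
    try:
        with client.select_best_url() as url:
            raise ConnectionError(url)   # simulate three consecutive failures
    except ConnectionError:
        pass
# Once the failure budget is exceeded, parsed_urls is rotated and the next URL becomes the head.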
388,362 | def send_like(self, *, user_id, times=1):
return super().__getattr__('send_like') \
(user_id=user_id, times=times) | Send a "like" (profile thumbs-up) to a friend.
------------
:param int user_id: QQ number of the target user
:param int times: Number of likes to send; at most 10 per friend per day
:return: None
:rtype: None |
388,363 | def config(self):
return self._h._get_resource(
resource=('apps', self.name, 'config_vars'),
obj=ConfigVars, app=self
) | The envs for this app. |
388,364 | def centralManager_didConnectPeripheral_(self, manager, peripheral):
logger.debug()
peripheral.setDelegate_(self)
peripheral.discoverServices_(None)
device = device_list().get(peripheral)
if device is not None:
device._set_connected() | Called when a device is connected. |
388,365 | def portfolio_prices(
symbols=("AAPL", "GLD", "GOOG", "$SPX", "XOM", "msft"),
start=datetime.datetime(2005, 1, 1),
end=datetime.datetime(2011, 12, 31),
normalize=True,
allocation=None,
price_type='actual_close',
):
symbols = normalize_symbols(symbols)
start = util.normalize_date(start)
end = util.normalize_date(end)
if allocation is None:
allocation = [1. / len(symbols)] * len(symbols)
if len(allocation) < len(symbols):
allocation = list(allocation) + [1. / len(symbols)] * (len(symbols) - len(allocation))
total = np.sum(allocation)
allocation = np.array([(float(a) / total) for a in allocation])
timestamps = du.getNYSEdays(start, end, datetime.timedelta(hours=16))
ls_keys = [price_type]
ldf_data = da.get_data(timestamps, symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
na_price = d_data[price_type].values
if normalize:
na_price /= na_price[0, :]
na_price *= allocation
return np.sum(na_price, axis=1) | Calculate the allocation-weighted price (value) time series for a portfolio of equities
Arguments:
symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc
start (datetime): The date at the start of the period being analyzed.
end (datetime): The date at the end of the period being analyzed.
normalize (bool): Whether to normalize prices to 1 at the start of the time series.
allocation (list of float): The portion of the portfolio allocated to each equity. |
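A worked example of the allocation handling (assuming numpy is imported as np and three symbols are requested):

allocation = [0.5]                                       # only one of three weights given
allocation = allocation + [1. / 3] * 2                   # padded -> [0.5, 0.333, 0.333]
allocation = np.array(allocation) / np.sum(allocation)   # rescaled -> [0.429, 0.286, 0.286], sums to 1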
388,366 | def run_as(user, domain, password, filename, logon_flag=1, work_dir="",
show_flag=Properties.SW_SHOWNORMAL):
ret = AUTO_IT.AU3_RunAs(
LPCWSTR(user), LPCWSTR(domain), LPCWSTR(password), INT(logon_flag),
LPCWSTR(filename), LPCWSTR(work_dir), INT(show_flag)
)
return ret | Runs an external program.
:param user: The user name to use.
:param domain: The domain name to use.
:param password: The password to use.
:param logon_flag: 0 = do not load the user profile, 1 = (default) load
the user profile, 2 = use for net credentials only
:param filename: The name of the executable (EXE, BAT, COM, or PIF) to run.
:param work_dir: The working directory.
:param show_flag: The "show" flag of the executed program:
SW_HIDE = Hidden window
SW_MINIMIZE = Minimized window
SW_MAXIMIZE = Maximized window
:return: |
388,367 | def root(self, scope, names):
parent = scope.scopename
if parent:
parent = parent[-1]
if parent.parsed:
parsed_names = []
for name in names:
ampersand_count = name.count('&')
if ampersand_count:
filtered_parts = []
for part in parent.parsed:
if part and part[0] not in self._subp:
filtered_parts.append(part)
permutations = list(
utility.permutations_with_replacement(
filtered_parts, ampersand_count))
for permutation in permutations:
parsed = []
for name_part in name:
if name_part == "&":
parent_part = permutation.pop(0)
if parsed and parsed[-1].endswith():
parsed.extend()
if parent_part[-1] == :
parent_part.pop()
parsed.extend(parent_part)
else:
parsed.append(name_part)
parsed_names.append(parsed)
else:
for part in parent.parsed:
if part and part[0] not in self._subp:
parsed = []
if name[0] == "@media":
parsed.extend(name)
else:
parsed.extend(part)
if part[-1] != ' ':
parsed.append(' ')
parsed.extend(name)
parsed_names.append(parsed)
else:
parsed_names.append(name)
return parsed_names
return names | Find root of identifier, from scope
args:
scope (Scope): current scope
names (list): identifier name list (, separated identifiers)
returns:
list |
388,368 | def update_scheme(current, target):
target_p = urlparse(target)
if not target_p.scheme and target_p.netloc:
return "{0}:{1}".format(urlparse(current).scheme,
urlunparse(target_p))
elif not target_p.scheme and not target_p.netloc:
return "{0}://{1}".format(urlparse(current).scheme,
urlunparse(target_p))
else:
return target | Take the scheme from the current URL and applies it to the
target URL if the target URL startswith // or is missing a scheme
:param current: current URL
:param target: target URL
:return: target URL with the current URL's scheme |
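For example:

update_scheme("https://example.com/page", "//cdn.example.com/app.js")
# -> "https://cdn.example.com/app.js"
update_scheme("https://example.com/page", "cdn.example.com/app.js")
# -> "https://cdn.example.com/app.js"
update_scheme("http://example.com", "ftp://files.example.com")
# -> "ftp://files.example.com"  (an explicit scheme is left untouched)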
388,369 | def _encrypt(cipher, key, data, iv, padding):
if not isinstance(key, byte_cls):
raise TypeError(pretty_message(
,
type_name(key)
))
if not isinstance(data, byte_cls):
raise TypeError(pretty_message(
,
type_name(data)
))
if cipher != 'rc4' and not isinstance(iv, byte_cls):
raise TypeError(pretty_message(
,
type_name(iv)
))
if cipher != 'rc4' and not padding:
raise ValueError()
evp_cipher_ctx = None
try:
evp_cipher_ctx = libcrypto.EVP_CIPHER_CTX_new()
if is_null(evp_cipher_ctx):
handle_openssl_error(0)
evp_cipher, buffer_size = _setup_evp_encrypt_decrypt(cipher, data)
if iv is None:
iv = null()
if cipher in set(['rc2', 'rc4']):
res = libcrypto.EVP_EncryptInit_ex(evp_cipher_ctx, evp_cipher, null(), null(), null())
handle_openssl_error(res)
res = libcrypto.EVP_CIPHER_CTX_set_key_length(evp_cipher_ctx, len(key))
handle_openssl_error(res)
if cipher == 'rc2':
res = libcrypto.EVP_CIPHER_CTX_ctrl(
evp_cipher_ctx,
LibcryptoConst.EVP_CTRL_SET_RC2_KEY_BITS,
len(key) * 8,
null()
)
handle_openssl_error(res)
evp_cipher = null()
res = libcrypto.EVP_EncryptInit_ex(evp_cipher_ctx, evp_cipher, null(), key, iv)
handle_openssl_error(res)
if padding is not None:
res = libcrypto.EVP_CIPHER_CTX_set_padding(evp_cipher_ctx, int(padding))
handle_openssl_error(res)
buffer = buffer_from_bytes(buffer_size)
output_length = new(libcrypto, 'int *')
res = libcrypto.EVP_EncryptUpdate(evp_cipher_ctx, buffer, output_length, data, len(data))
handle_openssl_error(res)
output = bytes_from_buffer(buffer, deref(output_length))
res = libcrypto.EVP_EncryptFinal_ex(evp_cipher_ctx, buffer, output_length)
handle_openssl_error(res)
output += bytes_from_buffer(buffer, deref(output_length))
return output
finally:
if evp_cipher_ctx:
libcrypto.EVP_CIPHER_CTX_free(evp_cipher_ctx) | Encrypts plaintext
:param cipher:
A unicode string of "aes128", "aes192", "aes256", "des",
"tripledes_2key", "tripledes_3key", "rc2", "rc4"
:param key:
The encryption key - a byte string 5-32 bytes long
:param data:
The plaintext - a byte string
:param iv:
The initialization vector - a byte string - unused for RC4
:param padding:
Boolean, if padding should be used - unused for RC4
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by OpenSSL
:return:
A byte string of the ciphertext |
388,370 | def rst_table(self, array):
cell_dict = {}
for i, row in enumerate(array):
for j, val in enumerate(row):
if j not in cell_dict:
cell_dict[j] = []
cell_dict[j].append(val)
for item in cell_dict:
cell_dict[item] = max([len(x) for x in cell_dict[item]]) + 1
num_cols = len(array[0])
form =
for col in range(num_cols):
form += (cell_dict[col] + 1) *
form +=
form +=
for i, row in enumerate(array):
form +=
for j, val in enumerate(row):
cell_width = cell_dict[j]
form += str(val) + (cell_width - len(val)) * +
form.rstrip()
form +=
form +=
if i == 0:
sign =
else:
sign =
for col in range(num_cols):
form += (cell_dict[col] + 1) * sign
form +=
form +=
return form | Given an array, the function formats and returns a table in rST format.
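For reference, the intended output is an rST grid table; for [['name', 'id'], ['foo', '1']] it would look roughly like this (the separator after the first row uses '=' to mark the header):

+------+-----+
| name | id  |
+======+=====+
| foo  | 1   |
+------+-----+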
388,371 | def ssn(self):
def _checksum(digits):
factors = (9, 8, 7, 6, 5, 4, 3, 2, -1)
s = 0
for i in range(len(digits)):
s += digits[i] * factors[i]
return s
while True:
digits = self.generator.random.sample(range(10), 8)
s = _checksum(digits)
digits.append((s % 11) % 10)
if 0 == (_checksum(digits) % 11):
break
bsn = "".join([str(e) for e in digits])
return bsn | Returns a 9 digits Dutch SSN called "burgerservicenummer (BSN)".
the Dutch "burgerservicenummer (BSN)" needs to pass the "11-proef",
which is a check digit approach; this function essentially reverses
the checksum steps to create a random valid BSN (which is 9 digits). |
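The matching validity check is a sketch of the 11-proef run forward: weights 9..2 on the first eight digits and -1 on the check digit, with the weighted sum required to be divisible by 11.

def is_valid_bsn(bsn):
    factors = (9, 8, 7, 6, 5, 4, 3, 2, -1)
    digits = [int(c) for c in bsn]
    # exactly nine digits and a weighted sum divisible by 11
    return len(digits) == 9 and sum(d * f for d, f in zip(digits, factors)) % 11 == 0

is_valid_bsn("111222333")   # -> True (weighted sum is 66)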
388,372 | def index(self, weighted=True, prune=False):
warnings.warn(
"CrunchCube.index() is deprecated. Use CubeSlice.index_table().",
DeprecationWarning,
)
return Index.data(self, weighted, prune) | Return cube index measurement.
This function is deprecated. Use index_table from CubeSlice. |
388,373 | def set_outgoing(self, value):
if not isinstance(value, list):
raise TypeError("OutgoingList new value must be a list")
for element in value:
if not isinstance(element, str):
raise TypeError("OutgoingList elements in variable must be of String class")
self.__outgoing_list = value | Setter for 'outgoing' field.
:param value - a new value of 'outgoing' field. Must be a list of IDs (String type) of outgoing flows. |
388,374 | def get(self, *args, **kwargs):
return self.model._default_manager.get(*args, **kwargs) | Quick and dirty hack to fix change_view and delete_view; they use
self.queryset(request).get(...) to get the object they should work
with. Our modifications to the queryset when INCLUDE_ANCESTORS is
enabled make get() fail often with a MultipleObjectsReturned
exception. |
388,375 | def batch(self, client=None):
client = self._require_client(client)
return Batch(self, client) | Return a batch to use as a context manager.
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current topic.
:rtype: :class:`Batch`
:returns: A batch to use as a context manager. |
388,376 | def get_user_bookmarks(self, id, **data):
return self.get("/users/{0}/bookmarks/".format(id), data=data) | GET /users/:id/bookmarks/
Gets all the user's saved events.
In order to update the saved events list, the user must unsave or save each event.
A user is authorized to only see his/her saved events. |
388,377 | def split_key(key):
if key == KEY_SEP:
return ()
key_chunks = tuple(key.strip(KEY_SEP).split(KEY_SEP))
if key_chunks[0].startswith(KEY_SEP):
return (key_chunks[0][len(KEY_SEP):],) + key_chunks[1:]
else:
return key_chunks | Splits a node key. |
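Assuming KEY_SEP is '/', the behaviour is:

split_key('/')        # -> ()
split_key('/a/b/c')   # -> ('a', 'b', 'c')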
388,378 | def load_airpassengers(as_series=False):
rslt = np.array([
112, 118, 132, 129, 121, 135, 148, 148, 136, 119, 104, 118,
115, 126, 141, 135, 125, 149, 170, 170, 158, 133, 114, 140,
145, 150, 178, 163, 172, 178, 199, 199, 184, 162, 146, 166,
171, 180, 193, 181, 183, 218, 230, 242, 209, 191, 172, 194,
196, 196, 236, 235, 229, 243, 264, 272, 237, 211, 180, 201,
204, 188, 235, 227, 234, 264, 302, 293, 259, 229, 203, 229,
242, 233, 267, 269, 270, 315, 364, 347, 312, 274, 237, 278,
284, 277, 317, 313, 318, 374, 413, 405, 355, 306, 271, 306,
315, 301, 356, 348, 355, 422, 465, 467, 404, 347, 305, 336,
340, 318, 362, 348, 363, 435, 491, 505, 404, 359, 310, 337,
360, 342, 406, 396, 420, 472, 548, 559, 463, 407, 362, 405,
417, 391, 419, 461, 472, 535, 622, 606, 508, 461, 390, 432
]).astype(np.float64)
if as_series:
return pd.Series(rslt)
return rslt | Monthly airline passengers.
The classic Box & Jenkins airline data. Monthly totals of international
airline passengers, 1949 to 1960.
Parameters
----------
as_series : bool, optional (default=False)
Whether to return a Pandas series. If False, will return a 1d
numpy array.
Returns
-------
rslt : array-like, shape=(n_samples,)
The time series vector.
Examples
--------
>>> from pmdarima.datasets import load_airpassengers
>>> load_airpassengers() # doctest: +SKIP
np.array([
112, 118, 132, 129, 121, 135, 148, 148, 136, 119, 104, 118,
115, 126, 141, 135, 125, 149, 170, 170, 158, 133, 114, 140,
145, 150, 178, 163, 172, 178, 199, 199, 184, 162, 146, 166,
171, 180, 193, 181, 183, 218, 230, 242, 209, 191, 172, 194,
196, 196, 236, 235, 229, 243, 264, 272, 237, 211, 180, 201,
204, 188, 235, 227, 234, 264, 302, 293, 259, 229, 203, 229,
242, 233, 267, 269, 270, 315, 364, 347, 312, 274, 237, 278,
284, 277, 317, 313, 318, 374, 413, 405, 355, 306, 271, 306,
315, 301, 356, 348, 355, 422, 465, 467, 404, 347, 305, 336,
340, 318, 362, 348, 363, 435, 491, 505, 404, 359, 310, 337,
360, 342, 406, 396, 420, 472, 548, 559, 463, 407, 362, 405,
417, 391, 419, 461, 472, 535, 622, 606, 508, 461, 390, 432])
>>> load_airpassengers(True).head()
0 112.0
1 118.0
2 132.0
3 129.0
4 121.0
dtype: float64
Notes
-----
This is monthly data, so *m* should be set to 12 when using in a seasonal
context.
References
----------
.. [1] Box, G. E. P., Jenkins, G. M. and Reinsel, G. C. (1976)
"Time Series Analysis, Forecasting and Control. Third Edition."
Holden-Day. Series G. |
388,379 | def _add_cadd_score(self, variant_obj, info_dict):
cadd_score = info_dict.get('CADD')
if cadd_score:
logger.debug("Updating cadd_score to: {0}".format(
cadd_score))
variant_obj.cadd_score = float(cadd_score) | Add the cadd score to the variant
Args:
variant_obj (puzzle.models.Variant)
info_dict (dict): A info dictionary |
388,380 | def parse_device(lines):
name, status_line, device = parse_device_header(lines.pop(0))
if not status_line:
status_line = lines.pop(0)
status = parse_device_status(status_line, device["personality"])
bitmap = None
resync = None
for line in lines:
if line.startswith(" bitmap:"):
bitmap = parse_device_bitmap(line)
elif line.startswith(" ["):
resync = parse_device_resync_progress(line)
elif line.startswith(" \tresync="):
resync = parse_device_resync_standby(line)
else:
raise NotImplementedError("unknown device line: {0}".format(line))
device.update({
"status": status,
"bitmap": bitmap,
"resync": resync,
})
return (name, device) | Parse all the lines of a device block.
A device block is composed of a header line with the name of the device and
at least one extra line describing the device and its status. The extra
lines have a varying format depending on the status and personality of the
device (e.g. RAID1 vs RAID5, healthy vs recovery/resync). |
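An illustrative /proc/mdstat device block of the kind this parser consumes (values are made up):

md0 : active raid1 sdb1[1] sda1[0]
      976630336 blocks super 1.2 [2/2] [UU]
      bitmap: 1/8 pages [4KB], 65536KB chunk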
388,381 | def shell(self, name=, user=None, password=None, root=0, verbose=1, write_password=1, no_db=0, no_pw=0):
raise NotImplementedError | Opens a SQL shell to the given database, assuming the configured database
and user supports this feature. |
388,382 | def course_or_program_exist(self, course_id, program_uuid):
course_exists = course_id and CourseApiClient().get_course_details(course_id)
program_exists = program_uuid and CourseCatalogApiServiceClient().program_exists(program_uuid)
return course_exists or program_exists | Return whether the input course or program exist. |
388,383 | def image_info(call=None, kwargs=None):
if call != :
raise SaltCloudSystemExit(
)
if kwargs is None:
kwargs = {}
name = kwargs.get(, None)
image_id = kwargs.get(, None)
if image_id:
if name:
log.warning(
image_id\name\
image_id\
)
elif name:
image_id = get_image_id(kwargs={: name})
else:
raise SaltCloudSystemExit(
name or an \ to be provided.:NAME').text] = _xml_to_dict(tree)
return info | Retrieves information for a given image. Either a name or an image_id must be
supplied.
.. versionadded:: 2016.3.0
name
The name of the image for which to gather information. Can be used instead
of ``image_id``.
image_id
The ID of the image for which to gather information. Can be used instead of
``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f image_info opennebula name=my-image
salt-cloud --function image_info opennebula image_id=5 |
388,384 | def AddLeNetModel(model, data):
conv1 = brew.conv(model, data, 'conv1', dim_in=1, dim_out=20, kernel=5)
pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)
conv2 = brew.conv(model, pool1, 'conv2', dim_in=20, dim_out=100, kernel=5)
pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)
fc3 = brew.fc(model, pool2, 'fc3', dim_in=100 * 4 * 4, dim_out=500)
relu = brew.relu(model, fc3, fc3)
pred = brew.fc(model, relu, 'pred', 500, 10)
softmax = brew.softmax(model, pred, 'softmax')
return softmax | This part is the standard LeNet model: from data to the softmax prediction.
For each convolutional layer we specify dim_in - number of input channels
and dim_out - number of output channels. Also each Conv and MaxPool layer changes the
image size. For example, kernel of size 5 reduces each side of an image by 4.
While when we have kernel and stride sizes equal 2 in a MaxPool layer, it divides
each side in half. |
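Tracking the spatial size for a 28x28 MNIST input shows where the 100 * 4 * 4 fan-in of the first FC layer comes from:

28x28 --conv 5x5--> 24x24 --pool 2,2--> 12x12 --conv 5x5--> 8x8 --pool 2,2--> 4x4
so pool2 holds 100 channels of 4x4 values per image, i.e. 1600 inputs to fc3.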
388,385 | def mount(cls, device, mount_directory, fs=None, options=None, cmd_timeout=None, sudo=False):
cmd = [] if sudo is False else ['sudo']
cmd.extend(['mount', device, os.path.abspath(mount_directory)])
if fs is not None:
cmd.extend(['-t', fs])
if options is not None and len(options) > 0:
cmd.append('-o')
cmd.extend(options)
subprocess.check_output(cmd, timeout=cmd_timeout) | Mount a device to mount directory
:param device: device to mount
:param mount_directory: target directory where the given device will be mounted to
:param fs: optional, filesystem on the specified device. If specifies - overrides OS filesystem \
detection with this value.
:param options: specifies mount options (OS/filesystem dependent)
:param cmd_timeout: if specified - timeout with which this mount command should be evaluated (if \
command isn't complete within the given timeout - an exception will be raised)
:param sudo: whether to use sudo to run mount command
:return: None |
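An illustrative call; the class name, device, mount point and options below are placeholders:

MountHelper.mount('/dev/sdb1', '/mnt/data', fs='ext4', options=['ro'], sudo=True)
# roughly equivalent to: sudo mount /dev/sdb1 /mnt/data -t ext4 -o ro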
388,386 | def _pool_one_shape(features_2d, area_width, area_height, batch_size,
width, height, depth, fn=tf.reduce_max, name=None):
with tf.name_scope(name, default_name="pool_one_shape"):
images = []
for y_shift in range(area_height):
image_height = tf.maximum(height - area_height + 1 + y_shift, 0)
for x_shift in range(area_width):
image_width = tf.maximum(width - area_width + 1 + x_shift, 0)
area = features_2d[:, y_shift:image_height, x_shift:image_width, :]
flatten_area = tf.reshape(area, [batch_size, -1, depth, 1])
images.append(flatten_area)
image_tensor = tf.concat(images, axis=3)
max_tensor = fn(image_tensor, axis=3)
return max_tensor | Pools for an area in features_2d.
Args:
features_2d: a Tensor in a shape of [batch_size, height, width, depth].
area_width: the max width allowed for an area.
area_height: the max height allowed for an area.
batch_size: the batch size.
width: the width of the memory.
height: the height of the memory.
depth: the depth of the features.
fn: the TF function for the pooling.
name: the op name.
Returns:
pool_tensor: A Tensor of shape [batch_size, num_areas, depth] |
388,387 | def _remove_prefix(name):
if isinstance(name, str):
return _do_remove_prefix(name)
return [_do_remove_prefix(nm) for nm in name] | Strip the possible prefix 'Table: ' from one or more table names. |
388,388 | def subst_quoted_strings(sql, params):
parts = sql.split('@')
assert len(parts) == len(params) + 1, "number of parameters doesn't match the transformed query"
out = []
for part, param in zip(parts, params):
out.append(part)
out.append("'%s'" % param.replace('\\', '\\\\').replace("'", "\\'"))
out.append(parts[-1])
return ''.join(out) | Reverse operation to mark_quoted_strings - substitutes '@' by params.
388,389 | def get_name(self, use_alias=True):
if self.desc:
direction = 'DESC'
else:
direction = 'ASC'
if use_alias:
return '{0} {1}'.format(self.field.get_identifier(), direction)
return '{0} {1}'.format(self.field.get_select_sql(), direction) | Gets the name to reference the sorted field
:return: the name to reference the sorted field
:rtype: str |
388,390 | def combine(self, other, func, fill_value=None, overwrite=True):
other_idxlen = len(other.index)
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isna(series)
other_mask = isna(otherSeries)
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
if col not in self.columns:
new_dtype = other_dtype
try:
series = series.astype(new_dtype, copy=False)
except ValueError:
pass
else:
new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
arr = func(series, otherSeries)
arr = maybe_downcast_to_dtype(arr, this_dtype)
result[col] = arr
return self._constructor(result, index=new_index,
columns=new_columns) | Perform column-wise combine with another DataFrame.
Combines a DataFrame with `other` DataFrame using `func`
to element-wise combine columns. The row and column indexes of the
resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
The DataFrame to merge column-wise.
func : function
Function that takes two series as inputs and return a Series or a
scalar. Used to merge the two dataframes column by columns.
fill_value : scalar value, default None
The value to fill NaNs with prior to passing any column to the
merge func.
overwrite : bool, default True
If True, columns in `self` that do not exist in `other` will be
overwritten with NaNs.
Returns
-------
DataFrame
Combination of the provided DataFrames.
See Also
--------
DataFrame.combine_first : Combine two DataFrame objects and default to
non-null values in frame calling the method.
Examples
--------
Combine using a simple function that chooses the smaller column.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
>>> df1.combine(df2, take_smaller)
A B
0 0 3
1 0 3
Example using a true element-wise combine function.
>>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, np.minimum)
A B
0 1 2
1 0 3
Using `fill_value` fills Nones prior to passing the column to the
merge function.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 4.0
However, if the same element in both dataframes is None, that None
is preserved
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 3.0
Example that demonstrates the use of `overwrite` and behavior when
the axis differ between the dataframes.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])
>>> df1.combine(df2, take_smaller)
A B C
0 NaN NaN NaN
1 NaN 3.0 -10.0
2 NaN 3.0 1.0
>>> df1.combine(df2, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 -10.0
2 NaN 3.0 1.0
Demonstrating the preference of the passed in dataframe.
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])
>>> df2.combine(df1, take_smaller)
A B C
0 0.0 NaN NaN
1 0.0 3.0 NaN
2 NaN 3.0 NaN
>>> df2.combine(df1, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0 |
388,391 | def to_method(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
return func(*args[1:], **kwargs)
return wrapper | Lift :func:`func` to a method; it will be called with the first argument
'self' ignored.
:param func: Any callable object |
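For example:

def greet(name):
    return "hello " + name

class Greeter:
    greet = to_method(greet)   # the implicit 'self' is dropped by the wrapper

Greeter().greet("world")       # -> "hello world"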
388,392 | def verify_embedding(emb, source, target, ignore_errors=()):
for error in diagnose_embedding(emb, source, target):
eclass = error[0]
if eclass not in ignore_errors:
raise eclass(*error[1:])
return True | A simple (exception-raising) diagnostic for minor embeddings.
See :func:`diagnose_embedding` for a more detailed diagnostic / more information.
Args:
emb (dict): a dictionary mapping source nodes to arrays of target nodes
source (graph or edgelist): the graph to be embedded
target (graph or edgelist): the graph being embedded into
Raises:
EmbeddingError: a catch-all class for the below
MissingChainError: in case a key is missing from `emb`, or the associated chain is empty
ChainOverlapError: in case two chains contain the same target node
DisconnectedChainError: in case a chain is disconnected
InvalidNodeError: in case a chain contains a node label not found in `target`
MissingEdgeError: in case a source edge is not represented by any target edges
Returns:
bool: True (if no exception is raised) |
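A small hand-made example: chain {0, 4} stands in for source node 0, and the target edge (0, 4) keeps that chain connected.

emb    = {0: [0, 4], 1: [1], 2: [2]}
source = [(0, 1), (1, 2), (0, 2)]
target = [(0, 1), (1, 2), (2, 4), (0, 4)]
verify_embedding(emb, source, target)   # -> True
# Dropping (2, 4) and (0, 4) from target would raise an EmbeddingError instead.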
388,393 | def _range_from_slice(myslice, start=None, stop=None, step=None, length=None):
assert isinstance(myslice, slice)
step = myslice.step if myslice.step is not None else step
if step is None:
step = 1
start = myslice.start if myslice.start is not None else start
if start is None:
start = 0
stop = myslice.stop if myslice.stop is not None else stop
if length is not None:
stop_inferred = floor(start + step * length)
if stop is not None and stop < stop_inferred:
raise ValueError("stop ({stop}) and ".format(stop=stop) +
"length ({length}) ".format(length=length) +
"are not compatible.")
stop = stop_inferred
if stop is None and length is None:
raise ValueError("'stop' and 'length' cannot be both unspecified.")
myrange = np.arange(start, stop, step)
if length is not None:
assert len(myrange) == length
return myrange | Convert a slice to an array of integers. |
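For example:

_range_from_slice(slice(None, None, 2), start=0, length=5)   # -> array([0, 2, 4, 6, 8])
_range_from_slice(slice(3, 10), step=1)                      # -> array([3, 4, 5, 6, 7, 8, 9])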
388,394 | def get_certificates(self):
for certificate in self.user_data.certificates:
certificate[] = certificate[].strip()
return self.user_data.certificates | Get user's certificates. |
388,395 | def from_header(self, header):
if header is None:
return SpanContext()
try:
match = re.search(_TRACE_CONTEXT_HEADER_RE, header)
except TypeError:
logging.warning(
.format(header.__class__.__name__))
raise
if match:
trace_id = match.group(1)
span_id = match.group(3)
trace_options = match.group(5)
if trace_options is None:
trace_options = 1
span_context = SpanContext(
trace_id=trace_id,
span_id=span_id,
trace_options=TraceOptions(trace_options),
from_header=True)
return span_context
else:
logging.warning(
.format(header))
return SpanContext() | Generate a SpanContext object using the trace context header.
The value of enabled parsed from header is int. Need to convert to
bool.
:type header: str
:param header: Trace context header which was extracted from the HTTP
request headers.
:rtype: :class:`~opencensus.trace.span_context.SpanContext`
:returns: SpanContext generated from the trace context header. |
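The header follows the X-Cloud-Trace-Context layout, roughly 'TRACE_ID/SPAN_ID;o=OPTIONS'. An illustrative call (the propagator name and the values are placeholders):

ctx = propagator.from_header('6e0c63257de34c92bf9efcd03927272e/1234567890;o=1')
# ctx.trace_id      -> '6e0c63257de34c92bf9efcd03927272e'
# ctx.span_id       -> '1234567890'
# ctx.trace_options -> tracing enabled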
388,396 | def _render_template_block_nodelist(nodelist, block_name, context):
for node in nodelist:
if isinstance(node, BlockNode):
context.render_context[BLOCK_CONTEXT_KEY].push(node.name, node)
| Recursively iterate over a node to find the wanted block. |
388,397 | def f_contains(self, item, with_links=True, shortcuts=False, max_depth=None):
try:
search_string = item.v_full_name
parent_full_name = self.v_full_name
if not search_string.startswith(parent_full_name):
return False
if parent_full_name != '':
search_string = search_string[len(parent_full_name) + 1:]
else:
search_string = search_string
shortcuts = False
except AttributeError:
search_string = item
item = None
if search_string == '':
return False
try:
result = self.f_get(search_string,
shortcuts=shortcuts, max_depth=max_depth, with_links=with_links)
except AttributeError:
return False
if item is not None:
return id(item) == id(result)
else:
return True | Checks if the node contains a specific parameter or result.
It is checked if the item can be found via the
:func:`~pypet.naturalnaming.NNGroupNode.f_get` method.
:param item: Parameter/Result name or instance.
If a parameter or result instance is supplied it is also checked if
the provided item and the found item are exactly the same instance, i.e.
`id(item)==id(found_item)`.
:param with_links:
If links are considered.
:param shortcuts:
Shortcuts is `False` the name you supply must
be found in the tree WITHOUT hopping over nodes in between.
If `shortcuts=False` and you supply a
non colon separated (short) name, than the name must be found
in the immediate children of your current node.
Otherwise searching via shortcuts is allowed.
:param max_depth:
If shortcuts is `True` then the maximum search depth
can be specified. `None` means no limit.
:return: True or False |
388,398 | def _RunIpRoute(self, args=None, options=None):
args = args or []
options = options or {}
command = ['ip', 'route']
command.extend(args)
for item in options.items():
command.extend(item)
try:
process = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
except OSError as e:
self.logger.warning(, command, str(e))
else:
if process.returncode:
message =
self.logger.warning(message, command, stderr.strip())
else:
return stdout.decode(, )
return | Run a command with ip route and return the response.
Args:
args: list, the string ip route command args to execute.
options: dict, the string parameters to append to the ip route command.
Returns:
string, the standard output from the ip route command execution. |
388,399 | def finalize(self, **kwargs):
self.set_title("")
self.ax.legend(loc="best", frameon=True)
if self.hist:
plt.setp(self.xhax.get_xticklabels(), visible=False)
plt.setp(self.yhax.get_yticklabels(), visible=False)
plt.sca(self.ax)
plt.tight_layout() | Finalize executes any remaining image modifications making it ready to show. |