def dist(a, b):
    # Squared Euclidean distance between two equal-length sequences.
    return sum((i - j) ** 2 for i, j in zip(a, b))
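A quick sanity check of dist as a squared Euclidean distance (a worked example, not part of the original snippet):

assert dist([0, 0], [3, 4]) == 25   # 3**2 + 4**2
assert dist([1.0, 2.0], [1.0, 2.0]) == 0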
def get_iterations(self):
    """!
    @return (uint) Number of iterations performed by the EM algorithm.
    """
def get_evolution_means(self):
    """!
    @return (list) Mean of each cluster on each step of clustering.
    """
def get_evolution_covariances(self):
    """!
    @return (list) Covariance matrix (or variance in case of one-dimensional data) of each cluster on each step of clustering.
    """
def get_evolution_clusters(self):
    """!
    @return (list) Allocated clusters on each step of clustering.
    """
def notify(self, means, covariances, clusters):
    """!
    @brief This method is used by the algorithm to notify the observer about changes, providing
    the new values: means, covariances and allocated clusters.
    """
def show_clusters(clusters, sample, covariances, means, figure=None, display=True):
    """!
    @brief Draws clusters and, in case of a two-dimensional dataset, their ellipses.
    @details A figure allocated by this method should be closed using the `close()` method of this visualizer.
    """
def close(figure):
    """!
    @brief Closes figure object that was used or allocated by the visualizer.
    """
def animate_cluster_allocation(data, observer, animation_velocity=75, movie_fps=1, save_movie=None):
    """!
    @brief Animates the clustering process that is performed by the EM algorithm.
    """
def init_frame():
return frame_generation(0)
def frame_generation(index_iteration):
figure.clf()
def __draw_ellipses(figure, visualizer, clusters, covariances, means):
ax = figure.get_axes()[0]
def __draw_ellipse(ax, x, y, angle, width, height, color):
if (width > 0.0) and (height > 0.0):
ax.plot(x, y, color=color, marker='x', markersize=6)
ellipse = patches.Ellipse((x, y), width, height, alpha=0.2, angle=-angle, linewidth=2, fill=True, zorder=2, color=color)
ax.add_patch(ellipse)
def __init__(self, data, amount_clusters, means=None, variances=None, observer=None, tolerance=0.00001, iterations=100):
    """!
    @brief Initializes the Expectation-Maximization algorithm for cluster analysis.
    """
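A minimal usage sketch for the EM class whose constructor is shown above; the class name `ema` and the toy two-cluster data are assumptions (the signature and accessors match pyclustering's pyclustering.cluster.ema):

data = [[1.0, 1.2], [1.1, 0.9], [5.0, 5.2], [4.9, 5.1]]

algorithm = ema(data, amount_clusters=2)   # means/variances are seeded automatically
algorithm.process()                        # run E/M iterations until convergence

clusters = algorithm.get_clusters()        # e.g. [[0, 1], [2, 3]]
centers = algorithm.get_centers()
covariances = algorithm.get_covariances()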
def process(self):
    """!
    @brief Run clustering process of the algorithm.
    """
def get_clusters(self):
    """!
    @return (list) Allocated clusters where each cluster is represented by a list of indexes of points from the dataset,
    for example, two clusters may have the following representation: [[0, 1, 4], [2, 3, 5, 6]].
    """
def get_centers(self):
    """!
    @return (list) Corresponding centers (means) of clusters.
    """
def get_covariances(self):
    """!
    @return (list) Corresponding variances (or covariances in case of multi-dimensional data) of clusters.
    """
def get_probabilities(self):
    """!
    @brief Returns a 2-dimensional list with the membership probability of each data point in each cluster,
    where the first index is the cluster and the second is the point.
    """
def __erase_empty_clusters(self):
clusters, means, variances, pic, gaussians, rc = [], [], [], [], [], []
def __notify(self):
if self.__observer is not None:
self.__observer.notify(self.__means, self.__variances, self.__clusters)
def __extract_clusters(self):
    self.__clusters = [[] for _ in range(self.__amount_clusters)]
    for index_point in range(len(self.__data)):
        candidates = []
        for index_cluster in range(self.__amount_clusters):
            candidates.append((index_cluster, self.__rc[index_cluster][index_point]))
        # Completion of the truncated snippet: assign the point to the cluster
        # with the highest responsibility (standard EM hard assignment).
        index_winner = max(candidates, key=lambda candidate: candidate[1])[0]
        self.__clusters[index_winner].append(index_point)
def __log_likelihood(self):
likelihood = 0.0
def __probabilities(self, index_cluster, index_point):
    divider = 0.0
    for i in range(self.__amount_clusters):
        divider += self.__pic[i] * self.__gaussians[i][index_point]
    # Completion: responsibility = pi_c * N(x | mu_c, Sigma_c) / divider (guard against zero).
    if divider == 0.0:
        return 0.0
    return self.__pic[index_cluster] * self.__gaussians[index_cluster][index_point] / divider
def __expectation_step(self):
self.__gaussians = [ [] for _ in range(self.__amount_clusters) ]
for index in range(self.__amount_clusters):
self.__gaussians[index] = gaussian(self.__data, self.__means[index], self.__variances[index])
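A minimal sketch of the `gaussian` helper called above, under the assumption that it returns the multivariate normal density of every data point for one cluster; the signature is inferred from the call site, not confirmed by the source:

import numpy

def gaussian(data, mean, covariance):
    data = numpy.atleast_2d(numpy.asarray(data, dtype=float))
    mean = numpy.asarray(mean, dtype=float)
    covariance = numpy.atleast_2d(numpy.asarray(covariance, dtype=float))
    dimension = data.shape[1]
    inverse = numpy.linalg.pinv(covariance)
    determinant = numpy.linalg.det(covariance)
    normalizer = 1.0 / numpy.sqrt(((2.0 * numpy.pi) ** dimension) * determinant)
    deviation = data - mean
    # Quadratic form -0.5 * (x - mu)^T Sigma^-1 (x - mu) for every point at once.
    exponent = -0.5 * numpy.einsum('ij,jk,ik->i', deviation, inverse, deviation)
    return normalizer * numpy.exp(exponent)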
def __maximization_step(self):
self.__pic = []
self.__means = []
self.__variances = []
def __get_stop_condition(self):
for covariance in self.__variances:
if numpy.linalg.norm(covariance) == 0.0:
return True
def __update_covariance(self, means, rc, mc):
    covariance = 0.0
    for index_point in range(len(self.__data)):
        deviation = numpy.array([self.__data[index_point] - means])
        covariance += rc[index_point] * deviation.T.dot(deviation)
    # Completion: normalize by the total responsibility mass of the cluster.
    covariance = covariance / mc
    return covariance
def __update_mean(self, rc, mc):
    mean = 0.0
    for index_point in range(len(self.__data)):
        mean += rc[index_point] * self.__data[index_point]
    # Completion: normalize by the total responsibility mass of the cluster.
    mean = mean / mc
    return mean
def __normalize_probabilities(self):
    for index_point in range(len(self.__data)):
        probability = 0.0
        for index_cluster in range(len(self.__clusters)):
            probability += self.__rc[index_cluster][index_point]
        # Completion: renormalize this point's responsibilities to sum to 1.
        self.__normalize_probability(index_point, probability)
def __normalize_probability(self, index_point, probability):
    if probability == 0.0:
        return
    # Completion: divide each cluster's responsibility for this point by the sum.
    for index_cluster in range(len(self.__clusters)):
        self.__rc[index_cluster][index_point] /= probability
def __verify_arguments(self):
    """!
    @brief Verify input parameters for the algorithm and throw an exception in case of incorrectness.
    """
def __enter__(self):
"""
Syntax sugar which helps in celery tasks, cron jobs, and other scripts
Usage:
with Tenant.objects.get(schema_name='test') as tenant:
# run some code in tenant test
# run some code in previous tenant (public probably)
"""
connection = connections[get_tenant_database_alias()]
self._previous_tenant.append(connection.tenant)
self.activate()
return self
async def handler(client, request):
if request.path == "/v6/challenge":
assert request.encode().decode() == CHALLENGE_REQUEST
response = http.HTTPResponse(200)
response.json = {
"challenge": "vaNgVZZH7gUse0y3t8Cksuln-TAVtvBmcD-ow59qp0E=",
"data": "dlL7ZBNSLmYo1hUlKYZiUA=="
}
return response
else:
assert request.encode().decode() == TOKEN_REQUEST
response = http.HTTPResponse(200)
response.json = {
"device_auth_token": "device token"
}
return response
def __exit__(self, exc_type, exc_val, exc_tb):
connection = connections[get_tenant_database_alias()]
connection.set_tenant(self._previous_tenant.pop())
def activate(self):
    """
    Syntax sugar that helps with fast tenant switching in the Django shell
    Usage:
        Tenant.objects.get(schema_name='test').activate()
    """
connection = connections[get_tenant_database_alias()]
connection.set_tenant(self)
def deactivate(cls):
"""
Syntax sugar, return to public schema
Usage:
test_tenant.deactivate()
# or simpler
Tenant.deactivate()
"""
connection = connections[get_tenant_database_alias()]
connection.set_schema_to_public()
def save(self, verbosity=1, *args, **kwargs):
connection = connections[get_tenant_database_alias()]
is_new = self.pk is None
has_schema = hasattr(connection, 'schema_name')
if has_schema and is_new and connection.schema_name != get_public_schema_name():
raise Exception("Can't create tenant outside the public schema. "
"Current schema is %s." % connection.schema_name)
elif has_schema and not is_new and connection.schema_name not in (self.schema_name, get_public_schema_name()):
        raise Exception("Can't update tenant outside its own schema or "
"the public schema. Current schema is %s."
% connection.schema_name)
super().save(*args, **kwargs)
if has_schema and is_new and self.auto_create_schema:
try:
self.create_schema(check_if_exists=True, verbosity=verbosity)
post_schema_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
except Exception:
# We failed creating the tenant, delete what we created and
# re-raise the exception
self.delete(force_drop=True)
raise
elif is_new:
# although we are not using the schema functions directly, the signal might be registered by a listener
schema_needs_to_be_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
elif not is_new and self.auto_create_schema and not schema_exists(self.schema_name):
# Create schemas for existing models, deleting only the schema on failure
try:
self.create_schema(check_if_exists=True, verbosity=verbosity)
post_schema_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
except Exception:
# We failed creating the schema, delete what we created and
# re-raise the exception
self._drop_schema()
raise
def serializable_fields(self):
""" in certain cases the user model isn't serializable so you may want to only send the id """
return self
def _drop_schema(self, force_drop=False):
""" Drops the schema"""
connection = connections[get_tenant_database_alias()]
has_schema = hasattr(connection, 'schema_name')
if has_schema and connection.schema_name not in (self.schema_name, get_public_schema_name()):
        raise Exception("Can't delete tenant outside its own schema or "
"the public schema. Current schema is %s."
% connection.schema_name)
if has_schema and schema_exists(self.schema_name) and (self.auto_drop_schema or force_drop):
self.pre_drop()
cursor = connection.cursor()
cursor.execute('DROP SCHEMA "%s" CASCADE' % self.schema_name)
def pre_drop(self):
"""
    This is a routine which you could override to back up the tenant schema before dropping.
:return:
"""
def delete(self, force_drop=False, *args, **kwargs):
"""
    Deletes this row. Drops the tenant's schema if the attribute
    auto_drop_schema is set to True.
"""
self._drop_schema(force_drop)
super().delete(*args, **kwargs)
def create_schema(self, check_if_exists=False, sync_schema=True,
verbosity=1):
"""
Creates the schema 'schema_name' for this tenant. Optionally checks if
the schema already exists before creating it. Returns true if the
schema was created, false otherwise.
"""
# safety check
connection = connections[get_tenant_database_alias()]
_check_schema_name(self.schema_name)
cursor = connection.cursor()
if check_if_exists and schema_exists(self.schema_name):
return False
fake_migrations = get_creation_fakes_migrations()
if sync_schema:
if fake_migrations:
# copy tables and data from provided model schema
base_schema = get_tenant_base_schema()
clone_schema = CloneSchema()
clone_schema.clone_schema(base_schema, self.schema_name)
call_command('migrate_schemas',
tenant=True,
fake=True,
schema_name=self.schema_name,
interactive=False,
verbosity=verbosity)
else:
# create the schema
cursor.execute('CREATE SCHEMA "%s"' % self.schema_name)
call_command('migrate_schemas',
tenant=True,
schema_name=self.schema_name,
interactive=False,
verbosity=verbosity)
connection.set_schema_to_public()
def get_primary_domain(self):
"""
Returns the primary domain of the tenant
"""
try:
domain = self.domains.get(is_primary=True)
return domain
except get_tenant_domain_model().DoesNotExist:
return None
def reverse(self, request, view_name):
"""
Returns the URL of this tenant.
"""
http_type = 'https://' if request.is_secure() else 'http://'
domain = get_current_site(request).domain
url = ''.join((http_type, self.schema_name, '.', domain, reverse(view_name)))
return url
def get_tenant_type(self):
"""
Get the type of tenant. Will only work for multi type tenants
:return: str
"""
return getattr(self, settings.MULTI_TYPE_DATABASE_FIELD)
def save(self, *args, **kwargs):
# Get all other primary domains with the same tenant
domain_list = self.__class__.objects.filter(tenant=self.tenant, is_primary=True).exclude(pk=self.pk)
# If we have no primary domain yet, set as primary domain by default
self.is_primary = self.is_primary or (not domain_list.exists())
if self.is_primary:
# Remove primary status of existing domains for tenant
domain_list.update(is_primary=False)
super().save(*args, **kwargs)
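A hedged end-to-end sketch of how the mixins above are typically wired up; the concrete `Tenant`/`Domain` models and the `name` field are assumptions (django-tenants style):

tenant = Tenant(schema_name='acme', name='Acme Corp.')
tenant.save()    # creates the 'acme' schema and runs migrations (see save/create_schema above)

domain = Domain(domain='acme.example.com', tenant=tenant)
domain.save()    # the first domain for a tenant becomes primary automatically (see save above)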
def addOperators(self, num, target):
"""
Adapted from https://leetcode.com/discuss/58614/java-standard-backtrace-ac-solutoin-short-and-clear
Algorithm:
1. DFS
2. Special handling for multiplication
3. Detect invalid number with leading 0's
:type num: str
:type target: int
:rtype: List[str]
"""
ret = []
self.dfs(num, target, 0, "", 0, 0, ret)
return ret
def dfs(self, num, target, pos, cur_str, cur_val, mul, ret):
    if pos >= len(num):
        if cur_val == target:
            ret.append(cur_str)
    else:
        for i in range(pos, len(num)):  # xrange -> range for Python 3
            # Skip operands with leading zeros.
            if i != pos and num[pos] == "0":
                continue
            nxt_val = int(num[pos:i+1])
            if not cur_str:
                self.dfs(num, target, i+1, "%d" % nxt_val, nxt_val, nxt_val, ret)
            else:
                self.dfs(num, target, i+1, cur_str+"+%d" % nxt_val, cur_val+nxt_val, nxt_val, ret)
                self.dfs(num, target, i+1, cur_str+"-%d" % nxt_val, cur_val-nxt_val, -nxt_val, ret)
                # For '*': undo the previously added term (mul), then add mul*nxt_val.
                self.dfs(num, target, i+1, cur_str+"*%d" % nxt_val, cur_val-mul+mul*nxt_val, mul*nxt_val, ret)
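A quick check of the solver above (the `Solution` wrapper class is assumed, as in the usual LeetCode harness):

solution = Solution()
print(solution.addOperators("123", 6))   # ['1+2+3', '1*2*3']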
def implementor(rpc_code, blocking=False):
    """
    Decorator that registers a function as the RPC implementor for the given code.
    """
    return partial(_add_implementor, rpc_code, blocking)
def _add_implementor(rpc_code, blocking, fn):
# Validate the argument types.
if type(rpc_code) is not int:
raise TypeError("Expected int, got %r instead" % type(rpc_code))
if type(blocking) is not bool:
raise TypeError("Expected bool, got %r instead" % type(blocking))
if not callable(fn):
raise TypeError("Expected callable, got %r instead" % type(fn))
# Validate the RPC code.
if rpc_code in rpcMap:
try:
msg = "Duplicated RPC implementors for code %d: %s and %s"
msg %= (rpc_code, rpcMap[rpc_code][0].__name__, fn.__name__)
except Exception:
msg = "Duplicated RPC implementors for code: %d" % rpc_code
raise SyntaxError(msg)
# TODO: use introspection to validate the function signature
# Register the implementor.
rpcMap[rpc_code] = (fn, blocking)
# Return the implementor. No wrapping is needed! :)
return fn
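How the two functions above fit together: implementor() returns a partial that _add_implementor completes when applied as a decorator. A hedged example; the RPC code constant is an assumption:

MSG_RPC_EXAMPLE = 42   # hypothetical RPC code

@implementor(MSG_RPC_EXAMPLE)
def rpc_example(orchestrator, audit_name):
    return "pong"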
def rpc_bulk(orchestrator, audit_name, rpc_code, *arguments):
# Get the implementor for the RPC code.
# Raise NotImplementedError if it's not defined.
try:
method, blocking = rpcMap[rpc_code]
except KeyError:
raise NotImplementedError("RPC code not implemented: %r" % rpc_code)
# This can't be done with blocking implementors!
if blocking:
raise NotImplementedError(
"Cannot run blocking RPC calls in bulk. Code: %r" % rpc_code)
# Prepare a partial function call to the implementor.
caller = partial(method, orchestrator, audit_name)
# Use the built-in map() function to issue all the calls.
# This ensures we support the exact same interface and functionality.
return map(caller, *arguments)
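One caveat worth knowing: in Python 3, map() returns a lazy iterator, so the bulk calls only run when the result is consumed. A hedged usage sketch; MSG_RPC_SEND_MESSAGE is a hypothetical code registered to the rpc_send_message implementor below, which takes one extra argument:

results = list(rpc_bulk(orchestrator, "my-audit", MSG_RPC_SEND_MESSAGE, ["a", "b", "c"]))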
def rpc_send_message(orchestrator, audit_name, message):
    # Enqueue the message for delivery.
    orchestrator.enqueue_msg(message)
def __init__(self, orchestrator):
"""
:param orchestrator: Orchestrator instance.
:type orchestrator: Orchestrator
"""
# Keep a reference to the Orchestrator.
self.__orchestrator = orchestrator
# Keep a reference to the global RPC map (it's faster this way).
self.__rpcMap = rpcMap
# Check all RPC messages have been mapped at this point.
missing = MSG_RPC_CODES.difference(self.__rpcMap.keys())
if missing:
msg = "Missing RPC implementors for codes: %s"
msg %= ", ".join(str(x) for x in sorted(missing))
raise SyntaxError(msg)
def orchestrator(self):
"""
:returns: Orchestrator instance.
:rtype: Orchestrator
"""
return self.__orchestrator
def execute_rpc(self, audit_name, rpc_code, response_queue, args, kwargs):
"""
Honor a remote procedure call request from a plugin.
:param audit_name: Name of the audit requesting the call.
:type audit_name: str
:param rpc_code: RPC code.
:type rpc_code: int
:param response_queue: Response queue identity.
:type response_queue: str
:param args: Positional arguments to the call.
:type args: tuple
:param kwargs: Keyword arguments to the call.
:type kwargs: dict
"""
try:
# Get the implementor for the RPC code.
# Raise NotImplementedError if it's not defined.
try:
target, blocking = self.__rpcMap[rpc_code]
except KeyError:
raise NotImplementedError(
"RPC code not implemented: %r" % rpc_code)
# If it's a blocking call...
if blocking:
# Run the implementor in a new thread.
thread = Thread(
target = self._execute_rpc_implementor_background,
args = (
Config._context,
audit_name,
target,
response_queue,
args, kwargs),
)
thread.daemon = True
thread.start()
# If it's a non-blocking call...
else:
# Call the implementor directly.
self.execute_rpc_implementor(
audit_name, target, response_queue, args, kwargs)
# Catch exceptions and send them back.
except Exception:
if response_queue:
error = self.prepare_exception(*sys.exc_info())
try:
self.orchestrator.messageManager.send(
response_queue, (False, error))
                except IOError:
                    import warnings
                    warnings.warn("RPC caller died!")
def _execute_rpc_implementor_background(self, context, audit_name, target,
response_queue, args, kwargs):
"""
Honor a remote procedure call request from a plugin,
from a background thread. Must only be used as the entry
point for said background thread!
:param context: Plugin execution context.
:type context: PluginContext
:param audit_name: Name of the audit requesting the call.
:type audit_name: str
:param target: RPC implementor function.
:type target: callable
:param response_queue: Response queue identity.
:type response_queue: str
:param args: Positional arguments to the call.
:type args: tuple
:param kwargs: Keyword arguments to the call.
:type kwargs: dict
"""
Config._context = context
self.execute_rpc_implementor(
audit_name, target, response_queue, args, kwargs)
def execute_rpc_implementor(self, audit_name, target, response_queue,
args, kwargs):
"""
Honor a remote procedure call request from a plugin.
:param audit_name: Name of the audit requesting the call.
:type audit_name: str
:param target: RPC implementor function.
:type target: callable
:param response_queue: Response queue identity.
:type response_queue: str
:param args: Positional arguments to the call.
:type args: tuple
:param kwargs: Keyword arguments to the call.
:type kwargs: dict
"""
try:
# Call the implementor and get the response.
response = target(self.orchestrator, audit_name, *args, **kwargs)
success = True
# Catch exceptions and prepare them for sending.
except Exception:
if response_queue:
response = self.prepare_exception(*sys.exc_info())
success = False
# If the call was synchronous,
# send the response/error back to the plugin.
if response_queue:
self.orchestrator.messageManager.send(
response_queue, (success, response))
def __init__(self, cost_withGradients):
super(CostModel, self).__init__()
self.cost_type = cost_withGradients
# --- Set-up evaluation cost
if self.cost_type is None:
self.cost_withGradients = constant_cost_withGradients
self.cost_type = 'Constant cost'
elif self.cost_type == 'evaluation_time':
self.cost_model = GPModel()
self.cost_withGradients = self._cost_gp_withGradients
self.num_updates = 0
else:
self.cost_withGradients = cost_withGradients
self.cost_type = 'User defined cost'
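A minimal sketch of the three modes the constructor above supports; the quadratic user-defined cost is a made-up example:

import numpy as np

constant = CostModel(None)               # constant cost for every evaluation
timed = CostModel('evaluation_time')     # fit a GP to log evaluation times

def my_cost_withGradients(x):
    # Hypothetical user-defined cost: quadratic cost and its gradient.
    x = np.atleast_2d(x)
    return np.sum(x ** 2, axis=1), 2 * x

custom = CostModel(my_cost_withGradients)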
def __init__(self):
self.SOURCE_HTML_BASE_FOLDER_PATH = u"cameo_res\\source_html"
self.PARSED_RESULT_BASE_FOLDER_PATH = u"cameo_res\\parsed_result"
self.strWebsiteDomain = u"http://buzzorange.com/techorange"
self.dicSubCommandHandler = {
"index":self.downloadIndexPage,
"tag":self.downloadTagPag,
"news":self.downloadNewsPage
}
self.utility = Utility()
self.db = LocalDbForTECHORANGE()
self.driver = None
def _cost_gp(self,x):
"""
Predicts the time cost of evaluating the function at x.
"""
m, _, _, _ = self.cost_model.predict_withGradients(x)
return np.exp(m)
def getUseageMessage(self):
    return ("- TECHORANGE -\n"
            "usage:\n"
            "index - download the entry page of TECHORANGE\n"
            "tag - download tag pages not yet obtained\n"
            "news [tag] - download news not yet obtained [for the given tag]\n")
def _cost_gp_withGradients(self,x):
"""
Predicts the time cost and its gradient of evaluating the function at x.
"""
m, _, dmdx, _= self.cost_model.predict_withGradients(x)
return np.exp(m), np.exp(m)*dmdx
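The two GP cost methods above model the log of the evaluation time, so predictions are exponentiated and the gradient follows the chain rule d/dx exp(m(x)) = exp(m(x)) * m'(x). A small self-contained check of that identity; the sine stand-in for the GP mean is a made-up example:

import numpy as np

x = 1.3
m, dmdx = np.sin(x), np.cos(x)   # stand-ins for the GP mean and its gradient
eps = 1e-6
numeric = (np.exp(np.sin(x + eps)) - np.exp(np.sin(x - eps))) / (2 * eps)
analytic = np.exp(m) * dmdx
assert abs(numeric - analytic) < 1e-8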
def getDriver(self):
chromeDriverExeFilePath = "cameo_res\\chromedriver.exe"
driver = webdriver.Chrome(chromeDriverExeFilePath)
return driver
def update_cost_model(self, x, cost_x):
    """
    Updates the GP used to handle the cost.
    param x: input locations for the cost model GP.
    param cost_x: values of the time cost at the input locations.
    """
if self.cost_type == 'evaluation_time':
cost_evals = np.log(np.atleast_2d(np.asarray(cost_x)).T)
if self.num_updates == 0:
X_all = x
costs_all = cost_evals
else:
X_all = np.vstack((self.cost_model.model.X,x))
costs_all = np.vstack((self.cost_model.model.Y,cost_evals))
self.num_updates += 1
self.cost_model.updateModel(X_all, costs_all, None, None)
def initDriver(self):
if self.driver is None:
self.driver = self.getDriver()
def quitDriver(self):
self.driver.quit()
self.driver = None
def runSpider(self, lstSubcommand=None):
strSubcommand = lstSubcommand[0]
strArg1 = None
if len(lstSubcommand) == 2:
strArg1 = lstSubcommand[1]
self.initDriver() #init selenium driver
self.dicSubCommandHandler[strSubcommand](strArg1)
self.quitDriver() #quit selenium driver
def downloadIndexPage(self, uselessArg1=None):
    logging.info("download index page")
    strIndexHtmlFolderPath = self.SOURCE_HTML_BASE_FOLDER_PATH + u"\\TECHORANGE"
    if not os.path.exists(strIndexHtmlFolderPath):
        os.mkdir(strIndexHtmlFolderPath) #mkdir source_html/TECHORANGE/
    #TechOrange home page
    self.driver.get("https://buzzorange.com/techorange/")
    #save html
    strIndexHtmlFilePath = strIndexHtmlFolderPath + u"\\index.html"
    self.utility.overwriteSaveAs(strFilePath=strIndexHtmlFilePath, unicodeData=self.driver.page_source)
def downloadTagPag(self, uselessArg1=None):
    logging.info("download tag page")
    strTagHtmlFolderPath = self.SOURCE_HTML_BASE_FOLDER_PATH + u"\\TECHORANGE\\tag"
    if not os.path.exists(strTagHtmlFolderPath):
        os.mkdir(strTagHtmlFolderPath) #mkdir source_html/TECHORANGE/tag/
    strTagWebsiteDomain = self.strWebsiteDomain + u"/tag"
    #get tag names from the DB that have not been downloaded yet
    lstStrNotObtainedTagName = self.db.fetchallNotObtainedTagName()
    for strNotObtainedTagName in lstStrNotObtainedTagName:
        #skip tags whose names are too long
        if len(strNotObtainedTagName) > 60:
            continue
        strTagUrl = strTagWebsiteDomain + u"/" + strNotObtainedTagName
        #page 0 of the tag
        intPageNum = 0
        time.sleep(random.randint(2,5)) #sleep random time
        self.driver.get(strTagUrl)
        #save html
        strTagHtmlFilePath = strTagHtmlFolderPath + u"\\%d_%s_tag.html"%(intPageNum, strNotObtainedTagName)
        self.utility.overwriteSaveAs(strFilePath=strTagHtmlFilePath, unicodeData=self.driver.page_source)
        #next page of the tag
        elesNextPageA = self.driver.find_elements_by_css_selector("div.nav-links a.next.page-numbers")
        while len(elesNextPageA) != 0:
            time.sleep(random.randint(2,5)) #sleep random time
            intPageNum = intPageNum+1
            strTagUrl = elesNextPageA[0].get_attribute("href")
            self.driver.get(strTagUrl)
            #save html
            strTagHtmlFilePath = strTagHtmlFolderPath + u"\\%d_%s_tag.html"%(intPageNum, strNotObtainedTagName)
            self.utility.overwriteSaveAs(strFilePath=strTagHtmlFilePath, unicodeData=self.driver.page_source)
            #the page after that
            elesNextPageA = self.driver.find_elements_by_css_selector("div.nav-links a.next.page-numbers")
        #mark the tag as fetched in the DB (isGot = 1)
        self.db.updateTagStatusIsGot(strTagName=strNotObtainedTagName)
        logging.info("got tag %s"%strNotObtainedTagName)
def limitStrLessThen128Char(self, strStr=None):
    if len(strStr) > 128:
        logging.info("limit str to less than 128 chars")
        return strStr[:127] + u"_"
    else:
        return strStr
def downloadNewsPage(self, strTagName=None):
    if strTagName is None:
        #no tag specified
        lstStrObtainedTagName = self.db.fetchallCompletedObtainedTagName()
        for strObtainedTagName in lstStrObtainedTagName:
            self.downloadNewsPageWithGivenTagName(strTagName=strObtainedTagName)
    else:
        #a tag name was specified
        self.downloadNewsPageWithGivenTagName(strTagName=strTagName)
def downloadNewsPageWithGivenTagName(self, strTagName=None):
    logging.info("download news page with tag %s"%strTagName)
    strNewsHtmlFolderPath = self.SOURCE_HTML_BASE_FOLDER_PATH + u"\\TECHORANGE\\news"
    if not os.path.exists(strNewsHtmlFolderPath):
        os.mkdir(strNewsHtmlFolderPath) #mkdir source_html/TECHORANGE/news/
    #get the news URLs recorded in the DB for the given strTagName tag
    lstStrNewsUrl = self.db.fetchallNewsUrlByTagName(strTagName=strTagName)
    intDownloadedNewsCount = 0 #count of downloaded news pages
    timeStart = time.time() #timing start point
    timeEnd = None #timing end point
    for strNewsUrl in lstStrNewsUrl:
        #check whether it has already been downloaded
        if not self.db.checkNewsIsGot(strNewsUrl=strNewsUrl):
            if intDownloadedNewsCount%10 == 0: #measure the time needed to download 10 news pages
                timeEnd = time.time()
                timeCost = timeEnd - timeStart
                logging.info("download 10 news cost %f sec"%timeCost)
                timeStart = timeEnd
            intDownloadedNewsCount = intDownloadedNewsCount+1
            time.sleep(random.randint(2,5)) #sleep random time
            self.driver.get(strNewsUrl)
            #save html
            strNewsName = re.match("^https://buzzorange.com/techorange/[\d]{4}/[\d]{2}/[\d]{2}/(.*)/$", strNewsUrl).group(1)
            strNewsName = self.limitStrLessThen128Char(strStr=strNewsName) #shorten the name to fewer than 128 characters
            strNewsHtmlFilePath = strNewsHtmlFolderPath + u"\\%s_news.html"%strNewsName
            self.utility.overwriteSaveAs(strFilePath=strNewsHtmlFilePath, unicodeData=self.driver.page_source)
            #mark the news as fetched in the DB (isGot = 1)
            self.db.updateNewsStatusIsGot(strNewsUrl=strNewsUrl)
def write():
try:
p = round(weather.pressure(),2)
c = light.light()
print('{"light": '+str(c)+', "pressure": '+str(p)+' }')
except KeyboardInterrupt:
pass
def signal_handler_mapping(self):
"""A dict mapping (signal number) -> (a method handling the signal)."""
# Could use an enum here, but we never end up doing any matching on the specific signal value,
# instead just iterating over the registered signals to set handlers, so a dict is probably
# better.
return {
signal.SIGINT: self._handle_sigint_if_enabled,
signal.SIGQUIT: self.handle_sigquit,
signal.SIGTERM: self.handle_sigterm,
}
def __init__(self):
self._ignore_sigint_lock = threading.Lock()
self._threads_ignoring_sigint = 0
self._ignoring_sigint_v2_engine = False
def _check_sigint_gate_is_correct(self):
assert (
self._threads_ignoring_sigint >= 0
), "This should never happen, someone must have modified the counter outside of SignalHandler."
def _handle_sigint_if_enabled(self, signum, _frame):
with self._ignore_sigint_lock:
self._check_sigint_gate_is_correct()
threads_ignoring_sigint = self._threads_ignoring_sigint
ignoring_sigint_v2_engine = self._ignoring_sigint_v2_engine
if threads_ignoring_sigint == 0 and not ignoring_sigint_v2_engine:
self.handle_sigint(signum, _frame)
def _toggle_ignoring_sigint_v2_engine(self, toggle: bool):
with self._ignore_sigint_lock:
self._ignoring_sigint_v2_engine = toggle
def _ignoring_sigint(self):
with self._ignore_sigint_lock:
self._check_sigint_gate_is_correct()
self._threads_ignoring_sigint += 1
try:
yield
finally:
with self._ignore_sigint_lock:
self._threads_ignoring_sigint -= 1
self._check_sigint_gate_is_correct()
def handle_sigint(self, signum, _frame):
raise KeyboardInterrupt("User interrupted execution with control-c!")
def __init__(self, signum, signame):
self.signum = signum
self.signame = signame
self.traceback_lines = traceback.format_stack()
super(SignalHandler.SignalHandledNonLocalExit, self).__init__()
def handle_sigquit(self, signum, _frame):
raise self.SignalHandledNonLocalExit(signum, "SIGQUIT")
def handle_sigterm(self, signum, _frame):
raise self.SignalHandledNonLocalExit(signum, "SIGTERM")
def __new__(cls, *args, **kwargs):
raise TypeError("Instances of {} are not allowed to be constructed!".format(cls.__name__))
def reset_should_print_backtrace_to_terminal(cls, should_print_backtrace):
"""Set whether a backtrace gets printed to the terminal error stream on a fatal error.
Class state:
- Overwrites `cls._should_print_backtrace_to_terminal`.
"""
cls._should_print_backtrace_to_terminal = should_print_backtrace
def reset_log_location(cls, new_log_location: str) -> None:
"""Re-acquire file handles to error logs based in the new location.
Class state:
- Overwrites `cls._log_dir`, `cls._pid_specific_error_fileobj`, and
`cls._shared_error_fileobj`.
OS state:
- May create a new directory.
- Overwrites signal handlers for many fatal and non-fatal signals (but not SIGUSR2).
:raises: :class:`ExceptionSink.ExceptionSinkError` if the directory does not exist or is not
writable.
"""
# We could no-op here if the log locations are the same, but there's no reason not to have the
# additional safety of re-acquiring file descriptors each time (and erroring out early if the
# location is no longer writable).
try:
safe_mkdir(new_log_location)
except Exception as e:
raise cls.ExceptionSinkError(
"The provided log location path at '{}' is not writable or could not be created: {}.".format(
new_log_location, str(e)
),
e,
)
pid = os.getpid()
pid_specific_log_path = cls.exceptions_log_path(for_pid=pid, in_dir=new_log_location)
shared_log_path = cls.exceptions_log_path(in_dir=new_log_location)
assert pid_specific_log_path != shared_log_path
try:
pid_specific_error_stream = safe_open(pid_specific_log_path, mode="w")
shared_error_stream = safe_open(shared_log_path, mode="a")
except Exception as e:
raise cls.ExceptionSinkError(
"Error opening fatal error log streams for log location '{}': {}".format(
new_log_location, str(e)
)
)
# NB: mutate process-global state!
if faulthandler.is_enabled():
logger.debug("re-enabling faulthandler")
# Call Py_CLEAR() on the previous error stream:
# https://github.com/vstinner/faulthandler/blob/master/faulthandler.c
faulthandler.disable()
# Send a stacktrace to this file if interrupted by a fatal error.
faulthandler.enable(file=pid_specific_error_stream, all_threads=True)
# NB: mutate the class variables!
cls._log_dir = new_log_location
cls._pid_specific_error_fileobj = pid_specific_error_stream
cls._shared_error_fileobj = shared_error_stream
def _exiter(self) -> Optional[Exiter]:
return ExceptionSink.get_global_exiter()
def get_global_exiter(cls) -> Optional[Exiter]:
return cls._exiter
def exiter_as(cls, new_exiter_fun: Callable[[Optional[Exiter]], Exiter]) -> Iterator[None]:
"""Temporarily override the global exiter.
NB: We don't want to try/finally here, because we want exceptions to propagate
with the most recent exiter installed in sys.excepthook.
If we wrap this in a try:finally, exceptions will be caught and exiters unset.
"""
previous_exiter = cls._exiter
new_exiter = new_exiter_fun(previous_exiter)
cls._reset_exiter(new_exiter)
yield
cls._reset_exiter(previous_exiter)
def exiter_as_until_exception(
cls, new_exiter_fun: Callable[[Optional[Exiter]], Exiter]
) -> Iterator[None]:
"""Temporarily override the global exiter, except this will unset it when an exception
happens."""
previous_exiter = cls._exiter
new_exiter = new_exiter_fun(previous_exiter)
try:
cls._reset_exiter(new_exiter)
yield
finally:
cls._reset_exiter(previous_exiter)
def _reset_exiter(cls, exiter: Optional[Exiter]) -> None:
"""Class state:
- Overwrites `cls._exiter`.
Python state:
- Overwrites sys.excepthook.
"""
logger.debug(f"overriding the global exiter with {exiter} (from {cls._exiter})")
# NB: mutate the class variables! This is done before mutating the exception hook, because the
# uncaught exception handler uses cls._exiter to exit.
cls._exiter = exiter
# NB: mutate process-global state!
sys.excepthook = cls._log_unhandled_exception_and_exit
def reset_interactive_output_stream(
cls, interactive_output_stream, override_faulthandler_destination=True
):
"""Class state:
- Overwrites `cls._interactive_output_stream`.
OS state:
- Overwrites the SIGUSR2 handler.
This method registers a SIGUSR2 handler, which permits a non-fatal `kill -31 <pants pid>` for
    stacktrace retrieval. This is also where the error message on fatal exit will be printed to.
"""
try:
# NB: mutate process-global state!
# This permits a non-fatal `kill -31 <pants pid>` for stacktrace retrieval.
if override_faulthandler_destination:
faulthandler.register(
signal.SIGUSR2, interactive_output_stream, all_threads=True, chain=False
)
# NB: mutate the class variables!
cls._interactive_output_stream = interactive_output_stream
except ValueError:
# Warn about "ValueError: IO on closed file" when the stream is closed.
cls.log_exception(
"Cannot reset interactive_output_stream -- stream (probably stderr) is closed"
)
def exceptions_log_path(cls, for_pid=None, in_dir=None):
"""Get the path to either the shared or pid-specific fatal errors log file."""
if for_pid is None:
intermediate_filename_component = ""
else:
assert isinstance(for_pid, Pid)
intermediate_filename_component = ".{}".format(for_pid)
in_dir = in_dir or cls._log_dir
return os.path.join(
in_dir, ".pids", "exceptions{}.log".format(intermediate_filename_component)
)
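For concreteness, a hedged illustration of the paths this helper produces; the log directory is a made-up example:

# with cls._log_dir == "/tmp/pants-logs":
#   exceptions_log_path()             -> "/tmp/pants-logs/.pids/exceptions.log"
#   exceptions_log_path(for_pid=1234) -> "/tmp/pants-logs/.pids/exceptions.1234.log"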
def log_exception(cls, msg):
"""Try to log an error message to this process's error log and the shared error log.
NB: Doesn't raise (logs an error instead).
"""
pid = os.getpid()
fatal_error_log_entry = cls._format_exception_message(msg, pid)
# We care more about this log than the shared log, so write to it first.
try:
cls._try_write_with_flush(cls._pid_specific_error_fileobj, fatal_error_log_entry)
except Exception as e:
logger.error(
"Error logging the message '{}' to the pid-specific file handle for {} at pid {}:\n{}".format(
msg, cls._log_dir, pid, e
)
)
# Write to the shared log.
try:
# TODO: we should probably guard this against concurrent modification by other pants
# subprocesses somehow.
cls._try_write_with_flush(cls._shared_error_fileobj, fatal_error_log_entry)
except Exception as e:
logger.error(
"Error logging the message '{}' to the shared file handle for {} at pid {}:\n{}".format(
msg, cls._log_dir, pid, e
)
)
def _try_write_with_flush(cls, fileobj, payload):
"""This method is here so that it can be patched to simulate write errors.
This is because mock can't patch primitive objects like file objects.
"""
fileobj.write(payload)
fileobj.flush()
def reset_signal_handler(cls, signal_handler):
"""Class state:
- Overwrites `cls._signal_handler`.
OS state:
- Overwrites signal handlers for SIGINT, SIGQUIT, and SIGTERM.
NB: This method calls signal.signal(), which will crash if not called from the main thread!
:returns: The :class:`SignalHandler` that was previously registered, or None if this is
the first time this method was called.
"""
assert isinstance(signal_handler, SignalHandler)
# NB: Modify process-global state!
for signum, handler in signal_handler.signal_handler_mapping.items():
signal.signal(signum, handler)
# Retry any system calls interrupted by any of the signals we just installed handlers for
# (instead of having them raise EINTR). siginterrupt(3) says this is the default behavior on
# Linux and OSX.
signal.siginterrupt(signum, False)
previous_signal_handler = cls._signal_handler
# NB: Mutate the class variables!
cls._signal_handler = signal_handler
return previous_signal_handler
def trapped_signals(cls, new_signal_handler):
"""A contextmanager which temporarily overrides signal handling.
NB: This method calls signal.signal(), which will crash if not called from the main thread!
"""
previous_signal_handler = cls.reset_signal_handler(new_signal_handler)
try:
yield
finally:
cls.reset_signal_handler(previous_signal_handler)
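A hedged usage sketch for the context manager above; `ExceptionSink` as the owning class and `run_command` are assumptions:

with ExceptionSink.trapped_signals(SignalHandler()):
    run_command()   # SIGINT/SIGQUIT/SIGTERM are routed to the handler here
# the previous handlers are restored on exit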
def ignoring_sigint(cls):
"""A contextmanager which disables handling sigint in the current signal handler. This
allows threads that are not the main thread to ignore sigint.
NB: Only use this if you can't use ExceptionSink.trapped_signals().
Class state:
- Toggles `self._ignore_sigint` in `cls._signal_handler`.
"""
with cls._signal_handler._ignoring_sigint():
yield
def toggle_ignoring_sigint_v2_engine(cls, toggle: bool) -> None:
assert cls._signal_handler is not None
cls._signal_handler._toggle_ignoring_sigint_v2_engine(toggle)
def _iso_timestamp_for_now(cls):
return datetime.datetime.now().isoformat()