| column | dtype | values |
|---|---|---|
| hexsha | stringlengths | 40–40 |
| size | int64 | 1–1.03M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3–239 |
| max_stars_repo_name | stringlengths | 5–130 |
| max_stars_repo_head_hexsha | stringlengths | 40–78 |
| max_stars_repo_licenses | sequencelengths | 1–10 |
| max_stars_count | int64 | 1–191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24–24 ⌀ |
| max_issues_repo_path | stringlengths | 3–239 |
| max_issues_repo_name | stringlengths | 5–130 |
| max_issues_repo_head_hexsha | stringlengths | 40–78 |
| max_issues_repo_licenses | sequencelengths | 1–10 |
| max_issues_count | int64 | 1–67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24–24 ⌀ |
| max_forks_repo_path | stringlengths | 3–239 |
| max_forks_repo_name | stringlengths | 5–130 |
| max_forks_repo_head_hexsha | stringlengths | 40–78 |
| max_forks_repo_licenses | sequencelengths | 1–10 |
| max_forks_count | int64 | 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24–24 ⌀ |
| content | stringlengths | 1–1.03M |
| avg_line_length | float64 | 1–958k |
| max_line_length | int64 | 1–1.03M |
| alphanum_fraction | float64 | 0–1 |
79431f57fea0b57501f0b741fc5525f36deb446e | 15,940 | py | Python | google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | ["Apache-2.0"] | 1 | 2022-03-30T05:23:29.000Z | 2022-03-30T05:23:29.000Z | google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | ["Apache-2.0"] | null | null | null | google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec
from google.cloud.aiplatform_v1.types import feature_monitoring_stats
from google.cloud.aiplatform_v1.types import io
from google.cloud.aiplatform_v1.types import job_state
from google.cloud.aiplatform_v1.types import model_monitoring
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1",
manifest={
"ModelDeploymentMonitoringObjectiveType",
"ModelDeploymentMonitoringJob",
"ModelDeploymentMonitoringBigQueryTable",
"ModelDeploymentMonitoringObjectiveConfig",
"ModelDeploymentMonitoringScheduleConfig",
"ModelMonitoringStatsAnomalies",
},
)
class ModelDeploymentMonitoringObjectiveType(proto.Enum):
r"""The Model Monitoring Objective types."""
MODEL_DEPLOYMENT_MONITORING_OBJECTIVE_TYPE_UNSPECIFIED = 0
RAW_FEATURE_SKEW = 1
RAW_FEATURE_DRIFT = 2
FEATURE_ATTRIBUTION_SKEW = 3
FEATURE_ATTRIBUTION_DRIFT = 4
class ModelDeploymentMonitoringJob(proto.Message):
r"""Represents a job that runs periodically to monitor the
deployed models in an endpoint. It will analyze the logged
training & prediction data to detect any abnormal behaviors.
Attributes:
name (str):
Output only. Resource name of a
ModelDeploymentMonitoringJob.
display_name (str):
Required. The user-defined name of the
ModelDeploymentMonitoringJob. The name can be up
            to 128 characters long and can consist of any
UTF-8 characters.
Display name of a ModelDeploymentMonitoringJob.
endpoint (str):
Required. Endpoint resource name. Format:
``projects/{project}/locations/{location}/endpoints/{endpoint}``
state (google.cloud.aiplatform_v1.types.JobState):
            Output only. The detailed state of the
            monitoring job. While the job is being created,
            the state will be 'PENDING'. Once the job is
            successfully created, the state will be
            'RUNNING'. If the job is paused, the state
            becomes 'PAUSED'; when it is resumed, the
            state returns to 'RUNNING'.
schedule_state (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob.MonitoringScheduleState):
Output only. Schedule state when the
monitoring job is in Running state.
model_deployment_monitoring_objective_configs (Sequence[google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringObjectiveConfig]):
Required. The config for monitoring
objectives. This is a per DeployedModel config.
Each DeployedModel needs to be configured
separately.
model_deployment_monitoring_schedule_config (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringScheduleConfig):
Required. Schedule config for running the
monitoring job.
logging_sampling_strategy (google.cloud.aiplatform_v1.types.SamplingStrategy):
Required. Sample Strategy for logging.
model_monitoring_alert_config (google.cloud.aiplatform_v1.types.ModelMonitoringAlertConfig):
Alert config for model monitoring.
predict_instance_schema_uri (str):
YAML schema file uri describing the format of
            a single instance, which is given to format
this Endpoint's prediction (and explanation). If
not set, we will generate predict schema from
collected predict requests.
sample_predict_instance (google.protobuf.struct_pb2.Value):
Sample Predict instance, same format as
[PredictRequest.instances][google.cloud.aiplatform.v1.PredictRequest.instances],
this can be set as a replacement of
[ModelDeploymentMonitoringJob.predict_instance_schema_uri][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.predict_instance_schema_uri].
If not set, we will generate predict schema from collected
predict requests.
analysis_instance_schema_uri (str):
YAML schema file uri describing the format of a single
instance that you want Tensorflow Data Validation (TFDV) to
analyze.
If this field is empty, all the feature data types are
inferred from
[predict_instance_schema_uri][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.predict_instance_schema_uri],
            meaning that TFDV will use the data in the exact format (data
type) as prediction request/response. If there are any data
type differences between predict instance and TFDV instance,
this field can be used to override the schema. For models
trained with Vertex AI, this field must be set as all the
fields in predict instance formatted as string.
bigquery_tables (Sequence[google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringBigQueryTable]):
            Output only. The BigQuery tables created for
            the job under the customer project. Customers can
            run their own queries and analysis on them. There
            can be at most 4 log tables:
            1. Training data logging predict request/response
            2. Serving data logging predict request/response
log_ttl (google.protobuf.duration_pb2.Duration):
The TTL of BigQuery tables in user projects
which stores logs. A day is the basic unit of
the TTL and we take the ceil of TTL/86400(a
day). e.g. { second: 3600} indicates ttl = 1
day.
labels (Sequence[google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob.LabelsEntry]):
The labels with user-defined metadata to
organize your ModelDeploymentMonitoringJob.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this
ModelDeploymentMonitoringJob was created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this
ModelDeploymentMonitoringJob was updated most
recently.
next_schedule_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this monitoring
pipeline will be scheduled to run for the next
round.
stats_anomalies_base_directory (google.cloud.aiplatform_v1.types.GcsDestination):
Stats anomalies base folder path.
encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec):
Customer-managed encryption key spec for a
ModelDeploymentMonitoringJob. If set, this
ModelDeploymentMonitoringJob and all
sub-resources of this
ModelDeploymentMonitoringJob will be secured by
this key.
enable_monitoring_pipeline_logs (bool):
If true, the scheduled monitoring pipeline logs are sent to
Google Cloud Logging, including pipeline status and
anomalies detected. Please note the logs incur cost, which
are subject to `Cloud Logging
pricing <https://cloud.google.com/logging#pricing>`__.
error (google.rpc.status_pb2.Status):
Output only. Only populated when the job's state is
``JOB_STATE_FAILED`` or ``JOB_STATE_CANCELLED``.
"""
class MonitoringScheduleState(proto.Enum):
r"""The state to Specify the monitoring pipeline."""
MONITORING_SCHEDULE_STATE_UNSPECIFIED = 0
PENDING = 1
OFFLINE = 2
RUNNING = 3
name = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
endpoint = proto.Field(proto.STRING, number=3,)
state = proto.Field(proto.ENUM, number=4, enum=job_state.JobState,)
schedule_state = proto.Field(proto.ENUM, number=5, enum=MonitoringScheduleState,)
model_deployment_monitoring_objective_configs = proto.RepeatedField(
proto.MESSAGE, number=6, message="ModelDeploymentMonitoringObjectiveConfig",
)
model_deployment_monitoring_schedule_config = proto.Field(
proto.MESSAGE, number=7, message="ModelDeploymentMonitoringScheduleConfig",
)
logging_sampling_strategy = proto.Field(
proto.MESSAGE, number=8, message=model_monitoring.SamplingStrategy,
)
model_monitoring_alert_config = proto.Field(
proto.MESSAGE, number=15, message=model_monitoring.ModelMonitoringAlertConfig,
)
predict_instance_schema_uri = proto.Field(proto.STRING, number=9,)
sample_predict_instance = proto.Field(
proto.MESSAGE, number=19, message=struct_pb2.Value,
)
analysis_instance_schema_uri = proto.Field(proto.STRING, number=16,)
bigquery_tables = proto.RepeatedField(
proto.MESSAGE, number=10, message="ModelDeploymentMonitoringBigQueryTable",
)
log_ttl = proto.Field(proto.MESSAGE, number=17, message=duration_pb2.Duration,)
labels = proto.MapField(proto.STRING, proto.STRING, number=11,)
create_time = proto.Field(
proto.MESSAGE, number=12, message=timestamp_pb2.Timestamp,
)
update_time = proto.Field(
proto.MESSAGE, number=13, message=timestamp_pb2.Timestamp,
)
next_schedule_time = proto.Field(
proto.MESSAGE, number=14, message=timestamp_pb2.Timestamp,
)
stats_anomalies_base_directory = proto.Field(
proto.MESSAGE, number=20, message=io.GcsDestination,
)
encryption_spec = proto.Field(
proto.MESSAGE, number=21, message=gca_encryption_spec.EncryptionSpec,
)
enable_monitoring_pipeline_logs = proto.Field(proto.BOOL, number=22,)
error = proto.Field(proto.MESSAGE, number=23, message=status_pb2.Status,)
class ModelDeploymentMonitoringBigQueryTable(proto.Message):
r"""ModelDeploymentMonitoringBigQueryTable specifies the BigQuery
table name as well as some information of the logs stored in
this table.
Attributes:
log_source (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringBigQueryTable.LogSource):
The source of log.
log_type (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringBigQueryTable.LogType):
The type of log.
bigquery_table_path (str):
The created BigQuery table to store logs. Customer could do
their own query & analysis. Format:
``bq://<project_id>.model_deployment_monitoring_<endpoint_id>.<tolower(log_source)>_<tolower(log_type)>``
"""
class LogSource(proto.Enum):
r"""Indicates where does the log come from."""
LOG_SOURCE_UNSPECIFIED = 0
TRAINING = 1
SERVING = 2
class LogType(proto.Enum):
r"""Indicates what type of traffic does the log belong to."""
LOG_TYPE_UNSPECIFIED = 0
PREDICT = 1
EXPLAIN = 2
log_source = proto.Field(proto.ENUM, number=1, enum=LogSource,)
log_type = proto.Field(proto.ENUM, number=2, enum=LogType,)
bigquery_table_path = proto.Field(proto.STRING, number=3,)
class ModelDeploymentMonitoringObjectiveConfig(proto.Message):
r"""ModelDeploymentMonitoringObjectiveConfig contains the pair of
deployed_model_id to ModelMonitoringObjectiveConfig.
Attributes:
deployed_model_id (str):
The DeployedModel ID of the objective config.
objective_config (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig):
            The objective config for the model
            monitoring job of this deployed model.
"""
deployed_model_id = proto.Field(proto.STRING, number=1,)
objective_config = proto.Field(
proto.MESSAGE,
number=2,
message=model_monitoring.ModelMonitoringObjectiveConfig,
)
class ModelDeploymentMonitoringScheduleConfig(proto.Message):
r"""The config for scheduling monitoring job.
Attributes:
monitor_interval (google.protobuf.duration_pb2.Duration):
Required. The model monitoring job scheduling
interval. It will be rounded up to next full
hour. This defines how often the monitoring jobs
are triggered.
"""
monitor_interval = proto.Field(
proto.MESSAGE, number=1, message=duration_pb2.Duration,
)
class ModelMonitoringStatsAnomalies(proto.Message):
r"""Statistics and anomalies generated by Model Monitoring.
Attributes:
objective (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringObjectiveType):
            The Model Monitoring Objective that these
            stats and anomalies belong to.
deployed_model_id (str):
Deployed Model ID.
anomaly_count (int):
Number of anomalies within all stats.
feature_stats (Sequence[google.cloud.aiplatform_v1.types.ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies]):
A list of historical Stats and Anomalies
generated for all Features.
"""
class FeatureHistoricStatsAnomalies(proto.Message):
r"""Historical Stats (and Anomalies) for a specific Feature.
Attributes:
feature_display_name (str):
Display Name of the Feature.
threshold (google.cloud.aiplatform_v1.types.ThresholdConfig):
Threshold for anomaly detection.
training_stats (google.cloud.aiplatform_v1.types.FeatureStatsAnomaly):
Stats calculated for the Training Dataset.
prediction_stats (Sequence[google.cloud.aiplatform_v1.types.FeatureStatsAnomaly]):
A list of historical stats generated by
different time window's Prediction Dataset.
"""
feature_display_name = proto.Field(proto.STRING, number=1,)
threshold = proto.Field(
proto.MESSAGE, number=3, message=model_monitoring.ThresholdConfig,
)
training_stats = proto.Field(
proto.MESSAGE,
number=4,
message=feature_monitoring_stats.FeatureStatsAnomaly,
)
prediction_stats = proto.RepeatedField(
proto.MESSAGE,
number=5,
message=feature_monitoring_stats.FeatureStatsAnomaly,
)
objective = proto.Field(
proto.ENUM, number=1, enum="ModelDeploymentMonitoringObjectiveType",
)
deployed_model_id = proto.Field(proto.STRING, number=2,)
anomaly_count = proto.Field(proto.INT32, number=3,)
feature_stats = proto.RepeatedField(
proto.MESSAGE, number=4, message=FeatureHistoricStatsAnomalies,
)
__all__ = tuple(sorted(__protobuf__.manifest))
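# Illustrative construction sketch (resource names below are placeholders, not
# part of this generated module):
#
#     job = ModelDeploymentMonitoringJob(
#         display_name="my-monitoring-job",
#         endpoint="projects/my-project/locations/us-central1/endpoints/1234567890",
#     )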
| 44.901408 | 156 | 0.694981 |
79431f6c8620c5bb789b61127c3482d9a3da4ec4 | 5,620 | py | Python | sprites.py | Alien-X1/demo | 3080bece951dae0ec4c6ae4e5c9c68752c4da849 | ["MIT"] | null | null | null | sprites.py | Alien-X1/demo | 3080bece951dae0ec4c6ae4e5c9c68752c4da849 | ["MIT"] | null | null | null | sprites.py | Alien-X1/demo | 3080bece951dae0ec4c6ae4e5c9c68752c4da849 | ["MIT"] | null | null | null |
# sprite classes for game
# i used some ideas from CodePylet https://www.youtube.com/watch?v=osDofIdja6s&t=1038s
# i also borrowed pretty much all of this from kids can code - thanks!
# on acceleration https://www.khanacademy.org/science/physics/one-dimensional-motion/kinematic-formulas/v/average-velocity-for-constant-acceleration
# on vectors: https://www.youtube.com/watch?v=ml4NSzCQobk
import pygame as pg
from pygame.sprite import Sprite
from random import randint
from settings import *
vec = pg.math.Vector2
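# Note on the movement scheme used in every update() below: with one frame as the
# time step, "vel += acc" followed by "pos += vel + 0.5 * acc" approximates the
# constant-acceleration kinematic update x = x0 + v*t + 0.5*a*t^2 (t = 1 frame),
# as described in the Khan Academy links above.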
class Player(Sprite):
def __init__(self):
Sprite.__init__(self)
self.image = pg.transform.scale(p_walk_mask, (1 * PIX, 2 * PIX))
self.rect = self.image.get_rect()
self.rect.center = (40, HEIGHT - 60)
self.pos = vec(40, HEIGHT - 60)
self.vel = vec(0, 0)
self.acc = vec(0, 0)
self.collide_g = False
self.max_jumps = PLAYER_MAX_JUMPS
self.max_vel = PLAYER_MAX_VEL
self.jump_vel = PLAYER_JUMP_VEL
self.jumps = 0
def update(self):
self.acc = vec(0, 0)
keys = pg.key.get_pressed()
if keys[pg.K_LEFT] and self.vel.x > -self.max_vel:
self.acc.x = -ACCELERATION
if keys[pg.K_RIGHT] and self.vel.x < self.max_vel:
self.acc.x = ACCELERATION
self.jump()
self.gravity()
self.friction()
self.edge()
# self.collision()
self.vel += self.acc
self.pos += self.vel + 0.5 * self.acc
self.rect.midbottom = self.pos
print(self.pos)
def jump(self):
keys = pg.key.get_pressed()
if keys[pg.K_UP] and (self.collide_g == True or self.jumps < self.max_jumps) and self.vel.y >= 0:
            self.collide_g = False
self.jumps += 1
self.vel.y -= self.jump_vel
if self.collide_g == True :
self.jumps = 0
def gravity(self):
if self.collide_g == False and self.vel.y < 15:
self.acc.y = GRAVITY
def collision(self):
if self.pos.y >= HEIGHT - 20:
            self.collide_g = True
def edge(self):
if self.pos.x < 15:
self.vel.x = 0
self.pos.x = 15
# elif self.pos.x > WIDTH + 15:
# self.pos.x = -15
def friction(self):
keys = pg.key.get_pressed()
if self.vel.x > 0 and not (keys[pg.K_LEFT] or keys[pg.K_RIGHT]):
self.acc.x = -FRICTION
elif self.vel.x < 0 and not (keys[pg.K_RIGHT] or keys[pg.K_LEFT]):
self.acc.x = FRICTION
class Baddie(Sprite):
def __init__(self):
Sprite.__init__(self)
self.image = pg.transform.scale(b_masks[randint(0,1)], (1 * PIX, 1 * PIX))
self.rect = self.image.get_rect()
self.rect.center = (WIDTH, HEIGHT / 2)
self.pos = vec(WIDTH, HEIGHT / 2)
self.vel = vec(-5, 0)
self.acc = vec(0, 0)
self.collide_g = False
self.max_jumps = 1
self.jumps = 0
def update(self):
self.gravity()
self.vel += self.acc
self.pos += self.vel + 0.5 * self.acc
self.rect.midbottom = self.pos
def gravity(self):
if self.collide_g == False and self.vel.y < 15:
self.acc.y = GRAVITY
class Immovable(Sprite):
def __init__(self, w, h, x, y):
Sprite.__init__(self)
self.image = pg.transform.scale(s_top_mask, (w * PIX, h * PIX))
self.rect = self.image.get_rect()
self.rect.center = (x * PIX + PIX / 2, y * PIX)
self.pos = (x * PIX + PIX / 2, y * PIX)
self.vel = vec(0, 0)
self.acc = vec(0, 0)
def update(self):
self.acc = vec(0, 0)
self.vel += self.acc
self.pos += self.vel + 0.5 * self.acc
self.rect.midtop = self.pos
# print(str(self.image)+" "+str(self.pos))
class Trap(Sprite):
def __init__(self, w, h, x, y):
Sprite.__init__(self)
self.image = pg.transform.scale(t_top_mask, (w * PIX, h * PIX))
self.rect = self.image.get_rect()
self.rect.center = (x * PIX + PIX / 2, y * PIX)
self.pos = (x * PIX + PIX / 2, y * PIX)
self.vel = vec(0, 0)
self.acc = vec(0, 0)
def update(self):
self.vel += self.acc
self.pos += self.vel + 0.5 * self.acc
self.rect.midtop = self.pos
if self.pos.y > HEIGHT:
self.kill()
class Powerup(Sprite):
def __init__(self, w, h, x, y, t):
Sprite.__init__(self)
self.image = pg.transform.scale(pow_mask, (w * PIX, h * PIX))
self.rect = self.image.get_rect()
self.rect.center = (x * PIX + PIX / 2, y * PIX)
self.pos = (x * PIX + PIX / 2, y * PIX)
self.type = t
self.vel = vec(0, 0)
self.acc = vec(0, 0)
def update(self):
self.acc = vec(0, 0)
self.vel += self.acc
self.pos += self.vel + 0.5 * self.acc
self.rect.midtop = self.pos
class Hidden(Sprite):
def __init__(self, w, h, x, y):
Sprite.__init__(self)
self.image = pg.transform.scale(h_mask, (w * PIX, h * PIX))
# self.color = RED
# self.color = NAVY
# self.image.fill(self.color)
self.glow = self.image.get_rect()
self.rect = self.image.get_rect()
self.rect.center = (x * PIX + PIX / 2, y * PIX)
self.pos = (x * PIX + PIX / 2, y * PIX)
self.vel = vec(0, 0)
self.acc = vec(0, 0)
def update(self):
self.acc = vec(0, 0)
self.vel += self.acc
self.pos += self.vel + 0.5 * self.acc
        self.rect.midtop = self.pos
| 33.058824 | 149 | 0.551957 |
79432040a1eefbd7c12c5cd2b1ceb7523a9fb53b | 4,004 | py | Python | tests/test_parametric_shapes/test_rotate_circle_shape.py | generein/paramak | cec738b278c285a17eaa69fc1f35ea4788204a8c | ["MIT"] | null | null | null | tests/test_parametric_shapes/test_rotate_circle_shape.py | generein/paramak | cec738b278c285a17eaa69fc1f35ea4788204a8c | ["MIT"] | null | null | null | tests/test_parametric_shapes/test_rotate_circle_shape.py | generein/paramak | cec738b278c285a17eaa69fc1f35ea4788204a8c | ["MIT"] | null | null | null |
import math
import os
import unittest
from pathlib import Path
import pytest
from paramak import RotateCircleShape
class TestRotateCircleShape(unittest.TestCase):
def setUp(self):
self.test_shape = RotateCircleShape(points=[(60, 0)], radius=10)
def test_default_parameters(self):
"""Checks that the default parameters of a RotateCircleShape are correct."""
assert self.test_shape.rotation_angle == 360
assert self.test_shape.azimuth_placement_angle == 0
def test_absolute_shape_volume(self):
"""Creates RotateCircleShapes and checks that their volumes are correct."""
# See Issue #445
# assert self.test_shape.volume == pytest.approx(
# 2 * math.pi * 60 * math.pi * (10**2)
# )
self.test_shape.rotation_angle = 270
assert self.test_shape.volume() == pytest.approx(
2 * math.pi * 60 * math.pi * (10**2) * 0.75
)
def test_absolute_shape_areas(self):
"""Creates RotateCircleShapes and checks that the areas of each face are
correct."""
# See Issue #445
# assert self.test_shape.area == pytest.approx(
# math.pi * (10 * 2) * math.pi * (60 * 2))
# assert len(self.test_shape.areas) == 1
# assert self.test_shape.areas.count(pytest.approx(
# math.pi * (10 * 2) * math.pi * (60 * 2), rel=0.01)) == 1
self.test_shape.rotation_angle = 180
assert self.test_shape.area == pytest.approx(
((math.pi * (10**2)) * 2) + (math.pi * (10 * 2) * math.pi * (60 * 2) / 2),
rel=0.01,
)
assert len(self.test_shape.areas) == 3
assert self.test_shape.areas.count(pytest.approx(math.pi * (10**2))) == 2
assert (
self.test_shape.areas.count(
pytest.approx(math.pi * (10 * 2) * math.pi * (60 * 2) / 2, rel=0.01)
)
== 1
)
def test_relative_shape_volume_azimuth_placement_angle(self):
"""Creates two RotateCircleShapes with different
azimuth_placement_angles and checks that their relative volumes are
correct."""
self.test_shape.rotation_angle = 10
assert self.test_shape.volume() == pytest.approx(
(math.pi * 10**2) * ((2 * math.pi * 60) / 36)
)
self.test_shape.azimuth_placement_angle = [0, 90, 180, 270]
assert self.test_shape.volume() == pytest.approx(
(math.pi * 10**2) * ((2 * math.pi * 60) / 36) * 4
)
def test_cut_volume(self):
"""Creates a RotateCircleShape with another RotateCircleShape cut out
and checks that the volume is correct."""
outer_shape = RotateCircleShape(points=[(60, 0)], radius=15)
outer_shape_volume = outer_shape.volume()
outer_shape.cut = self.test_shape
assert outer_shape.volume() == pytest.approx(
outer_shape_volume - self.test_shape.volume()
)
def test_export_stp(self):
"""Exports and stp file with mode = solid and wire and checks
that the outputs exist and relative file sizes are correct."""
os.system("rm test_solid.stp test_solid2.stp test_wire.stp")
self.test_shape.export_stp("test_solid.stp", mode="solid")
self.test_shape.export_stp("test_solid2.stp")
self.test_shape.export_stp("test_wire.stp", mode="wire")
assert Path("test_solid.stp").exists() is True
assert Path("test_solid2.stp").exists() is True
assert Path("test_wire.stp").exists() is True
assert (
Path("test_solid.stp").stat().st_size
== Path("test_solid2.stp").stat().st_size
)
# the circle wire file is actually larger than the circle solid file
# assert Path("test_wire.stp").stat().st_size < \
# Path("test_solid2.stp").stat().st_size
os.system("rm test_solid.stp test_solid2.stp test_wire.stp")
if __name__ == "__main__":
unittest.main()
| 36.072072 | 86 | 0.60964 |
79432095898b506657384b3b2425f76f3e828a0c | 4,513 | py | Python | buildchain/buildchain/lint.py | n1603/metalk8s | 2f337a435380102055d3725f0cc2b6165818e880 | ["Apache-2.0"] | null | null | null | buildchain/buildchain/lint.py | n1603/metalk8s | 2f337a435380102055d3725f0cc2b6165818e880 | ["Apache-2.0"] | null | null | null | buildchain/buildchain/lint.py | n1603/metalk8s | 2f337a435380102055d3725f0cc2b6165818e880 | ["Apache-2.0"] | null | null | null |
# coding: utf-8
"""Tasks for the linting.
This module runs the linting tools for several languages.
It provides a top level task to run all the linting tools, and each linting tool
is run in its own sub-task (so that you can run a single one and/or run several
linting tools in parallel).
Overview:
┌──────────────┐
╱───>│ lint:python │
╱ └──────────────┘
╱ ┌──────────────┐
╱ ───>│ lint:yaml │
┌────────┐╱ └──────────────┘
│ lint │
└────────┘╲ ┌──────────────┐
╲ ───>│ lint:shell │
╲ └──────────────┘
╲ ┌──────────────┐
╲───>│ lint:go │
└──────────────┘
"""
import os
import shlex
from pathlib import Path
import subprocess
from typing import Callable, Iterator, List, Optional, Tuple
import doit # type: ignore
from buildchain import config
from buildchain import constants
from buildchain import types
from buildchain import utils
def task_lint() -> Iterator[types.TaskDict]:
"""Run the linting tools."""
for create_lint_task in LINTERS:
yield create_lint_task()
# Python {{{
def lint_python() -> types.TaskDict:
"""Run Python linting."""
buildchain = constants.ROOT/'buildchain'
python_sources : List[Path] = [
buildchain/'dodo.py',
*buildchain.glob('buildchain/*.py'),
*buildchain.glob('buildchain/targets/*.py'),
]
cmd = ' '.join(map(shlex.quote, ['tox', '-e', 'lint-python']))
env = {'PATH': os.environ['PATH'], 'OSTYPE': os.uname().sysname}
return {
'name': 'python',
'title': utils.title_with_subtask_name('LINT'),
'doc': lint_python.__doc__,
'actions': [doit.action.CmdAction(cmd, env=env)],
'file_dep': python_sources,
}
# }}}
# Shell {{{
def lint_shell() -> types.TaskDict:
"""Run shell scripts linting."""
shell_scripts = [
filepath for filepath in utils.git_ls() if '.sh' in filepath.suffixes
]
return {
'name': 'shell',
'title': utils.title_with_subtask_name('LINT'),
'doc': lint_shell.__doc__,
'actions': [['tox', '-e', 'lint-shell']],
'file_dep': shell_scripts,
}
# }}}
# YAML {{{
def lint_yaml() -> types.TaskDict:
"""Run YAML linting."""
return {
'name': 'yaml',
'title': utils.title_with_subtask_name('LINT'),
'doc': lint_yaml.__doc__,
'actions': [['tox', '-e', 'lint-yaml']],
'file_dep': [
constants.ROOT/'eve/main.yml',
constants.ROOT/'salt/metalk8s/defaults.yaml'
],
}
# }}}
# Go {{{
def check_go_fmt() -> Optional[doit.exceptions.TaskError]:
"""Check if Go code is properly formatted."""
cwd = constants.STORAGE_OPERATOR_ROOT
cmd = [
config.ExtCommand.GOFMT.value, '-s', '-d',
*tuple(constants.STORAGE_OPERATOR_FMT_ARGS)
]
diff = subprocess.check_output(cmd, cwd=cwd)
if diff:
return doit.exceptions.TaskError(
msg='badly formatted Go code, please run `doit.sh format:go`'
)
return None
def check_go_codegen() -> Optional[doit.exceptions.TaskError]:
"""Check if the generated files are up to date."""
cwd = constants.STORAGE_OPERATOR_ROOT
git_diff = [config.ExtCommand.GIT.value, 'diff']
base = subprocess.check_output(git_diff)
for target in ('k8s', 'crds'):
cmd = [config.ExtCommand.OPERATOR_SDK.value, 'generate', target]
subprocess.check_call(cmd, cwd=cwd)
current = subprocess.check_output(git_diff)
# If the diff changed after running the code generation that means that
# the generated files are not in sync with the "source" files.
if current != base:
return doit.exceptions.TaskError(
msg='outdated generated Go files, did you run `doit.sh codegen:go`?'
)
return None
def lint_go() -> types.TaskDict:
"""Run Go linting."""
return {
'name': 'go',
'title': utils.title_with_subtask_name('LINT'),
'doc': lint_go.__doc__,
'actions': [check_go_fmt, check_go_codegen],
'task_dep': [
'check_for:gofmt', 'check_for:operator-sdk', 'check_for:git'
],
'file_dep': list(constants.STORAGE_OPERATOR_SOURCES),
}
# }}}
# List of available linter tasks.
LINTERS : Tuple[Callable[[], types.TaskDict], ...] = (
lint_python,
lint_shell,
lint_yaml,
lint_go,
)
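# Hypothetical example (not an existing target in this repo): a new linter
# sub-task would follow the same pattern -- define a function returning a task
# dict and add it to LINTERS, e.g.:
#
#     def lint_markdown() -> types.TaskDict:
#         """Run Markdown linting."""
#         return {
#             'name': 'markdown',
#             'title': utils.title_with_subtask_name('LINT'),
#             'doc': lint_markdown.__doc__,
#             'actions': [['tox', '-e', 'lint-markdown']],
#         }
#
# (the 'lint-markdown' tox environment is assumed here for illustration only).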
__all__ = utils.export_only_tasks(__name__)
| 28.031056 | 80 | 0.580545 |
79432095f0357c278ce0a23a4aa66c822b5839ac | 2,365 | py | Python | examples/exs_flw_diff2.py | CALFEM/calfem-py | 26d4082ca6b907c48ad814733c733ae30a959657 | ["MIT"] | null | null | null | examples/exs_flw_diff2.py | CALFEM/calfem-py | 26d4082ca6b907c48ad814733c733ae30a959657 | ["MIT"] | null | null | null | examples/exs_flw_diff2.py | CALFEM/calfem-py | 26d4082ca6b907c48ad814733c733ae30a959657 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# example exs8
#----------------------------------------------------------------
# PURPOSE
# Analysis of two dimensional diffusion
#----------------------------------------------------------------
# REFERENCES
# Karl-Gunnar Olsson 1995-10-08
# Ola Dahlblom 2004-09-14
#----------------------------------------------------------------
import numpy as np
import calfem.vis_mpl as cfv
import calfem.core as cfc
# ----- System matrices -----
K = np.zeros((15,15))
f = np.zeros((15,1))
Coord = np.array([
[0, 0 ],[0.025, 0 ],
[0.05, 0 ],[0, 0.025],
[0.025, 0.025],[0.05, 0.025],
[0, 0.05 ],[0.025, 0.05 ],
[0.05, 0.05 ],[0, 0.075],
[0.025, 0.075],[0.05, 0.075],
[0, 0.1 ],[0.025, 0.1 ],
[0.05, 0.1 ]
])
Dof = np.array([
[1 ],[2 ],[3 ],
[4 ],[5 ],[6 ],
[7 ],[8 ],[9 ],
[10],[11],[12],
[13],[14],[15]
])
# ----- Element properties, topology and coordinates -----
ep = np.array([1])
D = np.array([
[1, 0],
[0, 1]
])
Edof = np.array([
[ 1, 2, 5, 4],
[ 2, 3, 6, 5],
[ 4, 5, 8, 7],
[ 5, 6, 9, 8],
[ 7, 8,11,10],
[ 8, 9,12,11],
[10,11,14,13],
[11,12,15,14],
])
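# Each row of Edof lists the four degrees of freedom (one per node, counter-
# clockwise) of one bilinear quadrilateral element; coordxtr below uses it
# together with Coord and Dof to extract the element coordinates Ex and Ey.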
Ex,Ey = cfc.coordxtr(Edof,Coord,Dof)
# ----- Generate FE-mesh -----
#clf; eldraw2(Ex,Ey,[1 3 0],Edof(:,1));
#disp('PRESS ENTER TO CONTINUE'); pause; clf;
# ----- Create and assemble element matrices -----
for i in range(8):
Ke = cfc.flw2qe(Ex[i],Ey[i],ep,D)
K = cfc.assem(Edof[i],K,Ke)
# ----- Solve equation system -----
bcPrescr = np.array([1,2,3,4,7,10,13,14,15])
bcVal = np.array([0,0,0,0,0,0,0.5e-3,1e-3,1e-3])
a,r = cfc.solveq(K,f,bcPrescr,bcVal)
# ----- Compute element flux vector -----
Ed = cfc.extractEldisp(Edof,a)
Es = np.zeros((8,2))
for i in range(8):
Es[i],Et = cfc.flw2qs(Ex[i],Ey[i],ep,D,Ed[i])
# ----- Draw flux vectors and contourlines -----
print(Ex)
print(Ey)
print(a)
print(Ed)
cfv.eldraw2(Ex, Ey, [1, 2, 1], range(1,Ex.shape[0]+1))
cfv.eliso2_mpl(Ex,Ey,Ed)
cfv.showAndWaitMpl()
#cfv.showAndWait()
#sfac=scalfact2(Ex,Ey,Es,0.5);
#eldraw2(Ex,Ey);
#elflux2(Ex,Ey,Es,[1,4],sfac);
#pltscalb2(sfac,[2e-2 0.06 0.01],4);
#disp('PRESS ENTER TO CONTINUE'); pause; clf;
#eldraw2(Ex,Ey,[1,3,0]);
#eliso2(Ex,Ey,Ed,5,[1,4]);
#hold off;
#echo off;
# ----------------- End --------------------------------
| 21.898148 | 65 | 0.471882 |
794320e3638bdcbc12a55b3937b8515c351f51ba | 879 | py | Python | algorithms/1360. Number of Days Between Two Dates.py | vuzway9132/leetcode | e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf | ["MIT"] | 1 | 2020-12-02T13:54:30.000Z | 2020-12-02T13:54:30.000Z | algorithms/1360. Number of Days Between Two Dates.py | vuzway9132/leetcode | e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf | ["MIT"] | null | null | null | algorithms/1360. Number of Days Between Two Dates.py | vuzway9132/leetcode | e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf | ["MIT"] | null | null | null |
class Solution:
def leap_year(self, year):
return ((year % 400 == 0) or (year % 100 != 0 and year % 4 == 0))
def date_to_int(self, year, month, day):
month_length = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
ans = day - 1
while month != 0:
month -= 1
ans += month_length[month]
if month == 2 and self.leap_year(year):
ans += 1
ans += 365 * (year - 1971)
ans += (year - 1) // 4 - 1971 // 4
ans -= (year - 1) // 100 - 1971 // 100
ans += (year - 1) // 400 - 1971 // 400
return ans
def daysBetweenDates(self, date1: str, date2: str) -> int:
date1 = [int(i) for i in date1.split('-')]
date2 = [int(i) for i in date2.split('-')]
return abs(self.date_to_int(*date1) - self.date_to_int(*date2))
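# Illustrative usage (not part of the original solution file); dates use the
# "YYYY-MM-DD" format expected by daysBetweenDates.
if __name__ == "__main__":
    print(Solution().daysBetweenDates("2019-06-29", "2019-06-30"))  # expected output: 1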
| 36.625 | 74 | 0.481229 |
794321123a7ac666ab7a65717825d18a1aad7a85 | 825 | py | Python | server/migrations/versions/0b369a32394d_configuration_table.py | momikey/liblio | c7ad4fd8d72369358863b90e34f3ed89ddef753c | ["MIT"] | null | null | null | server/migrations/versions/0b369a32394d_configuration_table.py | momikey/liblio | c7ad4fd8d72369358863b90e34f3ed89ddef753c | ["MIT"] | null | null | null | server/migrations/versions/0b369a32394d_configuration_table.py | momikey/liblio | c7ad4fd8d72369358863b90e34f3ed89ddef753c | ["MIT"] | null | null | null |
"""Configuration table
Revision ID: 0b369a32394d
Revises: e49c6bed4fb4
Create Date: 2019-10-23 19:26:06.737006
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '0b369a32394d'
down_revision = 'e49c6bed4fb4'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('configuration',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('data', postgresql.JSONB(astext_type=sa.Text()), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('configuration')
# ### end Alembic commands ###
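# Typical invocation sketch (illustration only; assumes direct Alembic usage --
# projects wired through Flask-Migrate would run `flask db upgrade` instead):
#   alembic upgrade head    # applies this migration (creates the table)
#   alembic downgrade -1    # reverts it (drops the table)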
| 25 | 78 | 0.70303 |
7943229625d18914b65d0ea293b9d90220d97914 | 7,287 | py | Python | plaid/model/asset_report_audit_copy_create_response.py | Aky87/plaid-python | af56d445a9e0178b33fb0443f8399dd51c024fe1 | ["MIT"] | 289 | 2015-01-21T20:13:18.000Z | 2022-03-30T18:33:50.000Z | plaid/model/asset_report_audit_copy_create_response.py | Aky87/plaid-python | af56d445a9e0178b33fb0443f8399dd51c024fe1 | ["MIT"] | 203 | 2015-01-06T22:00:50.000Z | 2022-03-24T21:28:55.000Z | plaid/model/asset_report_audit_copy_create_response.py | Aky87/plaid-python | af56d445a9e0178b33fb0443f8399dd51c024fe1 | ["MIT"] | 144 | 2015-01-02T22:42:05.000Z | 2022-02-18T20:00:58.000Z |
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class AssetReportAuditCopyCreateResponse(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'audit_copy_token': (str,), # noqa: E501
'request_id': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'audit_copy_token': 'audit_copy_token', # noqa: E501
'request_id': 'request_id', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, audit_copy_token, request_id, *args, **kwargs): # noqa: E501
"""AssetReportAuditCopyCreateResponse - a model defined in OpenAPI
Args:
audit_copy_token (str): A token that can be shared with a third party auditor to allow them to obtain access to the Asset Report. This token should be stored securely.
request_id (str): A unique identifier for the request, which can be used for troubleshooting. This identifier, like all Plaid identifiers, is case sensitive.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.audit_copy_token = audit_copy_token
self.request_id = request_id
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| 40.938202 | 179 | 0.594895 |
794322eec302b2cd4cb3fbced8a58f08d06a7f44 | 1,362 | py | Python | masonite/commands/MigrateStatusCommand.py | w3x10e8/core | d8f0ca29c2bd5e86d199391fa916ce2f5c9b0f49 | ["MIT"] | null | null | null | masonite/commands/MigrateStatusCommand.py | w3x10e8/core | d8f0ca29c2bd5e86d199391fa916ce2f5c9b0f49 | ["MIT"] | null | null | null | masonite/commands/MigrateStatusCommand.py | w3x10e8/core | d8f0ca29c2bd5e86d199391fa916ce2f5c9b0f49 | ["MIT"] | null | null | null |
""" Migrate Status Command """
import os
import sys
from subprocess import check_output
from cleo import Command
from masonite.packages import add_venv_site_packages
class MigrateStatusCommand(Command):
"""
Migrate status
migrate:status
"""
def handle(self):
sys.path.append(os.getcwd())
try:
add_venv_site_packages()
except ImportError:
self.comment(
                'This command must be run inside the root of a Masonite project directory')
from wsgi import container
migration_directory = ['databases/migrations']
for key, value in container.providers.items():
if 'MigrationDirectory' in key:
migration_directory.append(value)
for directory in migration_directory:
self.line('')
if len(migration_directory) > 1:
self.info('Migrate Status: {}'.format(directory))
try:
output = bytes(check_output(
['orator', 'migrate:status', '-c',
'config/database.py', '-p', directory]
)).decode('utf-8')
self.line(
output.replace('Yes', '<info>Yes</info>')
.replace('No', '<comment>No</comment>'))
except Exception:
pass
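# Typically run from a Masonite project root via the craft CLI, e.g.:
#   craft migrate:status
# (invocation shown for illustration; the exact entry point depends on the project setup)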
| 28.978723 | 94 | 0.558003 |
7943239c3a5edae204f615f822e15bd1de3f8f70 | 2,646 | py | Python | eve/tests/methods/patch_atomic_concurrency.py | kinuax/eve | c62941992b2f66fa02b822581891bd7c18e76d9c | ["BSD-3-Clause"] | null | null | null | eve/tests/methods/patch_atomic_concurrency.py | kinuax/eve | c62941992b2f66fa02b822581891bd7c18e76d9c | ["BSD-3-Clause"] | null | null | null | eve/tests/methods/patch_atomic_concurrency.py | kinuax/eve | c62941992b2f66fa02b822581891bd7c18e76d9c | ["BSD-3-Clause"] | null | null | null |
import simplejson as json
import sys
import eve.methods.common
from eve.tests import TestBase
from eve.utils import config
"""
Atomic Concurrency Checks
Prior to commit 54fd697 from 2016-November, ETags would be verified
twice during a patch. One ETag check was performed non-atomically by Eve,
then again atomically by MongoDB during app.data.update(filter).
The atomic ETag check was removed during issue #920 in 54fd697
When running Eve in a scale-out environment (multiple processes),
concurrent updates are sometimes allowed, because
the Python-only ETag check is not atomic.
There is a critical section in patch_internal() between get_document()
and app.data.update() where a competing Eve process can change the
document and ETag.
This test simulates another process changing data & ETag during
the critical section. The test patches get_document() to return an
intentionally wrong ETag.
"""
def get_document_simulate_concurrent_update(*args, **kwargs):
"""
Hostile version of get_document
    This simulates another process updating MongoDB (and ETag) in
    eve.methods.patch.patch_internal() during the critical section
between get_document() and app.data.update()
"""
document = eve.methods.common.get_document(*args, **kwargs)
document[config.ETAG] = "unexpected change!"
return document
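# For reference: a fully atomic guard would include the ETag in the MongoDB update
# filter itself, so a competing writer could not slip in between the read and the
# write. The test below only exercises Eve's Python-side check, which is expected
# to reject the now-stale If-Match header with a 412 response.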
class TestPatchAtomicConcurrent(TestBase):
def setUp(self):
"""
Patch eve.methods.patch.get_document with a hostile version
that simulates simultaneous updates
"""
self.original_get_document = sys.modules["eve.methods.patch"].get_document
sys.modules[
"eve.methods.patch"
].get_document = get_document_simulate_concurrent_update
return super(TestPatchAtomicConcurrent, self).setUp()
def test_etag_changed_after_get_document(self):
"""
Try to update a document after the ETag was adjusted
outside this process
"""
changes = {"ref": "1234567890123456789054321"}
_r, status = self.patch(
self.item_id_url, data=changes, headers=[("If-Match", self.item_etag)]
)
self.assertEqual(status, 412)
def tearDown(self):
"""Remove patch of eve.methods.patch.get_document"""
sys.modules["eve.methods.patch"].get_document = self.original_get_document
return super(TestPatchAtomicConcurrent, self).tearDown()
def patch(self, url, data, headers=[]):
headers.append(("Content-Type", "application/json"))
r = self.test_client.patch(url, data=json.dumps(data), headers=headers)
return self.parse_response(r)
| 35.756757 | 82 | 0.718443 |
79432406f1641a6e666e2c85181b522101dbcfd6 | 18,434 | py | Python | sdk/python/pulumi_google_native/pubsub/v1beta2/subscription_iam_policy.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | ["Apache-2.0"] | 44 | 2021-04-18T23:00:48.000Z | 2022-02-14T17:43:15.000Z | sdk/python/pulumi_google_native/pubsub/v1beta2/subscription_iam_policy.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | ["Apache-2.0"] | 354 | 2021-04-16T16:48:39.000Z | 2022-03-31T17:16:39.000Z | sdk/python/pulumi_google_native/pubsub/v1beta2/subscription_iam_policy.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | ["Apache-2.0"] | 8 | 2021-04-24T17:46:51.000Z | 2022-01-05T10:40:21.000Z |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['SubscriptionIamPolicyArgs', 'SubscriptionIamPolicy']
@pulumi.input_type
class SubscriptionIamPolicyArgs:
def __init__(__self__, *,
subscription_id: pulumi.Input[str],
bindings: Optional[pulumi.Input[Sequence[pulumi.Input['BindingArgs']]]] = None,
etag: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a SubscriptionIamPolicy resource.
:param pulumi.Input[Sequence[pulumi.Input['BindingArgs']]] bindings: Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:[email protected]`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
:param pulumi.Input[str] etag: `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
:param pulumi.Input[int] version: Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"""
pulumi.set(__self__, "subscription_id", subscription_id)
if bindings is not None:
pulumi.set(__self__, "bindings", bindings)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if project is not None:
pulumi.set(__self__, "project", project)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "subscription_id")
@subscription_id.setter
def subscription_id(self, value: pulumi.Input[str]):
pulumi.set(self, "subscription_id", value)
@property
@pulumi.getter
def bindings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BindingArgs']]]]:
"""
Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:[email protected]`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
"""
return pulumi.get(self, "bindings")
@bindings.setter
def bindings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BindingArgs']]]]):
pulumi.set(self, "bindings", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "version", value)
class SubscriptionIamPolicy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
bindings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BindingArgs']]]]] = None,
etag: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[int]] = None,
__props__=None):
"""
Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
Note - this resource's API doesn't support deletion. When deleted, the resource will persist
on Google Cloud even though it will be deleted from Pulumi state.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BindingArgs']]]] bindings: Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:[email protected]`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
:param pulumi.Input[str] etag: `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
:param pulumi.Input[int] version: Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SubscriptionIamPolicyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
Note - this resource's API doesn't support deletion. When deleted, the resource will persist
on Google Cloud even though it will be deleted from Pulumi state.
:param str resource_name: The name of the resource.
:param SubscriptionIamPolicyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SubscriptionIamPolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
bindings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BindingArgs']]]]] = None,
etag: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[int]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SubscriptionIamPolicyArgs.__new__(SubscriptionIamPolicyArgs)
__props__.__dict__["bindings"] = bindings
__props__.__dict__["etag"] = etag
__props__.__dict__["project"] = project
if subscription_id is None and not opts.urn:
raise TypeError("Missing required property 'subscription_id'")
__props__.__dict__["subscription_id"] = subscription_id
__props__.__dict__["version"] = version
super(SubscriptionIamPolicy, __self__).__init__(
'google-native:pubsub/v1beta2:SubscriptionIamPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'SubscriptionIamPolicy':
"""
Get an existing SubscriptionIamPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = SubscriptionIamPolicyArgs.__new__(SubscriptionIamPolicyArgs)
__props__.__dict__["bindings"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["version"] = None
return SubscriptionIamPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def bindings(self) -> pulumi.Output[Sequence['outputs.BindingResponse']]:
"""
Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:[email protected]`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
"""
return pulumi.get(self, "bindings")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def version(self) -> pulumi.Output[int]:
"""
Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"""
return pulumi.get(self, "version")
| 84.949309 | 1,118 | 0.713302 |
794325bb871626ee5b889841fc5242524ef39ca1 | 12,861 | py | Python | hubblestack/module_runner/audit_runner.py | mew1033/hubble | 74a8a2ec03ff1497045876c1e0208f4ff9f8bc3b | ["Apache-2.0"] | null | null | null | hubblestack/module_runner/audit_runner.py | mew1033/hubble | 74a8a2ec03ff1497045876c1e0208f4ff9f8bc3b | ["Apache-2.0"] | null | null | null | hubblestack/module_runner/audit_runner.py | mew1033/hubble | 74a8a2ec03ff1497045876c1e0208f4ff9f8bc3b | ["Apache-2.0"] | null | null | null |
import os
import logging
import fnmatch
import hubblestack.module_runner.runner
from hubblestack.module_runner.runner import Caller
import hubblestack.module_runner.comparator
from hubblestack.exceptions import HubbleCheckVersionIncompatibleError
from hubblestack.exceptions import HubbleCheckValidationError
log = logging.getLogger(__name__)
CHECK_STATUS = {
'Success': 'Success',
'Failure': 'Failure',
'Skipped': 'Skipped',
'Error': 'Error'
}
class AuditRunner(hubblestack.module_runner.runner.Runner):
"""
Audit runner
"""
def __init__(self):
super().__init__(Caller.AUDIT)
# overridden method
def _execute(self, audit_data_dict, audit_file, args):
# got data for one audit file
# lets parse, validate and execute one by one
tags = args.get('tags', '*')
labels = args.get('labels', None)
verbose = args.get('verbose', None)
result_list = []
boolean_expr_check_list = []
audit_profile = os.path.splitext(os.path.basename(audit_file))[0]
for audit_id, audit_data in audit_data_dict.items():
log.debug('Executing check-id: %s in audit profile: %s', audit_id, audit_profile)
audit_impl = self._get_matched_implementation(audit_id, audit_data, tags, labels)
if not audit_impl:
# no matched impl found
log.debug('No matched implementation found for check-id: %s in audit profile: %s', audit_id,
audit_profile)
continue
if not self._validate_audit_data(audit_id, audit_impl):
continue
try:
# version check
if not self._is_hubble_version_compatible(audit_id, audit_impl):
raise HubbleCheckVersionIncompatibleError('Version not compatible')
if self._is_boolean_expression(audit_impl):
# Check is boolean expression.
# Gather boolean expressions in separate list and evaluate after evaluating all other checks.
log.debug('Boolean expression found. Gathering it to evaluate later.')
boolean_expr_check_list.append({
'check_id': audit_id,
'audit_impl': audit_impl,
'audit_data': audit_data
})
else:
# handover to module
audit_result = self._execute_audit(audit_id, audit_impl, audit_data, verbose, audit_profile)
result_list.append(audit_result)
except (HubbleCheckValidationError, HubbleCheckVersionIncompatibleError) as herror:
# add into error/skipped section
result_list.append({
'check_id': audit_id,
'tag': audit_data['tag'],
'description': audit_data['description'],
'sub_check': audit_data.get('sub_check', False),
'check_result': CHECK_STATUS['Error'] if isinstance(herror, HubbleCheckValidationError) else
CHECK_STATUS['Skipped'],
'audit_profile': audit_profile
})
log.error(herror)
except Exception as exc:
log.error(exc)
# Evaluate boolean expressions
boolean_expr_result_list = self._evaluate_boolean_expression(
boolean_expr_check_list, verbose, audit_profile, result_list)
result_list = result_list + boolean_expr_result_list
# return list of results for a file
return result_list
# overridden method
def _validate_yaml_dictionary(self, yaml_dict):
return True
def _get_matched_implementation(self, audit_check_id, audit_data, tags, labels):
log.debug('Getting matching implementation')
# check if label passed is matching with the check or not.
# If label is not matched, no need to fetch matched implementation
if labels:
check_labels = audit_data.get('labels', [])
if not set(labels).issubset(check_labels):
log.debug('Not executing audit_check: %s, user passed label: %s did not match audit labels: %s',
audit_check_id, labels, check_labels)
return None
# check if tag passed matches with current check or not
# if tag is not matched, no need to fetch matched implementation
audit_check_tag = audit_data.get('tag', audit_check_id)
if not fnmatch.fnmatch(audit_check_tag, tags):
log.debug('Not executing audit_check: %s, user passed tag: %s did not match this audit tag: %s',
audit_check_id,
tags, audit_check_tag)
return None
# Lets look for matching implementation based on os.filter grain
for implementation in audit_data['implementations']:
target = implementation['filter'].get('grains', '*')
if __mods__['match.compound'](target):
return implementation
log.debug('No target matched for audit_check_id: %s', audit_check_id)
return None
def _validate_audit_data(self, audit_id, audit_impl):
if 'module' not in audit_impl:
log.error('Matched implementation does not have module mentioned, check_id: %s', audit_id)
return False
return True
def _is_boolean_expression(self, audit_impl):
return audit_impl.get('module', '') == 'bexpr'
def _execute_audit(self, audit_id, audit_impl, audit_data, verbose, audit_profile, result_list=None):
"""
        Execute the matched implementation of a single check and build its result dictionary.
        :param audit_id: id of the check being executed
        :param audit_impl: implementation block matched for this host
        :param audit_data: full check definition from the audit profile
        :param verbose: when False, only the module's filtered params are included in the result
        :param audit_profile: name of the audit profile the check belongs to
        :param result_list: results of previously executed checks (used when evaluating boolean expressions)
        :return: result dictionary for this check
"""
audit_result = {
"check_id": audit_id,
"description": audit_data['description'],
"audit_profile": audit_profile,
"sub_check": audit_data.get('sub_check', False),
"tag": audit_data['tag'],
"module": audit_impl['module'],
"run_config": {
"filter": audit_impl['filter'],
}
}
failure_reason = audit_data.get('failure_reason', '')
invert_result = audit_data.get('invert_result', False)
# check if the type of invert_result is boolean
if not isinstance(invert_result, bool):
raise HubbleCheckValidationError('value of invert_result is not a boolean in audit_id: {0}'.format(audit_id))
return_no_exec = audit_impl.get('return_no_exec', False)
# check if the type of invert_result is boolean
if not isinstance(return_no_exec, bool):
raise HubbleCheckValidationError('value of return_no_exec is not a boolean in audit_id: {0}'.format(audit_id))
check_eval_logic = audit_impl.get('check_eval_logic', 'and')
if check_eval_logic:
check_eval_logic = check_eval_logic.lower().strip()
# check for check_eval_logic in check implementation. If not present default is 'and'
audit_result['run_config']['check_eval_logic'] = check_eval_logic
audit_result['invert_result'] = invert_result
# check if return_no_exec is true
if return_no_exec:
audit_result['run_config']['return_no_exec'] = True
check_result = CHECK_STATUS['Success']
if invert_result:
check_result = CHECK_STATUS['Failure']
audit_result['failure_reason'] = failure_reason
audit_result['check_result'] = check_result
return audit_result
# Check presence of implementation checks
if 'items' not in audit_impl:
raise HubbleCheckValidationError('No checks are present in audit_id: {0}'.format(audit_id))
if check_eval_logic not in ['and', 'or']:
raise HubbleCheckValidationError(
"Incorrect value provided for parameter 'check_eval_logic': %s" % check_eval_logic)
# Execute module validation of params
for audit_check in audit_impl['items']:
self._validate_module_params(audit_impl['module'], audit_id, audit_check)
# validate succeeded, lets execute it and prepare result dictionary
audit_result['run_config']['items'] = []
# calculate the check result based on check_eval_logic parameter.
# If check_eval_logic is 'and', all subchecks should pass for success.
# If check_eval_logic is 'or', any passed subcheck will result in success.
overall_result = check_eval_logic == 'and'
failure_reasons = []
for audit_check in audit_impl['items']:
mod_status, module_result_local = self._execute_module(audit_impl['module'], audit_id, audit_check,
extra_args=result_list)
# Invoke Comparator
comparator_status, comparator_result = hubblestack.module_runner.comparator.run(
audit_id, audit_check['comparator'], module_result_local, mod_status)
audit_result_local = {}
if comparator_status:
audit_result_local['check_result'] = CHECK_STATUS['Success']
else:
audit_result_local['check_result'] = CHECK_STATUS['Failure']
audit_result_local['failure_reason'] = comparator_result if comparator_result else module_result_local[
'error']
failure_reasons.append(audit_result_local['failure_reason'])
module_logs = {}
if not verbose:
log.debug('Non verbose mode')
module_logs = self._get_filtered_params_to_log(audit_impl['module'], audit_id, audit_check)
if not module_logs:
module_logs = {}
else:
log.debug('verbose mode')
module_logs = audit_check
audit_result_local = {**audit_result_local, **module_logs}
# add this result
audit_result['run_config']['items'].append(audit_result_local)
if check_eval_logic == 'and':
overall_result = overall_result and comparator_status
else:
overall_result = overall_result or comparator_status
# Update overall check result based on invert result
if invert_result:
log.debug("Inverting result for check: %s as invert_result is set to True" % audit_id)
overall_result = not overall_result
if overall_result:
audit_result['check_result'] = CHECK_STATUS['Success']
else:
audit_result['check_result'] = CHECK_STATUS['Failure']
# fetch failure reason. If it is not present in profile, combine all individual checks reasons.
if failure_reason:
audit_result['failure_reason'] = failure_reason
else:
if failure_reasons:
failure_reasons = set(failure_reasons)
audit_result['failure_reason'] = ', '.join(failure_reasons)
return audit_result
def _evaluate_boolean_expression(self, boolean_expr_check_list, verbose, audit_profile, result_list):
boolean_expr_result_list = []
if boolean_expr_check_list:
log.debug("Evaluating boolean expression checks")
for boolean_expr in boolean_expr_check_list:
try:
check_result = self._execute_audit(boolean_expr['check_id'], boolean_expr['audit_impl'],
boolean_expr['audit_data'], verbose, audit_profile, result_list)
boolean_expr_result_list.append(check_result)
except (HubbleCheckValidationError, HubbleCheckVersionIncompatibleError) as herror:
# add into error section
boolean_expr_result_list.append({
'check_id': boolean_expr['check_id'],
'tag': boolean_expr['audit_data']['tag'],
'sub_check': boolean_expr['audit_data'].get('sub_check', False),
'description': boolean_expr['audit_data']['description'],
'check_result': CHECK_STATUS['Error'] if isinstance(herror, HubbleCheckValidationError) else
CHECK_STATUS['Skipped'],
'audit_profile': audit_profile
})
log.error(herror)
except Exception as exc:
log.error(exc)
return boolean_expr_result_list
| 45.285211 | 122 | 0.612472 |
7943264c1335bb6e9808dbf21755f0962c30f1da | 1,821 | py | Python | AlgorithmFactories/ClassificationAlgorithmFactories/SVCAlgorithmFactory.py | CzakoZoltan08/AutoAI | 63436d159f0ac5ac1714868f5dad57efdc382ee8 | [
"MIT"
] | 2 | 2020-03-26T02:27:50.000Z | 2021-05-31T18:49:55.000Z | AlgorithmFactories/ClassificationAlgorithmFactories/SVCAlgorithmFactory.py | CzakoZoltan08/COVID-19-patient-filtering-using-AutomaticAI | 87c4cb1d2848a0258b0b6d652316cb69de382ff0 | [
"MIT"
] | null | null | null | AlgorithmFactories/ClassificationAlgorithmFactories/SVCAlgorithmFactory.py | CzakoZoltan08/COVID-19-patient-filtering-using-AutomaticAI | 87c4cb1d2848a0258b0b6d652316cb69de382ff0 | [
"MIT"
] | 3 | 2019-11-11T11:32:46.000Z | 2020-04-26T05:59:33.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 29 22:16:40 2019
@author: Zoltan
"""
from collections import OrderedDict
from sklearn.svm import SVC
from ..Algorithm import Algorithm
const_param = {
"probability": False
}
discrete_hyper_parameter_list_of_shrinking = [True, False]
discrete_hyper_parameter_list_of_degree = range(0, 210)
discrete_hyper_parameter_list_of_kernel = ["linear", "poly", "rbf", "sigmoid"]
continuous_hyper_parameter_mapping_index_key_mapping = ["C", "gamma", "coef0"]
discrete_hyper_parameter_mapping = ["shrinking", "degree", "kernel"]
discrete_parameter_dict = OrderedDict()
discrete_parameter_dict["shrinking"] = discrete_hyper_parameter_list_of_shrinking
discrete_parameter_dict["degree"] = discrete_hyper_parameter_list_of_degree
discrete_parameter_dict["kernel"] = discrete_hyper_parameter_list_of_kernel
parameter_constraint_dict = OrderedDict()
# dictionary of parameters
param_dict = OrderedDict()
param_dict['shrinking'] = True
param_dict['degree'] = 3
param_dict['kernel'] = "rbf"
param_dict['C'] = 1.0
param_dict['gamma'] = 0.1
param_dict['coef0'] = 0.1
bounds=[(0.0001,3.99),(0.0001,1.99),(0.0001,100.99),(0.001,1.99),(0.001,199.99),(0.001, 0.99)]
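# Usage sketch (illustrative): the defaults above are plain sklearn SVC keyword arguments,
# so a baseline estimator can be built straight from them; how the surrounding AutoAI search
# loop consumes get_algorithm() below is outside this module and not shown here.
def _example_default_estimator():
    # Merge the tunable defaults with the constant parameters (probability=False).
    return SVC(**{**param_dict, **const_param})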
def get_algorithm():
return Algorithm(algorithm_type=SVC,
algorithm_name="SVC CLASSIFIER",
hyper_parameter_dict=param_dict,
discrete_hyper_parameter_dict=discrete_parameter_dict,
discrete_hyper_parameter_mapping=discrete_hyper_parameter_mapping,
continuous_hyper_parameter_mapping=continuous_hyper_parameter_mapping_index_key_mapping,
parameter_constraint_dict=parameter_constraint_dict,
constant_hyper_parameter_dict=const_param,
bounds=bounds) | 33.722222 | 109 | 0.724876 |
7943281d7e19ec9f47dd7d704905eb71007755a5 | 13,143 | py | Python | troposphere/autoscaling.py | vasinov/troposphere | db117248dfb0fc500ae9d10db34c42608240bb8d | [
"BSD-2-Clause"
] | null | null | null | troposphere/autoscaling.py | vasinov/troposphere | db117248dfb0fc500ae9d10db34c42608240bb8d | [
"BSD-2-Clause"
] | null | null | null | troposphere/autoscaling.py | vasinov/troposphere | db117248dfb0fc500ae9d10db34c42608240bb8d | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2012-2013, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSHelperFn, AWSObject, AWSProperty, FindInMap, If, Ref, cloudformation
from .validators import boolean, exactly_one, integer, mutually_exclusive
EC2_INSTANCE_LAUNCH = "autoscaling:EC2_INSTANCE_LAUNCH"
EC2_INSTANCE_LAUNCH_ERROR = "autoscaling:EC2_INSTANCE_LAUNCH_ERROR"
EC2_INSTANCE_TERMINATE = "autoscaling:EC2_INSTANCE_TERMINATE"
EC2_INSTANCE_TERMINATE_ERROR = "autoscaling:EC2_INSTANCE_TERMINATE_ERROR"
TEST_NOTIFICATION = "autoscaling:TEST_NOTIFICATION"
# Termination Policy constants
Default = "Default"
OldestInstance = "OldestInstance"
NewestInstance = "NewestInstance"
OldestLaunchConfiguration = "OldestLaunchConfiguration"
OldestLaunchTemplate = "OldestLaunchTemplate"
ClosestToNextInstanceHour = "ClosestToNextInstanceHour"
AllocationStrategy = "AllocationStrategy"
class Tag(AWSHelperFn):
    def __init__(self, key, value, propagate):
        self.data = {
            "Key": key,
            "Value": value,
            "PropagateAtLaunch": propagate,
        }
class Tags(AWSHelperFn):
defaultPropagateAtLaunch = True
manyType = [type([]), type(())]
def __init__(self, **kwargs):
self.tags = []
for k, v in sorted(kwargs.items()):
if type(v) in self.manyType:
propagate = boolean(v[1])
v = v[0]
else:
propagate = boolean(self.defaultPropagateAtLaunch)
self.tags.append(
{
"Key": k,
"Value": v,
"PropagateAtLaunch": propagate,
}
)
# append tags to list
def __add__(self, newtags):
newtags.tags = self.tags + newtags.tags
return newtags
def to_dict(self):
return self.tags
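# Usage sketch (illustrative): a bare value picks up defaultPropagateAtLaunch, while a
# (value, propagate) pair overrides propagation for that single tag, matching the tuple
# handling in Tags.__init__ above. The tag names and values are placeholders.
def _example_asg_tags():
    return Tags(Environment="production", Name=("web-asg", False))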
class MetadataOptions(AWSProperty):
props = {
"HttpEndpoint": (str, False),
"HttpPutResponseHopLimit": (integer, False),
"HttpTokens": (str, False),
}
class LifecycleHookSpecification(AWSProperty):
props = {
"DefaultResult": (str, False),
"HeartbeatTimeout": (integer, False),
"LifecycleHookName": (str, True),
"LifecycleTransition": (str, True),
"NotificationMetadata": (str, False),
"NotificationTargetARN": (str, False),
"RoleARN": (str, False),
}
class NotificationConfigurations(AWSProperty):
props = {
"TopicARN": (str, True),
"NotificationTypes": (list, True),
}
class MetricsCollection(AWSProperty):
props = {
"Granularity": (str, True),
"Metrics": (list, False),
}
class Metadata(AWSHelperFn):
def __init__(self, init, authentication=None):
self.validate(init, authentication)
# get keys and values from init and authentication
        # if there's only one data point, then we know it's the default
# cfn-init; where the key is 'config'
if len(init.data) == 1:
initKey, initValue = init.data.popitem()
self.data = {initKey: initValue}
else:
self.data = init.data
if authentication:
authKey, authValue = authentication.data.popitem()
self.data[authKey] = authValue
def validate(self, init, authentication):
if not isinstance(init, cloudformation.Init):
raise ValueError("init must be of type cloudformation.Init")
is_instance = isinstance(authentication, cloudformation.Authentication)
if authentication and not is_instance:
raise ValueError(
"authentication must be of type cloudformation.Authentication"
)
class LaunchTemplateSpecification(AWSProperty):
props = {
"LaunchTemplateId": (str, False),
"LaunchTemplateName": (str, False),
"Version": (str, True),
}
def validate(self):
template_ids = ["LaunchTemplateId", "LaunchTemplateName"]
exactly_one(self.__class__.__name__, self.properties, template_ids)
class InstancesDistribution(AWSProperty):
props = {
"OnDemandAllocationStrategy": (str, False),
"OnDemandBaseCapacity": (integer, False),
"OnDemandPercentageAboveBaseCapacity": (integer, False),
"SpotAllocationStrategy": (str, False),
"SpotInstancePools": (integer, False),
"SpotMaxPrice": (str, False),
}
class LaunchTemplateOverrides(AWSProperty):
props = {
"InstanceType": (str, False),
"WeightedCapacity": (str, False),
}
class LaunchTemplate(AWSProperty):
props = {
"LaunchTemplateSpecification": (LaunchTemplateSpecification, True),
"Overrides": ([LaunchTemplateOverrides], True),
}
class MixedInstancesPolicy(AWSProperty):
props = {
"InstancesDistribution": (InstancesDistribution, False),
"LaunchTemplate": (LaunchTemplate, True),
}
class AutoScalingGroup(AWSObject):
resource_type = "AWS::AutoScaling::AutoScalingGroup"
props = {
"AutoScalingGroupName": (str, False),
"AvailabilityZones": (list, False),
"CapacityRebalance": (boolean, False),
"Cooldown": (integer, False),
"DesiredCapacity": (integer, False),
"HealthCheckGracePeriod": (integer, False),
"HealthCheckType": (str, False),
"InstanceId": (str, False),
"LaunchConfigurationName": (str, False),
"LaunchTemplate": (LaunchTemplateSpecification, False),
"LifecycleHookSpecificationList": ([LifecycleHookSpecification], False),
"LoadBalancerNames": (list, False),
"MaxInstanceLifetime": (integer, False),
"MaxSize": (integer, True),
"MetricsCollection": ([MetricsCollection], False),
"MinSize": (integer, True),
"MixedInstancesPolicy": (MixedInstancesPolicy, False),
"NewInstancesProtectedFromScaleIn": (boolean, False),
"NotificationConfigurations": ([NotificationConfigurations], False),
"PlacementGroup": (str, False),
"ServiceLinkedRoleARN": (str, False),
"Tags": ((Tags, list), False),
"TargetGroupARNs": ([str], False),
"TerminationPolicies": ([str], False),
"VPCZoneIdentifier": (list, False),
}
def validate(self):
if "UpdatePolicy" in self.resource:
update_policy = self.resource["UpdatePolicy"]
if (
not isinstance(update_policy, AWSHelperFn)
and "AutoScalingRollingUpdate" in update_policy.properties
):
if not isinstance(update_policy.AutoScalingRollingUpdate, AWSHelperFn):
rolling_update = update_policy.AutoScalingRollingUpdate
min_instances = rolling_update.properties.get(
"MinInstancesInService", "0"
)
is_min_no_check = isinstance(min_instances, (If, FindInMap, Ref))
is_max_no_check = isinstance(self.MaxSize, (If, FindInMap, Ref))
if not (is_min_no_check or is_max_no_check):
max_count = int(self.MaxSize)
min_count = int(min_instances)
if min_count >= max_count:
raise ValueError(
"The UpdatePolicy attribute "
"MinInstancesInService must be less than the "
"autoscaling group's MaxSize"
)
instance_config_types = [
"LaunchConfigurationName",
"LaunchTemplate",
"InstanceId",
]
mutually_exclusive(
self.__class__.__name__, self.properties, instance_config_types
)
availability_zones = self.properties.get("AvailabilityZones")
vpc_zone_identifier = self.properties.get("VPCZoneIdentifier")
if not availability_zones and not vpc_zone_identifier:
raise ValueError(
"Must specify AvailabilityZones and/or "
"VPCZoneIdentifier: http://docs.aws.amazon.com/A"
"WSCloudFormation/latest/UserGuide/aws-propertie"
"s-as-group.html#cfn-as-group-vpczoneidentifier"
)
return True
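# Minimal sketch (illustrative): an AutoScalingGroup that satisfies validate() above —
# at most one instance-configuration source and at least one of AvailabilityZones or
# VPCZoneIdentifier. The referenced resource names are placeholders.
def _example_autoscaling_group():
    return AutoScalingGroup(
        "ExampleAutoScalingGroup",
        MinSize=1,
        MaxSize=3,
        LaunchConfigurationName=Ref("ExampleLaunchConfiguration"),
        VPCZoneIdentifier=[Ref("ExampleSubnet")],
        Tags=Tags(Environment="production"),
    )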
class LaunchConfiguration(AWSObject):
resource_type = "AWS::AutoScaling::LaunchConfiguration"
props = {
"AssociatePublicIpAddress": (boolean, False),
"BlockDeviceMappings": (list, False),
"ClassicLinkVPCId": (str, False),
"ClassicLinkVPCSecurityGroups": ([str], False),
"EbsOptimized": (boolean, False),
"IamInstanceProfile": (str, False),
"ImageId": (str, True),
"InstanceId": (str, False),
"InstanceMonitoring": (boolean, False),
"InstanceType": (str, True),
"KernelId": (str, False),
"KeyName": (str, False),
"LaunchConfigurationName": (str, False),
"Metadata": (Metadata, False),
"MetadataOptions": (MetadataOptions, False),
"PlacementTenancy": (str, False),
"RamDiskId": (str, False),
"SecurityGroups": (list, False),
"SpotPrice": (str, False),
"UserData": (str, False),
}
class StepAdjustments(AWSProperty):
props = {
"MetricIntervalLowerBound": (integer, False),
"MetricIntervalUpperBound": (integer, False),
"ScalingAdjustment": (integer, True),
}
class MetricDimension(AWSProperty):
props = {
"Name": (str, True),
"Value": (str, True),
}
class CustomizedMetricSpecification(AWSProperty):
props = {
"Dimensions": ([MetricDimension], False),
"MetricName": (str, True),
"Namespace": (str, True),
"Statistic": (str, True),
"Unit": (str, False),
}
class PredefinedMetricSpecification(AWSProperty):
props = {
"PredefinedMetricType": (str, True),
"ResourceLabel": (str, False),
}
class TargetTrackingConfiguration(AWSProperty):
props = {
"CustomizedMetricSpecification": (CustomizedMetricSpecification, False),
"DisableScaleIn": (boolean, False),
"PredefinedMetricSpecification": (PredefinedMetricSpecification, False),
"TargetValue": (float, True),
}
class ScalingPolicy(AWSObject):
resource_type = "AWS::AutoScaling::ScalingPolicy"
props = {
"AdjustmentType": (str, False),
"AutoScalingGroupName": (str, True),
"Cooldown": (integer, False),
"EstimatedInstanceWarmup": (integer, False),
"MetricAggregationType": (str, False),
"MinAdjustmentMagnitude": (integer, False),
"PolicyType": (str, False),
"ScalingAdjustment": (integer, False),
"StepAdjustments": ([StepAdjustments], False),
"TargetTrackingConfiguration": (TargetTrackingConfiguration, False),
}
class ScheduledAction(AWSObject):
resource_type = "AWS::AutoScaling::ScheduledAction"
props = {
"AutoScalingGroupName": (str, True),
"DesiredCapacity": (integer, False),
"EndTime": (str, False),
"MaxSize": (integer, False),
"MinSize": (integer, False),
"Recurrence": (str, False),
"StartTime": (str, False),
}
class LifecycleHook(AWSObject):
resource_type = "AWS::AutoScaling::LifecycleHook"
props = {
"AutoScalingGroupName": (str, True),
"DefaultResult": (str, False),
"HeartbeatTimeout": (integer, False),
"LifecycleHookName": (str, False),
"LifecycleTransition": (str, True),
"NotificationMetadata": (str, False),
"NotificationTargetARN": (str, False),
"RoleARN": (str, False),
}
class Trigger(AWSObject):
resource_type = "AWS::AutoScaling::Trigger"
props = {
"AutoScalingGroupName": (str, True),
"BreachDuration": (integer, True),
"Dimensions": (list, True),
"LowerBreachScaleIncrement": (integer, False),
"LowerThreshold": (integer, True),
"MetricName": (str, True),
"Namespace": (str, True),
"Period": (integer, True),
"Statistic": (str, True),
"Unit": (str, False),
"UpperBreachScaleIncrement": (integer, False),
"UpperThreshold": (integer, True),
}
class EBSBlockDevice(AWSProperty):
# http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-template.html
props = {
"DeleteOnTermination": (boolean, False),
"Encrypted": (boolean, False),
"Iops": (integer, False),
"SnapshotId": (str, False),
"VolumeSize": (integer, False),
"VolumeType": (str, False),
}
class BlockDeviceMapping(AWSProperty):
# http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-mapping.html
props = {
"DeviceName": (str, True),
"Ebs": (EBSBlockDevice, False),
"NoDevice": (boolean, False),
"VirtualName": (str, False),
}
| 32.612903 | 121 | 0.610972 |
794328b682bff21ecfb9c543a51b834f02ebd719 | 609 | py | Python | gamer_registration_system/con/migrations/0002_event_creator.py | splummer/gamer_reg | 7cccbbf8e6e52e46594c8128a7e7a523b8202f03 | [
"MIT"
] | null | null | null | gamer_registration_system/con/migrations/0002_event_creator.py | splummer/gamer_reg | 7cccbbf8e6e52e46594c8128a7e7a523b8202f03 | [
"MIT"
] | null | null | null | gamer_registration_system/con/migrations/0002_event_creator.py | splummer/gamer_reg | 7cccbbf8e6e52e46594c8128a7e7a523b8202f03 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.8 on 2018-09-08 04:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('con', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='event',
name='creator',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
| 26.478261 | 121 | 0.663383 |
79432a1e3cd9fe0d8a4ac78b9b1f9009e4ec3049 | 1,237 | py | Python | virtual/lib/python3.8/site-packages/flask_wtf/i18n.py | Lenus254/personal_blog | aac38e4b5372c86efa8e24db2e051fef8e5feef8 | [
"Unlicense"
] | 1,064 | 2015-01-08T09:39:47.000Z | 2021-05-25T09:56:13.000Z | virtual/lib/python3.8/site-packages/flask_wtf/i18n.py | Lenus254/personal_blog | aac38e4b5372c86efa8e24db2e051fef8e5feef8 | [
"Unlicense"
] | 257 | 2015-01-18T12:41:06.000Z | 2021-05-24T17:35:04.000Z | virtual/lib/python3.8/site-packages/flask_wtf/i18n.py | Lenus254/personal_blog | aac38e4b5372c86efa8e24db2e051fef8e5feef8 | [
"Unlicense"
] | 299 | 2015-01-20T20:14:17.000Z | 2021-05-17T10:37:00.000Z | from babel import support
from flask import current_app
from flask import request
from wtforms.i18n import messages_path
try:
from flask_babel import get_locale
except ImportError:
from flask_babelex import get_locale
__all__ = ("Translations", "translations")
def _get_translations():
"""Returns the correct gettext translations.
Copy from flask-babel with some modifications.
"""
if not request:
return None
# babel should be in extensions for get_locale
if "babel" not in current_app.extensions:
return None
translations = getattr(request, "wtforms_translations", None)
if translations is None:
translations = support.Translations.load(
messages_path(), [get_locale()], domain="wtforms"
)
request.wtforms_translations = translations
return translations
class Translations:
def gettext(self, string):
t = _get_translations()
return string if t is None else t.ugettext(string)
def ngettext(self, singular, plural, n):
t = _get_translations()
if t is None:
return singular if n == 1 else plural
return t.ungettext(singular, plural, n)
translations = Translations()
| 23.788462 | 65 | 0.687146 |
79432ae247d641386b3f81108b336fa966097a0e | 303 | py | Python | ExplicitIntegration/Term/__init__.py | robotsorcerer/LevelSetPy | 54064ee7fd0144e0d658dd4f6121cbc1fda664b9 | [
"MIT"
] | 4 | 2022-03-14T07:04:08.000Z | 2022-03-14T18:08:56.000Z | ExplicitIntegration/Term/__init__.py | robotsorcerer/LevelSetPy | 54064ee7fd0144e0d658dd4f6121cbc1fda664b9 | [
"MIT"
] | null | null | null | ExplicitIntegration/Term/__init__.py | robotsorcerer/LevelSetPy | 54064ee7fd0144e0d658dd4f6121cbc1fda664b9 | [
"MIT"
] | null | null | null | from .term_disc import *
from .term_trace_hess import *
from .term_convection import *
from .term_curvature import *
from .term_forcing import *
from .term_lax_friedrich import *
from .term_normal import *
from .term_sum import *
from .term_reinit import *
from .term_restrict_update import *
| 27.545455 | 36 | 0.768977 |
79432babadfa228f5938116346b39e4329aec745 | 3,101 | py | Python | cereal/services.py | bluetulippon/openpilot | 6c57630941cbcd9c0154790271ac7942c7e6c48d | [
"MIT"
] | 6 | 2020-12-01T06:35:17.000Z | 2022-03-21T09:52:29.000Z | cereal/services.py | bluetulippon/openpilot | 6c57630941cbcd9c0154790271ac7942c7e6c48d | [
"MIT"
] | null | null | null | cereal/services.py | bluetulippon/openpilot | 6c57630941cbcd9c0154790271ac7942c7e6c48d | [
"MIT"
] | 2 | 2021-11-19T02:49:09.000Z | 2021-11-22T17:24:18.000Z | #!/usr/bin/env python3
#
# Copyright (c) 2020-2022 [email protected] Chad_Peng(Pon).
# All Rights Reserved.
# Confidential and Proprietary - [email protected] Chad_Peng(Pon).
#
import os
from typing import Optional
TICI = os.path.isfile('/TICI')
RESERVED_PORT = 8022 # sshd
STARTING_PORT = 8001
def new_port(port: int):
port += STARTING_PORT
return port + 1 if port >= RESERVED_PORT else port
class Service:
def __init__(self, port: int, should_log: bool, frequency: float, decimation: Optional[int] = None):
self.port = port
self.should_log = should_log
self.frequency = frequency
self.decimation = decimation
DCAM_FREQ = 10. if not TICI else 20.
services = {
# service: (should_log, frequency, qlog decimation (optional))
"sensorEvents": (True, 100., 100),
"gpsNMEA": (True, 9.),
"deviceState": (True, 2., 1),
"can": (True, 100.),
"controlsState": (True, 100., 10),
"pandaStates": (True, 2., 1),
"peripheralState": (True, 2., 1),
"radarState": (True, 20., 5),
"roadEncodeIdx": (True, 20., 1),
"liveTracks": (True, 20.),
"sendcan": (True, 100., 139),
"logMessage": (True, 0.),
"liveCalibration": (True, 4., 4),
"androidLog": (True, 0.),
"carState": (True, 100., 10),
"carControl": (True, 100., 10),
"longitudinalPlan": (True, 20., 5),
"procLog": (True, 0.5),
"gpsLocationExternal": (True, 10., 1),
"ubloxGnss": (True, 10.),
"clocks": (True, 1., 1),
"ubloxRaw": (True, 20.),
"liveLocationKalman": (True, 20., 2),
"liveParameters": (True, 20., 2),
"cameraOdometry": (True, 20., 5),
"lateralPlan": (True, 20., 5),
"thumbnail": (True, 0.2, 1),
"carEvents": (True, 1., 1),
"carParams": (True, 0.02, 1),
"roadCameraState": (True, 20., 20),
"driverCameraState": (True, DCAM_FREQ, DCAM_FREQ),
"driverEncodeIdx": (True, DCAM_FREQ, 1),
"driverState": (True, DCAM_FREQ, DCAM_FREQ / 2),
"driverMonitoringState": (True, DCAM_FREQ, DCAM_FREQ / 2),
"wideRoadEncodeIdx": (True, 20., 1),
"wideRoadCameraState": (True, 20., 20),
"modelV2": (True, 20., 40),
"managerState": (True, 2., 1),
"uploaderState": (True, 0., 1),
"navInstruction": (True, 0.),
"navRoute": (True, 0.),
"navThumbnail": (True, 0.),
"speedCamera": (True, 100., 10),
# debug
"testJoystick": (False, 0.),
}
service_list = {name: Service(new_port(idx), *vals) for # type: ignore
idx, (name, vals) in enumerate(services.items())}
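# Usage sketch (illustrative): each entry maps a service name to its assigned port plus
# logging/decimation settings; consumers typically look a service up by name like this.
def _example_carstate_port() -> int:
    return service_list["carState"].port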
def build_header():
h = ""
h += "/* THIS IS AN AUTOGENERATED FILE, PLEASE EDIT services.py */\n"
h += "#ifndef __SERVICES_H\n"
h += "#define __SERVICES_H\n"
h += "struct service { char name[0x100]; int port; bool should_log; int frequency; int decimation; };\n"
h += "static struct service services[] = {\n"
for k, v in service_list.items():
should_log = "true" if v.should_log else "false"
decimation = -1 if v.decimation is None else v.decimation
h += ' { "%s", %d, %s, %d, %d },\n' % \
(k, v.port, should_log, v.frequency, decimation)
h += "};\n"
h += "#endif\n"
return h
if __name__ == "__main__":
print(build_header())
| 30.401961 | 106 | 0.620445 |
79432bbdf56732c3c9e96781c123f1a270e6f5b8 | 6,610 | py | Python | ogr/services/github/pull_request.py | AdarLavi/ogr | fe64b280c0fd5ef15d79991c233c67c9888ae486 | [
"MIT"
] | null | null | null | ogr/services/github/pull_request.py | AdarLavi/ogr | fe64b280c0fd5ef15d79991c233c67c9888ae486 | [
"MIT"
] | null | null | null | ogr/services/github/pull_request.py | AdarLavi/ogr | fe64b280c0fd5ef15d79991c233c67c9888ae486 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2018-2019 Red Hat, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime
import logging
from typing import Optional, List
from github import UnknownObjectException
from github.Label import Label as GithubLabel
from github.PullRequest import PullRequest as _GithubPullRequest
from ogr.abstract import PRComment, PRStatus, PullRequest
from ogr.exceptions import GithubAPIException
from ogr.services import github as ogr_github
from ogr.services.base import BasePullRequest
from ogr.services.github.comments import GithubPRComment
logger = logging.getLogger(__name__)
class GithubPullRequest(BasePullRequest):
_raw_pr: _GithubPullRequest
project: "ogr_github.GithubProject"
@property
def title(self) -> str:
return self._raw_pr.title
@title.setter
def title(self, new_title: str) -> None:
self._raw_pr.edit(title=new_title)
@property
def id(self) -> int:
return self._raw_pr.number
@property
def status(self) -> PRStatus:
return (
PRStatus.merged
if self._raw_pr.is_merged()
else PRStatus[self._raw_pr.state]
)
@property
def url(self) -> str:
return self._raw_pr.html_url
@property
def description(self) -> str:
return self._raw_pr.body
@description.setter
def description(self, new_description: str) -> None:
self._raw_pr.edit(body=new_description)
@property
def author(self) -> str:
return self._raw_pr.user.login
@property
def source_branch(self) -> str:
return self._raw_pr.head.ref
@property
def target_branch(self) -> str:
return self._raw_pr.base.ref
@property
def created(self) -> datetime.datetime:
return self._raw_pr.created_at
@property
def labels(self) -> List[GithubLabel]:
return list(self._raw_pr.get_labels())
@property
def diff_url(self) -> str:
return f"{self._raw_pr.html_url}/files"
def __str__(self) -> str:
return "Github" + super().__str__()
@staticmethod
def create(
project: "ogr_github.GithubProject",
title: str,
body: str,
target_branch: str,
source_branch: str,
fork_username: str = None,
) -> "PullRequest":
github_repo = project.github_repo
if project.is_fork and fork_username:
logger.warning(
f"{project.full_repo_name} is fork, ignoring fork_username arg"
)
if project.is_fork:
source_branch = f"{project.namespace}:{source_branch}"
github_repo = project.parent.github_repo
elif fork_username:
source_branch = f"{fork_username}:{source_branch}"
created_pr = github_repo.create_pull(
title=title, body=body, base=target_branch, head=source_branch
)
logger.info(f"PR {created_pr.id} created: {target_branch}<-{source_branch}")
return GithubPullRequest(created_pr, project)
@staticmethod
def get(project: "ogr_github.GithubProject", id: int) -> "PullRequest":
pr = project.github_repo.get_pull(number=id)
return GithubPullRequest(pr, project)
@staticmethod
def get_list(
project: "ogr_github.GithubProject", status: PRStatus = PRStatus.open
) -> List["PullRequest"]:
prs = project.github_repo.get_pulls(
# Github API has no status 'merged', just 'closed'/'opened'/'all'
state=status.name if status != PRStatus.merged else "closed",
sort="updated",
direction="desc",
)
if status == PRStatus.merged:
prs = list(prs) # Github PaginatedList into list()
for pr in prs:
if not pr.is_merged(): # parse merged PRs
prs.remove(pr)
try:
return [GithubPullRequest(pr, project) for pr in prs]
except UnknownObjectException:
return []
def update_info(
self, title: Optional[str] = None, description: Optional[str] = None
) -> "PullRequest":
try:
self._raw_pr.edit(title=title, body=description)
logger.info(f"PR updated: {self._raw_pr.url}")
return self
except Exception as ex:
raise GithubAPIException("there was an error while updating the PR", ex)
def _get_all_comments(self) -> List[PRComment]:
return [
GithubPRComment(parent=self, raw_comment=raw_comment)
for raw_comment in self._raw_pr.get_issue_comments()
]
def get_all_commits(self) -> List[str]:
return [commit.sha for commit in self._raw_pr.get_commits()]
def comment(
self,
body: str,
commit: Optional[str] = None,
filename: Optional[str] = None,
row: Optional[int] = None,
) -> "PRComment":
if not any([commit, filename, row]):
comment = self._raw_pr.create_issue_comment(body)
else:
github_commit = self.project.github_repo.get_commit(commit)
comment = self._raw_pr.create_comment(body, github_commit, filename, row)
return GithubPRComment(parent=self, raw_comment=comment)
def close(self) -> "PullRequest":
self._raw_pr.edit(state=PRStatus.closed.name)
return self
def merge(self) -> "PullRequest":
self._raw_pr.merge()
return self
def add_label(self, *labels: str) -> None:
for label in labels:
self._raw_pr.add_to_labels(label)
| 32.885572 | 85 | 0.657035 |
79432bc180b8ec47405573bf7adc3f4e22b3e9fe | 7,278 | py | Python | nipype/interfaces/tests/test_nilearn.py | felixsc1/nipype | e722d6170593583f16ddfcb95473e5d30b5f1d7c | [
"Apache-2.0"
] | 8 | 2019-05-29T09:38:30.000Z | 2021-01-20T03:36:59.000Z | nipype/interfaces/tests/test_nilearn.py | felixsc1/nipype | e722d6170593583f16ddfcb95473e5d30b5f1d7c | [
"Apache-2.0"
] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | nipype/interfaces/tests/test_nilearn.py | felixsc1/nipype | e722d6170593583f16ddfcb95473e5d30b5f1d7c | [
"Apache-2.0"
] | 2 | 2017-09-23T16:22:00.000Z | 2019-08-01T14:18:52.000Z | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import numpy as np
from ...testing import utils
from .. import nilearn as iface
from ...pipeline import engine as pe
import pytest
import numpy.testing as npt
no_nilearn = True
try:
__import__('nilearn')
no_nilearn = False
except ImportError:
pass
@pytest.mark.skipif(no_nilearn, reason="the nilearn library is not available")
class TestSignalExtraction():
filenames = {
'in_file': 'fmri.nii',
'label_files': 'labels.nii',
'4d_label_file': '4dlabels.nii',
'out_file': 'signals.tsv'
}
labels = ['CSF', 'GrayMatter', 'WhiteMatter']
global_labels = ['GlobalSignal'] + labels
@pytest.fixture(autouse=True, scope='class')
def setup_class(self, tmpdir_factory):
tempdir = tmpdir_factory.mktemp("test")
self.orig_dir = tempdir.chdir()
utils.save_toy_nii(self.fake_fmri_data, self.filenames['in_file'])
utils.save_toy_nii(self.fake_label_data, self.filenames['label_files'])
def test_signal_extract_no_shared(self):
# run
iface.SignalExtraction(
in_file=self.filenames['in_file'],
label_files=self.filenames['label_files'],
class_labels=self.labels,
incl_shared_variance=False).run()
# assert
self.assert_expected_output(self.labels, self.base_wanted)
def test_signal_extr_bad_label_list(self):
# run
with pytest.raises(ValueError):
iface.SignalExtraction(
in_file=self.filenames['in_file'],
label_files=self.filenames['label_files'],
class_labels=['bad'],
incl_shared_variance=False).run()
def test_signal_extr_equiv_4d_no_shared(self):
self._test_4d_label(
self.base_wanted,
self.fake_equiv_4d_label_data,
incl_shared_variance=False)
def test_signal_extr_4d_no_shared(self):
# set up & run & assert
self._test_4d_label(
self.fourd_wanted,
self.fake_4d_label_data,
incl_shared_variance=False)
def test_signal_extr_global_no_shared(self):
# set up
wanted_global = [[-4. / 6], [-1. / 6], [3. / 6], [-1. / 6], [-7. / 6]]
for i, vals in enumerate(self.base_wanted):
wanted_global[i].extend(vals)
# run
iface.SignalExtraction(
in_file=self.filenames['in_file'],
label_files=self.filenames['label_files'],
class_labels=self.labels,
include_global=True,
incl_shared_variance=False).run()
# assert
self.assert_expected_output(self.global_labels, wanted_global)
def test_signal_extr_4d_global_no_shared(self):
# set up
wanted_global = [[3. / 8], [-3. / 8], [1. / 8], [-7. / 8], [-9. / 8]]
for i, vals in enumerate(self.fourd_wanted):
wanted_global[i].extend(vals)
# run & assert
self._test_4d_label(
wanted_global,
self.fake_4d_label_data,
include_global=True,
incl_shared_variance=False)
def test_signal_extr_shared(self):
# set up
wanted = []
for vol in range(self.fake_fmri_data.shape[3]):
volume = self.fake_fmri_data[:, :, :, vol].flatten()
wanted_row = []
for reg in range(self.fake_4d_label_data.shape[3]):
region = self.fake_4d_label_data[:, :, :, reg].flatten()
wanted_row.append(
(volume * region).sum() / (region * region).sum())
wanted.append(wanted_row)
# run & assert
self._test_4d_label(wanted, self.fake_4d_label_data)
def test_signal_extr_traits_valid(self):
''' Test a node using the SignalExtraction interface.
Unlike interface.run(), node.run() checks the traits
'''
# run
node = pe.Node(
iface.SignalExtraction(
in_file=os.path.abspath(self.filenames['in_file']),
label_files=os.path.abspath(self.filenames['label_files']),
class_labels=self.labels,
incl_shared_variance=False),
name='SignalExtraction')
node.run()
# assert
# just checking that it passes trait validations
def _test_4d_label(self,
wanted,
fake_labels,
include_global=False,
incl_shared_variance=True):
# set up
utils.save_toy_nii(fake_labels, self.filenames['4d_label_file'])
# run
iface.SignalExtraction(
in_file=self.filenames['in_file'],
label_files=self.filenames['4d_label_file'],
class_labels=self.labels,
incl_shared_variance=incl_shared_variance,
include_global=include_global).run()
wanted_labels = self.global_labels if include_global else self.labels
# assert
self.assert_expected_output(wanted_labels, wanted)
def assert_expected_output(self, labels, wanted):
with open(self.filenames['out_file'], 'r') as output:
got = [line.split() for line in output]
labels_got = got.pop(0) # remove header
assert labels_got == labels
assert len(got) == self.fake_fmri_data.shape[
3], 'num rows and num volumes'
# convert from string to float
got = [[float(num) for num in row] for row in got]
for i, time in enumerate(got):
assert len(labels) == len(time)
for j, segment in enumerate(time):
npt.assert_almost_equal(segment, wanted[i][j], decimal=1)
    # dj: self doesn't have orig_dir at this point, not sure how to change it.
# should work without it
# def teardown_class(self):
# self.orig_dir.chdir()
fake_fmri_data = np.array([[[[2, -1, 4, -2, 3], [4, -2, -5, -1, 0]],
[[-2, 0, 1, 4, 4], [-5, 3, -3, 1, -5]]],
[[[2, -2, -1, -2, -5], [3, 0, 3, -5, -2]],
[[-4, -2, -2, 1, -2], [3, 1, 4, -3, -2]]]])
fake_label_data = np.array([[[1, 0], [3, 1]], [[2, 0], [1, 3]]])
fake_equiv_4d_label_data = np.array(
[[[[1., 0., 0.], [0., 0., 0.]], [[0., 0., 1.], [1., 0., 0.]]],
[[[0., 1., 0.], [0., 0., 0.]], [[1., 0., 0.], [0., 0., 1.]]]])
base_wanted = [[-2.33333, 2, .5], [0, -2, .5], [-.3333333, -1, 2.5],
[0, -2, .5], [-1.3333333, -5, 1]]
fake_4d_label_data = np.array([[[[0.2, 0.3, 0.5], [0.1, 0.1, 0.8]],
[[0.1, 0.3, 0.6], [0.3, 0.4, 0.3]]],
[[[0.2, 0.2, 0.6], [0., 0.3, 0.7]],
[[0.3, 0.3, 0.4], [0.3, 0.4, 0.3]]]])
fourd_wanted = [[-5.0652173913, -5.44565217391, 5.50543478261], [
-7.02173913043, 11.1847826087, -4.33152173913
], [-19.0869565217, 21.2391304348,
-4.57608695652], [5.19565217391, -3.66304347826, -1.51630434783],
[-12.0, 3., 0.5]]
| 36.208955 | 79 | 0.55496 |
79432beb9ea639dbdb09aca51157508eb26315b4 | 42,383 | py | Python | src/openprocurement/tender/belowthreshold/tests/complaint_blanks.py | ProzorroUKR/openprocurement.api | 2855a99aa8738fb832ee0dbad4e9590bd3643511 | [
"Apache-2.0"
] | 10 | 2020-02-18T01:56:21.000Z | 2022-03-28T00:32:57.000Z | src/openprocurement/tender/belowthreshold/tests/complaint_blanks.py | quintagroup/openprocurement.api | 2855a99aa8738fb832ee0dbad4e9590bd3643511 | [
"Apache-2.0"
] | 26 | 2018-07-16T09:30:44.000Z | 2021-02-02T17:51:30.000Z | src/openprocurement/tender/belowthreshold/tests/complaint_blanks.py | ProzorroUKR/openprocurement.api | 2855a99aa8738fb832ee0dbad4e9590bd3643511 | [
"Apache-2.0"
] | 15 | 2019-08-08T10:50:47.000Z | 2022-02-05T14:13:36.000Z | # -*- coding: utf-8 -*-
from openprocurement.api.utils import get_now
from datetime import timedelta
from copy import deepcopy
from mock import patch
from openprocurement.tender.belowthreshold.tests.base import (
test_draft_claim, test_claim, test_author
)
# TenderComplaintResourceTest
def create_tender_complaint_invalid(self):
response = self.app.post_json(
"/tenders/some_id/complaints",
{"data": test_draft_claim},
status=404,
)
self.assertEqual(response.status, "404 Not Found")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Not Found", "location": "url", "name": "tender_id"}]
)
request_path = "/tenders/{}/complaints".format(self.tender_id)
response = self.app.post(request_path, {"data": test_draft_claim}, status=422)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[{"description": "Expecting value: line 1 column 1 (char 0)", "location": "body", "name": "data"}],
)
response = self.app.post(request_path, "data", content_type="application/json", status=422)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[{"description": "Expecting value: line 1 column 1 (char 0)", "location": "body", "name": "data"}],
)
response = self.app.post_json(request_path, "data", status=422)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Data not available", "location": "body", "name": "data"}]
)
response = self.app.post_json(request_path, {"not_data": {}}, status=422)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Data not available", "location": "body", "name": "data"}]
)
response = self.app.post_json(request_path, {"data": {}}, status=422)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{"description": ["This field is required."], "location": "body", "name": "author"},
{"description": ["This field is required."], "location": "body", "name": "title"},
],
)
response = self.app.post_json(request_path, {"data": {"invalid_field": "invalid_value"}}, status=422)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Rogue field", "location": "body", "name": "invalid_field"}]
)
response = self.app.post_json(request_path, {"data": {"author": {"identifier": "invalid_value"}}}, status=422)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": {
"identifier": ["Please use a mapping for this field or Identifier instance instead of str."]
},
"location": "body",
"name": "author",
}
],
)
claim_data = deepcopy(test_draft_claim)
claim_data["author"] = {"identifier": {}}
response = self.app.post_json(
request_path,
{"data": claim_data},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": {
"contactPoint": ["This field is required."],
"identifier": {"scheme": ["This field is required."], "id": ["This field is required."]},
"name": ["This field is required."],
"address": ["This field is required."],
},
"location": "body",
"name": "author",
}
],
)
claim_data["author"] = {"name": "name", "identifier": {"uri": "invalid_value"}}
response = self.app.post_json(
request_path,
{
"data": claim_data
},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": {
"contactPoint": ["This field is required."],
"identifier": {
"scheme": ["This field is required."],
"id": ["This field is required."],
"uri": ["Not a well formed URL."],
},
"address": ["This field is required."],
},
"location": "body",
"name": "author",
}
],
)
claim_data = deepcopy(test_draft_claim)
claim_data["relatedLot"] = "0" * 32
response = self.app.post_json(
request_path,
{
"data": claim_data
},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[{"description": ["relatedLot should be one of lots"], "location": "body", "name": "relatedLot"}],
)
claim_data = deepcopy(test_draft_claim)
del claim_data["type"]
with patch("openprocurement.tender.core.models.RELEASE_2020_04_19", get_now() - timedelta(days=1)):
response = self.app.post_json(
"/tenders/{}/complaints".format(self.tender_id),
{
"data": claim_data
},
status=422
)
self.assertEqual(
response.json,
{'status': 'error',
'errors': [{'description': ['This field is required'],
'location': 'body', 'name': 'type'}]}
)
response = self.app.get("/tenders/{}".format(self.tender_id))
if response.json["data"]["procurementMethodType"] == "belowThreshold":
claim_data["type"] = "complaint"
response = self.app.post_json(
"/tenders/{}/complaints".format(self.tender_id),
{
"data": claim_data
},
status=404
)
self.assertEqual(response.status, "404 Not Found")
self.assertEqual(response.content_type, "text/plain")
def create_tender_complaint(self):
with patch("openprocurement.tender.core.models.RELEASE_2020_04_19", get_now() - timedelta(days=1)):
response = self.app.post_json(
"/tenders/{}/complaints".format(self.tender_id),
{
"data": test_claim
},
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
with patch("openprocurement.tender.core.models.RELEASE_2020_04_19", get_now() + timedelta(days=1)):
claim_data = deepcopy(test_claim)
del claim_data["type"]
response = self.app.post_json(
"/tenders/{}/complaints".format(self.tender_id),
{
"data": claim_data
},
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
complaint = response.json["data"]
status_date = response.json["data"]["date"]
owner_token = response.json["access"]["token"]
self.assertEqual(complaint["author"]["name"], self.test_author["name"])
self.assertIn("id", complaint)
self.assertIn(complaint["id"], response.headers["Location"])
self.assertIn("transfer", response.json["access"])
self.assertNotIn("transfer_token", response.json["data"])
tender = self.db.get(self.tender_id)
tender["status"] = "active.awarded"
tender["awardPeriod"] = {"endDate": "2014-01-01"}
self.db.save(tender)
response = self.app.patch_json(
"/tenders/{}/complaints/{}?acc_token={}".format(self.tender_id, complaint["id"], self.tender_token),
{"data": {"status": "answered"}},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(
response.json["errors"],
[{"description": ["This field is required."], "location": "body", "name": "resolutionType"}],
)
response = self.app.patch_json(
"/tenders/{}/complaints/{}?acc_token={}".format(self.tender_id, complaint["id"], self.tender_token),
{"data": {"status": "answered", "resolutionType": "invalid", "resolution": "spam 100% " * 3}},
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["data"]["status"], "answered")
self.assertNotEqual(response.json["data"]["date"], status_date)
status_date = response.json["data"]["date"]
self.assertEqual(response.json["data"]["resolutionType"], "invalid")
self.assertEqual(response.json["data"]["resolution"], "spam 100% " * 3)
response = self.app.patch_json(
"/tenders/{}/complaints/{}?acc_token={}".format(self.tender_id, complaint["id"], owner_token),
{"data": {"satisfied": True, "status": "resolved"}},
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["data"]["status"], "resolved")
self.assertNotEqual(response.json["data"]["date"], status_date)
response = self.app.patch_json(
"/tenders/{}/complaints/{}?acc_token={}".format(self.tender_id, complaint["id"], owner_token),
{"data": {"status": "cancelled", "cancellationReason": "reason"}},
status=403,
)
self.assertEqual(response.status, "403 Forbidden")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["errors"][0]["description"], "Can't update complaint in current (resolved) status")
response = self.app.get("/tenders/{}".format(self.tender_id))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["data"]["status"], "active.awarded")
self.set_status("unsuccessful")
response = self.app.post_json(
"/tenders/{}/complaints".format(self.tender_id),
{"data": test_claim},
status=403,
)
self.assertEqual(response.content_type, "application/json")
self.assertEqual(
response.json["errors"][0]["description"], "Can't add complaint in current (unsuccessful) tender status"
)
def patch_tender_complaint(self):
response = self.app.post_json(
"/tenders/{}/complaints".format(self.tender_id),
{"data": test_draft_claim},
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
complaint = response.json["data"]
owner_token = response.json["access"]["token"]
response = self.app.patch_json(
"/tenders/{}/complaints/{}?acc_token={}".format(self.tender_id, complaint["id"], self.tender_token),
{"data": {"status": "cancelled", "cancellationReason": "reason"}},
status=403,
)
self.assertEqual(response.status, "403 Forbidden")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["errors"][0]["description"], "Forbidden")
response = self.app.patch_json(
"/tenders/{}/complaints/{}?acc_token={}".format(self.tender_id, complaint["id"], owner_token),
{"data": {"title": "claim title"}},
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.json["data"]["title"], "claim title")
response = self.app.patch_json(
"/tenders/{}/complaints/{}?acc_token={}".format(self.tender_id, complaint["id"], owner_token),
{"data": {"status": "claim"}},
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.json["data"]["status"], "claim")
response = self.app.patch_json(
"/tenders/{}/complaints/{}?acc_token={}".format(self.tender_id, complaint["id"], self.tender_token),
{"data": {"resolution": "changing rules " * 2}},
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["data"]["resolution"], "changing rules " * 2)
response = self.app.patch_json(
"/tenders/{}/complaints/{}?acc_token={}".format(self.tender_id, complaint["id"], self.tender_token),
{"data": {"status": "answered", "resolutionType": "resolved", "resolution": "resolution text " * 2}},
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["data"]["status"], "answered")
self.assertEqual(response.json["data"]["resolutionType"], "resolved")
self.assertEqual(response.json["data"]["resolution"], "resolution text " * 2)
response = self.app.patch_json(
"/tenders/{}/complaints/{}?acc_token={}".format(self.tender_id, complaint["id"], owner_token),
{"data": {"satisfied": False}},
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["data"]["satisfied"], False)
response = self.app.patch_json(
"/tenders/{}/complaints/{}?acc_token={}".format(self.tender_id, complaint["id"], owner_token),
{"data": {"status": "cancelled", "cancellationReason": "reason"}},
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["data"]["status"], "cancelled")
self.assertEqual(response.json["data"]["cancellationReason"], "reason")
response = self.app.patch_json(
"/tenders/{}/complaints/some_id".format(self.tender_id),
{"data": {"status": "resolved", "resolution": "resolution text"}},
status=404,
)
self.assertEqual(response.status, "404 Not Found")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Not Found", "location": "url", "name": "complaint_id"}]
)
response = self.app.patch_json(
"/tenders/some_id/complaints/some_id",
{"data": {"status": "resolved", "resolution": "resolution text"}},
status=404,
)
self.assertEqual(response.status, "404 Not Found")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Not Found", "location": "url", "name": "tender_id"}]
)
response = self.app.get("/tenders/{}/complaints/{}".format(self.tender_id, complaint["id"]))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["data"]["status"], "cancelled")
self.assertEqual(response.json["data"]["cancellationReason"], "reason")
self.assertEqual(response.json["data"]["resolutionType"], "resolved")
self.assertEqual(response.json["data"]["resolution"], "resolution text " * 2)
response = self.app.post_json(
"/tenders/{}/complaints".format(self.tender_id),
{"data": test_draft_claim},
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
complaint = response.json["data"]
owner_token = response.json["access"]["token"]
self.set_status("complete")
response = self.app.patch_json(
"/tenders/{}/complaints/{}?acc_token={}".format(self.tender_id, complaint["id"], owner_token),
{"data": {"status": "claim"}},
status=403,
)
self.assertEqual(response.status, "403 Forbidden")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(
response.json["errors"][0]["description"], "Can't update complaint in current (complete) tender status"
)
def review_tender_complaint(self):
complaints = []
for i in range(3):
response = self.app.post_json(
"/tenders/{}/complaints".format(self.tender_id),
{
"data": test_claim
},
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
complaint = response.json["data"]
owner_token = response.json["access"]["token"]
complaints.append(complaint)
response = self.app.patch_json(
"/tenders/{}/complaints/{}?acc_token={}".format(self.tender_id, complaint["id"], self.tender_token),
{"data": {"status": "answered", "resolutionType": "resolved", "resolution": "resolution text " * 2}},
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["data"]["status"], "answered")
self.assertEqual(response.json["data"]["resolutionType"], "resolved")
self.assertEqual(response.json["data"]["resolution"], "resolution text " * 2)
response = self.app.patch_json(
"/tenders/{}/complaints/{}?acc_token={}".format(self.tender_id, complaint["id"], owner_token),
{"data": {"satisfied": False, "status": "resolved"}},
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["data"]["status"], "resolved")
def get_tender_complaint(self):
claim_data = deepcopy(test_draft_claim)
claim_data["author"] = getattr(self, "test_author", test_author)
response = self.app.post_json(
"/tenders/{}/complaints".format(self.tender_id),
{"data": claim_data},
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
complaint = response.json["data"]
del complaint["author"]
response = self.app.get("/tenders/{}/complaints/{}".format(self.tender_id, complaint["id"]))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["data"], complaint)
self.assertNotIn("transfer_token", response.json["data"])
response = self.app.get("/tenders/{}/complaints/some_id".format(self.tender_id), status=404)
self.assertEqual(response.status, "404 Not Found")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Not Found", "location": "url", "name": "complaint_id"}]
)
response = self.app.get("/tenders/some_id/complaints/some_id", status=404)
self.assertEqual(response.status, "404 Not Found")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Not Found", "location": "url", "name": "tender_id"}]
)
def get_tender_complaints(self):
claim_data = deepcopy(test_draft_claim)
claim_data["author"] = getattr(self, "test_author", test_author)
response = self.app.post_json(
"/tenders/{}/complaints".format(self.tender_id),
{"data": claim_data},
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
complaint = response.json["data"]
del complaint["author"]
response = self.app.get("/tenders/{}/complaints".format(self.tender_id))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["data"][0], complaint)
response = self.app.get("/tenders/some_id/complaints", status=404)
self.assertEqual(response.status, "404 Not Found")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Not Found", "location": "url", "name": "tender_id"}]
)
# TenderLotAwardComplaintResourceTest
def lot_award_create_tender_complaint(self):
claim_data = deepcopy(test_claim)
claim_data["relatedLot"] = self.initial_lots[0]["id"]
response = self.app.post_json(
"/tenders/{}/complaints".format(self.tender_id),
{
"data": claim_data
},
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
complaint = response.json["data"]
owner_token = response.json["access"]["token"]
self.assertEqual(complaint["author"]["name"], self.test_author["name"])
self.assertIn("id", complaint)
self.assertIn(complaint["id"], response.headers["Location"])
tender = self.db.get(self.tender_id)
tender["status"] = "active.awarded"
tender["awardPeriod"] = {"endDate": "2014-01-01"}
self.db.save(tender)
response = self.app.patch_json(
"/tenders/{}/complaints/{}?acc_token={}".format(self.tender_id, complaint["id"], self.tender_token),
{"data": {"status": "answered"}},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(
response.json["errors"],
[{"description": ["This field is required."], "location": "body", "name": "resolutionType"}],
)
response = self.app.patch_json(
"/tenders/{}/complaints/{}?acc_token={}".format(self.tender_id, complaint["id"], self.tender_token),
{"data": {"status": "answered", "resolutionType": "invalid", "resolution": "spam 100% " * 3}},
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["data"]["status"], "answered")
self.assertEqual(response.json["data"]["resolutionType"], "invalid")
self.assertEqual(response.json["data"]["resolution"], "spam 100% " * 3)
response = self.app.patch_json(
"/tenders/{}/complaints/{}?acc_token={}".format(self.tender_id, complaint["id"], owner_token),
{"data": {"satisfied": True, "status": "resolved"}},
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["data"]["status"], "resolved")
response = self.app.patch_json(
"/tenders/{}/complaints/{}?acc_token={}".format(self.tender_id, complaint["id"], owner_token),
{"data": {"status": "cancelled", "cancellationReason": "reason"}},
status=403,
)
self.assertEqual(response.status, "403 Forbidden")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["errors"][0]["description"], "Can't update complaint in current (resolved) status")
response = self.app.get("/tenders/{}".format(self.tender_id))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["data"]["status"], "active.awarded")
self.set_status("unsuccessful")
response = self.app.post_json(
"/tenders/{}/complaints".format(self.tender_id),
{"data": test_draft_claim},
status=403,
)
self.assertEqual(response.status, "403 Forbidden")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(
response.json["errors"][0]["description"], "Can't add complaint in current (unsuccessful) tender status"
)
# TenderComplaintDocumentResourceTest
def not_found(self):
response = self.app.post(
"/tenders/some_id/complaints/some_id/documents", status=404, upload_files=[("file", "name.doc", b"content")]
)
self.assertEqual(response.status, "404 Not Found")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Not Found", "location": "url", "name": "tender_id"}]
)
response = self.app.post(
"/tenders/{}/complaints/some_id/documents".format(self.tender_id),
status=404,
upload_files=[("file", "name.doc", b"content")],
)
self.assertEqual(response.status, "404 Not Found")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Not Found", "location": "url", "name": "complaint_id"}]
)
response = self.app.post(
"/tenders/{}/complaints/{}/documents?acc_token={}".format(
self.tender_id, self.complaint_id, self.complaint_owner_token
),
status=404,
upload_files=[("invalid_value", "name.doc", b"content")],
)
self.assertEqual(response.status, "404 Not Found")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(response.json["errors"], [{"description": "Not Found", "location": "body", "name": "file"}])
response = self.app.get("/tenders/some_id/complaints/some_id/documents", status=404)
self.assertEqual(response.status, "404 Not Found")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Not Found", "location": "url", "name": "tender_id"}]
)
response = self.app.get("/tenders/{}/complaints/some_id/documents".format(self.tender_id), status=404)
self.assertEqual(response.status, "404 Not Found")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Not Found", "location": "url", "name": "complaint_id"}]
)
response = self.app.get("/tenders/some_id/complaints/some_id/documents/some_id", status=404)
self.assertEqual(response.status, "404 Not Found")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Not Found", "location": "url", "name": "tender_id"}]
)
response = self.app.get("/tenders/{}/complaints/some_id/documents/some_id".format(self.tender_id), status=404)
self.assertEqual(response.status, "404 Not Found")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Not Found", "location": "url", "name": "complaint_id"}]
)
response = self.app.get(
"/tenders/{}/complaints/{}/documents/some_id".format(self.tender_id, self.complaint_id), status=404
)
self.assertEqual(response.status, "404 Not Found")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Not Found", "location": "url", "name": "document_id"}]
)
response = self.app.put(
"/tenders/some_id/complaints/some_id/documents/some_id",
status=404,
upload_files=[("file", "name.doc", b"content2")],
)
self.assertEqual(response.status, "404 Not Found")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Not Found", "location": "url", "name": "tender_id"}]
)
response = self.app.put(
"/tenders/{}/complaints/some_id/documents/some_id".format(self.tender_id),
status=404,
upload_files=[("file", "name.doc", b"content2")],
)
self.assertEqual(response.status, "404 Not Found")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Not Found", "location": "url", "name": "complaint_id"}]
)
response = self.app.put(
"/tenders/{}/complaints/{}/documents/some_id".format(self.tender_id, self.complaint_id),
status=404,
upload_files=[("file", "name.doc", b"content2")],
)
self.assertEqual(response.status, "404 Not Found")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Not Found", "location": "url", "name": "document_id"}]
)
def create_tender_complaint_document(self):
response = self.app.post(
"/tenders/{}/complaints/{}/documents?acc_token={}".format(self.tender_id, self.complaint_id, self.tender_token),
upload_files=[("file", "name.doc", b"content")],
status=403,
)
self.assertEqual(response.status, "403 Forbidden")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(
response.json["errors"][0]["description"], "Can't add document in current (draft) complaint status"
)
response = self.app.post(
"/tenders/{}/complaints/{}/documents?acc_token={}".format(
self.tender_id, self.complaint_id, self.complaint_owner_token
),
upload_files=[("file", "name.doc", b"content")],
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
doc_id = response.json["data"]["id"]
self.assertIn(doc_id, response.headers["Location"])
self.assertEqual("name.doc", response.json["data"]["title"])
key = response.json["data"]["url"].split("?")[-1]
response = self.app.get("/tenders/{}/complaints/{}/documents".format(self.tender_id, self.complaint_id))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(doc_id, response.json["data"][0]["id"])
self.assertEqual("name.doc", response.json["data"][0]["title"])
response = self.app.get("/tenders/{}/complaints/{}/documents?all=true".format(self.tender_id, self.complaint_id))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(doc_id, response.json["data"][0]["id"])
self.assertEqual("name.doc", response.json["data"][0]["title"])
response = self.app.get(
"/tenders/{}/complaints/{}/documents/{}?download=some_id".format(self.tender_id, self.complaint_id, doc_id),
status=404,
)
self.assertEqual(response.status, "404 Not Found")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Not Found", "location": "url", "name": "download"}]
)
response = self.app.get(
"/tenders/{}/complaints/{}/documents/{}?{}".format(self.tender_id, self.complaint_id, doc_id, key)
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/msword")
self.assertEqual(response.content_length, 7)
self.assertEqual(response.body, b"content")
response = self.app.get("/tenders/{}/complaints/{}/documents/{}".format(self.tender_id, self.complaint_id, doc_id))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(doc_id, response.json["data"]["id"])
self.assertEqual("name.doc", response.json["data"]["title"])
self.set_status("complete")
response = self.app.post(
"/tenders/{}/complaints/{}/documents?acc_token={}".format(
self.tender_id, self.complaint_id, self.complaint_owner_token
),
upload_files=[("file", "name.doc", b"content")],
status=403,
)
self.assertEqual(response.status, "403 Forbidden")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(
response.json["errors"][0]["description"], "Can't add document in current (complete) tender status"
)
def put_tender_complaint_document(self):
response = self.app.post(
"/tenders/{}/complaints/{}/documents?acc_token={}".format(
self.tender_id, self.complaint_id, self.complaint_owner_token
),
upload_files=[("file", "name.doc", b"content")],
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
doc_id = response.json["data"]["id"]
self.assertIn(doc_id, response.headers["Location"])
response = self.app.put(
"/tenders/{}/complaints/{}/documents/{}?acc_token={}".format(
self.tender_id, self.complaint_id, doc_id, self.complaint_owner_token
),
status=404,
upload_files=[("invalid_name", "name.doc", b"content")],
)
self.assertEqual(response.status, "404 Not Found")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(response.json["errors"], [{"description": "Not Found", "location": "body", "name": "file"}])
response = self.app.put(
"/tenders/{}/complaints/{}/documents/{}?acc_token={}".format(
self.tender_id, self.complaint_id, doc_id, self.tender_token
),
upload_files=[("file", "name.doc", b"content2")],
status=403,
)
self.assertEqual(response.status, "403 Forbidden")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["errors"][0]["description"], "Can update document only author")
response = self.app.put(
"/tenders/{}/complaints/{}/documents/{}?acc_token={}".format(
self.tender_id, self.complaint_id, doc_id, self.complaint_owner_token
),
upload_files=[("file", "name.doc", b"content2")],
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(doc_id, response.json["data"]["id"])
key = response.json["data"]["url"].split("?")[-1]
response = self.app.get(
"/tenders/{}/complaints/{}/documents/{}?{}".format(self.tender_id, self.complaint_id, doc_id, key)
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/msword")
self.assertEqual(response.content_length, 8)
self.assertEqual(response.body, b"content2")
response = self.app.get("/tenders/{}/complaints/{}/documents/{}".format(self.tender_id, self.complaint_id, doc_id))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(doc_id, response.json["data"]["id"])
self.assertEqual("name.doc", response.json["data"]["title"])
response = self.app.put(
"/tenders/{}/complaints/{}/documents/{}?acc_token={}".format(
self.tender_id, self.complaint_id, doc_id, self.complaint_owner_token
),
"content3",
content_type="application/msword",
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(doc_id, response.json["data"]["id"])
key = response.json["data"]["url"].split("?")[-1]
response = self.app.get(
"/tenders/{}/complaints/{}/documents/{}?{}".format(self.tender_id, self.complaint_id, doc_id, key)
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/msword")
self.assertEqual(response.content_length, 8)
self.assertEqual(response.body, b"content3")
response = self.app.patch_json(
"/tenders/{}/complaints/{}?acc_token={}".format(self.tender_id, self.complaint_id, self.complaint_owner_token),
{"data": {"status": "claim"}},
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.json["data"]["status"], "claim")
response = self.app.put(
"/tenders/{}/complaints/{}/documents/{}?acc_token={}".format(
self.tender_id, self.complaint_id, doc_id, self.complaint_owner_token
),
"content",
content_type="application/msword",
status=403,
)
self.assertEqual(response.status, "403 Forbidden")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(
response.json["errors"][0]["description"], "Can't update document in current (claim) complaint status"
)
self.set_status("complete")
response = self.app.put(
"/tenders/{}/complaints/{}/documents/{}?acc_token={}".format(
self.tender_id, self.complaint_id, doc_id, self.complaint_owner_token
),
upload_files=[("file", "name.doc", b"content3")],
status=403,
)
self.assertEqual(response.status, "403 Forbidden")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(
response.json["errors"][0]["description"], "Can't update document in current (complete) tender status"
)
def patch_tender_complaint_document(self):
response = self.app.post(
"/tenders/{}/complaints/{}/documents?acc_token={}".format(
self.tender_id, self.complaint_id, self.complaint_owner_token
),
upload_files=[("file", "name.doc", b"content")],
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
doc_id = response.json["data"]["id"]
self.assertIn(doc_id, response.headers["Location"])
response = self.app.patch_json(
"/tenders/{}/complaints/{}/documents/{}?acc_token={}".format(
self.tender_id, self.complaint_id, doc_id, self.tender_token
),
{"data": {"description": "document description"}},
status=403,
)
self.assertEqual(response.status, "403 Forbidden")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["errors"][0]["description"], "Can update document only author")
response = self.app.patch_json(
"/tenders/{}/complaints/{}/documents/{}?acc_token={}".format(
self.tender_id, self.complaint_id, doc_id, self.complaint_owner_token
),
{"data": {"description": "document description"}},
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(doc_id, response.json["data"]["id"])
response = self.app.get("/tenders/{}/complaints/{}/documents/{}".format(self.tender_id, self.complaint_id, doc_id))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(doc_id, response.json["data"]["id"])
self.assertEqual("document description", response.json["data"]["description"])
response = self.app.patch_json(
"/tenders/{}/complaints/{}?acc_token={}".format(self.tender_id, self.complaint_id, self.complaint_owner_token),
{"data": {"status": "claim"}},
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.json["data"]["status"], "claim")
response = self.app.patch_json(
"/tenders/{}/complaints/{}/documents/{}?acc_token={}".format(
self.tender_id, self.complaint_id, doc_id, self.complaint_owner_token
),
{"data": {"description": "document description"}},
status=403,
)
self.assertEqual(response.status, "403 Forbidden")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(
response.json["errors"][0]["description"], "Can't update document in current (claim) complaint status"
)
self.set_status("complete")
response = self.app.patch_json(
"/tenders/{}/complaints/{}/documents/{}?acc_token={}".format(
self.tender_id, self.complaint_id, doc_id, self.complaint_owner_token
),
{"data": {"description": "document description"}},
status=403,
)
self.assertEqual(response.status, "403 Forbidden")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(
response.json["errors"][0]["description"], "Can't update document in current (complete) tender status"
)
| 42.941236 | 120 | 0.647595 |
79432c1b4c88335d74ba18fb0e5549d2d7b2d29e | 409 | py | Python | reo/migrations/0095_chpmodel_cooling_thermal_factor.py | akuam1/REopt_Lite_API | fb5a88ee52351b725fda5c15712b617f6e97ddca | [
"BSD-3-Clause"
] | 41 | 2020-02-21T08:25:17.000Z | 2022-01-14T23:06:42.000Z | reo/migrations/0095_chpmodel_cooling_thermal_factor.py | akuam1/REopt_Lite_API | fb5a88ee52351b725fda5c15712b617f6e97ddca | [
"BSD-3-Clause"
] | 167 | 2020-02-17T17:26:47.000Z | 2022-01-20T20:36:54.000Z | reo/migrations/0095_chpmodel_cooling_thermal_factor.py | akuam1/REopt_Lite_API | fb5a88ee52351b725fda5c15712b617f6e97ddca | [
"BSD-3-Clause"
] | 31 | 2020-02-20T00:22:51.000Z | 2021-12-10T05:48:08.000Z | # Generated by Django 2.2.13 on 2021-01-08 02:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reo', '0094_merge_20210105_1749'),
]
operations = [
migrations.AddField(
model_name='chpmodel',
name='cooling_thermal_factor',
field=models.FloatField(blank=True, null=True),
),
]
| 21.526316 | 59 | 0.613692 |
79432c94940c0d15b3bda25427f3464041fa2048 | 1,960 | py | Python | dump_match/sun3d.py | hoverinc/OANet | 51d71ff3f57161e912ec72420cd91cf7db64ab74 | [
"MIT"
] | 209 | 2019-08-13T23:53:03.000Z | 2022-03-24T11:33:07.000Z | dump_match/sun3d.py | hoverinc/OANet | 51d71ff3f57161e912ec72420cd91cf7db64ab74 | [
"MIT"
] | 273 | 2021-01-30T16:45:26.000Z | 2022-03-16T15:02:33.000Z | dump_match/sun3d.py | hoverinc/OANet | 51d71ff3f57161e912ec72420cd91cf7db64ab74 | [
"MIT"
] | 45 | 2019-08-16T08:27:17.000Z | 2022-02-05T00:50:35.000Z | import argparse
from dataset import Dataset
def str2bool(v):
return v.lower() in ("true", "1")
# Parse command line arguments.
parser = argparse.ArgumentParser(description='extract sift.')
parser.add_argument('--raw_data_path', type=str, default='../raw_data/',
help='raw data path. default:../raw_data/')
parser.add_argument('--dump_dir', type=str, default='../data_dump/',
help='data dump path. default:../data_dump')
parser.add_argument('--desc_name', type=str, default='sift-2000',
help='prefix of desc filename, default:sift-2000')
parser.add_argument('--vis_th', type=float, default=0.35,
help='visibility threshold')
if __name__ == "__main__":
config = parser.parse_args()
test_seqs = ['te-brown1/', 'te-brown2/', 'te-brown3/', 'te-brown4/', 'te-brown5/', 'te-hotel1/', \
'te-harvard1/', 'te-harvard2/', 'te-harvard3/', 'te-harvard4/', \
'te-mit1/', 'te-mit2/', 'te-mit3/', 'te-mit4/', 'te-mit5/']
sun3d_te = Dataset(config.raw_data_path+'sun3d_test/', config.dump_dir, 'sun3d-'+config.desc_name+'-test.hdf5', \
test_seqs, 'test', config.desc_name, \
config.vis_th, 1000, config.raw_data_path+'pairs/')
# uncomment these lines if you want generate traning data for SUN3D.
'''
with open('sun3d_train.txt','r') as ofp:
train_seqs = ofp.read().split('\n')
if len(train_seqs[-1]) == 0:
del train_seqs[-1]
train_seqs = [seq.replace('/','-')[:-1] for seq in train_seqs]
print('train seq len '+str(len(train_seqs)))
sun3d_tr_va = Dataset(config.raw_data_path+'sun3d_train/', config.dump_dir, 'sun3d-'+config.desc_name+'-val.hdf5', \
train_seqs, 'val', config.desc_name, \
config.vis_th, 100, None)
sun3d_tr_tr = Dataset(config.raw_data_path+'sun3d_train/', config.dump_dir, 'sun3d-'+config.desc_name+'-train.hdf5', \
train_seqs, 'train', config.desc_name, \
config.vis_th, 10000, None)
'''
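# A minimal invocation sketch, kept as a comment only (paths are illustrative and
# assume the SUN3D test archives are already unpacked under ../raw_data/):
#   python sun3d.py --raw_data_path=../raw_data/ --dump_dir=../data_dump/ --desc_name=sift-2000 --vis_th=0.35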
| 46.666667 | 122 | 0.646429 |
79432c951b4fdcc10073929821c93bbe684f4f12 | 10,111 | py | Python | config_generator.py | kerkkoh/dayzconfiggen | f2519ce033c85f634e8622c10de3971ce83fb2f9 | [
"MIT"
] | null | null | null | config_generator.py | kerkkoh/dayzconfiggen | f2519ce033c85f634e8622c10de3971ce83fb2f9 | [
"MIT"
] | null | null | null | config_generator.py | kerkkoh/dayzconfiggen | f2519ce033c85f634e8622c10de3971ce83fb2f9 | [
"MIT"
] | null | null | null | #####################################################
### Lazy DayZ config generator 1.0.0 - By Kerkkoh ###
#####################################################
### Prerequisites: - Python >3.0 ###
### License: MIT License ###
### Use at your own discretion! ###
#####################################################
#
# Use "py config_generator.py --help" to see details on usage!
#
import os
import sys
from datetime import datetime
args = sys.argv
header = '''###################################################
## Lazy DayZ config generator 1.0.0 - By Kerkkoh ##
## Prerequisites: - Python >3.0 ##
## License: MIT License ##
## Use at your own discretion! ##
###################################################'''
if '--help' in args and len(args) == 2:
print(header + '''
USAGE:
py config_generator.py [FLAGS] [OPTIONS] [pdrive] [folder]
FLAGS:
-d, --debug Activate debug mode
-o, --override Override existing config.cpp if any present
                            If this flag isn't present, the config is saved as config.gen.cpp, adding
                            more and more .gen segments to the name if previously generated files exist.
-r, --recursive Run for each subfolder and produce a config at EACH subfolder's root (i.e. MANY configs)
-s, --single Generate a SINGLE config file at the root containing entries from all SUBFOLDERS
-h, --help Prints this lovely message...
    -p, --purge             Removes any config.gen.cpp, config.gen.gen.cpp and so on...
OPTIONS:
+c, ++className=<value> Classname used in the cfgPatches entry, defaults to generated_<addonName>
+p, ++prefix=<value> Prefix in cfgVehicles entries, underscore is applied to the end automatically, defaults to Land
+b, ++baseClass=<value> Class that all objects will inherit, defaults to HouseNoDestruct
+r, ++requiredAddons=<requiredAddons> List in format "Addon1","Addon2",...,"AddonX" that is input as requiredAddons, defaults to "DZ_Data"
ARGS:
<pdrive> The prefix of the <folder> path, i.e. the P-drive, which get cut out of the <folder> path to form
a path for the model in the class e.g. P:\\mod\\model.p3d gets turned to mod\\model.p3d -- defaults to P:\\
<folder> Folder to process
EXAMPLE:
$ py config_generator.py P:\\ P:\\DZ\\structures\\roads\\Tunnels
Outputs
class CfgPatches
{
class generated_Tunnels
{
units[] = { "Land_Tunnel_Biathlon_Left", ... };
weapons[] = {};
requiredVersion = 0.1;
requiredAddons[] = { "DZ_Data" };
};
};
class CfgVehicles
{
class HouseNoDestruct;
class Land_Tunnel_Biathlon_Left: HouseNoDestruct
{
scope = 2;
displayName = "Land_Tunnel_Biathlon_Left";
descriptionShort = "Autogenerated class Land_Tunnel_Biathlon_Left";
model = "DZ\\structures\\roads\\Tunnels\\Tunnel_Biathlon_Left.p3d";
};
...
};
NOTE!
Python filesystem might not recognize the P-drive as an actual drive, so the workaround is to use your P-drive location in the paths instead.
So if your P-drive is mounted to C:\\dayzstuff, instead of
$ py config_generator.py P:\\ P:\\DZ\\structures\\roads\\Tunnels
you should run
$ py config_generator.py C:\\dayzstuff C:\\dayzstuff\\DZ\\structures\\roads\\Tunnels
''')
quit()
if len(args) < 3:
print(header + '''
USAGE:
py config_generator.py [FLAGS] [OPTIONS] [pdrive] [folder]
For more information try --help''')
quit()
# Flags
DEBUG = False
OVERRIDE_CONFIG = False
RECURSIVE = False
SINGLE = False
PURGE = False
# Options
CFGPATCHES_CLASSNAME = ''
CLASSNAME_PREFIX = 'Land'
BASECLASS = 'HouseNoDestruct'
REQUIRED_ADDONS = '"DZ_Data"'
# Args
PDRIVE_ARG = args[-2]
FOLDER_ARG = args[-1]
def log(what):
if DEBUG:
print(what)
def error(what):
if DEBUG:
raise Exception(what)
else:
print('!! ERROR !!')
print(what)
print('Can\'t run script further...')
print('!! ERROR !!')
for arg in args:
if arg[0] == '-' and arg[1] != '-':
flag = arg[1:].lower()
if flag == 'd':
DEBUG = True
if flag == 'o':
OVERRIDE_CONFIG = True
if flag == 'r':
RECURSIVE = True
if flag == 's':
SINGLE = True
if flag == 'p':
PURGE = True
continue
if arg[0] == '-' and arg[1] == '-':
flag = arg[2:].lower()
if flag == 'debug':
DEBUG = True
if flag == 'override':
OVERRIDE_CONFIG = True
if flag == 'recursive':
RECURSIVE = True
if flag == 'single':
SINGLE = True
if flag == 'purge':
PURGE = True
continue
if arg[0] == '+' and arg[1] != '+':
opt = arg[1].lower()
val = arg.split('=')[1]
if opt == 'c':
CFGPATCHES_CLASSNAME = val
if opt == 'p':
CLASSNAME_PREFIX = val
if opt == 'b':
BASECLASS = val
if opt == 'r':
REQUIRED_ADDONS = val
continue
if arg[0] == '+' and arg[1] == '+':
opt = arg[2:].lower()
val = arg.split('=')[1]
if opt == 'classname':
CFGPATCHES_CLASSNAME = val
if opt == 'prefix':
CLASSNAME_PREFIX = val
if opt == 'baseclass':
BASECLASS = val
if opt == 'requiredaddons':
REQUIRED_ADDONS = val
if len(CFGPATCHES_CLASSNAME) == 0:
CFGPATCHES_CLASSNAME = 'generated_{}'.format(FOLDER_ARG.split('\\')[-1])
log('\\___ Flags: DEBUG = {}, OVERRIDE_CONFIG = {}, RECURSIVE = {}, SINGLE = {}'.format(DEBUG, OVERRIDE_CONFIG, RECURSIVE, SINGLE))
log('\\___ Options: CFGPATCHES_CLASSNAME = {}, CLASSNAME_PREFIX = {}, BASECLASS = {}, REQUIRED_ADDONS = {}'.format(CFGPATCHES_CLASSNAME, CLASSNAME_PREFIX, BASECLASS, REQUIRED_ADDONS))
tag = '''//////////////////////////////////////////////////////////////
// DayZ Config Generator: {0}
// Produced from Kerkkoh's DayZ Config Generator version 1.0.0
// https://github.com/kerkkoh/dayzconfiggen
// 'now' is {1}
//////////////////////////////////////////////////////////////
'''.format(FOLDER_ARG.split(PDRIVE_ARG)[-1], datetime.now().strftime("%m/%d/%Y, %H:%M:%S"))
cfgPatchesTemplate = tag + '''class CfgPatches
{{
class {0}
{{
units[] = {{ {1} }};
weapons[] = {{}};
requiredVersion = 0.1;
requiredAddons[] = {{ {2} }};
}};
}};
class CfgVehicles
{{
class {3};
'''
cfgVehiclesTemplate = ''' class {0}: {1}
{{
scope = 2;
displayName = "{0}";
descriptionShort = "Autogenerated class {0}";
model = "{2}";
}};
'''
def safeSave(filepath, extension, contents):
file = '{}.{}'.format(filepath, extension)
log('Trying safely save into ' + file)
if os.path.isfile(file):
safeSave(filepath + '.gen', extension, contents)
else:
print('Saved config into ' + file)
f = open(file, "w")
f.write(contents)
f.close()
def purge(filepath, extension):
file = '{}.{}'.format(filepath, extension)
log('Trying to purge ' + file)
if os.path.isfile(file):
os.remove(file)
purge(filepath + '.gen', extension)
def generator(pdrive, folder, isroot):
if not os.path.isdir(folder):
return error('The folder (last parameter) "{}" is not a folder'.format(folder))
log('-->Folder {} is valid.'.format(folder))
if not os.path.isdir(pdrive):
        return error('The P-drive (second last parameter) "{}" is not a folder'.format(pdrive))
if pdrive not in folder:
return error('P-drive path "{}" is not contained within the folder path "{}" -> can not generate engine compatible paths for models'.format(pdrive, folder))
log('-->P-drive path {} is valid.'.format(pdrive))
if (folder[-1] != '\\'):
folder = folder + '\\'
if (pdrive[-1] != '\\'):
pdrive = pdrive + '\\'
def fullPath(f):
return os.path.join(folder, f)
subdirs = [fullPath(f) for f in os.listdir(folder) if os.path.isdir(fullPath(f)) and f != 'source']
units = ''
entries = []
if PURGE:
log('Purging...')
purge(fullPath('config.gen'), 'cpp')
if RECURSIVE and not SINGLE:
for subdir in subdirs:
generator(pdrive, subdir, False)
if SINGLE:
for subdir in subdirs:
subunits, subentries = generator(pdrive, subdir, False)
units += subunits
entries.extend(subentries)
files = [f.split(".")[0] for f in os.listdir(folder) if os.path.isfile(fullPath(f)) and f.split('.')[-1] == 'p3d']
files.sort()
for p3d in files:
classname = '{}_{}'.format(CLASSNAME_PREFIX, p3d)
units += '"{}", '.format(classname)
engineFolder = folder.split(pdrive)[-1]
p3dpath = '{}{}.p3d'.format(engineFolder, p3d)
log('---> Generating class {} for {}'.format(classname, p3dpath))
entries.append(cfgVehiclesTemplate.format(classname, BASECLASS, p3dpath))
if SINGLE and not isroot:
return [units, entries]
if len(units) > 0:
units = units[:-2]
else:
return
config = cfgPatchesTemplate.format(CFGPATCHES_CLASSNAME, units, REQUIRED_ADDONS, BASECLASS)
for e in entries:
config += e
config += '};'
configPath = fullPath('config.cpp')
if os.path.isfile(configPath):
if OVERRIDE_CONFIG:
print('Removing ' + configPath)
os.remove(configPath)
else:
safeSave(fullPath('config'), 'cpp', config)
else:
print('Creating a new config ' + configPath)
f = open(configPath, 'x')
f.write(config)
f.close()
print(header)
print('Starting generator...')
generator(PDRIVE_ARG, FOLDER_ARG, True)
print('Done!')
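# Example invocation, kept as a comment (paths are illustrative; see the --help
# text above for the full flag/option reference):
#   py config_generator.py -o +p=Land P:\ P:\DZ\structures\roads\Tunnels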
| 32.934853 | 183 | 0.55138 |
79432ded82cb8378f0689611a6d609c0012bccba | 7,824 | py | Python | tensorflow_addons/optimizers/moving_average.py | leondgarse/addons | 6c512e1ec49ae14ca61f1bbf0fd3403204ba21e2 | [
"Apache-2.0"
] | 1,560 | 2018-11-26T23:57:34.000Z | 2022-03-27T10:37:34.000Z | tensorflow_addons/optimizers/moving_average.py | leondgarse/addons | 6c512e1ec49ae14ca61f1bbf0fd3403204ba21e2 | [
"Apache-2.0"
] | 2,067 | 2018-11-28T04:40:23.000Z | 2022-03-31T11:36:50.000Z | tensorflow_addons/optimizers/moving_average.py | leondgarse/addons | 6c512e1ec49ae14ca61f1bbf0fd3403204ba21e2 | [
"Apache-2.0"
] | 679 | 2018-11-27T14:39:25.000Z | 2022-03-31T10:09:22.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorflow_addons.optimizers import AveragedOptimizerWrapper
from tensorflow_addons.utils import types
from typing import Union
from typeguard import typechecked
@tf.keras.utils.register_keras_serializable(package="Addons")
class MovingAverage(AveragedOptimizerWrapper):
"""Optimizer that computes a moving average of the variables.
Empirically it has been found that using the moving average of the trained
parameters of a deep network is better than using its trained parameters
directly. This optimizer allows you to compute this moving average and swap
the variables at save time so that any code outside of the training loop
will use by default the average values instead of the original ones.
Example of usage:
```python
opt = tf.keras.optimizers.SGD(learning_rate)
opt = tfa.optimizers.MovingAverage(opt)
```
"""
@typechecked
def __init__(
self,
optimizer: types.Optimizer,
average_decay: types.FloatTensorLike = 0.99,
num_updates: Union[None, int, tf.Variable] = None,
start_step: int = 0,
dynamic_decay: bool = False,
name: str = "MovingAverage",
**kwargs,
):
r"""Construct a new MovingAverage optimizer.
Args:
optimizer: str or `tf.keras.optimizers.Optimizer` that will be
used to compute and apply gradients.
average_decay: float. Decay to use to maintain the moving averages
of trained variables.
num_updates: Optional count of the number of updates applied to
variables.
start_step: int. What step to start the moving average.
dynamic_decay: bool. Whether to change the decay based on the number
of optimizer updates. Decay will start at 0.1 and gradually
increase up to `average_decay` after each optimizer update.
name: Optional name for the operations created when applying
gradients. Defaults to "MovingAverage".
**kwargs: keyword arguments. Allowed to be {`clipnorm`,
`clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by
norm; `clipvalue` is clip gradients by value, `decay` is
included for backward compatibility to allow time inverse
decay of learning rate. `lr` is included for backward
compatibility, recommended to use `learning_rate` instead.
"""
super().__init__(optimizer, name, **kwargs)
self._num_updates = num_updates
if self._num_updates is not None:
if isinstance(self._num_updates, tf.Variable):
tf.debugging.assert_integer(
self._num_updates,
(
'type of argument "num_updates" must be '
"int; got {} instead".format(self._num_updates.dtype)
),
)
num_updates = tf.cast(self._num_updates, tf.float32, name="num_updates")
average_decay = tf.minimum(
average_decay, (1.0 + num_updates) / (10.0 + num_updates)
)
self._set_hyper("average_decay", average_decay)
self._start_step = start_step
self._dynamic_decay = dynamic_decay
@tf.function
def _get_decay(self, step: tf.Tensor):
average_decay = self._get_hyper("average_decay", tf.dtypes.float32)
step = tf.cast(step, tf.float32)
if step < self._start_step:
return tf.constant(0.0, tf.float32)
elif self._dynamic_decay:
step_count = step - self._start_step
return tf.minimum(average_decay, (1.0 + step_count) / (10.0 + step_count))
else:
return average_decay
def _prepare_local(self, var_device, var_dtype, apply_state):
super()._prepare_local(var_device, var_dtype, apply_state)
apply_state[(var_device, var_dtype)]["tfa_ma_decay"] = self._get_decay(
self._optimizer.iterations
)
def average_op(self, var, average_var, local_apply_state):
return tf.keras.backend.moving_average_update(
average_var, var, local_apply_state["tfa_ma_decay"]
)
def get_config(self):
config = {
"average_decay": self._serialize_hyperparameter("average_decay"),
"num_updates": self._num_updates,
"start_step": self._start_step,
"dynamic_decay": self._dynamic_decay,
}
base_config = super().get_config()
return {**base_config, **config}
def _create_slots(self, var_list):
self._optimizer._create_slots(var_list=var_list)
for var in var_list:
self.add_slot(var, "average", var.read_value())
self._average_weights = [self.get_slot(var, "average") for var in var_list]
self._model_weights = var_list
def shadow_copy(self, model_weights):
"""Creates shadow variables for the given model weights."""
for var in model_weights:
self.add_slot(var, "average", initializer="zeros")
self._average_weights = [self.get_slot(var, "average") for var in model_weights]
self._model_weights = model_weights
@property
def has_shadow_copy(self):
"""Whether this optimizer has created shadow variables."""
return self._model_weights is not None
def swap_weights(self):
"""Swap the average and moving weights.
This is a convenience method to allow one to evaluate the averaged weights
at test time. Loads the weights stored in `self._average_weights` into the model,
keeping a copy of the original model weights. Swapping twice will return
the original weights.
"""
if tf.distribute.in_cross_replica_context():
strategy = tf.distribute.get_strategy()
return strategy.run(self._swap_weights, args=())
else:
raise ValueError(
"Swapping weights must occur under a " "tf.distribute.Strategy"
)
@tf.function
def _swap_weights(self):
def fn_0(a, b):
return a.assign_add(b, use_locking=self._use_locking)
def fn_1(b, a):
return b.assign(a - b, use_locking=self._use_locking)
def fn_2(a, b):
return a.assign_sub(b, use_locking=self._use_locking)
def swap(strategy, a, b):
"""Swap `a` and `b` and mirror to all devices."""
for a_element, b_element in zip(a, b):
strategy.extended.update(
a_element, fn_0, args=(b_element,)
) # a = a + b
strategy.extended.update(
b_element, fn_1, args=(a_element,)
) # b = a - b
strategy.extended.update(
a_element, fn_2, args=(b_element,)
) # a = a - b
ctx = tf.distribute.get_replica_context()
return ctx.merge_call(swap, args=(self._average_weights, self._model_weights))
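# A minimal usage sketch, kept as comments (build_model, train_ds and eval_ds are
# hypothetical helpers, not part of this module): evaluating with the averaged
# weights by swapping them in and back out. swap_weights() must be called from a
# cross-replica context, hence the strategy.scope() blocks.
#
#   strategy = tf.distribute.MirroredStrategy()
#   with strategy.scope():
#       model = build_model()
#       opt = MovingAverage(tf.keras.optimizers.SGD(0.01), average_decay=0.99)
#       opt.shadow_copy(model.weights)        # create the "average" slot variables
#   model.compile(optimizer=opt, loss="mse")
#   model.fit(train_ds)
#   with strategy.scope():
#       opt.swap_weights()                    # model now holds the averaged weights
#       model.evaluate(eval_ds)
#       opt.swap_weights()                    # restore the trained weights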
| 40.53886 | 89 | 0.627045 |
79432df34cbd7edf5ac9e30a44697c0d28de2fb5 | 3,961 | py | Python | build/lib/tec/ic/ia/pc2/test_astar.py | Fuabioo/Proyecto-Corto-2-3 | 44bdfd5f2774e2d0d8c8af79dc55dac340f6f4b0 | [
"MIT"
] | null | null | null | build/lib/tec/ic/ia/pc2/test_astar.py | Fuabioo/Proyecto-Corto-2-3 | 44bdfd5f2774e2d0d8c8af79dc55dac340f6f4b0 | [
"MIT"
] | null | null | null | build/lib/tec/ic/ia/pc2/test_astar.py | Fuabioo/Proyecto-Corto-2-3 | 44bdfd5f2774e2d0d8c8af79dc55dac340f6f4b0 | [
"MIT"
] | null | null | null | """
Testing module
"""
import pytest
from tec.ic.ia.pc2 import g08  # imported this way so the bare name g08 used below resolves
def get_rectangle_input():
"""
    Obtain a pre-defined A* test input on the "5x4.txt" board that is assured not to fail.
"""
args = g08.get_args()
args.a_estrella = True
args.vision = 2
args.zanahorias = 2
args.tablero_inicial = "5x4.txt"
result = g08.get_result(algorithm="AStar", args=args)
return result
def get_two_carrot_input():
"""
    Obtain a pre-defined A* test input requiring two carrots ("5x4.txt" board) that is assured not to fail.
"""
args = g08.get_args()
args.a_estrella = True
args.vision = 2
args.zanahorias = 2
args.tablero_inicial = "5x4.txt"
result = g08.get_result(algorithm="AStar", args=args)
return result
def get_one_carrot_input():
"""
    Obtain a pre-defined A* test input on the "4x4(1).txt" board, where fewer carrots are available than required.
"""
args = g08.get_args()
args.a_estrella = True
args.vision = 2
args.zanahorias = 2
args.tablero_inicial = "4x4(1).txt"
result = g08.get_result(algorithm="AStar", args=args)
return result
def get_25_by_25_input():
"""
    Obtain a pre-defined A* test input on the larger "25x25(16).txt" board.
"""
args = g08.get_args()
args.a_estrella = True
args.vision = 10
args.zanahorias = 10
args.tablero_inicial = "25x25(16).txt"
result = g08.get_result(algorithm="AStar", args=args)
return result
class TestCompletness(object):
""" Test object for proof that the algorithm actually CAN finish """
def test_completeness_one(self):
"""
        Two carrots are present and the bunny needs two carrots
"""
result = get_two_carrot_input()
result.run()
assert result.args.zanahorias == 0
def test_completeness_two(self):
"""
        One carrot is present but the bunny needs two
"""
result = get_one_carrot_input()
result.run()
assert result.args.zanahorias > 0
def test_completeness_three(self):
"""
        Larger 25x25 board; the bunny does not collect all the carrots it needs
"""
result = get_25_by_25_input()
result.run()
assert result.args.zanahorias > 0
class TestEnviroment(object):
""" Test object for enviromental variables """
def test_enviroment_one(self):
"""
Test for carrot not present in the enviroment
-> ValueError: carrot not present in the enviroment
"""
enviroment = {"bunny": (0, 0)}
result = get_two_carrot_input()
result.set_enviroment(enviroment)
with pytest.raises(ValueError):
result.run()
def test_enviroment_two(self):
"""
Test for bunny not present in the enviroment
-> ValueError: bunny not present in the enviroment
"""
enviroment = {"carrot": [(0, 2), (2, 1), (3, 2)]}
result = get_two_carrot_input()
result.set_enviroment(enviroment)
with pytest.raises(ValueError):
result.run()
def test_enviroment_three(self):
"""
Test for incorrect bunny
-> ValueError: bunny's value must not be None
"""
enviroment = {"bunny": 0, "carrot": [(0, 2), (2, 1), (3, 2)]}
result = get_two_carrot_input()
result.set_enviroment(enviroment)
with pytest.raises(ValueError):
result.run()
def test_enviroment_four(self):
"""
Test for incorrect carrot
-> ValueError: carrot's value must not be None
"""
enviroment = {"bunny": (0, 0), "carrot": []}
result = get_two_carrot_input()
result.set_enviroment(enviroment)
with pytest.raises(ValueError):
result.run()
def test_enviroment_five(self):
"""
Test for correct enviroment, no error shoud be encountered
"""
result = get_two_carrot_input()
try:
result.run()
except ValueError:
            pytest.fail("ValueError encountered")
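# The tests above are meant to be collected and run by pytest, e.g. (module path
# is illustrative):
#   pytest test_astar.py -k "completeness or enviroment"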
| 27.506944 | 72 | 0.609695 |
79432eb2ef4f21e777459487f94612b1f6759a8a | 1,448 | py | Python | app/geobox/process/__init__.py | omniscale/gbi-client | c8af68ede195150b2aca0516ac8e030fe4ba1f6d | [
"Apache-2.0"
] | 2 | 2018-10-24T06:32:42.000Z | 2021-01-20T02:25:05.000Z | app/geobox/process/__init__.py | omniscale/gbi-client | c8af68ede195150b2aca0516ac8e030fe4ba1f6d | [
"Apache-2.0"
] | null | null | null | app/geobox/process/__init__.py | omniscale/gbi-client | c8af68ede195150b2aca0516ac8e030fe4ba1f6d | [
"Apache-2.0"
] | 17 | 2018-10-24T06:32:45.000Z | 2022-02-09T13:10:54.000Z | # This file is part of the GBI project.
# Copyright (C) 2012 Omniscale GmbH & Co. KG <http://omniscale.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from geobox.process.vector import VectorExportProcess, VectorImportProcess
from geobox.process.raster import RasterImportProcess, RasterExportProcess
from geobox.process.replication import ReplicationProcess
from geobox.model.tasks import (
RasterImportTask,
VectorExportTask, VectorImportTask, ReplicationTask, RasterExportTask,
)
task_class_mapping = {
'vector_export': VectorExportTask,
'vector_import': VectorImportTask,
'replication': ReplicationTask,
'raster_import': RasterImportTask,
'raster_export': RasterExportTask,
}
task_process_mapping = {
'vector_export': VectorExportProcess,
'vector_import': VectorImportProcess,
'replication': ReplicationProcess,
'raster_import': RasterImportProcess,
'raster_export': RasterExportProcess,
}
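# A minimal usage sketch, kept as a comment: the two mappings are keyed by the
# same task-type strings, so a task type can be resolved to both its model class
# and its matching process class.
#
#   task_cls = task_class_mapping['vector_export']       # -> VectorExportTask
#   process_cls = task_process_mapping['vector_export']  # -> VectorExportProcess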
| 36.2 | 74 | 0.773481 |
79432f80dd4cc827a06cde717022fb448395ad01 | 1,283 | py | Python | tests/python/gaia-ui-tests/gaiatest/mixins/imagecompare.py | NickProgramm/gaia | 975a35c0f5010df341e96d6c5ec60217f5347412 | [
"Apache-2.0"
] | 3 | 2016-08-17T08:52:51.000Z | 2020-03-29T04:56:45.000Z | tests/python/gaia-ui-tests/gaiatest/mixins/imagecompare.py | NickProgramm/gaia | 975a35c0f5010df341e96d6c5ec60217f5347412 | [
"Apache-2.0"
] | 1 | 2017-02-21T21:36:12.000Z | 2017-02-21T21:36:30.000Z | tests/python/gaia-ui-tests/gaiatest/mixins/imagecompare.py | NickProgramm/gaia | 975a35c0f5010df341e96d6c5ec60217f5347412 | [
"Apache-2.0"
] | 1 | 2021-11-18T21:21:19.000Z | 2021-11-18T21:21:19.000Z | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
class GaiaImageCompareArguments(object):
name = 'Gaia Image Compare'
args = [
[['--store-reference-image'],
{'action': 'store_true',
'default': False,
'help': 'Store the captured screenshots as reference images',
}],
[['--fuzz-factor'],
{'type': int,
'default': 15,
          'metavar': 'int',
'help': 'fuzz value supplied to ImageMagick call, in percentage. Default value is %(default)s percent.',
}],
[['--reference-path'],
{'default': 'reference_images',
'help': 'Location of reference images, relative to the current location, Default folder is %(default)s',
}],
[['--screenshots-path'],
{'default': 'screenshots',
'help': 'Path of screenshot images, relative to the current location, Default folder is %(default)s',
}]
]
# verify_usage
def verify_usage_handler(self, args):
if not 0 <= args.fuzz_factor <= 100:
raise ValueError('fuzz_factor must be between 0 and 100')
| 37.735294 | 114 | 0.587685 |
794330e8319f2b979a19cd8b7e79bdc9f111d6b9 | 12,711 | py | Python | src/fuzzylogic/functions.py | amogorkon/fuzzy | 07779ea36c979224dbb6ebd87d19b57fd8e3ed9b | [
"MIT"
] | null | null | null | src/fuzzylogic/functions.py | amogorkon/fuzzy | 07779ea36c979224dbb6ebd87d19b57fd8e3ed9b | [
"MIT"
] | null | null | null | src/fuzzylogic/functions.py | amogorkon/fuzzy | 07779ea36c979224dbb6ebd87d19b57fd8e3ed9b | [
"MIT"
] | null | null | null |
"""
General-purpose functions that map R -> [0,1].
These functions work as closures.
The inner function uses the variables of the outer function.
These functions work in two steps: prime and call.
In the first step the function is constructed, initialized and
constants pre-evaluated. In the second step the actual value
is passed into the function, using the arguments of the first step.
Definitions
-----------
These functions are used to determine the *membership* of a value x in a fuzzy-
set. Thus, the 'height' is the variable 'm' in general.
In a normal set there is at least one m with m == 1. This is the default.
In a non-normal set, the global maximum and minimum is skewed.
The following definitions are for normal sets.
The intervals with non-zero m are called 'support', short s_m
The intervals with m == 1 are called 'core', short c_m
The intervals with max(m) are called "height"
The intervals m != 1 and m != 0 are called 'boundary'.
The intervals with m == 0 are called 'unsupported', short no_m
In a fuzzy set with one and only one m == 1, this element is called 'prototype'.
"""
from math import exp, isinf, isnan, log
#####################
# SPECIAL FUNCTIONS #
#####################
def inv(g):
"""Invert the given function within the unit-interval.
For sets, the ~ operator uses this. It is equivalent to the TRUTH value of FALSE.
"""
def f(x):
return 1 - g(x)
return f
def noop():
"""Do nothing and return the value as is.
Useful for testing.
"""
def f(x):
return x
return f
def constant(c):
"""Return always the same value, no matter the input.
Useful for testing.
>>> f = constant(1)
>>> f(0)
1
"""
def f(_):
return c
return f
def alpha(*, floor=0, ceiling=1, func,
floor_clip=None, ceiling_clip=None):
"""Clip a function's values.
This is used to either cut off the upper or lower part of a graph.
Actually, this is more like a hedge but doesn't make sense for sets.
"""
assert floor <= ceiling
assert 0 <= floor
assert ceiling <= 1
floor_clip = floor if floor_clip is None else floor_clip
ceiling_clip = ceiling if ceiling_clip is None else ceiling_clip
#assert 0 <= floor_clip <= ceiling_clip <= 1, "%s <= %s"%(floor_clip, ceiling_clip)
def f(x):
m = func(x)
if m >= ceiling:
return ceiling_clip
elif m <= floor:
return floor_clip
else:
return m
return f
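# Added usage sketch for alpha (illustrative only): clipping a linear membership
# function to the band [0.2, 0.8]; values outside the band map to the clip values.
#     f = alpha(floor=0.2, ceiling=0.8, func=R(0, 10))
#     f(1) -> 0.2 (clipped up), f(5) -> 0.5, f(9) -> 0.8 (clipped down)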
def normalize(height, func):
"""Map [0,1] to [0,1] so that max(array) == 1."""
assert 0 < height <= 1
def f(x):
return func(x) / height
return f
def moderate(func):
"""Map [0,1] -> [0,1] with bias towards 0.5.
For instance this is needed to dampen extremes.
"""
def f(x):
return 1/2 + 4 * (func(x) - 1/2)**3
return f
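# Added illustration: moderate() keeps 0, 0.5 and 1 fixed and pulls everything else
# towards 0.5, e.g. moderate(constant(0.75))(0) == 0.5625 and
# moderate(constant(0.25))(0) == 0.4375.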
########################
# MEMBERSHIP FUNCTIONS #
########################
def singleton(p, *, no_m=0, c_m=1):
"""A single spike.
>>> f = singleton(2)
>>> f(1)
0
>>> f(2)
1
"""
assert 0 <= no_m < c_m <= 1
def f(x):
return c_m if x == p else no_m
return f
def linear(m:float=0, b:float=0) -> callable:
"""A textbook linear function with y-axis section and gradient.
f(x) = m*x + b
BUT CLIPPED.
>>> f = linear(1, -1)
>>> f(-2) # should be -3 but clipped
0
>>> f(0) # should be -1 but clipped
0
>>> f(1)
0
>>> f(1.5)
0.5
>>> f(2)
1
>>> f(3) # should be 2 but clipped
1
"""
def f(x) -> float:
y = m * x + b
if y <= 0:
return 0
elif y >= 1:
return 1
else:
return y
return f
def bounded_linear(low, high, *, c_m=1, no_m=0, inverse=False):
"""Variant of the linear function with gradient being determined by bounds.
The bounds determine minimum and maximum value-mappings,
but also the gradient. As [0, 1] must be the bounds for y-values,
left and right bounds specify 2 points on the graph, for which the formula
    f(x) = y = (y2 - y1) / (x2 - x1) * (x - x1) + y1
             = (y2 - y1) / (x2 - x1) * (x - x2) + y2
    i.e. y = (right_y - left_y) / (right - left) * (x - left) + left_y
works.
>>> f = bounded_linear(2, 3)
>>> f(1)
0.0
>>> f(2)
0.0
>>> f(2.5)
0.5
>>> f(3)
1.0
>>> f(4)
1.0
"""
assert low < high, "low must be less than high"
assert c_m > no_m, "core_m must be greater than unsupported_m"
if inverse:
c_m, no_m = no_m, c_m
gradient = (c_m - no_m) / (high - low)
# special cases found by hypothesis
def g_0(_):
return (c_m + no_m) / 2
if gradient == 0:
return g_0
def g_inf(x):
        asymptote = (high + low) / 2
        if x < asymptote:
            return no_m
        elif x > asymptote:
return c_m
else:
return (c_m + no_m) / 2
if isinf(gradient):
return g_inf
def f(x):
y = gradient * (x - low) + no_m
if y < 0:
return 0.
if y > 1:
return 1.
return y
return f
def R(low, high):
"""Simple alternative for bounded_linear().
    THIS FUNCTION CAN ONLY HAVE A POSITIVE SLOPE -
USE THE S() FUNCTION FOR NEGATIVE SLOPE.
"""
assert low < high, f"{low} >? {high}"
def f(x):
if x < low or isinf(high - low):
return 0
elif low <= x <= high:
return (x - low) / (high - low)
else:
return 1
return f
def S(low, high):
"""Simple alternative for bounded_linear.
    THIS FUNCTION CAN ONLY HAVE A NEGATIVE SLOPE -
USE THE R() FUNCTION FOR POSITIVE SLOPE.
"""
assert low < high, f"{low}, {high}"
def f(x):
if x <= low:
return 1
elif low < x < high:
# factorized to avoid nan
return high / (high - low) - x / (high - low)
else:
return 0
return f
def rectangular(low:float, high:float, *, c_m:float=1, no_m:float=0) -> callable:
"""Basic rectangular function that returns the core_y for the core else 0.
______
| |
____| |___
"""
assert low < high, f'{low}, {high}'
def f(x:float) -> float:
return no_m if x < low or high < x else c_m
return f
def triangular(low, high, *, c=None, c_m=1, no_m=0):
r"""Basic triangular norm as combination of two linear functions.
/\
____/ \___
"""
assert low < high, 'low must be less than high.'
assert no_m < c_m
c = c if c is not None else (low + high) / 2.
assert low < c < high, "peak must be inbetween"
left_slope = bounded_linear(low, c, no_m=0, c_m=c_m)
right_slope = inv(bounded_linear(c, high, no_m=0, c_m=c_m))
def f(x):
return left_slope(x) if x <= c else right_slope(x)
return f
def trapezoid(low, c_low, c_high, high, *, c_m=1, no_m=0):
r"""Combination of rectangular and triangular, for convenience.
____
/ \
____/ \___
"""
assert low < c_low <= c_high < high
assert 0 <= no_m < c_m <= 1
left_slope = bounded_linear(low, c_low, c_m=c_m, no_m=no_m)
right_slope = bounded_linear(c_high, high, c_m=c_m, no_m=no_m,
inverse=True)
def f(x):
if x < low or high < x:
return no_m
elif x < c_low:
return left_slope(x)
elif x > c_high:
return right_slope(x)
else:
return c_m
return f
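# Added usage sketch: a trapezoid with support [0, 10] and core [2, 8]:
#     f = trapezoid(0, 2, 8, 10)
#     f(1) == 0.5, f(5) == 1, f(9) == 0.5, f(11) == 0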
def sigmoid(L, k, x0):
"""Special logistic function.
http://en.wikipedia.org/wiki/Logistic_function
f(x) = L / (1 + e^(-k*(x-x0)))
with
x0 = x-value of the midpoint
L = the curve's maximum value
k = steepness
"""
# need to be really careful here, otherwise we end up in nanland
assert 0 < L <= 1, 'L invalid.'
def f(x):
if isnan(k*x):
# e^(0*inf) = 1
o = 1.
else:
try:
o = exp(-k*(x - x0))
except OverflowError:
o = float("inf")
return L / (1 + o)
return f
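# Added illustration: with L=1, k=1, x0=0 this is the standard logistic curve,
# so sigmoid(1, 1, 0)(0) == 0.5 and sigmoid(1, 1, 0)(x) approaches 1 for large x.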
def bounded_sigmoid(low, high, inverse=False):
"""
Calculate a weight based on the sigmoid function.
Specify the lower limit where f(x) = 0.1 and the
upper with f(x) = 0.9 and calculate the steepness and elasticity
based on these. We don't need the general logistic function as we
operate on [0,1].
core idea:
f(x) = 1. / (1. + exp(x * (4. * log(3)) / (low - high)) *
9 * exp(low * -(4. * log(3)) / (low - high)))
How I got this? IIRC I was playing around with linear equations and
boundary conditions of sigmoid funcs on wolframalpha..
previously factored to:
k = -(4. * log(3)) / (low - high)
o = 9 * exp(low * k)
return 1 / (1 + exp(-k * x) * o)
vars
----
low: x-value with f(x) = 0.1
for x < low: m -> 0
high: x-value with f(x) = 0.9
for x > high: m -> 1
>>> f = bounded_sigmoid(0, 1)
>>> f(0)
0.1
>>> round(f(1), 2)
0.9
>>> round(f(100000), 2)
1.0
>>> round(f(-100000), 2)
0.0
"""
assert low < high, 'low must be less than high'
if inverse:
low, high = high, low
k = (4. * log(3)) / (low - high)
try:
# if high - low underflows to 0..
if isinf(k):
p = 0.
# just in case k -> 0 and low -> inf
elif isnan(-k * low):
p = 1.
else:
p = exp(-k * low)
except OverflowError:
p = float("inf")
def f(x):
try:
# e^(0*inf) = 1 for both -inf and +inf
q = 1. if (isinf(k) and x == 0) or (k == 0 and isinf(x)) else exp(x * k)
except OverflowError:
q = float("inf")
# e^(inf)*e^(-inf) = 1
r = p * q
if isnan(r):
r = 1
return 1 / (1 + 9 * r)
return f
def bounded_exponential(k=0.1, limit=1):
"""Function that goes through the origin and approaches a limit.
k determines the steepness. The function defined for [0, +inf).
Useful for things that can't be below 0 but may not have a limit like temperature
or time, so values are always defined.
f(x)=limit-limit/e^(k*x)
Again: This function assumes x >= 0, there are no checks for this assumption!
"""
assert limit > 0
assert k > 0
def f(x):
try:
return limit - limit/exp(k*x)
except OverflowError:
return limit
return f
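# Added illustration: with the defaults k=0.1 and limit=1 the curve starts at the
# origin and saturates towards the limit, e.g.
#     g = bounded_exponential()
#     g(0) == 0.0 and round(g(50), 2) == 0.99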
def simple_sigmoid(k=0.229756):
"""Sigmoid variant with only one parameter (steepness).
The midpoint is 0.
    The slope is positive for positive k and negative for negative k.
f(x) is within [0,1] for any real k and x.
>>> f = simple_sigmoid()
>>> round(f(-1000), 2)
0.0
>>> f(0)
0.5
>>> round(f(1000), 2)
1.0
>>> round(f(-20), 2)
0.01
>>> round(f(20), 2)
0.99
"""
def f(x):
if (isinf(x) and k == 0):
return 1/2
try:
return 1 / (1 + exp(x * -k))
except OverflowError:
return 0.
return f
def triangular_sigmoid(low, high, c=None):
"""Version of triangular using sigmoids instead of linear.
THIS FUNCTION PEAKS AT 0.9
>>> g = triangular_sigmoid(2, 4)
>>> g(2)
0.1
>>> round(g(3), 2)
0.9
"""
assert low < high, "low must be less than high"
c = c if c is not None else (low + high) / 2.
assert low < c < high, "c must be inbetween"
left_slope = bounded_sigmoid(low, c)
right_slope = inv(bounded_sigmoid(c, high))
def f(x):
return left_slope(x) if x <= c else right_slope(x)
return f
def gauss(c, b, *, c_m=1):
"""Defined by ae^(-b(x-x0)^2), a gaussian distribution.
Basically a triangular sigmoid function, it comes close to human perception.
vars
----
c_m (a)
defines the maximum y-value of the graph
b
defines the steepness
c (x0)
defines the symmetry center/peak of the graph
"""
assert 0 < c_m <= 1
assert 0 < b, "b must be greater than 0"
def f(x):
try:
o = (x - c)**2
except OverflowError:
return 0
return c_m * exp(-b * o)
return f
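# Added usage sketch: a bell curve centred on 5 with steepness 0.1:
#     g = gauss(5, 0.1)
#     g(5) == 1.0 and round(g(3), 2) == round(g(7), 2) == 0.67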
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24.350575 | 87 | 0.533868 |
7943313b2002f12f4a5cdc2b678f56c9e1ac8252 | 6,539 | py | Python | rackio/api/log_hook.py | crivero7/rackio-framework | d3362041b1fc4c3af7eb51ac06b1f0f1b5aa497c | [
"MIT"
] | null | null | null | rackio/api/log_hook.py | crivero7/rackio-framework | d3362041b1fc4c3af7eb51ac06b1f0f1b5aa497c | [
"MIT"
] | null | null | null | rackio/api/log_hook.py | crivero7/rackio-framework | d3362041b1fc4c3af7eb51ac06b1f0f1b5aa497c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""rackio/api/log_hook.py
This module implements a hook class caller for decorating RackioResources.
"""
import logging
from .hook import rackio_hook
from datetime import datetime
from rackio.events import Event
from rackio.logger import LoggerEngine
class Log(object):
def __call__(self, request, response, resource, params):
username = request.media.get('username')
logging.info("{}: made {} request".format(username, resource.__class__.__name__))
log = rackio_hook.before(Log())
class NotifyAlarmOperation(object):
def __init__(self):
self._logger = LoggerEngine()
def get_app(self):
from ..core import Rackio
return Rackio()
def __call__(self, request, response, resource, params):
username = request.media.get('username')
action = request.media.get('action')
if 'alarm_name' in params:
alarm_name = params['alarm_name']
event_values = dict(user='{}'.format(username),
message='Alarm Operation',
description='Operation {} in {}'.format(action, alarm_name),
classification='{}'.format('user'),
priority='{}'.format(3),
date_time=datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
else:
event_values = dict(user='{}'.format(username),
message='Event',
description='Operation: {}'.format(action),
classification='{}'.format('user'),
priority='{}'.format(3),
date_time=datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
event = Event(**event_values)
self._logger.write_event(event)
notify_alarm_operation = rackio_hook.before(NotifyAlarmOperation())
class NotifyRestartSystems(object):
def __init__(self):
self._logger = LoggerEngine()
def get_app(self):
from ..core import Rackio
return Rackio()
def __call__(self, request, response, resource, params):
app = self.get_app()
username = request.media.get('username')
machines = app.get_machines()
for machine, _, _ in machines:
if hasattr(machine, 'restart'):
event_values = {
'user': '{}'.format(username),
'message': 'All system restarted',
'description': '{} machine was switched to {}'.format(machine.name, "restarting"),
'classification': '{}'.format(machine.classification),
'priority': '{}'.format(machine.priority),
'criticity': '{}'.format(machine.criticity),
'date_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
event = Event(**event_values)
self._logger.write_event(event)
notify_restart_systems = rackio_hook.before(NotifyRestartSystems())
class NotifyTransition(object):
def __init__(self):
self._logger = LoggerEngine()
def get_app(self):
from ..core import Rackio
return Rackio()
def __call__(self, request, response, resource, params):
app = self.get_app()
system_name = params['system_name']
username = request.media.get('username')
machine = app.get_machine(system_name)
current_state = machine.current_state.name.lower()
action = request.media.get('action')
transition = getattr(machine, '{}_to_{}'.format(current_state, action))
target_transition = transition.target.destinations[0].identifier
event_values = {
'user': '{}'.format(username),
'message': 'Engine transition',
'description': '{} machine was switched to {}'.format(machine.name, target_transition),
'classification': '{}'.format(machine.classification),
'priority': '{}'.format(machine.priority),
'criticity': '{}'.format(machine.criticity),
'date_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
event = Event(**event_values)
self._logger.write_event(event)
notify_transition = rackio_hook.before(NotifyTransition())
class NotifyPriority(object):
def __init__(self):
self._logger = LoggerEngine()
def get_app(self):
from ..core import Rackio
return Rackio()
def __call__(self, request, response, resource, params):
app = self.get_app()
system_name = params['system_name']
username = request.media.get('username')
priority = request.media.get('priority')
machine = app.get_machine(system_name)
event_values = {
'user': '{}'.format(username),
'message': 'User operation',
'description': '{} machine priority was updated'.format(machine.name),
'classification': '{}'.format(machine.classification),
'priority': '{}'.format(priority),
'criticity': '{}'.format(machine.criticity),
'date_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
event = Event(**event_values)
self._logger.write_event(event)
notify_priority = rackio_hook.before(NotifyPriority())
class NotifyOperationMode(object):
def __init__(self):
self._logger = LoggerEngine()
def get_app(self):
from ..core import Rackio
return Rackio()
def __call__(self, request, response, resource, params):
app = self.get_app()
system_name = params['system_name']
username = request.media.get('username')
mode = request.media.get('mode')
machine = app.get_machine(system_name)
event_values = {
'user': '{}'.format(username),
'message': 'User operation',
'description': '{} machine operation mode was updated to {}'.format(machine.name, mode),
'classification': '{}'.format(machine.classification),
'priority': '{}'.format(machine.priority),
'criticity': '{}'.format(machine.criticity),
'date_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
event = Event(**event_values)
self._logger.write_event(event)
notify_operation_mode = rackio_hook.before(NotifyOperationMode()) | 30.556075 | 102 | 0.57547 |
79433353bf1d88c379ae0df358c3b277bc1dad4b | 1,248 | py | Python | inst/pythonScript/reformatRMgff.py | niccw/repeatCraft | 84ac019189a940d884ec61b5aab0b7b87a74ed52 | [
"MIT"
] | null | null | null | inst/pythonScript/reformatRMgff.py | niccw/repeatCraft | 84ac019189a940d884ec61b5aab0b7b87a74ed52 | [
"MIT"
] | null | null | null | inst/pythonScript/reformatRMgff.py | niccw/repeatCraft | 84ac019189a940d884ec61b5aab0b7b87a74ed52 | [
"MIT"
] | 1 | 2022-02-19T13:30:57.000Z | 2022-02-19T13:30:57.000Z | import sys
import re
if len(sys.argv) != 3:
sys.exit("Usage: rmGffAddClass2Attr.py <repeatmasker.gff> <repeatmasker.out> \
\nReformat repeatmasker gff. Type column -> General repeat class; Attribute -> ID:(repeat-family)")
rmgff = sys.argv[1]
rmout = sys.argv[2]
# Find out the repeat:repeatClass pair
classD = {}
with open(rmout, "r") as f:
for i in range(3): # Skip header
next(f)
for line in f:
[_, _, _, _, _, _, _, _, _, repeat, repeatClass, _, _, _, _] = line.rstrip().split()[0:15]
classD[repeat] = repeatClass
# Rewrite the attr in repeatmasker gff
print("##gff-version 3")
with open(rmgff, "r") as f:
for line in f:
if line.startswith("#"): # skip header
            continue
else:
[seqid, source, T, start, end, score, strand, phase, remark] = line.rstrip().split("\t")
if re.search(".*Motif:.*", line):
family = re.findall("Motif:(.*)\"", remark)[0]
s, e = remark.split()[2:]
if re.search("Target=.*", line):
attr = re.findall("Target=(.*)$", line)[0]
family = attr.split()[0]
s = attr.split()[1]
e = attr.split()[2]
c = classD[family]
nremark = "Tstart=" + s + ";Tend=" + e + ";ID=" + family
print(*[seqid, source, c, start, end, score, strand, phase, nremark], sep="\t")
| 32 | 111 | 0.604968 |
79433375103ab5b7e74782416681f9bef016ca37 | 1,061 | py | Python | config/urls.py | evangelos-ch/MangAdventure | dab893db0bf93dba279fa9e3b7a01293013bc0f6 | [
"MIT"
] | 2 | 2018-09-24T03:46:55.000Z | 2018-10-12T16:20:25.000Z | config/urls.py | evangelos-ch/MangAdventure | dab893db0bf93dba279fa9e3b7a01293013bc0f6 | [
"MIT"
] | 6 | 2018-10-08T15:59:40.000Z | 2019-02-02T16:35:33.000Z | config/urls.py | evangelos-ch/MangAdventure | dab893db0bf93dba279fa9e3b7a01293013bc0f6 | [
"MIT"
] | null | null | null | """The URLconf of the config app."""
from importlib.util import find_spec
from django.conf import settings
from django.contrib.flatpages.views import flatpage
from django.urls import include, path
info_page = flatpage
if find_spec('csp'): # pragma: no cover
from csp.decorators import csp_update
info_page = csp_update(
style_src="'unsafe-inline'",
img_src="https:"
)(flatpage)
#: The URL patterns of the config app.
urlpatterns = [
path('info/', info_page, {'url': '/info/'}, name='info'),
path('privacy/', info_page, {'url': '/privacy/'}, name='privacy'),
]
if settings.DEBUG: # pragma: no cover
from django.conf.urls.static import static
urlpatterns += static(
settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
)
urlpatterns += static(
settings.STATIC_URL, document_root=settings.STATIC_ROOT
)
if find_spec('debug_toolbar'):
from debug_toolbar import urls as djdt_urls
urlpatterns.append(path('__debug__/', include(djdt_urls)))
__all__ = ['urlpatterns']
| 29.472222 | 70 | 0.688973 |
794333c9298b785723c235685257c8252fc0bfe1 | 192 | py | Python | Strings/name_separator_FIXME.py | lvolkmann/couch-to-coder-python-exercises | afecb696d93eead9ba50613dc0723f2eca92d11a | [
"MIT"
] | null | null | null | Strings/name_separator_FIXME.py | lvolkmann/couch-to-coder-python-exercises | afecb696d93eead9ba50613dc0723f2eca92d11a | [
"MIT"
] | null | null | null | Strings/name_separator_FIXME.py | lvolkmann/couch-to-coder-python-exercises | afecb696d93eead9ba50613dc0723f2eca92d11a | [
"MIT"
] | null | null | null | """
Write a program to take the list of names below and print
"Hello, {first} {last}"
for each item in the list
"""
names = ["Jobs, Steve", "Gates, Bill", "Musk, Elon", "Hopper, Grace"]
| 14.769231 | 69 | 0.635417 |
794334942e74a338c678f0e4ef05361f3b3b08f9 | 255 | py | Python | PycharmProjects/cursoemvideo/AULA 11 CORES.py | AlexandreSoaresValerio/Pycharm | e2a9d1ae2833bf3e39244393a5bf807f892eaec5 | [
"MIT"
] | null | null | null | PycharmProjects/cursoemvideo/AULA 11 CORES.py | AlexandreSoaresValerio/Pycharm | e2a9d1ae2833bf3e39244393a5bf807f892eaec5 | [
"MIT"
] | null | null | null | PycharmProjects/cursoemvideo/AULA 11 CORES.py | AlexandreSoaresValerio/Pycharm | e2a9d1ae2833bf3e39244393a5bf807f892eaec5 | [
"MIT"
] | null | null | null | #print('\33[7;30móla mundo!\033[m')
#a = 3
#b = 5
#print('Os valores são \033[32m{}\033[m e \033[32m{}\033[m!!!'.format(a,b))
cores = {'limpa': '\033[m',
         'azul': '\033[34m',
         'amarelo': '\033[33m',
         'pretoebranco': '\033[7;30m'}
| 28.333333 | 75 | 0.505882 |
794334bbf984ef1390feb61798a933c2b416c57a | 889 | py | Python | tests/test_unit/test_compound.py | biosustain/multitfa | ab5a9bf6c19a94a221c9a137cccfcacdc2c1cb50 | [
"Apache-2.0"
] | 4 | 2020-11-27T20:25:57.000Z | 2021-03-12T22:01:34.000Z | tests/test_unit/test_compound.py | biosustain/multitfa | ab5a9bf6c19a94a221c9a137cccfcacdc2c1cb50 | [
"Apache-2.0"
] | 8 | 2020-11-29T23:56:31.000Z | 2021-04-09T11:23:17.000Z | tests/test_unit/test_compound.py | biosustain/multitfa | ab5a9bf6c19a94a221c9a137cccfcacdc2c1cb50 | [
"Apache-2.0"
] | 1 | 2021-11-11T09:22:57.000Z | 2021-11-11T09:22:57.000Z | import numpy as np
import pytest
from numpy.testing._private.utils import assert_approx_equal
from multitfa.core import Thermo_met
from .load_test_model import build_test_model
@pytest.fixture
def tfa_met():
tfa_model = build_test_model()
return tfa_model.metabolites.get_by_id("atp_c")
def test_variables(tfa_met):
assert tfa_met.concentration_variable
assert tfa_met.delG_err_variable
assert np.any(tfa_met.compound_vector)
def test_thermo_property(tfa_met):
assert_approx_equal(tfa_met.delG_f, -2259.1882733696866, significant=3)
assert not tfa_met.is_proton
assert tfa_met.equilibrator_accession
assert tfa_met.Kegg_id == "bigg.metabolite:atp"
def test_vars_bounds(tfa_met):
assert np.log(tfa_met.concentration_min) == tfa_met.concentration_variable.lb
assert np.log(tfa_met.concentration_max) == tfa_met.concentration_variable.ub
| 27.78125 | 81 | 0.79865 |
794335b797dd52baf274e28aafd409396ef5ef91 | 15,658 | py | Python | cassis/xmi.py | ArneDefauw/dkpro-cassis | 627687225cab8fae47d386cd50c442e5b239a2f1 | [
"Apache-2.0"
] | null | null | null | cassis/xmi.py | ArneDefauw/dkpro-cassis | 627687225cab8fae47d386cd50c442e5b239a2f1 | [
"Apache-2.0"
] | null | null | null | cassis/xmi.py | ArneDefauw/dkpro-cassis | 627687225cab8fae47d386cd50c442e5b239a2f1 | [
"Apache-2.0"
] | null | null | null | from collections import defaultdict
from io import BytesIO
from typing import Dict, IO, Union, List, Set, Iterable
import attr
from lxml import etree
from cassis.cas import Cas, Sofa, View, IdGenerator
from cassis.typesystem import FeatureStructure, TypeSystem
@attr.s
class ProtoView:
""" A view element from XMI. """
sofa = attr.ib(validator=attr.validators.instance_of(int)) # type: int
members = attr.ib(factory=list) # type: List[int]
def load_cas_from_xmi(source: Union[IO, str], typesystem: TypeSystem = None) -> Cas:
""" Loads a CAS from a XMI source.
Args:
source: The XML source. If `source` is a string, then it is assumed to be an XML string.
If `source` is a file-like object, then the data is read from it.
typesystem: The type system that belongs to this CAS. If `None`, an empty type system is provided.
Returns:
The deserialized CAS
"""
if typesystem is None:
typesystem = TypeSystem()
deserializer = CasXmiDeserializer()
if isinstance(source, str):
return deserializer.deserialize(BytesIO(source.encode("utf-8")), typesystem=typesystem)
else:
return deserializer.deserialize(source, typesystem=typesystem)
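# Added usage sketch (illustrative only): in practice the type system would first be
# loaded from the matching TypeSystem XML so that custom annotation types resolve.
#     with open("document.xmi", "rb") as f:
#         cas = load_cas_from_xmi(f, typesystem=my_typesystem)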
class CasXmiDeserializer:
def __init__(self):
self._max_xmi_id = 0
self._max_sofa_num = 0
def deserialize(self, source: Union[IO, str], typesystem: TypeSystem):
# namespaces
NS_XMI = "{http://www.omg.org/XMI}"
NS_CAS = "{http:///uima/cas.ecore}"
TAG_XMI = NS_XMI + "XMI"
TAG_CAS_SOFA = NS_CAS + "Sofa"
TAG_CAS_VIEW = NS_CAS + "View"
OUTSIDE_FS = 1
INSIDE_FS = 2
INSIDE_ARRAY = 3
sofas = {}
views = {}
feature_structures = {}
children = defaultdict(list)
context = etree.iterparse(source, events=("start", "end"))
state = OUTSIDE_FS
self._max_xmi_id = 0
self._max_sofa_num = 0
for event, elem in context:
# Ignore the 'xmi:XMI'
if elem.tag == TAG_XMI:
pass
elif elem.tag == TAG_CAS_SOFA:
if event == "end":
sofa = self._parse_sofa(elem)
sofas[sofa.xmiID] = sofa
elif elem.tag == TAG_CAS_VIEW:
if event == "end":
proto_view = self._parse_view(elem)
views[proto_view.sofa] = proto_view
else:
"""
In XMI, array element features can be encoded as
<cas:StringArray>
<elements>LNC</elements>
<elements>MTH</elements>
<elements>SNOMEDCT_US</elements>
</cas:StringArray>
In order to parse this with an incremental XML parser, we need to employ
a simple state machine. It is depicted in the following.
"start" "start"
+-----------+-------->+-----------+-------->+--------+
| Outside | | Inside | | Inside |
+--->+ feature | | feature | | array |
| structure | | structure | | element|
+-----------+<--------+-----------+<--------+--------+
"end" "end"
"""
if event == "start":
if state == OUTSIDE_FS:
# We saw the opening tag of a new feature structure
state = INSIDE_FS
elif state == INSIDE_FS:
# We saw the opening tag of an array element
state = INSIDE_ARRAY
else:
raise RuntimeError("Invalid state transition: [{0}] 'start'".format(state))
elif event == "end":
if state == INSIDE_FS:
# We saw the closing tag of a new feature
state = OUTSIDE_FS
fs = self._parse_feature_structure(typesystem, elem, children)
feature_structures[fs.xmiID] = fs
children.clear()
elif state == INSIDE_ARRAY:
# We saw the closing tag of an array element
children[elem.tag].append(elem.text)
state = INSIDE_FS
else:
raise RuntimeError("Invalid state transition: [{0}] 'end'".format(state))
else:
raise RuntimeError("Invalid XML event: [{0}]".format(event))
# Free already processed elements from memory
if event == "end":
self._clear_elem(elem)
# Post-process feature values
referenced_fs = set()
for xmi_id, fs in feature_structures.items():
t = typesystem.get_type(fs.type)
for feature in t.all_features:
feature_name = feature.name
if feature_name == "sofa":
value = getattr(fs, feature_name)
sofa = sofas[value]
setattr(fs, feature_name, sofa)
continue
if (
typesystem.is_primitive(feature.rangeTypeName)
or typesystem.is_primitive_collection(feature.rangeTypeName)
or typesystem.is_primitive_collection(fs.type)
):
# TODO: Parse feature values to their real type here, e.g. parse ints or floats
continue
# Resolve references here
value = getattr(fs, feature_name)
if value is None:
continue
# Resolve references
if typesystem.is_collection(fs.type, feature):
# A collection of references is a list of integers separated
# by single spaces, e.g. <foo:bar elements="1 2 3 42" />
targets = []
for ref in value.split():
target_id = int(ref)
target = feature_structures[target_id]
targets.append(target)
referenced_fs.add(target_id)
setattr(fs, feature_name, targets)
else:
target_id = int(value)
target = feature_structures[target_id]
referenced_fs.add(target_id)
setattr(fs, feature_name, target)
cas = Cas(typesystem=typesystem)
for sofa in sofas.values():
if sofa.sofaID == "_InitialView":
view = cas.get_view("_InitialView")
else:
view = cas.create_view(sofa.sofaID, xmiID=sofa.xmiID, sofaNum=sofa.sofaNum)
view.sofa_string = sofa.sofaString
view.sofa_mime = sofa.mimeType
# If a sofa has no members, then UIMA might omit the view. In that case,
# we create an empty view for it.
if sofa.xmiID in views:
proto_view = views[sofa.xmiID]
else:
proto_view = ProtoView(sofa.xmiID)
for member_id in proto_view.members:
annotation = feature_structures[member_id]
view.add_annotation(annotation, keep_id=True)
cas._xmi_id_generator = IdGenerator(self._max_xmi_id + 1)
cas._sofa_num_generator = IdGenerator(self._max_sofa_num + 1)
return cas
def _parse_sofa(self, elem) -> Sofa:
attributes = dict(elem.attrib)
attributes["xmiID"] = int(attributes.pop("{http://www.omg.org/XMI}id"))
attributes["sofaNum"] = int(attributes["sofaNum"])
self._max_xmi_id = max(attributes["xmiID"], self._max_xmi_id)
self._max_sofa_num = max(attributes["sofaNum"], self._max_sofa_num)
return Sofa(**attributes)
def _parse_view(self, elem) -> ProtoView:
attributes = elem.attrib
sofa = int(attributes["sofa"])
members = [int(e) for e in attributes.get("members", "").strip().split()]
result = ProtoView(sofa=sofa, members=members)
attr.validate(result)
return result
def _parse_feature_structure(self, typesystem: TypeSystem, elem, children: Dict[str, List[str]]):
# Strip the http prefix, replace / with ., remove the ecore part
# TODO: Error checking
typename = elem.tag[9:].replace("/", ".").replace("ecore}", "").strip()
AnnotationType = typesystem.get_type(typename)
attributes = dict(elem.attrib)
attributes.update(children)
# Map the xmi:id attribute to xmiID
attributes["xmiID"] = int(attributes.pop("{http://www.omg.org/XMI}id"))
if "begin" in attributes:
attributes["begin"] = int(attributes["begin"])
if "end" in attributes:
attributes["end"] = int(attributes["end"])
if "sofa" in attributes:
attributes["sofa"] = int(attributes["sofa"])
# Remap features that use a reserved Python name
if "self" in attributes:
attributes["self_"] = attributes.pop("self")
if "type" in attributes:
attributes["type_"] = attributes.pop("type")
self._max_xmi_id = max(attributes["xmiID"], self._max_xmi_id)
return AnnotationType(**attributes)
def _clear_elem(self, elem):
""" Frees XML nodes that already have been processed to save memory """
elem.clear()
while elem.getprevious() is not None:
del elem.getparent()[0]
class CasXmiSerializer:
_COMMON_FIELD_NAMES = {"xmiID", "type"}
def __init__(self):
self._nsmap = {"xmi": "http://www.omg.org/XMI", "cas": "http:///uima/cas.ecore"}
self._urls_to_prefixes = {}
self._duplicate_namespaces = defaultdict(int)
def serialize(self, sink: Union[IO, str], cas: Cas, pretty_print=True):
xmi_attrs = {"{http://www.omg.org/XMI}version": "2.0"}
root = etree.Element(etree.QName(self._nsmap["xmi"], "XMI"), nsmap=self._nsmap, **xmi_attrs)
self._serialize_cas_null(root)
# Find all fs, even the ones that are not directly added to a sofa
for fs in sorted(cas._find_all_fs(), key=lambda a: a.xmiID):
self._serialize_feature_structure(cas, root, fs)
for sofa in cas.sofas:
self._serialize_sofa(root, sofa)
for view in cas.views:
self._serialize_view(root, view)
doc = etree.ElementTree(root)
etree.cleanup_namespaces(doc, top_nsmap=self._nsmap)
doc.write(sink, xml_declaration=True, pretty_print=pretty_print)
def _serialize_cas_null(self, root: etree.Element):
name = etree.QName(self._nsmap["cas"], "NULL")
elem = etree.SubElement(root, name)
elem.attrib["{http://www.omg.org/XMI}id"] = "0"
def _serialize_feature_structure(self, cas: Cas, root: etree.Element, fs: FeatureStructure):
ts = cas.typesystem
# The type name is a Java package, e.g. `org.myproj.Foo`.
parts = fs.type.split(".")
# The CAS type namespace is converted to an XML namespace URI by the following rule:
# replace all dots with slashes, prepend http:///, and append .ecore.
url = "http:///" + "/".join(parts[:-1]) + ".ecore"
# The cas prefix is the last component of the CAS namespace, which is the second to last
# element of the type (the last part is the type name without package name), e.g. `myproj`
raw_prefix = parts[-2]
typename = parts[-1]
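        # Added worked example of the rule above: for the type "org.myproj.Foo" this
        # yields url "http:///org/myproj.ecore", raw_prefix "myproj" and typename "Foo".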
# If the url has not been seen yet, compute the namespace and add it
if url not in self._urls_to_prefixes:
# If the prefix already exists, but maps to a different url, then add it with
# a number at the end, e.g. `type0`
new_prefix = raw_prefix
if raw_prefix in self._nsmap:
suffix = self._duplicate_namespaces[raw_prefix]
self._duplicate_namespaces[raw_prefix] += 1
new_prefix = raw_prefix + str(suffix)
self._nsmap[new_prefix] = url
self._urls_to_prefixes[url] = new_prefix
prefix = self._urls_to_prefixes[url]
name = etree.QName(self._nsmap[prefix], typename)
elem = etree.SubElement(root, name)
# Serialize common attributes
elem.attrib["{http://www.omg.org/XMI}id"] = str(fs.xmiID)
# Serialize feature attributes
t = ts.get_type(fs.type)
for feature in t.all_features:
if feature.name in CasXmiSerializer._COMMON_FIELD_NAMES:
continue
feature_name = feature.name
# Strip the underscore we added for reserved names
if feature._has_reserved_name:
feature_name = feature.name[:-1]
# Skip over 'None' features
value = getattr(fs, feature.name)
if value is None:
continue
if (ts.is_instance_of(fs.type, "uima.cas.StringArray") and feature_name == "elements") or ts.is_instance_of(
feature.rangeTypeName, "uima.cas.StringArray"
):
# String arrays need to be serialized to a series of child elements, as strings can
# contain whitespaces. Consider e.g. the array ['likes cats, 'likes dogs']. If we would
# serialize it as an attribute, it would look like
#
# <my:fs elements="likes cats likes dogs" />
#
# which looses the information about the whitespace. Instead, we serialize it to
#
# <my:fs>
# <elements>likes cats</elements>
# <elements>likes dogs</elements>
# </my:fs>
for e in value:
child = etree.SubElement(elem, feature_name)
child.text = e
elif feature_name == "sofa":
elem.attrib[feature_name] = str(value.xmiID)
elif ts.is_primitive(feature.rangeTypeName):
elem.attrib[feature_name] = str(value)
elif ts.is_collection(fs.type, feature):
elements = " ".join(str(e.xmiID) for e in value)
elem.attrib[feature_name] = elements
else:
# We need to encode non-primitive features as a reference
elem.attrib[feature_name] = str(value.xmiID)
def _serialize_sofa(self, root: etree.Element, sofa: Sofa):
name = etree.QName(self._nsmap["cas"], "Sofa")
elem = etree.SubElement(root, name)
elem.attrib["{http://www.omg.org/XMI}id"] = str(sofa.xmiID)
elem.attrib["sofaNum"] = str(sofa.sofaNum)
elem.attrib["sofaID"] = str(sofa.sofaID)
elem.attrib["mimeType"] = str(sofa.mimeType)
elem.attrib["sofaString"] = str(sofa.sofaString)
def _serialize_view(self, root: etree.Element, view: View):
name = etree.QName(self._nsmap["cas"], "View")
elem = etree.SubElement(root, name)
elem.attrib["sofa"] = str(view.sofa.xmiID)
elem.attrib["members"] = " ".join(sorted((str(x.xmiID) for x in view.get_all_annotations()), key=int))
| 39.540404 | 120 | 0.551092 |
7943371e32fefe890bc266be3e736f886dbfb4ca | 703 | py | Python | testes/cev/menu/__init__.py | pablocarracci/cev-python | dd6c2db80be84ec732fc5efd895e11d48d298258 | [
"MIT"
] | null | null | null | testes/cev/menu/__init__.py | pablocarracci/cev-python | dd6c2db80be84ec732fc5efd895e11d48d298258 | [
"MIT"
] | null | null | null | testes/cev/menu/__init__.py | pablocarracci/cev-python | dd6c2db80be84ec732fc5efd895e11d48d298258 | [
"MIT"
] | null | null | null | # Menu module of the cev package
from cev.dado import le_int
def exibe_menu(opcoes, texto='MENU PRINCIPAL'):
    '''Display a menu of options and return the selected option.'''
cabecalho(texto, '-', 25)
exibe_opcoes(opcoes)
print((len(texto) + 25) * '-')
return le_int('Sua Opção: ', 'ERRO: digite um número inteiro válido.')
def exibe_opcoes(opcoes):
    '''Display the options.'''
for i in range(len(opcoes)):
print(f'{i + 1} ‒ {opcoes[i]}')
def cabecalho(texto, estilo='~', preenchimento=4):
    '''Write the text formatted as a header.'''
tamanho = len(texto) + preenchimento
print(tamanho * estilo)
print(f'{texto:^{tamanho}}')
print(tamanho * estilo)
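# Added usage sketch (illustrative only, assuming cev.dado.le_int prompts for an int):
#     opcao = exibe_menu(['Register', 'List', 'Quit'], 'MAIN MENU')
#     draws the framed header, prints the numbered options and returns the number typed.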
| 27.038462 | 74 | 0.637269 |
79433801958347fa609680befdfa3a1c8b66f958 | 16,074 | py | Python | benchexec/tools/ultimate.py | carolemieux/benchexec | 8e4b999150a6b0e686c1e9cc62d38a05aed6f875 | [
"Apache-2.0"
] | null | null | null | benchexec/tools/ultimate.py | carolemieux/benchexec | 8e4b999150a6b0e686c1e9cc62d38a05aed6f875 | [
"Apache-2.0"
] | null | null | null | benchexec/tools/ultimate.py | carolemieux/benchexec | 8e4b999150a6b0e686c1e9cc62d38a05aed6f875 | [
"Apache-2.0"
] | null | null | null | """
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2015 Daniel Dietsch
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import functools
import logging
import os
import re
import subprocess
import sys
import benchexec.result as result
import benchexec.tools.template
import benchexec.util as util
from benchexec import BenchExecException
from benchexec.model import MEMLIMIT
from benchexec.tools.template import UnsupportedFeatureException
_OPTION_NO_WRAPPER = '--force-no-wrapper'
_SVCOMP17_VERSIONS = {"f7c3ed31"}
_SVCOMP17_FORBIDDEN_FLAGS = {"--full-output", "--architecture"}
_ULTIMATE_VERSION_REGEX = re.compile(r'^Version is (.*)$', re.MULTILINE)
# .jar files that are used as launcher arguments with most recent .jar first
_LAUNCHER_JARS = ["plugins/org.eclipse.equinox.launcher_1.3.100.v20150511-1540.jar"]
class UltimateTool(benchexec.tools.template.BaseTool):
"""
Abstract tool info for Ultimate-based tools.
"""
REQUIRED_PATHS = [
"artifacts.xml",
"config",
"configuration",
"cvc4",
"cvc4nyu",
"cvc4-LICENSE",
"features",
"LICENSE",
"LICENSE.GPL",
"LICENSE.GPL.LESSER",
"mathsat",
"mathsat-LICENSE",
"p2",
"plugins",
"README",
"Ultimate",
"Ultimate.ini",
"Ultimate.py",
"z3",
"z3-LICENSE"
]
REQUIRED_PATHS_SVCOMP17 = []
def __init__(self):
self._uses_propertyfile = False
@functools.lru_cache()
def executable(self):
exec = util.find_executable('Ultimate.py')
for (dirpath, dirnames, filenames) in os.walk(exec):
if 'Ultimate' in filenames and 'plugins' in dirnames:
return exec
break
# possibly another Ultimate.py was found, check in the current dir
current = os.getcwd()
for (dirpath, dirnames, filenames) in os.walk(current):
if 'Ultimate' in filenames and 'Ultimate.py' in filenames and 'plugins' in dirnames:
return './Ultimate.py'
break
sys.exit("ERROR: Could not find Ultimate executable in '{0}' or '{1}'".format(str(exec), str(current)))
def _ultimate_version(self, executable):
data_dir = os.path.join(os.path.dirname(executable), 'data')
launcher_jar = self._get_current_launcher_jar(executable)
cmds = [
# 2
["java", "-Xss4m", "-jar", launcher_jar, "-data", "@noDefault", "-ultimatedata", data_dir, "--version"],
# 1
["java", "-Xss4m", "-jar", launcher_jar, "-data", data_dir, "--version"],
]
self.api = len(cmds)
for cmd in cmds:
version = self._query_ultimate_version(cmd, self.api)
if version != '':
return version
self.api = self.api - 1
raise BenchExecException("Could not determine Ultimate version")
def _query_ultimate_version(self, cmd, api):
try:
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
except OSError as e:
logging.warning('Cannot run Java to determine Ultimate version (API {0}): {1}'
.format(api, e.strerror))
return ''
if stderr:
logging.warning('Cannot determine Ultimate version (API {0}). Error output: {1}'
.format(api, util.decode_to_string(stderr)))
return ''
if process.returncode:
logging.warning(
'Cannot determine Ultimate version (API {0}). Exit code : {1}\nCommand was {2}'
.format(api, process.returncode, ' '.join(cmd)))
return ''
version_ultimate_match = _ULTIMATE_VERSION_REGEX.search(util.decode_to_string(stdout))
if not version_ultimate_match:
logging.warning(
'Cannot determine Ultimate version, output (API {0}): {1}'.format(api, util.decode_to_string(stdout)))
return ''
return version_ultimate_match.group(1)
@functools.lru_cache()
def _get_current_launcher_jar(self, executable):
ultimatedir = os.path.dirname(executable)
for jar in _LAUNCHER_JARS:
launcher_jar = os.path.join(ultimatedir, jar)
if os.path.isfile(launcher_jar):
return launcher_jar
raise FileNotFoundError('No suitable launcher jar found in {0}'.format(ultimatedir))
@functools.lru_cache()
def version(self, executable):
wrapper_version = self._version_from_tool(executable)
if wrapper_version in _SVCOMP17_VERSIONS:
# Keep reported version number for old versions as they were before
return wrapper_version
ultimate_version = self._ultimate_version(executable)
return ultimate_version + '-' + wrapper_version
@functools.lru_cache()
def _is_svcomp17_version(self, executable):
return self.version(executable) in _SVCOMP17_VERSIONS
@functools.lru_cache()
def _requires_ultimate_data(self, executable):
if self._is_svcomp17_version(executable):
return False
version = self.version(executable)
ult, wrapper = version.split("-")
major, minor, patch = ult.split(".")
# all versions before 0.1.24 do not require ultimatedata
return not (int(major) == 0 and int(minor) < 2 and int(patch) < 24)
def cmdline(self, executable, options, tasks, propertyfile=None, rlimits=None):
if rlimits is None:
rlimits = {}
self._uses_propertyfile = (propertyfile is not None)
if _OPTION_NO_WRAPPER in options:
# do not use old wrapper script even if property file is given
self._uses_propertyfile = False
propertyfile = None
options.remove(_OPTION_NO_WRAPPER)
if self._is_svcomp17_version(executable):
assert propertyfile
cmdline = [executable, propertyfile]
cmdline += [option for option in options if option not in _SVCOMP17_FORBIDDEN_FLAGS]
cmdline.append("--full-output")
cmdline += tasks
self.__assert_cmdline(cmdline, "cmdline contains empty or None argument when using SVCOMP17 mode: ")
return cmdline
if self._uses_propertyfile:
# use the old wrapper script if a property file is given
cmdline = [executable, '--spec', propertyfile]
if tasks:
cmdline += ['--file'] + tasks
cmdline += options
self.__assert_cmdline(cmdline, "cmdline contains empty or None argument when using default SVCOMP mode: ")
return cmdline
# if no property file is given and toolchain (-tc) is, use ultimate directly
if '-tc' in options or '--toolchain' in options:
# ignore executable (old executable is just around for backwards compatibility)
mem_bytes = rlimits.get(MEMLIMIT, None)
cmdline = ['java']
# -ea has to be given directly to java
if '-ea' in options:
options = [e for e in options if e != '-ea']
cmdline += ['-ea']
if mem_bytes:
cmdline += ['-Xmx' + str(mem_bytes)]
cmdline += ['-Xss4m']
cmdline += ['-jar', self._get_current_launcher_jar(executable)]
if self._requires_ultimate_data(executable):
if '-ultimatedata' not in options and '-data' not in options:
if self.api == 2:
cmdline += ['-data', '@noDefault', '-ultimatedata',
os.path.join(os.path.dirname(executable), 'data')]
if self.api == 1:
raise ValueError('Illegal option -ultimatedata for API {} and Ultimate version {}'
.format(self.api, self.version(executable)))
elif '-ultimatedata' in options and '-data' not in options:
if self.api == 2:
cmdline += ['-data', '@noDefault']
if self.api == 1:
raise ValueError('Illegal option -ultimatedata for API {} and Ultimate version {}'
.format(self.api, self.version(executable)))
else:
if '-data' not in options:
if self.api == 2 or self.api == 1:
cmdline += ['-data', os.path.join(os.path.dirname(executable), 'data')]
cmdline += options
if tasks:
cmdline += ['-i'] + tasks
self.__assert_cmdline(cmdline, "cmdline contains empty or None argument when using Ultimate raw mode: ")
return cmdline
# there is no way to run ultimate; not enough parameters
raise UnsupportedFeatureException(
"Unsupported argument combination: options={} propertyfile={} rlimits={}".format(options, propertyfile,
rlimits))
def __assert_cmdline(self, cmdline, msg):
assert all(cmdline), msg + str(cmdline)
pass
def program_files(self, executable):
paths = self.REQUIRED_PATHS_SVCOMP17 if self._is_svcomp17_version(executable) else self.REQUIRED_PATHS
return [executable] + self._program_files_from_executable(executable, paths)
def determine_result(self, returncode, returnsignal, output, is_timeout):
if self._uses_propertyfile:
return self._determine_result_with_propertyfile(returncode, returnsignal, output, is_timeout)
return self._determine_result_without_propertyfile(returncode, returnsignal, output, is_timeout)
def _determine_result_without_propertyfile(self, returncode, returnsignal, output, is_timeout):
# special strings in ultimate output
treeautomizer_sat = 'TreeAutomizerSatResult'
treeautomizer_unsat = 'TreeAutomizerUnsatResult'
unsupported_syntax_errorstring = 'ShortDescription: Unsupported Syntax'
incorrect_syntax_errorstring = 'ShortDescription: Incorrect Syntax'
type_errorstring = 'Type Error'
witness_errorstring = 'InvalidWitnessErrorResult'
exception_errorstring = 'ExceptionOrErrorResult'
safety_string = 'Ultimate proved your program to be correct'
all_spec_string = 'AllSpecificationsHoldResult'
unsafety_string = 'Ultimate proved your program to be incorrect'
mem_deref_false_string = 'pointer dereference may fail'
mem_deref_false_string_2 = 'array index can be out of bounds'
mem_free_false_string = 'free of unallocated memory possible'
mem_memtrack_false_string = 'not all allocated memory was freed'
termination_false_string = 'Found a nonterminating execution for the following ' \
'lasso shaped sequence of statements'
termination_true_string = 'TerminationAnalysisResult: Termination proven'
ltl_false_string = 'execution that violates the LTL property'
ltl_true_string = 'Buchi Automizer proved that the LTL property'
overflow_false_string = 'overflow possible'
for line in output:
if line.find(unsupported_syntax_errorstring) != -1:
return 'ERROR: UNSUPPORTED SYNTAX'
if line.find(incorrect_syntax_errorstring) != -1:
return 'ERROR: INCORRECT SYNTAX'
if line.find(type_errorstring) != -1:
return 'ERROR: TYPE ERROR'
if line.find(witness_errorstring) != -1:
return 'ERROR: INVALID WITNESS FILE'
if line.find(exception_errorstring) != -1:
return 'ERROR: EXCEPTION'
if self._contains_overapproximation_result(line):
return 'UNKNOWN: OverapproxCex'
if line.find(termination_false_string) != -1:
return 'FALSE(TERM)'
if line.find(termination_true_string) != -1:
return 'TRUE'
if line.find(ltl_false_string) != -1:
return 'FALSE(valid-ltl)'
if line.find(ltl_true_string) != -1:
return 'TRUE'
if line.find(unsafety_string) != -1:
return 'FALSE'
if line.find(mem_deref_false_string) != -1:
return 'FALSE(valid-deref)'
if line.find(mem_deref_false_string_2) != -1:
return 'FALSE(valid-deref)'
if line.find(mem_free_false_string) != -1:
return 'FALSE(valid-free)'
if line.find(mem_memtrack_false_string) != -1:
return 'FALSE(valid-memtrack)'
if line.find(overflow_false_string) != -1:
return 'FALSE(OVERFLOW)'
if line.find(safety_string) != -1 or line.find(all_spec_string) != -1:
return 'TRUE'
if line.find(treeautomizer_unsat) != -1:
return 'unsat'
if line.find(treeautomizer_sat) != -1 or line.find(all_spec_string) != -1:
return 'sat'
return result.RESULT_UNKNOWN
def _contains_overapproximation_result(self, line):
triggers = [
'Reason: overapproximation of',
'Reason: overapproximation of bitwiseAnd',
'Reason: overapproximation of bitwiseOr',
'Reason: overapproximation of bitwiseXor',
'Reason: overapproximation of shiftLeft',
'Reason: overapproximation of shiftRight',
'Reason: overapproximation of bitwiseComplement'
]
for trigger in triggers:
if line.find(trigger) != -1:
return True
return False
def _determine_result_with_propertyfile(self, returncode, returnsignal, output, is_timeout):
for line in output:
if line.startswith('FALSE(valid-free)'):
return result.RESULT_FALSE_FREE
elif line.startswith('FALSE(valid-deref)'):
return result.RESULT_FALSE_DEREF
elif line.startswith('FALSE(valid-memtrack)'):
return result.RESULT_FALSE_MEMTRACK
elif line.startswith('FALSE(TERM)'):
return result.RESULT_FALSE_TERMINATION
elif line.startswith('FALSE(OVERFLOW)'):
return result.RESULT_FALSE_OVERFLOW
elif line.startswith('FALSE'):
return result.RESULT_FALSE_REACH
elif line.startswith('TRUE'):
return result.RESULT_TRUE_PROP
elif line.startswith('UNKNOWN'):
return result.RESULT_UNKNOWN
elif line.startswith('ERROR'):
status = result.RESULT_ERROR
if line.startswith('ERROR: INVALID WITNESS FILE'):
status += ' (invalid witness file)'
return status
return result.RESULT_UNKNOWN
def get_value_from_output(self, lines, identifier):
# search for the text in output and get its value,
# stop after the first line, that contains the searched text
for line in lines:
if identifier in line:
start_position = line.find('=') + 1
return line[start_position:].strip()
return None
| 42.636605 | 118 | 0.610862 |
794338f37b8e54755950ac8200fc88a1d940442a | 1,957 | py | Python | website.py | MaTachi/html-to-wordpress | a3abf187dd47f3385898563e00a1e2c2d09f5625 | [
"MIT"
] | null | null | null | website.py | MaTachi/html-to-wordpress | a3abf187dd47f3385898563e00a1e2c2d09f5625 | [
"MIT"
] | null | null | null | website.py | MaTachi/html-to-wordpress | a3abf187dd47f3385898563e00a1e2c2d09f5625 | [
"MIT"
] | null | null | null | import sys
from flask import Flask, render_template, request, make_response
from backend import post
app = Flask(__name__)
@app.route('/', methods=['POST', 'GET'])
def index():
if request.method == 'POST':
remove_new_window_link = True if request.form.get(
'remove_new_window_link', False) else False
publish = True if request.form.get('publish', False) else False
result = post(request.form['title'],
request.form['url'],
remove_new_window_link=remove_new_window_link,
publish=publish,
delete_links=request.form['delete_links'],
cookies=request.form['cookies'])
template = render_template('posted.html', content=result[0],
title=result[1], link=result[2])
response = make_response(template)
response.set_cookie('remove_new_window_link',
'true' if remove_new_window_link else 'false')
response.set_cookie('publish', 'true' if publish else 'false')
response.set_cookie('delete_links', request.form['delete_links'])
response.set_cookie('cookies', request.form['cookies'])
return response
elif request.method == 'GET':
publish = 'true' == request.cookies.get('publish')
remove_new_window_link = 'true' == request.cookies.get(
'remove_new_window_link')
delete_links = request.cookies.get('delete_links', '')
cookies = request.cookies.get('cookies', '')
return render_template('index.html', publish=publish,
remove_new_window_link=remove_new_window_link,
delete_links=delete_links,
cookies=cookies)
if __name__ == '__main__':
host = sys.argv[1] if len(sys.argv) > 1 else '127.0.0.1'
app.run(debug=True, threaded=True, host=host)
| 42.543478 | 77 | 0.599387 |
79433a2202289ac9cfdcad2627678bd073b7498d | 1,823 | py | Python | petition.py | jemitk/petition-app | 75e6557bd0282f4c23be26ba3b539138ed820ced | [
"MIT"
] | null | null | null | petition.py | jemitk/petition-app | 75e6557bd0282f4c23be26ba3b539138ed820ced | [
"MIT"
] | null | null | null | petition.py | jemitk/petition-app | 75e6557bd0282f4c23be26ba3b539138ed820ced | [
"MIT"
] | null | null | null | from flask import Flask, render_template, flash, redirect, url_for, session
from flask_login import LoginManager, login_required, request, login_user
import MySQLdb
login_manager = LoginManager()
app = Flask(__name__)
app.debug = True
login_manager.init_app(app)
db = MySQLdb.connect(host = 'localhost',
user = 'Xiaoyu Chen',
passwd = "petitions",
db = "petitions")
cur = db.cursor()
@app.route('/')
def show_main_page():
#substitute the following file name with your template file name
return render_template('PETITION_TEMPLATE.html')
@app.route('/petitions')
@login_required
def show_petitions():
petitions_name = []
petitions_description = []
petitions_start_date = []
petitions_end_date = []
cur.execute('select * from petitions')
    # A MySQLdb cursor is not subscriptable, so iterate over the fetched rows and
    # pick the columns by position (assumed table layout matching the indices used
    # in the original code: name=2, description=3, start_date=4, end_date=5).
    for row in cur.fetchall():
        petitions_name.append(row[2])
        petitions_description.append(row[3])
        petitions_start_date.append(row[4])
        petitions_end_date.append(row[5])
posts_info = [petitions_name, petitions_description, petitions_start_date, petitions_end_date]
return render_template('PETITION_TEMPLATE.html', posts_info = posts_info)
@app.route('/login', methods = ['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('show_petitions'))
return render_template('login.html', error=error)
app.run()
| 29.403226 | 98 | 0.659901 |
79433b722f607de29a6b433571be5f75f18404d1 | 1,733 | py | Python | ambari-agent/src/main/python/ambari_agent/AmbariAgent.py | vsosrc/ambari | e3cc898672707bedf7597f2e16d684c8a00bba3b | [
"Apache-2.0"
] | 5 | 2018-06-03T05:19:40.000Z | 2021-04-16T17:10:49.000Z | ambari-agent/src/main/python/ambari_agent/AmbariAgent.py | vsosrc/ambari | e3cc898672707bedf7597f2e16d684c8a00bba3b | [
"Apache-2.0"
] | null | null | null | ambari-agent/src/main/python/ambari_agent/AmbariAgent.py | vsosrc/ambari | e3cc898672707bedf7597f2e16d684c8a00bba3b | [
"Apache-2.0"
] | 6 | 2019-05-07T13:24:39.000Z | 2021-02-15T14:12:37.000Z | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import sys
import subprocess
from Controller import AGENT_AUTO_RESTART_EXIT_CODE
AGENT_SCRIPT = "/usr/lib/python2.6/site-packages/ambari_agent/main.py"
AGENT_PID_FILE = "/var/run/ambari-agent/ambari-agent.pid"
# AGENT_AUTO_RESTART_EXIT_CODE = 77 is exit code which we return when restart_agent() is called
status = AGENT_AUTO_RESTART_EXIT_CODE
def main():
global status
if (os.environ.has_key("PYTHON")):
PYTHON = os.environ["PYTHON"]
else:
print("Key 'PYTHON' is not defined in environment variables")
sys.exit(1)
args = list(sys.argv)
del args[0]
mergedArgs = [PYTHON, AGENT_SCRIPT] + args
while status == AGENT_AUTO_RESTART_EXIT_CODE:
mainProcess = subprocess.Popen(mergedArgs)
mainProcess.communicate()
status = mainProcess.returncode
if os.path.isfile(AGENT_PID_FILE) and status == AGENT_AUTO_RESTART_EXIT_CODE:
os.remove(AGENT_PID_FILE)
if __name__ == "__main__":
main() | 32.698113 | 95 | 0.766301 |
79433bbc6c4edcbbffac8eac99fb537ccb5a96c9 | 2,618 | py | Python | tests/unit/test_dg.py | packit-service/packit | eb9e12411c739a94f062cc39c8bd5f657c2eec69 | [
"MIT"
] | 81 | 2019-02-07T15:38:34.000Z | 2020-07-16T06:33:02.000Z | tests/unit/test_dg.py | packit-service/packit | eb9e12411c739a94f062cc39c8bd5f657c2eec69 | [
"MIT"
] | 825 | 2019-02-07T15:08:16.000Z | 2020-08-02T08:11:23.000Z | tests/unit/test_dg.py | packit-service/packit | eb9e12411c739a94f062cc39c8bd5f657c2eec69 | [
"MIT"
] | 51 | 2019-02-08T09:56:29.000Z | 2020-06-17T15:34:00.000Z | # Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
import pytest
from flexmock import flexmock
from packit.config import Config, PackageConfig
from packit.distgit import DistGit
from packit.local_project import LocalProject
@pytest.mark.parametrize(
"title, description, branch, prs, exists",
[
(
"Update",
"Upstream tag: 0.4.0\nUpstream commit: 6957453b",
"f31",
[
flexmock(
title="Update",
target_branch="f31",
description="Upstream tag: 0.4.0\nUpstream commit: 6957453b",
author="packit",
)
],
True,
),
(
"Update",
"Upstream tag: 0.4.0\nUpstream commit: 6957453b",
"f31",
[
flexmock(
title="Update",
target_branch="f31",
description="Upstream tag: 0.4.0\nUpstream commit: 8957453b",
author="packit",
)
],
False,
),
(
"Update",
"Upstream tag: 0.4.0\nUpstream commit: 6957453b",
"f32",
[
flexmock(
title="Update",
target_branch="f31",
description="Upstream tag: 0.4.0\nUpstream commit: 6957453b",
author="packit",
)
],
False,
),
(
"Update",
"Upstream tag: 0.4.0\nUpstream commit: 6957453b",
"f31",
[
flexmock(
title="Update",
target_branch="f31",
description="Upstream tag: 0.4.0\nUpstream commit: 6957453b",
author="packit-stg",
)
],
False,
),
],
)
def test_existing_pr(title, description, branch, prs, exists):
user_mock = flexmock().should_receive("get_username").and_return("packit").mock()
local_project = LocalProject(
git_project=flexmock(service="something", get_pr_list=lambda: prs),
refresh=False,
git_service=flexmock(user=user_mock),
)
distgit = DistGit(
config=flexmock(Config()),
package_config=flexmock(PackageConfig()),
local_project=local_project,
)
pr = distgit.existing_pr(title, description, branch)
if exists:
assert pr is not None
else:
assert pr is None
| 29.088889 | 85 | 0.484339 |
79433c11b7534d4c963927bb359dbce020ad6a78 | 15,617 | py | Python | bin/pildriver.py | wgcv/Crashphone | eb8145a2695d6c9c344a1776f76d4bdd1647db58 | [
"Apache-2.0"
] | null | null | null | bin/pildriver.py | wgcv/Crashphone | eb8145a2695d6c9c344a1776f76d4bdd1647db58 | [
"Apache-2.0"
] | null | null | null | bin/pildriver.py | wgcv/Crashphone | eb8145a2695d6c9c344a1776f76d4bdd1647db58 | [
"Apache-2.0"
] | null | null | null | #!/home/wgcv/dev/Crashphone/bin/python
"""PILdriver, an image-processing calculator using PIL.
An instance of class PILDriver is essentially a software stack machine
(Polish-notation interpreter) for sequencing PIL image
transformations. The state of the instance is the interpreter stack.
The only method one will normally invoke after initialization is the
`execute' method. This takes an argument list of tokens, pushes them
onto the instance's stack, and then tries to clear the stack by
successive evaluation of PILdriver operators. Any part of the stack
not cleaned off persists and is part of the evaluation context for
the next call of the execute method.
PILDriver doesn't catch any exceptions, on the theory that these
are actually diagnostic information that should be interpreted by
the calling code.
When called as a script, the command-line arguments are passed to
a PILDriver instance. If there are no command-line arguments, the
module runs an interactive interpreter, each line of which is split into
space-separated tokens and passed to the execute method.
In the method descriptions below, a first line beginning with the string
`usage:' means this method can be invoked with the token that follows
it. Following <>-enclosed arguments describe how the method interprets
the entries on the stack. Each argument specification begins with a
type specification: either `int', `float', `string', or `image'.
All operations consume their arguments off the stack (use `dup' to
keep copies around). Use `verbose 1' to see the stack state displayed
before each operation.
Usage examples:
`show crop 0 0 200 300 open test.png' loads test.png, crops out a portion
of its upper-left-hand corner and displays the cropped portion.
`save rotated.png rotate 30 open test.tiff' loads test.tiff, rotates it
30 degrees, and saves the result as rotated.png (in PNG format).
"""
# by Eric S. Raymond <[email protected]>
# $Id$
# TO DO:
# 1. Add PILFont capabilities, once that's documented.
# 2. Add PILDraw operations.
# 3. Add support for composing and decomposing multiple-image files.
#
from __future__ import print_function
from PIL import Image
class PILDriver:
verbose = 0
def do_verbose(self):
"""usage: verbose <int:num>
Set verbosity flag from top of stack.
"""
self.verbose = int(self.do_pop())
# The evaluation stack (internal only)
stack = [] # Stack of pending operations
def push(self, item):
"Push an argument onto the evaluation stack."
self.stack = [item] + self.stack
def top(self):
"Return the top-of-stack element."
return self.stack[0]
# Stack manipulation (callable)
def do_clear(self):
"""usage: clear
Clear the stack.
"""
self.stack = []
def do_pop(self):
"""usage: pop
Discard the top element on the stack.
"""
top = self.stack[0]
self.stack = self.stack[1:]
return top
def do_dup(self):
"""usage: dup
Duplicate the top-of-stack item.
"""
        if hasattr(self.stack[0], 'format'):  # If it's an image, do a real copy
dup = self.stack[0].copy()
else:
dup = self.stack[0]
self.stack = [dup] + self.stack
def do_swap(self):
"""usage: swap
Swap the top-of-stack item with the next one down.
"""
self.stack = [self.stack[1], self.stack[0]] + self.stack[2:]
# Image module functions (callable)
def do_new(self):
"""usage: new <int:xsize> <int:ysize> <int:color>:
Create and push a greyscale image of given size and color.
"""
xsize = int(self.do_pop())
ysize = int(self.do_pop())
color = int(self.do_pop())
self.push(Image.new("L", (xsize, ysize), color))
def do_open(self):
"""usage: open <string:filename>
Open the indicated image, read it, push the image on the stack.
"""
self.push(Image.open(self.do_pop()))
def do_blend(self):
"""usage: blend <image:pic1> <image:pic2> <float:alpha>
Replace two images and an alpha with the blended image.
"""
image1 = self.do_pop()
image2 = self.do_pop()
alpha = float(self.do_pop())
self.push(Image.blend(image1, image2, alpha))
def do_composite(self):
"""usage: composite <image:pic1> <image:pic2> <image:mask>
Replace two images and a mask with their composite.
"""
image1 = self.do_pop()
image2 = self.do_pop()
mask = self.do_pop()
self.push(Image.composite(image1, image2, mask))
def do_merge(self):
"""usage: merge <string:mode> <image:pic1> [<image:pic2> [<image:pic3> [<image:pic4>]]]
        Merge top-of-stack images in a way described by the mode.
"""
mode = self.do_pop()
bandlist = []
for band in mode:
bandlist.append(self.do_pop())
self.push(Image.merge(mode, bandlist))
# Image class methods
def do_convert(self):
"""usage: convert <string:mode> <image:pic1>
Convert the top image to the given mode.
"""
mode = self.do_pop()
image = self.do_pop()
self.push(image.convert(mode))
def do_copy(self):
"""usage: copy <image:pic1>
Make and push a true copy of the top image.
"""
        self.do_dup()
def do_crop(self):
"""usage: crop <int:left> <int:upper> <int:right> <int:lower> <image:pic1>
Crop and push a rectangular region from the current image.
"""
left = int(self.do_pop())
upper = int(self.do_pop())
right = int(self.do_pop())
lower = int(self.do_pop())
image = self.do_pop()
self.push(image.crop((left, upper, right, lower)))
def do_draft(self):
"""usage: draft <string:mode> <int:xsize> <int:ysize>
Configure the loader for a given mode and size.
"""
mode = self.do_pop()
xsize = int(self.do_pop())
ysize = int(self.do_pop())
self.push(self.draft(mode, (xsize, ysize)))
def do_filter(self):
"""usage: filter <string:filtername> <image:pic1>
Process the top image with the given filter.
"""
from PIL import ImageFilter
filter = eval("ImageFilter." + self.do_pop().upper())
image = self.do_pop()
self.push(image.filter(filter))
def do_getbbox(self):
"""usage: getbbox
Push left, upper, right, and lower pixel coordinates of the top image.
"""
bounding_box = self.do_pop().getbbox()
self.push(bounding_box[3])
self.push(bounding_box[2])
self.push(bounding_box[1])
self.push(bounding_box[0])
def do_getextrema(self):
"""usage: extrema
Push minimum and maximum pixel values of the top image.
"""
        extrema = self.do_pop().getextrema()
self.push(extrema[1])
self.push(extrema[0])
def do_offset(self):
"""usage: offset <int:xoffset> <int:yoffset> <image:pic1>
Offset the pixels in the top image.
"""
xoff = int(self.do_pop())
yoff = int(self.do_pop())
image = self.do_pop()
self.push(image.offset(xoff, yoff))
def do_paste(self):
"""usage: paste <image:figure> <int:xoffset> <int:yoffset> <image:ground>
Paste figure image into ground with upper left at given offsets.
"""
figure = self.do_pop()
xoff = int(self.do_pop())
yoff = int(self.do_pop())
ground = self.do_pop()
if figure.mode == "RGBA":
ground.paste(figure, (xoff, yoff), figure)
else:
ground.paste(figure, (xoff, yoff))
self.push(ground)
def do_resize(self):
"""usage: resize <int:xsize> <int:ysize> <image:pic1>
Resize the top image.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
image = self.do_pop()
self.push(image.resize((xsize, ysize)))
def do_rotate(self):
"""usage: rotate <int:angle> <image:pic1>
Rotate image through a given angle
"""
angle = int(self.do_pop())
image = self.do_pop()
self.push(image.rotate(angle))
def do_save(self):
"""usage: save <string:filename> <image:pic1>
Save image with default options.
"""
filename = self.do_pop()
image = self.do_pop()
image.save(filename)
def do_save2(self):
"""usage: save2 <string:filename> <string:options> <image:pic1>
Save image with specified options.
"""
filename = self.do_pop()
options = self.do_pop()
image = self.do_pop()
image.save(filename, None, options)
def do_show(self):
"""usage: show <image:pic1>
Display and pop the top image.
"""
self.do_pop().show()
def do_thumbnail(self):
"""usage: thumbnail <int:xsize> <int:ysize> <image:pic1>
Modify the top image in the stack to contain a thumbnail of itself.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
self.top().thumbnail((xsize, ysize))
def do_transpose(self):
"""usage: transpose <string:operator> <image:pic1>
Transpose the top image.
"""
transpose = self.do_pop().upper()
image = self.do_pop()
self.push(image.transpose(transpose))
# Image attributes
def do_format(self):
"""usage: format <image:pic1>
Push the format of the top image onto the stack.
"""
self.push(self.do_pop().format)
def do_mode(self):
"""usage: mode <image:pic1>
Push the mode of the top image onto the stack.
"""
self.push(self.do_pop().mode)
def do_size(self):
"""usage: size <image:pic1>
Push the image size on the stack as (y, x).
"""
size = self.do_pop().size
self.push(size[0])
self.push(size[1])
# ImageChops operations
def do_invert(self):
"""usage: invert <image:pic1>
Invert the top image.
"""
from PIL import ImageChops
self.push(ImageChops.invert(self.do_pop()))
def do_lighter(self):
"""usage: lighter <image:pic1> <image:pic2>
Pop the two top images, push an image of the lighter pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.lighter(image1, image2))
def do_darker(self):
"""usage: darker <image:pic1> <image:pic2>
Pop the two top images, push an image of the darker pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.darker(image1, image2))
def do_difference(self):
"""usage: difference <image:pic1> <image:pic2>
Pop the two top images, push the difference image
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.difference(image1, image2))
def do_multiply(self):
"""usage: multiply <image:pic1> <image:pic2>
Pop the two top images, push the multiplication image.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.multiply(image1, image2))
def do_screen(self):
"""usage: screen <image:pic1> <image:pic2>
Pop the two top images, superimpose their inverted versions.
"""
from PIL import ImageChops
image2 = self.do_pop()
image1 = self.do_pop()
self.push(ImageChops.screen(image1, image2))
def do_add(self):
"""usage: add <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled sum with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.add(image1, image2, scale, offset))
def do_subtract(self):
"""usage: subtract <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled difference with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.subtract(image1, image2, scale, offset))
# ImageEnhance classes
def do_color(self):
"""usage: color <image:pic1>
Enhance color in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Color(image)
self.push(enhancer.enhance(factor))
def do_contrast(self):
"""usage: contrast <image:pic1>
Enhance contrast in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Contrast(image)
self.push(enhancer.enhance(factor))
def do_brightness(self):
"""usage: brightness <image:pic1>
Enhance brightness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Brightness(image)
self.push(enhancer.enhance(factor))
def do_sharpness(self):
"""usage: sharpness <image:pic1>
Enhance sharpness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Sharpness(image)
self.push(enhancer.enhance(factor))
# The interpreter loop
def execute(self, list):
"Interpret a list of PILDriver commands."
list.reverse()
while len(list) > 0:
self.push(list[0])
list = list[1:]
if self.verbose:
print("Stack: " + repr(self.stack))
top = self.top()
if not isinstance(top, str):
continue
funcname = "do_" + top
if not hasattr(self, funcname):
continue
else:
self.do_pop()
func = getattr(self, funcname)
func()
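    # Example (hypothetical session): PILDriver().execute(
    #     ["show", "crop", "0", "0", "200", "300", "open", "test.png"])
    # pushes tokens right-to-left, so "open" runs first, then "crop", then
    # "show", matching the Polish-notation examples in the module docstring.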
if __name__ == '__main__':
import sys
try:
import readline
except ImportError:
pass # not available on all platforms
# If we see command-line arguments, interpret them as a stack state
# and execute. Otherwise go interactive.
driver = PILDriver()
if len(sys.argv[1:]) > 0:
driver.execute(sys.argv[1:])
else:
print("PILDriver says hello.")
while True:
try:
if sys.version_info[0] >= 3:
line = input('pildriver> ')
else:
line = raw_input('pildriver> ')
except EOFError:
print("\nPILDriver says goodbye.")
break
driver.execute(line.split())
print(driver.stack)
# The following sets edit modes for GNU EMACS
# Local Variables:
# mode:python
# End:
| 29.521739 | 95 | 0.593136 |
79433c2c64a9ab2c02fd981a102c7df6825fc446 | 8,931 | py | Python | docparser/objdetmetrics_lib/BoundingBox.py | DS3Lab/DocParser | cbff40804dc0b1fd676d5776341a7b0ab20326a8 | [
"MIT"
] | 45 | 2020-05-22T03:44:38.000Z | 2022-03-10T12:43:55.000Z | docparser/objdetmetrics_lib/BoundingBox.py | DS3Lab/DocParser | cbff40804dc0b1fd676d5776341a7b0ab20326a8 | [
"MIT"
] | 24 | 2020-05-26T09:38:31.000Z | 2022-03-12T00:37:55.000Z | docparser/objdetmetrics_lib/BoundingBox.py | DS3Lab/DocParser | cbff40804dc0b1fd676d5776341a7b0ab20326a8 | [
"MIT"
] | 10 | 2020-07-22T16:34:50.000Z | 2022-03-29T11:25:49.000Z | from docparser.objdetmetrics_lib.utils import *
class BoundingBox:
def __init__(self,
imageName,
classId,
x,
y,
w,
h,
typeCoordinates=CoordinatesType.Absolute,
imgSize=None,
bbType=BBType.GroundTruth,
classConfidence=None,
format=BBFormat.XYWH,
bbox_id=None,
column=None):
"""Constructor.
Args:
imageName: String representing the image name.
classId: String value representing class id.
x: Float value representing the X upper-left coordinate of the bounding box.
y: Float value representing the Y upper-left coordinate of the bounding box.
            w: Float value representing the width of the bounding box.
            h: Float value representing the height of the bounding box.
typeCoordinates: (optional) Enum (Relative or Absolute) represents if the bounding box
coordinates (x,y,w,h) are absolute or relative to size of the image. Default:'Absolute'.
imgSize: (optional) 2D vector (width, height)=>(int, int) represents the size of the
image of the bounding box. If typeCoordinates is 'Relative', imgSize is required.
bbType: (optional) Enum (Groundtruth or Detection) identifies if the bounding box
represents a ground truth or a detection. If it is a detection, the classConfidence has
to be informed.
classConfidence: (optional) Float value representing the confidence of the detected
class. If detectionType is Detection, classConfidence needs to be informed.
format: (optional) Enum (BBFormat.XYWH or BBFormat.XYX2Y2) indicating the format of the
coordinates of the bounding boxes. BBFormat.XYWH: <left> <top> <width> <height>
BBFormat.XYX2Y2: <left> <top> <right> <bottom>.
bbox_id: (optional) A unique ID (per image) to show which ground truth bbox a detection
was matched with
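            Example (hypothetical values): with the default format=BBFormat.XYWH,
            BoundingBox('img', 'car', 10, 20, 30, 40) spans x in [10, 40] and
            y in [20, 60]; the same box in corner form is
            BoundingBox('img', 'car', 10, 20, 40, 60, format=BBFormat.XYX2Y2).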
"""
self._imageName = imageName
self._typeCoordinates = typeCoordinates
if typeCoordinates == CoordinatesType.Relative and imgSize is None:
raise IOError(
'Parameter \'imgSize\' is required. It is necessary to inform the image size.')
if bbType == BBType.Detected and classConfidence is None:
raise IOError(
'For bbType=\'Detection\', it is necessary to inform the classConfidence value.')
self._classConfidence = classConfidence
self._bbType = bbType
self._classId = classId
self._format = format
self._column = column
# If relative coordinates, convert to absolute values
# For relative coords: (x,y,w,h)=(X_center/img_width , Y_center/img_height)
if (typeCoordinates == CoordinatesType.Relative):
(self._x, self._y, self._w, self._h) = convertToAbsoluteValues(imgSize, (x, y, w, h))
self._width_img = imgSize[0]
self._height_img = imgSize[1]
if format == BBFormat.XYWH:
self._x2 = self._w
self._y2 = self._h
self._w = self._x2 - self._x
self._h = self._y2 - self._y
else:
raise IOError(
'For relative coordinates, the format must be XYWH (x,y,width,height)')
# For absolute coords: (x,y,w,h)=real bb coords
else:
self._x = x
self._y = y
if format == BBFormat.XYWH:
self._w = w
self._h = h
self._x2 = self._x + self._w
self._y2 = self._y + self._h
else: # format == BBFormat.XYX2Y2: <left> <top> <right> <bottom>.
self._x2 = w
self._y2 = h
self._w = self._x2 - self._x
self._h = self._y2 - self._y
if imgSize is None:
self._width_img = None
self._height_img = None
else:
self._width_img = imgSize[0]
self._height_img = imgSize[1]
self._bbox_id = bbox_id
def setAbsoluteBoundingBox(self, x, y, w, h):
self._x = x
self._y = y
self._w = w
self._h = h
self._x2 = self._x + self._w
self._y2 = self._y + self._h
def getAbsoluteBoundingBox(self, format=BBFormat.XYWH):
if format == BBFormat.XYWH:
return (self._x, self._y, self._w, self._h)
elif format == BBFormat.XYX2Y2:
return (self._x, self._y, self._x2, self._y2)
def getRelativeBoundingBox(self, imgSize=None):
if imgSize is None and self._width_img is None and self._height_img is None:
raise IOError(
'Parameter \'imgSize\' is required. It is necessary to inform the image size.')
        if imgSize is not None:
return convertToRelativeValues((imgSize[0], imgSize[1]),
(self._x, self._y, self._w, self._h))
else:
return convertToRelativeValues((self._width_img, self._height_img),
(self._x, self._y, self._w, self._h))
def getImageName(self):
return self._imageName
def getBboxID(self):
return self._bbox_id
def getColumn(self):
return self._column
def setColumn(self, column):
self._column = column
def setBboxID(self, bbox_id):
self._bbox_id = bbox_id
def getConfidence(self):
return self._classConfidence
def getFormat(self):
return self._format
def getClassId(self):
return self._classId
def setClassId(self, new_class_id):
self._classId = new_class_id
def getImageSize(self):
return (self._width_img, self._height_img)
def getCoordinatesType(self):
return self._typeCoordinates
def getBBType(self):
return self._bbType
@staticmethod
def compare(det1, det2):
det1BB = det1.getAbsoluteBoundingBox()
det1ImgSize = det1.getImageSize()
det2BB = det2.getAbsoluteBoundingBox()
det2ImgSize = det2.getImageSize()
if det1.getClassId() == det2.getClassId() and \
           det1.getConfidence() == det2.getConfidence() and \
det1BB[0] == det2BB[0] and \
det1BB[1] == det2BB[1] and \
det1BB[2] == det2BB[2] and \
det1BB[3] == det2BB[3] and \
           det1ImgSize[0] == det2ImgSize[0] and \
           det1ImgSize[1] == det2ImgSize[1]:
return True
return False
@staticmethod
def clone(boundingBox):
absBB = boundingBox.getAbsoluteBoundingBox(format=BBFormat.XYWH)
newBoundingBox = BoundingBox(
boundingBox.getImageName(),
boundingBox.getClassId(),
absBB[0],
absBB[1],
absBB[2],
absBB[3],
typeCoordinates=boundingBox.getCoordinatesType(),
imgSize=boundingBox.getImageSize(),
bbType=boundingBox.getBBType(),
classConfidence=boundingBox.getConfidence(),
format=BBFormat.XYWH)
return newBoundingBox
def get_union_bbox_xywh(self, other_bbox):
[x0, y0, x1, y1] = self.getAbsoluteBoundingBox(format=BBFormat.XYX2Y2)
[other_x0, other_y0, other_x1, other_y1] = other_bbox.getAbsoluteBoundingBox(format=BBFormat.XYX2Y2)
union_x0 = min(x0, other_x0)
union_y0 = min(y0, other_y0)
union_x1 = max(x1, other_x1)
union_y1 = max(y1, other_y1)
union_w = union_x1 - union_x0
union_h = union_y1 - union_y0
return [union_x0, union_y0, union_w, union_h]
def intersects(self, other_bbox):
boxA = self.getAbsoluteBoundingBox(format=BBFormat.XYX2Y2)
boxB = other_bbox.getAbsoluteBoundingBox(format=BBFormat.XYX2Y2)
if boxA[0] > boxB[2]:
return False # boxA is right of boxB
if boxB[0] > boxA[2]:
return False # boxA is left of boxB
if boxA[3] < boxB[1]:
return False # boxA is above boxB
if boxA[1] > boxB[3]:
return False # boxA is below boxB
return True
def intersectionArea(self, other_bbox):
boxA = self.getAbsoluteBoundingBox(format=BBFormat.XYX2Y2)
boxB = other_bbox.getAbsoluteBoundingBox(format=BBFormat.XYX2Y2)
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
# intersection area
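        # Note: only meaningful when the boxes actually overlap (see
        # intersects()); for disjoint boxes the product below can still be
        # positive even though the true intersection is empty.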
return (xB - xA) * (yB - yA)
def getArea(self):
        _, _, w, h = self.getAbsoluteBoundingBox(format=BBFormat.XYWH)
return w * h
| 39 | 108 | 0.582578 |
79433dc9c760cff3263f0c9c3b298c457cfea208 | 8,579 | py | Python | main.py | WilloIzCitron/Lilliebot-Beta | 4b5fda134b721fefeb6cc4d362a2e391f7bec606 | [
"MIT"
] | null | null | null | main.py | WilloIzCitron/Lilliebot-Beta | 4b5fda134b721fefeb6cc4d362a2e391f7bec606 | [
"MIT"
] | null | null | null | main.py | WilloIzCitron/Lilliebot-Beta | 4b5fda134b721fefeb6cc4d362a2e391f7bec606 | [
"MIT"
] | null | null | null | #dont steal my code
import asyncio
import ast
import inspect
import random
import discord
import webserver
from discord.ext.commands import bot
from webserver import keep_alive
import json
import os
from discord.ext import commands
client = discord.Client()
bot = commands.Bot(command_prefix='l$')
print('Please wait ok')
print(os.getenv("DB_USERNAME"))
secret_token = os.getenv("TOKEN")
@client.event
async def on_ready():
myAct = discord.Activity(name=str(len(client.users))+ ' People in '+str(len(client.guilds))+' Cities '' | l$ ', type=discord.ActivityType.listening)
await client.change_presence(status=discord.Status.idle, activity=myAct,)
print('Bot is activated to '+str(len(client.guilds))+ ' servers')
print('Console has Launched')
print('Lilliebot Console By WilloIzCitron')
@client.event
async def on_message(message):
msg = message.content.lower()
split = message.content.split()
author = message.author.id
if message.author.bot == True:
return
if message.author == client.user:
return
if '<@!703427882726457403>' in message.content or '<@703427882726457403>' in message.content:
embed = discord.Embed(
title="",
description=
"hello {0.author.mention} My Prefix is `l$` | use `l$help`".format(
message),
color=(random.choice(
[0x00ff00, 0x0ff0ff, 0xff01ff, 0xfd300f, 0x000000])))
embed.set_footer(text='dont ping me again :v')
if message.content.startswith('l$hello'):
await message.channel.send('Hello {0.author.mention}'.format(message))
if msg.startswith('l$avatar'):
try:
if len(message.mentions) == 0: target = message.author
else: target = message.mentions[0]
embed = discord.Embed(title=target.name)
embed.set_image(
url=str(target.avatar_url).replace('.webp', '.png'))
except:
await message.channel.send('This member not exist')
if message.content.startswith('l$whoareyou'):
await message.channel.send('im liliebot i can help you')
if message.content.startswith('l$lol'):
await message.channel.send('hahaha its funny!')
if message.content.startswith('l$dance'):
await message.channel.send(
random.choice([
'lol i cant dance im bot',
'try this a song https://www.youtube.com/watch?v=A67ZkAd1wmI'
]))
if message.content.startswith('l$about'):
embed = discord.Embed(
title="Lilliebot Biodata",
description=(random.choice([
'this is fun fact?', 'also try username601', 'what is this?',
'also try Nezumi Yui', 'you know? who is Vladimir Putin',
'press Alt+F4', 'you know? who is Ash Kentchum',
'You eat Nugget everyday?'
])),
colour=0xFBFB9B)
embed.add_field(
name='Bot Biodata',
value=
'Programing code:python(py)\nBot Created:April 25 2020\nCreated by: ||<@479642404216111124> or someball45#2588||\nDefault Prefix: l$'
)
embed.add_field(
name='Programer biodata',
value=
'Favorite game=Terraria,Minecraft,From The Depths, Pc Buidling Simulator\nFavorite Language:Python,HTML,Javascript\nName:Willoizcitron\nSocial Media:\n[Github](https://github.com/WilloIzCitron)\n[Repl.It](https://repl.it/@SomeBall45)'
)
embed.add_field(
name='Versions',
value='Discord.py = 1.3.3\nBot Version = Pre Release 0.12\n')
embed.add_field(
name='Links',
value=(
'[Donate A Hacker Plan](https://repl.it/upgrade/SomeBall45)'))
embed.set_thumbnail(
url=
'https://cdn.discordapp.com/avatars/703427882726457403/89b43921fbcd58a3ff05b0bc9f7a7826.png?size=2048'
)
embed.set_footer(text='Copyright (c) 2020 WilloIzCitron')
if message.content.startswith('l$nuke'):
embed = discord.Embed(
title="Nuke Complete",
description="You completly nuke this channel",
colour=0x0ff00)
embed.set_image(
url='https://i.makeagif.com/media/12-22-2015/_1JY9N.gif')
if message.content.startswith('l$covid'):
await message.channel.send(
            'Dont touch me pls, {0.author.mention} is positive coronavirus!'.
format(message))
if message.content.startswith('l$shoot'):
if len(split) < 2:
await message.channel.send('mention needed')
else:
armies = message.mentions[0]
shooter = message.guild.get_member(int(author))
embed = discord.Embed(
description=str(armies) + ' is killed by ' + str(shooter))
embed.set_image(
url=random.choice([
'https://i.gifer.com/7a4J.gif',
'https://i.gifer.com/7iW1.gif',
'https://gifimage.net/wp-content/uploads/2018/05/shooting-gun-gif-12.gif'
]))
if message.content.startswith('l$connections'):
if int(author) == 479642404216111124:
embed = discord.Embed(
title='Lilliebot Connections', colour=discord.Colour.magenta())
embed.add_field(
name='Connections',
value='Members = ' + str(len(client.users)) + ' Members '
'\n Servers = ' + str(len(client.guilds)) + ' Servers ')
else:
await message.channel.send(
'This command is `OWNER ONLY` you cant access it')
if split[0] == 'l$calc' or split[0] == 'l$eval':
if int(author) == 479642404216111124:
cmd = message.content[int(len(split[0]) + 1):]
try:
thing = eval(cmd)
                if inspect.isawaitable(thing):
                    await message.channel.send('```py\n' + str(await thing) + '```')
                else:
                    await message.channel.send('```py\n' + str(thing) + '```')
except Exception as er:
await message.channel.send(f'ERROR you wrong: `{er}`')
else:
await message.channel.send(
'This command is `OWNER ONLY` you cant access it')
if message.content.startswith('l$god'):
await message.channel.send(
random.choice([
'i dont know about that the Lillie is G O D',
'by the way the laptop had G O D specs',
'we need a G O D Terraria', 'is here a G O D Mario',
'i catch a G O D Pokemon'
]))
if msg.startswith('l$ping'):
embed = discord.Embed(
title=random.choice(['Png', 'Pong', 'Poong', 'Pooong']),
description="Bot latency is " + str(
round(client.latency * 1000)) + "ms".format(message),
color=0x00ff00)
embed.set_thumbnail(
url=
'https://cdn.dribbble.com/users/729829/screenshots/4272534/galshir-pingpong-slow-motion.gif'
)
embed.add_field(
name='you dont know latency?',
value=
'[Click here](https://en.wikipedia.org/wiki/Latency_(engineering))')
if message.content.startswith('l$pokemon'):
await message.channel.send(
random.choice(
['Pikachu', 'Eevee', 'Charmander', 'Bulbasaur', 'Squirtle']))
if msg.startswith(f'l$say'):
await message.delete()
await message.channel.send(message.content[int(len(split[0]) + 1):])
if message.content.startswith('l$invite'):
embed = discord.Embed(
title="Invite Links",
description="Please invite me to your server 😊",
color=(random.choice(
[0x00ff00, 0x0ff0ff, 0xff01ff, 0xfd300f, 0x000000])))
embed.add_field(
name='Links',
value=
'[Bot Invite](https://discordapp.com/api/oauth2/authorize?client_id=703427882726457403&permissions=8&scope=bot)\n[Support Server](https://discord.gg/6AkeDD9)'
)
embed.set_footer(text='Please Support him')
if message.content.startswith('l$randomnpc'):
await message.channel.send(
random.choice([
'https://www.pngitem.com/pimgs/m/446-4468761_terraria-guide-npc-hd-png-download.png You Got Guide',
'https://66.media.tumblr.com/247cfd904f5fb23a6de54d3cb8a1b9b6/tumblr_phngb6yM2G1vhhmun_540.jpg You Got Dryad',
'https://vignette.wikia.nocookie.net/terraria/images/4/4d/NPC_19.png/revision/latest?cb=20200425230158 You Got Arms Dealer',
]))
if msg.startswith('someball'):
await message.channel.send('somegirl')
if msg.startswith(f'l$help'):
embed = discord.Embed(
title="Lilliebot Help",
description="Is here a Help Pages,{0.author.mention}".format(
message),
color=(random.choice(
[0x00ff00, 0x0ff0ff, 0xff01ff, 0xfd300f, 0x000000])))
embed.add_field(
name='Roleplay',
value='`hello, lol, covid, shoot `',
inline='False')
embed.add_field(
name='Games Themed Commands',
value='`randomnpc, pokemon `',
inline='False')
embed.add_field(
name='Miscellaneous',
value='`ping, invite, god, about, dance, whoareyou `',
inline='False')
embed.set_footer(text='the prefix is `l$`|Made By someball45#2588')
await message.channel.send(embed=embed)
keep_alive()
TOKEN = os.environ.get("DISCORD_BOT_SECRET")
client.run(TOKEN)
| 34.453815 | 240 | 0.667094 |
7943404022d522c613563998b5c434e96bbd7de5 | 2,127 | py | Python | database/message_id.py | Adwaith-Rajesh/Task-Me-Up | ece9033c58e6e5dbd7dc641d872e8d71b1972a17 | [
"MIT"
] | 5 | 2021-03-25T13:50:52.000Z | 2021-07-10T00:57:12.000Z | database/message_id.py | Adwaith-Rajesh/Task-Me-Up | ece9033c58e6e5dbd7dc641d872e8d71b1972a17 | [
"MIT"
] | null | null | null | database/message_id.py | Adwaith-Rajesh/Task-Me-Up | ece9033c58e6e5dbd7dc641d872e8d71b1972a17 | [
"MIT"
] | null | null | null | import os
from typing import List
from pathlib import Path
from meta.logger import Logger, logging
from pysondb import db
from rich.logging import RichHandler
# stop imported loggers
logging.getLogger("pysondb").setLevel(logging.WARNING)
logging.getLogger("filelock").setLevel(logging.WARNING)
logging.getLogger("filelock").addHandler(RichHandler())
# db loggers
ms_log = logging.getLogger("msg_id")
ms_logger = Logger(logger=ms_log, base_level=logging.DEBUG, filename="message_id.log")
DB_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "msg_id.json")
if not Path(DB_PATH).is_file():
with open(DB_PATH, "w") as f:
f.write('{"data":[]}')
class MessageID:
def __init__(self) -> None:
self._db = db.getDb(filename=DB_PATH)
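    # Each record in the pysondb file has the shape
    #   {"user_id": <int>, "msg_ids": [<int>, ...]}
    # and every helper below looks rows up by "user_id".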
def add_msg_id(self, user_id: int, msg_id: int) -> None:
if not self.check_user_id_exists(user_id):
self._db.add({"user_id": user_id, "msg_ids": [msg_id]})
else:
data = self._db.getBy({"user_id": user_id})[0]
data["msg_ids"].append(msg_id)
self._db.update({"user_id": user_id}, data)
ms_logger.log(
logging.INFO,
message=f"add_msg_id added user {user_id=} message_id {msg_id=}",
)
def get_msg_id(self, user_id: int) -> List[int]:
ms_logger.log(logging.INFO, message=f"get_msg_id get {user_id=}")
if self.check_user_id_exists(user_id):
data = self._db.getBy({"user_id": user_id})[0]
return data["msg_ids"]
else:
return []
def remove_msg_id(self, user_id: int) -> None:
if self.check_user_id_exists(user_id):
data = self._db.getBy({"user_id": user_id})[0]
data["msg_ids"].clear()
self._db.update({"user_id": user_id}, data)
ms_logger.log(
logging.INFO,
message=f"remove_msg_id message ids remove successfully {user_id=}",
)
def check_user_id_exists(self, user_id: int) -> bool:
if self._db.getBy({"user_id": user_id}):
return True
return False | 31.279412 | 86 | 0.627645 |
7943411fef8022294a57a718f6082ab43205714c | 757 | py | Python | generated-libraries/python/netapp/lock/break_error.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | 2 | 2017-03-28T15:31:26.000Z | 2018-08-16T22:15:18.000Z | generated-libraries/python/netapp/lock/break_error.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | generated-libraries/python/netapp/lock/break_error.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | from netapp.netapp_object import NetAppObject
class BreakError(NetAppObject):
"""
Information about a single error encountered by specific
protocol(s).
"""
_err = None
@property
def err(self):
"""
The error string.
"""
return self._err
@err.setter
def err(self, val):
if val != None:
self.validate('err', val)
self._err = val
@staticmethod
def get_api_name():
return "break-error"
@staticmethod
def get_desired_attrs():
return [
'err',
]
def describe_properties(self):
return {
'err': { 'class': basestring, 'is_list': False, 'required': 'required' },
}
| 21.027778 | 85 | 0.531044 |
794341bfec077e86c0d37d47add5847c029535b4 | 7,943 | py | Python | sdk/python/pulumi_azure_nextgen/policyinsights/v20190701/get_remediation_at_resource.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/policyinsights/v20190701/get_remediation_at_resource.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/policyinsights/v20190701/get_remediation_at_resource.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetRemediationAtResourceResult',
'AwaitableGetRemediationAtResourceResult',
'get_remediation_at_resource',
]
@pulumi.output_type
class GetRemediationAtResourceResult:
"""
The remediation definition.
"""
def __init__(__self__, created_on=None, deployment_status=None, filters=None, id=None, last_updated_on=None, name=None, policy_assignment_id=None, policy_definition_reference_id=None, provisioning_state=None, resource_discovery_mode=None, type=None):
if created_on and not isinstance(created_on, str):
raise TypeError("Expected argument 'created_on' to be a str")
pulumi.set(__self__, "created_on", created_on)
if deployment_status and not isinstance(deployment_status, dict):
raise TypeError("Expected argument 'deployment_status' to be a dict")
pulumi.set(__self__, "deployment_status", deployment_status)
if filters and not isinstance(filters, dict):
raise TypeError("Expected argument 'filters' to be a dict")
pulumi.set(__self__, "filters", filters)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if last_updated_on and not isinstance(last_updated_on, str):
raise TypeError("Expected argument 'last_updated_on' to be a str")
pulumi.set(__self__, "last_updated_on", last_updated_on)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if policy_assignment_id and not isinstance(policy_assignment_id, str):
raise TypeError("Expected argument 'policy_assignment_id' to be a str")
pulumi.set(__self__, "policy_assignment_id", policy_assignment_id)
if policy_definition_reference_id and not isinstance(policy_definition_reference_id, str):
raise TypeError("Expected argument 'policy_definition_reference_id' to be a str")
pulumi.set(__self__, "policy_definition_reference_id", policy_definition_reference_id)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_discovery_mode and not isinstance(resource_discovery_mode, str):
raise TypeError("Expected argument 'resource_discovery_mode' to be a str")
pulumi.set(__self__, "resource_discovery_mode", resource_discovery_mode)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="createdOn")
def created_on(self) -> str:
"""
The time at which the remediation was created.
"""
return pulumi.get(self, "created_on")
@property
@pulumi.getter(name="deploymentStatus")
def deployment_status(self) -> 'outputs.RemediationDeploymentSummaryResponse':
"""
The deployment status summary for all deployments created by the remediation.
"""
return pulumi.get(self, "deployment_status")
@property
@pulumi.getter
def filters(self) -> Optional['outputs.RemediationFiltersResponse']:
"""
The filters that will be applied to determine which resources to remediate.
"""
return pulumi.get(self, "filters")
@property
@pulumi.getter
def id(self) -> str:
"""
The ID of the remediation.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastUpdatedOn")
def last_updated_on(self) -> str:
"""
The time at which the remediation was last updated.
"""
return pulumi.get(self, "last_updated_on")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the remediation.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="policyAssignmentId")
def policy_assignment_id(self) -> Optional[str]:
"""
The resource ID of the policy assignment that should be remediated.
"""
return pulumi.get(self, "policy_assignment_id")
@property
@pulumi.getter(name="policyDefinitionReferenceId")
def policy_definition_reference_id(self) -> Optional[str]:
"""
The policy definition reference ID of the individual definition that should be remediated. Required when the policy assignment being remediated assigns a policy set definition.
"""
return pulumi.get(self, "policy_definition_reference_id")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The status of the remediation.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceDiscoveryMode")
def resource_discovery_mode(self) -> Optional[str]:
"""
The way resources to remediate are discovered. Defaults to ExistingNonCompliant if not specified.
"""
return pulumi.get(self, "resource_discovery_mode")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the remediation.
"""
return pulumi.get(self, "type")
class AwaitableGetRemediationAtResourceResult(GetRemediationAtResourceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRemediationAtResourceResult(
created_on=self.created_on,
deployment_status=self.deployment_status,
filters=self.filters,
id=self.id,
last_updated_on=self.last_updated_on,
name=self.name,
policy_assignment_id=self.policy_assignment_id,
policy_definition_reference_id=self.policy_definition_reference_id,
provisioning_state=self.provisioning_state,
resource_discovery_mode=self.resource_discovery_mode,
type=self.type)
def get_remediation_at_resource(remediation_name: Optional[str] = None,
resource_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRemediationAtResourceResult:
"""
The remediation definition.
:param str remediation_name: The name of the remediation.
:param str resource_id: Resource ID.
"""
__args__ = dict()
__args__['remediationName'] = remediation_name
__args__['resourceId'] = resource_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:policyinsights/v20190701:getRemediationAtResource', __args__, opts=opts, typ=GetRemediationAtResourceResult).value
return AwaitableGetRemediationAtResourceResult(
created_on=__ret__.created_on,
deployment_status=__ret__.deployment_status,
filters=__ret__.filters,
id=__ret__.id,
last_updated_on=__ret__.last_updated_on,
name=__ret__.name,
policy_assignment_id=__ret__.policy_assignment_id,
policy_definition_reference_id=__ret__.policy_definition_reference_id,
provisioning_state=__ret__.provisioning_state,
resource_discovery_mode=__ret__.resource_discovery_mode,
type=__ret__.type)
| 40.319797 | 254 | 0.681984 |
794341c249e91895d9c1beb31685e52b90ec5cd5 | 236 | py | Python | erpnextturkish/erpnext_turkish/doctype/efatura_ayarlar/test_efatura_ayarlar.py | logedosoft/erpnextturkish | b9e765113c3017119a75aea91a6d6627f9aa1c47 | [
"MIT"
] | 5 | 2020-05-30T15:52:57.000Z | 2021-12-05T11:34:30.000Z | erpnextturkish/erpnext_turkish/doctype/efatura_ayarlar/test_efatura_ayarlar.py | logedosoft/erpnextturkish | b9e765113c3017119a75aea91a6d6627f9aa1c47 | [
"MIT"
] | null | null | null | erpnextturkish/erpnext_turkish/doctype/efatura_ayarlar/test_efatura_ayarlar.py | logedosoft/erpnextturkish | b9e765113c3017119a75aea91a6d6627f9aa1c47 | [
"MIT"
] | 9 | 2020-11-06T12:04:30.000Z | 2022-03-16T05:51:39.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Logedosoft Business Solutions and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestEFaturaAyarlar(unittest.TestCase):
pass
| 21.454545 | 68 | 0.779661 |
794341f8ec561dfbb5336a908759ffdf301387b8 | 2,795 | py | Python | doc/d_sensitivity.py | Assimila/sense | 62dc29a619b7e110f42fe599c20dc1070b812250 | [
"Apache-2.0"
] | 3 | 2018-10-08T13:40:52.000Z | 2021-03-07T07:59:40.000Z | doc/d_sensitivity.py | Assimila/sense | 62dc29a619b7e110f42fe599c20dc1070b812250 | [
"Apache-2.0"
] | 2 | 2017-07-31T12:51:02.000Z | 2017-08-10T22:09:56.000Z | doc/d_sensitivity.py | PMarzahn/sense | 332852bf781620a5cc714efb2d86ffaff5275955 | [
"Apache-2.0"
] | 6 | 2018-06-29T10:10:36.000Z | 2022-03-06T20:24:54.000Z | """
compare results of implemented models
against Fig 11.07 from Ulaby (2014) and
compare own results against references
from the Ulaby example codes provided
http://mrs.eecs.umich.edu/codes/Module11_1/Module11_1.html
Ulaby uses, for both the online code and the book figure, the reflectivity without the roughness correction factor. His emailed explanation was that he did not know why; he actually would use the reflectivity with the correction factor. This code uses the reflectivity with the roughness correction factor, which gives slightly different results. The Matlab/graphical interface and the Python code produce the same results if the same reflectivity term is used.
The difference between the Matlab code and the graphical interface is the curve of the ground contribution at low incidence angles; the reason is unknown.
The implementation of the SSRT model should be fine.
"""
import sys
import os
sys.path.append(os.path.abspath(os.path.dirname(__file__)) + os.sep + '..')
import numpy as np
# from sense.surface import Dubois95, Oh92
from sense.util import f2lam
from sense.model import RTModel
from sense.soil import Soil
from sense.canopy import OneLayer
import matplotlib.pyplot as plt
import pdb
plt.close('all')
theta_deg = np.arange(0.,71.)
theta_deg = 35
theta = np.deg2rad(theta_deg)
f = 3. # GHz
lam = f2lam(f) # m
s = 0.01 # m
l = 0.1 # m
omega = 0.1
s=0.02
# canopy
ke=1.
ks=omega*ke
# ks=0.63
# results strongly depend on the surface scattering model chosen!
# Oh92 gives a much smoother response at low incidence angles compared
# to the Dubois95 model
# shape of ACL would certainly also play an important role!
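# For comparison (hypothetical alternative, assuming this version of sense
# ships the Dubois95 surface model), the surface model could be swapped via:
#   models = {'surface': 'Dubois95', 'canopy': 'turbid_isotropic'}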
models = {'surface' : 'Oh92', 'canopy' : 'turbid_isotropic'}
S = Soil(f=f, s=s, mv=0.2, sand=0.4, clay=0.3, bulk = 1.65)
pol='vv'
d = np.linspace(0.1,1)
C = OneLayer(ke_h=ke, ke_v=ke, d=d, ks_v=ks, ks_h=ks, canopy = models['canopy'])
RT = RTModel(theta=theta, models=models, surface=S, canopy=C, freq=f)
RT.sigma0()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(d, 10.*np.log10(RT.stot[pol]), label='STOT', color='k')
ax.plot(d, 10.*np.log10(RT.s0g[pol]), label='SIGGROUND', color='r')
ax.plot(d, 10.*np.log10(RT.s0c[pol]), label='SIG can', color='b')
# ax.plot(d, 10.*np.log10(RT.s0cgt[pol]), label='SIG can ground', color='g')
# ax.plot(d, 10.*np.log10(RT.s0gcg[pol]), label='SIG ground can ground', color='k', linestyle='--')
#ax.plot(theta_deg, 10.*np.log10(RT.G.rt_s.vv), label='surface', linestyle='-.')
ax.legend()
ax.set_title('s='+str(s)+' f='+str(f)+' sand='+str(0.4)+' clay='+str(0.3)+' bulk='+str(1.65)+' theta='+str(theta_deg)+' mv='+str(0.2)+' ke='+str(ke))
ax.grid()
ax.set_xlabel('height [m]')
ax.set_ylabel('sigma [dB]')
# ax.set_xlim(0.,70.)
# ax.set_ylim(-17.,-9.)
plt.savefig('/media/tweiss/Daten/plots/sensitivity/height')
plt.close()
plt.show()
| 31.404494 | 434 | 0.713059 |
794342fcd1415bee5e85f3fb18ca11f8debfdde3 | 1,199 | py | Python | vendor/packages/translate-toolkit/translate/storage/placeables/interfaces.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | [
"BSD-3-Clause"
] | 2 | 2019-08-19T17:08:47.000Z | 2019-10-05T11:37:02.000Z | vendor/packages/translate-toolkit/translate/storage/placeables/interfaces.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | [
"BSD-3-Clause"
] | null | null | null | vendor/packages/translate-toolkit/translate/storage/placeables/interfaces.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
This file contains abstract (semantic) interfaces for placeable implementations.
"""
from strelem import StringElem
class InvisiblePlaceable(StringElem):
parse = None
class MaskingPlaceable(StringElem):
parse = None
class ReplacementPlaceable(StringElem):
parse = None
class SubflowPlaceable(StringElem):
parse = None
class Delimiter(object):
pass
class PairedDelimiter(object):
pass
| 23.98 | 80 | 0.748123 |
7943437105b3081f8080a256a40e1d6fe8988c52 | 658 | py | Python | lintcode/04MathOrBitOperation/140FastPower.py | zhaoxinlu/leetcode-algorithms | f5e1c94c99628e7fb04ba158f686a55a8093e933 | [
"MIT"
] | null | null | null | lintcode/04MathOrBitOperation/140FastPower.py | zhaoxinlu/leetcode-algorithms | f5e1c94c99628e7fb04ba158f686a55a8093e933 | [
"MIT"
] | null | null | null | lintcode/04MathOrBitOperation/140FastPower.py | zhaoxinlu/leetcode-algorithms | f5e1c94c99628e7fb04ba158f686a55a8093e933 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Editor: Zhao Xinlu
School: BUPT
Date: 2018-03-07
Algorithm idea: fast power (exponentiation by squaring) via divide and conquer
"""
class Solution:
"""
@param a: A 32bit integer
@param b: A 32bit integer
@param n: A 32bit integer
@return: An integer
"""
def fastPower(self, a, b, n):
# write your code here
if n == 0:
return 1 % b
elif n == 1:
return a % b
elif n < 0:
return 0
tmp = self.fastPower(a, b, n/2)
if n % 2 == 1:
return (tmp * tmp * a) % b
else:
return (tmp * tmp) % b
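    # Worked example: fastPower(2, 3, 5) computes tmp = fastPower(2, 3, 2) = 1
    # (since 2^2 % 3 = 1); 5 is odd, so the result is (1 * 1 * 2) % 3 = 2,
    # which matches 2^5 % 3 = 32 % 3. Halving n gives O(log n) recursion depth.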
if __name__ == '__main__':
print Solution().fastPower(2, 3, 31) | 20.5625 | 40 | 0.480243 |
794343f56534997c000bad3f4c9972025f750d1c | 4,805 | py | Python | pypy/module/__pypy__/test/test_special.py | GabriellaUwa/pypy | 2ede3b557a25cb49db969e942ca5a7f8a9eae0d4 | [
"Apache-2.0",
"OpenSSL"
] | 1 | 2018-12-27T20:40:49.000Z | 2018-12-27T20:40:49.000Z | pypy/module/__pypy__/test/test_special.py | GabriellaUwa/pypy | 2ede3b557a25cb49db969e942ca5a7f8a9eae0d4 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | pypy/module/__pypy__/test/test_special.py | GabriellaUwa/pypy | 2ede3b557a25cb49db969e942ca5a7f8a9eae0d4 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | import py
class AppTest(object):
spaceconfig = {"objspace.usemodules.select": False}
def setup_class(cls):
if cls.runappdirect:
py.test.skip("does not make sense on pypy-c")
def test_cpumodel(self):
import __pypy__
assert hasattr(__pypy__, 'cpumodel')
def test_builtinify(self):
import __pypy__
class A(object):
a = lambda *args: args
b = __pypy__.builtinify(a)
my = A()
assert my.a() == (my,)
assert my.b() == ()
assert A.a(my) == (my,)
assert A.b(my) == (my,)
assert A.a.im_func(my) == (my,)
assert not hasattr(A.b, 'im_func')
assert A.a is not A.__dict__['a']
assert A.b is A.__dict__['b']
def test_hidden_applevel(self):
import __pypy__
import sys
@__pypy__.hidden_applevel
def sneak(): (lambda: 1/0)()
try:
sneak()
except ZeroDivisionError as e:
tb = sys.exc_info()[2]
assert tb.tb_frame == sys._getframe()
assert tb.tb_next.tb_frame.f_code.co_name == '<lambda>'
else:
assert False, 'Expected ZeroDivisionError'
def test_hidden_applevel_frames(self):
import __pypy__
import sys
@__pypy__.hidden_applevel
def test_hidden():
assert sys._getframe().f_code.co_name != 'test_hidden'
def e(): 1/0
try: e()
except ZeroDivisionError as e:
assert sys.exc_info() == (None, None, None)
else: assert False
return 2
assert test_hidden() == 2
def test_get_hidden_tb(self):
import __pypy__
import sys
result = [False]
@__pypy__.hidden_applevel
def test_hidden_with_tb():
def not_hidden(): 1/0
try: not_hidden()
except ZeroDivisionError as e:
assert sys.exc_info() == (None, None, None)
tb = __pypy__.get_hidden_tb()
assert tb.tb_frame.f_code.co_name == 'not_hidden'
result[0] = True
raise
else: return False
raises(ZeroDivisionError, test_hidden_with_tb)
assert result[0]
def test_lookup_special(self):
from __pypy__ import lookup_special
class X(object):
def foo(self): return 42
x = X()
x.foo = 23
x.bar = 80
assert lookup_special(x, "foo")() == 42
assert lookup_special(x, "bar") is None
class X:
pass
raises(TypeError, lookup_special, X(), "foo")
def test_do_what_I_mean(self):
from __pypy__ import do_what_I_mean
x = do_what_I_mean()
assert x == 42
def test_list_strategy(self):
from __pypy__ import strategy
l = [1, 2, 3]
assert strategy(l) == "IntegerListStrategy"
l = ["a", "b", "c"]
assert strategy(l) == "BytesListStrategy"
l = [u"a", u"b", u"c"]
assert strategy(l) == "UnicodeListStrategy"
l = [1.1, 2.2, 3.3]
assert strategy(l) == "FloatListStrategy"
l = range(3)
assert strategy(l) == "SimpleRangeListStrategy"
l = range(1, 2)
assert strategy(l) == "RangeListStrategy"
l = [1, "b", 3]
assert strategy(l) == "ObjectListStrategy"
l = []
assert strategy(l) == "EmptyListStrategy"
o = 5
raises(TypeError, strategy, 5)
def test_dict_strategy(self):
from __pypy__ import strategy
d = {}
assert strategy(d) == "EmptyDictStrategy"
d = {1: None, 5: None}
assert strategy(d) == "IntDictStrategy"
def test_set_strategy(self):
from __pypy__ import strategy
s = set()
assert strategy(s) == "EmptySetStrategy"
s = set([2, 3, 4])
assert strategy(s) == "IntegerSetStrategy"
class AppTestJitFeatures(object):
spaceconfig = {"translation.jit": True}
def setup_class(cls):
cls.w_runappdirect = cls.space.wrap(cls.runappdirect)
def test_jit_backend_features(self):
try:
from __pypy__ import jit_backend_features
except ImportError:
skip("compiled without jit")
supported_types = jit_backend_features
assert isinstance(supported_types, list)
for x in supported_types:
assert x in ['floats', 'singlefloats', 'longlong']
def test_do_what_I_mean_error(self):
if not self.runappdirect:
skip("we don't wrap a random exception inside SystemError "
"when untranslated, because it makes testing harder")
from __pypy__ import do_what_I_mean
raises(SystemError, do_what_I_mean, 1)
| 30.605096 | 71 | 0.565869 |
7943445f57d2b2868e920183144bfc81ebf1f9e6 | 5,000 | py | Python | tox_conda/plugin.py | drdavella/tox-conda | 8cb9d2f4fed1f7b3e851a2460bbd7756fad7d19c | [
"MIT"
] | 2 | 2018-12-05T18:37:46.000Z | 2018-12-29T02:41:23.000Z | tox_conda/plugin.py | drdavella/tox-conda | 8cb9d2f4fed1f7b3e851a2460bbd7756fad7d19c | [
"MIT"
] | 7 | 2018-11-03T14:55:23.000Z | 2019-03-27T20:26:07.000Z | tox_conda/plugin.py | drdavella/tox-conda | 8cb9d2f4fed1f7b3e851a2460bbd7756fad7d19c | [
"MIT"
] | 1 | 2018-12-28T16:00:19.000Z | 2018-12-28T16:00:19.000Z | import os
import re
import types
import subprocess as sp
import pluggy
import py.path
import tox.venv
from tox.config import DepConfig, DepOption
hookimpl = pluggy.HookimplMarker('tox')
class CondaDepOption(DepOption):
name = 'conda_deps'
help="each line specifies a conda dependency in pip/setuptools format"
def get_py_version(envconfig):
# Try to use basepython
match = re.match(r'python(\d)(?:\.(\d))?', envconfig.basepython)
if match:
groups = match.groups()
version = groups[0]
if groups[1]:
version += ".{}".format(groups[1])
# First fallback
elif envconfig.python_info.version_info:
version = '{}.{}'.format(*envconfig.python_info.version_info[:2])
# Second fallback
else:
code = 'import sys; print("{}.{}".format(*sys.version_info[:2]))'
args = [envconfig.basepython, '-c', code]
result = sp.check_output(args)
version = result.decode('utf-8').strip()
return 'python={}'.format(version)
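# For example, basepython = "python3.7" yields the conda spec "python=3.7";
# a bare interpreter path falls back to asking the interpreter for its version.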
@hookimpl
def tox_addoption(parser):
parser.add_testenv_attribute_obj(CondaDepOption())
parser.add_testenv_attribute(
name="conda_channels",
type="line-list",
help="each line specifies a conda channel"
)
@hookimpl
def tox_configure(config):
# This is a pretty cheesy workaround. It allows tox to consider changes to
# the conda dependencies when it decides whether an existing environment
# needs to be updated before being used
for _, envconfig in config.envconfigs.items():
conda_deps = [DepConfig(str(name)) for name in envconfig.conda_deps]
envconfig.deps.extend(conda_deps)
def find_conda():
# This should work if we're not already in an environment
conda_exe = os.environ.get('_CONDA_EXE')
if conda_exe:
return conda_exe
# This should work if we're in an active environment
conda_exe = os.environ.get('CONDA_EXE')
if conda_exe:
return conda_exe
# Try a simple fallback
if sp.call(['conda', '-h'], stdout=sp.PIPE, stderr=sp.PIPE) == 0:
return 'conda'
raise RuntimeError("Can't locate conda executable")
def venv_lookup(self, name):
# In Conda environments on Windows, the Python executable is installed in
# the top-level environment directory, as opposed to virtualenvs, where it
# is installed in the Scripts directory. Tox assumes that looking in the
# Scripts directory is sufficient, which is why this workaround is required.
paths = [self.envconfig.envdir, self.envconfig.envbindir]
return py.path.local.sysfind(name, paths=paths)
@hookimpl
def tox_testenv_create(venv, action):
venv.session.make_emptydir(venv.path)
basepath = venv.path.dirpath()
# Check for venv.envconfig.sitepackages and venv.config.alwayscopy here
conda_exe = find_conda()
venv.envconfig.conda_exe = conda_exe
envdir = venv.envconfig.envdir
python = get_py_version(venv.envconfig)
# This is a workaround for locating the Python executable in Conda
# environments on Windows.
venv._venv_lookup = types.MethodType(venv_lookup, venv)
args = [conda_exe, 'create', '--yes', '-p', envdir]
for channel in venv.envconfig.conda_channels:
args += ['--channel', channel]
args += [python]
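    # At this point args looks roughly like (hypothetical values):
    #   ['conda', 'create', '--yes', '-p', <envdir>, '--channel', 'conda-forge', 'python=3.7']
    # with the channel flags present only when conda_channels is configured.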
venv._pcall(args, venv=False, action=action, cwd=basepath)
venv.envconfig.conda_python = python
return True
def install_conda_deps(venv, action, basepath, envdir):
conda_exe = venv.envconfig.conda_exe
# Account for the fact that we have a list of DepOptions
conda_deps = [str(dep.name) for dep in venv.envconfig.conda_deps]
action.setactivity('installcondadeps', ', '.join(conda_deps))
args = [conda_exe, 'install', '--yes', '-p', envdir]
for channel in venv.envconfig.conda_channels:
args += ['--channel', channel]
# We include the python version in the conda requirements in order to make
# sure that none of the other conda requirements inadvertently downgrade
# python in this environment. If any of the requirements are in conflict
# with the installed python version, installation will fail (which is what
# we want).
args += [venv.envconfig.conda_python] + conda_deps
venv._pcall(args, venv=False, action=action, cwd=basepath)
@hookimpl
def tox_testenv_install_deps(venv, action):
basepath = venv.path.dirpath()
envdir = venv.envconfig.envdir
num_conda_deps = len(venv.envconfig.conda_deps)
if num_conda_deps > 0:
install_conda_deps(venv, action, basepath, envdir)
# Account for the fact that we added the conda_deps to the deps list in
# tox_configure (see comment there for rationale). We don't want them
# to be present when we call pip install
venv.envconfig.deps = venv.envconfig.deps[:-1*num_conda_deps]
# Install dependencies from pypi here
tox.venv.tox_testenv_install_deps(venv=venv, action=action)
return True
| 30.487805 | 80 | 0.694 |
7943447bc5f75f817f495ee281b3784ff7264a9c | 4,750 | py | Python | allennlp/scripts/benchmark_iter.py | rahular/joint-coref-srl | cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be | [
"MIT"
] | null | null | null | allennlp/scripts/benchmark_iter.py | rahular/joint-coref-srl | cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be | [
"MIT"
] | null | null | null | allennlp/scripts/benchmark_iter.py | rahular/joint-coref-srl | cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Benchmarks the iterator (and indirectly the dataset reader) for a given config.
#
# Example 1: Log stats every 100 batches. Periodically output internals of
# MultiprocessDatasetReader and MultiprocessIterator.
#
# $ scripts/benchmark_iter.py --config training_config/bidirectional_language_model.jsonnet --serialization-dir serialization-dir --action=log --assume-multiprocess-types
#
# Example 2: Output seconds/batch over 10k batches.
#
# $ scripts/benchmark_iter.py --config training_config/bidirectional_language_model.jsonnet --serialization-dir serialization-dir --action=time --batch-count=10000
#
# Example 3: Output seconds to produce the first batch in order to measure overhead.
#
# $ scripts/benchmark_iter.py --config training_config/bidirectional_language_model.jsonnet --serialization-dir serialization-dir --action=first
import argparse
from enum import Enum
from multiprocessing import Process
import time
from allennlp.common import Params
from allennlp.training.trainer_pieces import TrainerPieces
from allennlp.training.util import get_batch_size
BATCH_INTERVAL = 100
LOGGING_INTERVAL_SECONDS = 5
def run_periodically(reader_output, iterator_output):
while True:
message = (
f"read out q: {reader_output.qsize()} "
+ f"it out q: {iterator_output.qsize()}"
)
print(message)
time.sleep(LOGGING_INTERVAL_SECONDS)
def log_iterable(iterable, assume_multiprocess_types):
start = time.perf_counter()
last = start
periodic_logging_process = None
have_started_periodic_process = False
batch_count = 0
cumulative_batch_size = 0
for batch in iterable:
batch_count += 1
cumulative_batch_size += get_batch_size(batch)
if assume_multiprocess_types and not have_started_periodic_process:
have_started_periodic_process = True
periodic_logging_process = Process(
target=run_periodically,
# Pass the queues directly. Passing the iterable naively
# won't work because the forked process (in contrast with
# threads) has an entirely separate address space.
# Presumably this could be worked around with
# multiprocessing.managers or similar.
args=(
iterable.gi_frame.f_locals["qiterable"].output_queue,
iterable.gi_frame.f_locals["output_queue"],
),
)
periodic_logging_process.start()
if batch_count % BATCH_INTERVAL == 0:
end = time.perf_counter()
msg = (
f"s/b total: {(end - start) / batch_count:.3f} "
+ f"s/b last: {(end - last) / BATCH_INTERVAL:.3f} "
+ f"batch count: {batch_count} "
+ f"batch size: {cumulative_batch_size / batch_count:.1f} "
)
print(msg)
last = end
if periodic_logging_process:
periodic_logging_process.terminate()
def time_iterable(iterable, batch_count):
assert batch_count > 0
print("Starting test")
start = time.perf_counter()
i = batch_count
for _ in iterable:
i -= 1
if i == 0:
break
assert i == 0, "Not enough batches!"
end = time.perf_counter()
print(f"{(end - start)/batch_count:.3f} s/b over {batch_count} batches")
def time_to_first(iterable):
print("Starting test")
start = time.perf_counter()
for _ in iterable:
break
end = time.perf_counter()
print(f"{(end - start):.3f} s/b for first batch")
class Action(Enum):
log = "log"
time = "time"
first = "first"
def __str__(self):
return self.name
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--action", type=Action, choices=list(Action), required=True)
parser.add_argument("--config", required=True)
parser.add_argument("--serialization-dir", required=True)
parser.add_argument("--batch-count", type=int, default=0)
parser.add_argument("--assume-multiprocess-types", action="store_true")
args = parser.parse_args()
params = Params.from_file(args.config)
pieces = TrainerPieces.from_params(params, args.serialization_dir)
raw_generator = pieces.iterator(pieces.train_dataset, num_epochs=1, shuffle=True)
if args.action is Action.log:
log_iterable(raw_generator, args.assume_multiprocess_types)
elif args.action is Action.time:
time_iterable(raw_generator, args.batch_count)
elif args.action is Action.first:
time_to_first(raw_generator)
else:
raise Exception(f"Unaccounted for action {args.action}")
| 32.534247 | 170 | 0.671368 |
794345aa5476b49b3425240a6fbce03e43bc9f1e | 1,035 | py | Python | user.py | Okalll/Password-locker | 8911a7bbd8f5d074731743d9588f8a780671c930 | [
"Unlicense"
] | null | null | null | user.py | Okalll/Password-locker | 8911a7bbd8f5d074731743d9588f8a780671c930 | [
"Unlicense"
] | null | null | null | user.py | Okalll/Password-locker | 8911a7bbd8f5d074731743d9588f8a780671c930 | [
"Unlicense"
] | null | null | null | class User:
"""
class that generates new instances of users
"""
user_list = [] # empty Users list
def save_user(self):
'''
save_user method saves user objects into user_list
'''
User.user_list.append(self)
def __init__(self, username, password, email):
'''
defining structure of the user object
'''
self.username = username
self.password = password
self.email = email
@classmethod
def confirm_user(cls, username, password, email):
'''
        Method that checks if the username and password entered match an entry in the user_list
'''
current_user = ''
for user in User.user_list:
if (user.username == username and user.password == password):
current_user = user.username
return current_user
@classmethod
def display_user(cls):
'''
        method that returns the list of saved users
'''
return cls.user_list
| 24.069767 | 98 | 0.577778 |
794346cb6896fd9eff31fa9a9f863aba5aa1ca87 | 2,594 | py | Python | Code/summaryStats.py | gugek/dataBiasUSCourts | bec8a67c92ac3ad72371d77eae6d46972f0210fa | [
"MIT"
] | 1 | 2020-05-19T17:34:14.000Z | 2020-05-19T17:34:14.000Z | Code/summaryStats.py | gugek/dataBiasUSCourts | bec8a67c92ac3ad72371d77eae6d46972f0210fa | [
"MIT"
] | null | null | null | Code/summaryStats.py | gugek/dataBiasUSCourts | bec8a67c92ac3ad72371d77eae6d46972f0210fa | [
"MIT"
] | 1 | 2021-09-16T13:30:19.000Z | 2021-09-16T13:30:19.000Z | import csv
import os
import helpers
#since the text of some cases is so long we need to increase the csv field size limit in order to read it
csv.field_size_limit(3000000)
#This script goes through the processed data and produces summary statistics about it.
#Finds the number of cases which have the United States as one of two parties on the case for each circuit.
#Finds the number of cases with 0, 1, 2, and 3 democrats on the panel for each circuit.
#Also prints the total number of cases in the corpus
def summarize(circuits,circuitCSVDirName,summaryStatsDir):
#make our output directory
helpers.maybeMakeDirStructure(summaryStatsDir)
#open our output files
with open(os.path.join(summaryStatsDir,'USPartyCount.csv'),'wb') as USOutFileRaw:
USPartyOutFile = csv.writer(USOutFileRaw)
USPartyOutFile.writerow(['Circuit','US Party Case Count','Non US Party Case Count', 'Percent of Cases with US as Party'])
with open(os.path.join(summaryStatsDir,'panelComposition.csv'),'wb') as demsOutFileRaw:
demsOnPanelOutFile = csv.writer(demsOutFileRaw)
demsOnPanelOutFile.writerow(['Circuit','Cases with 0/3 Democrats','Cases with 1/3 Democrats','Cases with 2/3 Democrats','Cases with 3/3 Democrats'])
#go through all circuits
for circuit in circuits:
with open(os.path.join(circuitCSVDirName,circuit + 'DataForSTM.csv'),'rb') as metaFileRaw:
metaFile = csv.reader(metaFileRaw)
#initialize variables for us to store the aggregate counts in for this circuit
usPartyCount=0
count = 0
demDict = {}
for demCount in range(4):
demDict[str(demCount)] = 0
#for each case read in the relevant data
for line in metaFile:
fileName = line[1]
fileParties = line[3]
fileUSParty = line[7]
#skip the header line
if fileName!='filename':
count +=1
if fileUSParty == "True":
usPartyCount+=1
demCount = str(fileParties.count('1'))
demDict[demCount]+=1
#write out the results for this circuit
USPartyOutFile.writerow([circuit,usPartyCount,count-usPartyCount,(0.0+usPartyCount)/count])
demsOnPanelOutFile.writerow([circuit,demDict['0'],demDict['1'],demDict['2'],demDict['3']])
if __name__ == "__main__":
circuitCSVDirName = os.path.join('..','Data','stmCSV')
summaryStatsDir = os.path.join('..','Results','summaryStats')
circuitList = ['ca1','ca2','ca3','ca4','ca5','ca6','ca7','ca8','ca9','ca10','ca11','cadc']
summarize(circuitList,circuitCSVDirName,summaryStatsDir) | 37.594203 | 152 | 0.69468 |
7943476628f8d301a7ca9fe0380b0c6814cec864 | 796 | py | Python | cinder/units.py | CloudVPS/cinder | 9097b9407b6ce16c7b5678682284a0ad0fcc652d | [
"Apache-2.0"
] | null | null | null | cinder/units.py | CloudVPS/cinder | 9097b9407b6ce16c7b5678682284a0ad0fcc652d | [
"Apache-2.0"
] | null | null | null | cinder/units.py | CloudVPS/cinder | 9097b9407b6ce16c7b5678682284a0ad0fcc652d | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A module where we define some basic units for use across Cinder.
"""
KiB = 1024
MiB = KiB * 1024
GiB = MiB * 1024
TiB = GiB * 1024
| 33.166667 | 78 | 0.717337 |
7943477ac4bd2786b06f6dd45a4e0c897d0cb061 | 1,556 | py | Python | jackhammer/cloud/gcp.py | nicocoffo/jackhammer | 614bab06fc81d85c7aae7b14b9175c7426b33b29 | [
"MIT"
] | null | null | null | jackhammer/cloud/gcp.py | nicocoffo/jackhammer | 614bab06fc81d85c7aae7b14b9175c7426b33b29 | [
"MIT"
] | null | null | null | jackhammer/cloud/gcp.py | nicocoffo/jackhammer | 614bab06fc81d85c7aae7b14b9175c7426b33b29 | [
"MIT"
] | null | null | null | import logging
from jackhammer.cloud.cloud import Cloud
# libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
logger = logging.getLogger("jackhammer.cloud")
class GCP(Cloud):
"""
Wrapper around libcloud's GCP NodeDriver. Creates preemptible
machines, to keep costs low.
"""
def __init__(self, uuid, config):
super().__init__(uuid, config)
logger.debug("Creating GCP node driver")
self.compute = get_driver(Provider.GCE)
self.driver = self.compute(
self.config['email'],
self.config['keyPath'],
project=self.config['project'])
def create_machine(self, name, key):
logger.debug("Creating GCP node")
node = self.config['node']
metadata = node['metadata'] if 'metadata' in node else {}
metadata['ssh-keys'] = key
storage = self.driver.create_volume(
node['diskSize'],
name,
location=node['zone'],
image=node['image'])
return self.driver.create_node(
name,
node['size'],
node['image'],
node['zone'],
ex_boot_disk=storage,
ex_tags=node['tags'],
ex_metadata=metadata,
ex_preemptible=True)
def list_machines(self):
return [m for m in self.driver.list_nodes() if self.uuid in m.name]
def list_volumes(self):
return [v for v in self.driver.list_volumes() if self.uuid in v.name]
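# Illustrative config shape (an assumption pieced together from the keys read above,
# not taken from the jackhammer documentation; values are placeholders):
#
# config = {
#     "email": "svc-account@my-project.iam.gserviceaccount.com",
#     "keyPath": "/path/to/key.json",
#     "project": "my-project",
#     "node": {
#         "size": "n1-standard-1",
#         "image": "debian-9",
#         "zone": "us-central1-a",
#         "diskSize": 10,
#         "tags": ["jackhammer"],
#         "metadata": {},  # optional; the ssh-keys entry is filled in by create_machine
#     },
# }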
| 28.814815 | 77 | 0.595116 |
79434896bc31cd17431670cbc0d0b7cfdd5830a8 | 6,480 | py | Python | Example/Psi4Numpy/13-GeometryOptimization/opt_helper/stre.py | yychuang/109-2-compchem-lite | cbf17e542f9447e89fb48de1b28759419ffff956 | [
"BSD-3-Clause"
] | 214 | 2017-03-01T08:04:48.000Z | 2022-03-23T08:52:04.000Z | Example/Psi4Numpy/13-GeometryOptimization/opt_helper/stre.py | yychuang/109-2-compchem-lite | cbf17e542f9447e89fb48de1b28759419ffff956 | [
"BSD-3-Clause"
] | 100 | 2017-03-03T13:20:20.000Z | 2022-03-05T18:20:27.000Z | Example/Psi4Numpy/13-GeometryOptimization/opt_helper/stre.py | yychuang/109-2-compchem-lite | cbf17e542f9447e89fb48de1b28759419ffff956 | [
"BSD-3-Clause"
] | 150 | 2017-02-17T19:44:47.000Z | 2022-03-22T05:52:43.000Z | import numpy as np
from . import covRadii
from . import optExceptions
from . import v3d
from .misc import delta, ZtoPeriod, HguessLindhRho
from .simple import *
from psi4 import constants
BOHR2ANGSTROMS = constants.bohr2angstroms
HARTREE2AJ = constants.hartree2aJ
class STRE(SIMPLE):
def __init__(self, a, b, frozen=False, fixedEqVal=None, inverse=False):
self._inverse = inverse # bool - is really 1/R coordinate?
if a < b: atoms = (a, b)
else: atoms = (b, a)
SIMPLE.__init__(self, atoms, frozen, fixedEqVal)
def __str__(self):
if self.frozen: s = '*'
else: s = ' '
if self.inverse: s += '1/R'
else: s += 'R'
s += "(%d,%d)" % (self.A + 1, self.B + 1)
if self.fixedEqVal:
s += "[%.4f]" % (self.fixedEqVal * self.qShowFactor)
return s
def __eq__(self, other):
if self.atoms != other.atoms: return False
elif not isinstance(other, STRE): return False
elif self.inverse != other.inverse: return False
else: return True
@property
def inverse(self):
return self._inverse
@inverse.setter
def inverse(self, setval):
self._inverse = bool(setval)
def q(self, geom):
return v3d.dist(geom[self.A], geom[self.B])
def qShow(self, geom):
return self.qShowFactor * self.q(geom)
@property
def qShowFactor(self):
return BOHR2ANGSTROMS
@property
def fShowFactor(self):
return HARTREE2AJ / BOHR2ANGSTROMS
# If mini == False, dqdx is 1x(3*number of atoms in fragment).
# if mini == True, dqdx is 1x6.
def DqDx(self, geom, dqdx, mini=False):
check, eAB = v3d.eAB(geom[self.A], geom[self.B]) # A->B
if not check:
raise optExceptions.ALG_FAIL("STRE.DqDx: could not normalize s vector")
if mini:
startA = 0
startB = 3
else:
startA = 3 * self.A
startB = 3 * self.B
dqdx[startA:startA + 3] = -1 * eAB[0:3]
dqdx[startB:startB + 3] = eAB[0:3]
if self._inverse:
val = self.q(geom)
dqdx[startA:startA + 3] *= -1.0 * val * val # -(1/R)^2 * (dR/da)
dqdx[startB:startB + 3] *= -1.0 * val * val
return
# Return derivative B matrix elements. Matrix is cart X cart and passed in.
def Dq2Dx2(self, geom, dq2dx2):
check, eAB = v3d.eAB(geom[self.A], geom[self.B]) # A->B
if not check:
raise optExceptions.ALG_FAIL("STRE.Dq2Dx2: could not normalize s vector")
if not self._inverse:
length = self.q(geom)
for a in range(2):
for a_xyz in range(3):
for b in range(2):
for b_xyz in range(3):
tval = (
eAB[a_xyz] * eAB[b_xyz] - delta(a_xyz, b_xyz)) / length
if a == b:
tval *= -1.0
dq2dx2[3*self.atoms[a]+a_xyz, \
3*self.atoms[b]+b_xyz] = tval
else: # using 1/R
val = self.q(geom)
dqdx = np.zeros((3 * len(self.atoms)), float)
self.DqDx(geom, dqdx, mini=True) # returned matrix is 1x6 for stre
            for a in range(2):  # two atoms in a stretch; 'range(a)' here was a bug
                for a_xyz in range(3):
                    for b in range(2):  # likewise 'range(b)' referenced an undefined name
for b_xyz in range(3):
dq2dx2[3*self.atoms[a]+a_xyz, 3*self.atoms[b]+b_xyz] \
= 2.0 / val * dqdx[3*a+a_xyz] * dqdx[3*b+b_xyz]
return
def diagonalHessianGuess(self, geom, Z, connectivity=False, guessType="SIMPLE"):
""" Generates diagonal empirical Hessians in a.u. such as
Schlegel, Theor. Chim. Acta, 66, 333 (1984) and
Fischer and Almlof, J. Phys. Chem., 96, 9770 (1992).
"""
if guessType == "SIMPLE":
return 0.5
if guessType == "SCHLEGEL":
R = v3d.dist(geom[self.A], geom[self.B])
PerA = ZtoPeriod(Z[self.A])
PerB = ZtoPeriod(Z[self.B])
AA = 1.734
if PerA == 1:
if PerB == 1:
BB = -0.244
elif PerB == 2:
BB = 0.352
else:
BB = 0.660
elif PerA == 2:
if PerB == 1:
BB = 0.352
elif PerB == 2:
BB = 1.085
else:
BB = 1.522
else:
if PerB == 1:
BB = 0.660
elif PerB == 2:
BB = 1.522
else:
BB = 2.068
F = AA / ((R - BB) * (R - BB) * (R - BB))
return F
elif guessType == "FISCHER":
Rcov = (
covRadii.R[int(Z[self.A])] + covRadii.R[int(Z[self.B])]) / BOHR2ANGSTROMS
R = v3d.dist(geom[self.A], geom[self.B])
AA = 0.3601
BB = 1.944
return AA * (np.exp(-BB * (R - Rcov)))
elif guessType == "LINDH_SIMPLE":
R = v3d.dist(geom[self.A], geom[self.B])
k_r = 0.45
return k_r * HguessLindhRho(Z[self.A], Z[self.B], R)
else:
print("Warning: Hessian guess encountered unknown coordinate type.\n")
return 1.0
class HBOND(STRE):
def __str__(self):
if self.frozen: s = '*'
else: s = ' '
if self.inverse: s += '1/H'
else: s += 'H'
s += "(%d,%d)" % (self.A + 1, self.B + 1)
if self.fixedEqVal:
s += "[%.4f]" % self.fixedEqVal
return s
# overrides STRE eq in comparisons, regardless of order
def __eq__(self, other):
if self.atoms != other.atoms: return False
elif not isinstance(other, HBOND): return False
elif self.inverse != other.inverse: return False
else: return True
def diagonalHessianGuess(self, geom, Z, connectivity, guessType):
""" Generates diagonal empirical Hessians in a.u. such as
Schlegel, Theor. Chim. Acta, 66, 333 (1984) and
Fischer and Almlof, J. Phys. Chem., 96, 9770 (1992).
"""
        if guessType == "SIMPLE":  # 'guess' was undefined; the parameter is guessType
return 0.1
else:
print("Warning: Hessian guess encountered unknown coordinate type.\n")
return 1.0
| 31.004785 | 89 | 0.495216 |
794349bfc3adeee74c9291a28dbeddcfeb44161e | 757 | py | Python | setup.py | cyy0523xc/vit-pytorch | 8bf0383507d838ac35ac574b11b377058afbf627 | [
"MIT"
] | 1 | 2021-06-24T14:20:52.000Z | 2021-06-24T14:20:52.000Z | setup.py | bshantam97/vit-pytorch | 85314cf0b6c4ab254fed4257d2ed069cf4f8f377 | [
"MIT"
] | null | null | null | setup.py | bshantam97/vit-pytorch | 85314cf0b6c4ab254fed4257d2ed069cf4f8f377 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(
name = 'vit-pytorch',
packages = find_packages(exclude=['examples']),
version = '0.6.7',
license='MIT',
description = 'Vision Transformer (ViT) - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/vit-pytorch',
keywords = [
'artificial intelligence',
'attention mechanism',
'image recognition'
],
install_requires=[
'torch>=1.6',
'einops>=0.3'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| 26.103448 | 65 | 0.645971 |
794349e248fee845d6303c2d53860a06e1edb9fe | 11,029 | py | Python | tests/test_write_preds.py | YianZhang/jiant-v1-legacy-online-code | b6b1066de7cdbe1b95ca1ae3de6989d07b2e9629 | [
"MIT"
] | 11 | 2020-10-28T07:41:01.000Z | 2021-12-14T20:09:46.000Z | tests/test_write_preds.py | YianZhang/jiant-v1-legacy-online-code | b6b1066de7cdbe1b95ca1ae3de6989d07b2e9629 | [
"MIT"
] | 3 | 2020-10-08T18:09:58.000Z | 2021-07-22T22:24:02.000Z | tests/test_write_preds.py | YianZhang/jiant-v1-legacy-online-code | b6b1066de7cdbe1b95ca1ae3de6989d07b2e9629 | [
"MIT"
] | 12 | 2020-10-23T07:13:45.000Z | 2022-03-29T22:00:58.000Z | import csv
import os
import os.path
import shutil
import tempfile
import unittest
from unittest import mock
import torch
import pandas as pd
from jiant import evaluate
import jiant.tasks.tasks as tasks
from jiant.models import MultiTaskModel
from jiant.__main__ import evaluate_and_write
from jiant.allennlp_mods.numeric_field import NumericField
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data import Instance, Token, vocabulary
from allennlp.data.fields import LabelField, ListField, MetadataField, TextField
def model_forward(task, batch, predict=True):
if task.name == "sts-b":
logits = torch.Tensor([0.6, 0.4])
labels = torch.Tensor([0.875, 0.6])
out = {"logits": logits, "labels": labels, "n_exs": 2, "preds": [1.0, 0.8]}
elif task.name == "wic":
logits = torch.Tensor([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5], [0.5, 0.5]])
labels = torch.LongTensor([0, 1, 1, 0])
out = {"logits": logits, "labels": labels, "n_exs": 4, "preds": [0, 1, 1, 1]}
else:
raise ValueError("Unexpected task found")
task.update_metrics(out, batch)
return out
class TestWritePreds(unittest.TestCase):
def sentence_to_text_field(self, sent, indexers):
""" Helper function to map a sequence of tokens into a sequence of
AllenNLP Tokens, then wrap in a TextField with the given indexers """
return TextField(list(map(Token, sent)), token_indexers=indexers)
def setUp(self):
"""
Since we're testing write_preds, we need to mock model predictions and the parts
        of the model, arguments, and trainer needed to write predictions.
        Unlike in update_metrics tests, the actual contents of the examples in val_data
        are not the most important as long as they adhere to the API necessary for examples
of that task.
"""
self.temp_dir = tempfile.mkdtemp()
self.path = os.path.join(self.temp_dir, "temp_dataset.tsv")
self.stsb = tasks.STSBTask(self.temp_dir, 100, "sts-b", tokenizer_name="MosesTokenizer")
self.wic = tasks.WiCTask(self.temp_dir, 100, "wic", tokenizer_name="MosesTokenizer")
stsb_val_preds = pd.DataFrame(
data=[
{
"idx": 0,
"labels": 1.00,
"preds": 1.00,
"sent1_str": "A man with a hard hat is dancing.",
"sent2_str": "A man wearing a hard hat is dancing",
},
{
"idx": 1,
"labels": 0.950,
"preds": 0.34,
"sent1_str": "A young child is riding a horse.",
"sent2_str": "A child is riding a horse.",
},
]
)
wic_val_preds = pd.DataFrame(
data=[
{
"idx": 0,
"sent1": "Room and board. ",
"sent2": "He nailed boards across the windows.",
"labels": 0,
"preds": 0,
},
{
"idx": 1,
"sent1": "Hook a fish",
"sent2": "He hooked a snake accidentally.",
"labels": 1,
"preds": 1,
},
]
)
indexers = {"bert_cased": SingleIdTokenIndexer("bert-xe-cased")}
self.wic.set_instance_iterable(
"val",
[
Instance(
{
"sent1_str": MetadataField("Room and board."),
"sent2_str": MetadataField("He nailed boards"),
"idx": LabelField(0, skip_indexing=True),
"idx2": NumericField(2),
"idx1": NumericField(3),
"inputs": self.sentence_to_text_field(
[
"[CLS]",
"Room",
"and",
"Board",
".",
"[SEP]",
"He",
"nailed",
"boards",
"[SEP]",
],
indexers,
),
"labels": LabelField(0, skip_indexing=1),
}
),
Instance(
{
"sent1_str": MetadataField("C ##ir ##culate a rumor ."),
"sent2_str": MetadataField("This letter is being circulated"),
"idx": LabelField(1, skip_indexing=True),
"idx2": NumericField(2),
"idx1": NumericField(3),
"inputs": self.sentence_to_text_field(
[
"[CLS]",
"C",
"##ir",
"##culate",
"a",
"rumor",
"[SEP]",
"This",
"##let",
"##ter",
"is",
"being",
"c",
"##ir",
"##culated",
"[SEP]",
],
indexers,
),
"labels": LabelField(0, skip_indexing=1),
}
),
Instance(
{
"sent1_str": MetadataField("Hook a fish'"),
"sent2_str": MetadataField("He hooked a snake accidentally"),
"idx": LabelField(2, skip_indexing=True),
"idx2": NumericField(2),
"idx1": NumericField(3),
"inputs": self.sentence_to_text_field(
[
"[CLS]",
"Hook",
"a",
"fish",
"[SEP]",
"He",
"hooked",
"a",
"snake",
"accidentally",
"[SEP]",
],
indexers,
),
"labels": LabelField(1, skip_indexing=1),
}
),
Instance(
{
"sent1_str": MetadataField("For recreation he wrote poetry."),
"sent2_str": MetadataField("Drug abuse is often regarded as recreation ."),
"idx": LabelField(3, skip_indexing=True),
"idx2": NumericField(2),
"idx1": NumericField(3),
"inputs": self.sentence_to_text_field(
[
"[CLS]",
"For",
"re",
"##creation",
"he",
"wrote",
"poetry",
"[SEP]",
"Drug",
"abuse",
"is",
"often",
"re",
"##garded",
"as",
"re",
"##creation",
"[SEP]",
],
indexers,
),
"labels": LabelField(1, skip_indexing=1),
}
),
],
)
self.val_preds = {"sts-b": stsb_val_preds, "wic": wic_val_preds}
self.vocab = vocabulary.Vocabulary.from_instances(self.wic.get_instance_iterable("val"))
self.vocab.add_token_to_namespace("True", "wic_tags")
for data in self.wic.get_instance_iterable("val"):
data.index_fields(self.vocab)
self.glue_tasks = [self.stsb, self.wic]
self.args = mock.Mock()
self.args.batch_size = 4
self.args.cuda = -1
self.args.run_dir = self.temp_dir
self.args.exp_dir = ""
def test_write_preds_does_run(self):
evaluate.write_preds(
self.glue_tasks, self.val_preds, self.temp_dir, "test", strict_glue_format=True
)
assert os.path.exists(self.temp_dir + "/STS-B.tsv") and os.path.exists(
self.temp_dir + "/WiC.jsonl"
)
def test_write_preds_glue(self):
evaluate.write_preds(
self.glue_tasks, self.val_preds, self.temp_dir, "test", strict_glue_format=True
)
stsb_predictions = pd.read_csv(self.temp_dir + "/STS-B.tsv", sep="\t")
assert "index" in stsb_predictions.columns and "prediction" in stsb_predictions.columns
assert stsb_predictions.iloc[0]["prediction"] == 5.00
assert stsb_predictions.iloc[1]["prediction"] == 1.7
def test_write_preds_superglue(self):
"""
Ensure that SuperGLUE write predictions for test is saved to the correct file
format.
"""
evaluate.write_preds(
[self.wic], self.val_preds, self.temp_dir, "test", strict_glue_format=True
)
wic_predictions = pd.read_json(self.temp_dir + "/WiC.jsonl", lines=True)
assert "idx" in wic_predictions.columns and "label" in wic_predictions.columns
assert wic_predictions.iloc[0]["label"] == "false"
assert wic_predictions.iloc[1]["label"] == "true"
@mock.patch("jiant.models.MultiTaskModel.forward", side_effect=model_forward)
def test_evaluate_and_write_does_run(self, model_forward_function):
"""
Testing that evaluate_and_write runs without breaking.
"""
with mock.patch("jiant.models.MultiTaskModel") as MockModel:
MockModel.return_value.eval.return_value = None
MockModel.return_value.forward = model_forward
MockModel.use_bert = 1
model = MockModel()
evaluate_and_write(self.args, model, [self.wic], splits_to_write="val", cuda_device=-1)
def tear_down(self):
shutil.rmtree(self.temp_dir)
| 41 | 99 | 0.429504 |
79434a45414bf5500896163b4730585b5f608439 | 4,720 | py | Python | guiClass.py | arseniiyamnii/PyLearnQt | 48f51530cabb37a993619c3b9f950eb760d7ea3a | [
"MIT"
] | null | null | null | guiClass.py | arseniiyamnii/PyLearnQt | 48f51530cabb37a993619c3b9f950eb760d7ea3a | [
"MIT"
] | null | null | null | guiClass.py | arseniiyamnii/PyLearnQt | 48f51530cabb37a993619c3b9f950eb760d7ea3a | [
"MIT"
] | null | null | null | ##\file guiClass.py
#\author Arsenii Yamnii
#\brief class with qt GUI
#\warning dependencies\n
#module \b PyQt5
from PyQt5 import uic
import random
from PyQt5.QtWidgets import QMainWindow, QPushButton, QTextBrowser, QLineEdit,QMenu,QAction
import sys
import resultClass, settingsClass
from PyQt5.QtCore import QTimer
import json
##\brief class with gui
#\details This class contains all Qt widgets for the main window,\n
#and runs the other windows, such as the result and config windows
class UI(QMainWindow):
##\brief initialize ui file
    #\details initialize all GUI Qt widgets, to control them,\n
#connect functions to buttons,\n
#set text to buttons from language dictionary,\n
def __init__(self,working_exercise):
with open("config.json", "r") as configFile:
self.configDictionary=json.load(configFile)
with open("languages/"+self.configDictionary["language"]+".json", "r") as language_file: #open language file
##\brief dictionary with language
        #\details contains the language dictionary.
self.language_dict = json.load(language_file)
##\brief exercise array
        #\details array that contains all exercise objects.\n
self.working_exercise=working_exercise
super(UI, self).__init__()
uic.loadUi("./qtUi/main.ui", self)#initialize UI from GUI file
##\brief exercise widget
        #\details this widget contains the exercise text\n
        #and the variables.
self.exerciseText=self.findChild(QTextBrowser, "textBrowser")
        ##\brief answer widget
        #\details this widget receives the user's input (the answer).
self.answerLine=self.findChild(QLineEdit, "lineEdit")
##\brief Settings button
#\details QT widget with TopMenu button 'Settings'
self.menuSettingsButton=self.findChild(QAction, "actionSettings")
##\brief File button
#\details TopMenu button 'File'
self.menuFileButton=self.findChild(QMenu, "menuFile")
self.menuFileButton.setTitle(self.language_dict["words"]["topMenuFile"])#add text to File Button
self.menuSettingsButton.setText(self.language_dict["words"]["topMenuSettings"])#add text to Settings Button
self.menuSettingsButton.triggered.connect(self.runSettings)#add action to Settings Button
##\brief push button widget
        #\details Qt push button widget to submit the ready answer
self.pushAnswerBtton=self.findChild(QPushButton,"pushButton")
self.pushAnswerBtton.setText(self.language_dict["words"]["pushButton"])#add text to Push Button
self.pushAnswerBtton.clicked.connect(self.getAnswerText)#here we add some function to pushbutton
self.getExercise()
self.addExerciseText()
self.show()
##\brief run Settings window
#\details create settings window from SettingsClass
def runSettings(self):
self.settingsWindow=settingsClass.UI()
self.settingsWindow.show()
print("runSettings")
    ##\brief function that adds the exercise text
def addExerciseText(self):
self.exerciseText.setText("a="+str(self.runing_exercise.a)+"\nb="+str(self.runing_exercise.b)+"\n"+self.runing_exercise.get_exercise_text())
##\brief get answer text
    #\details Pass the answer text to the runing_exercise.compare_answer function,\n
    #get the result from compare_answer in bool format,\n
    #send it to the resultWindow function,\n
    #and remove the text from the input line
def getAnswerText(self):
self.resultWindow(self.runing_exercise.compare_answer(self.answerLine.text()))
self.answerLine.setText("")
##\brief open result window
def resultWindow(self,answer):
##\brief result window object
        #\details object that contains all result window widgets
self.dialog=resultClass.UI(self.language_dict["words"]["progressBarText"],self.configDictionary["tieWaitResult"])
if answer=="true":
self.dialog.resultLabel.setText(self.language_dict["words"]["correct"])
else:
self.dialog.resultLabel.setText(self.language_dict["words"]["wrong"])
self.dialog.show()
self.dialog.timer.timeout.connect(self.dialog.handleTimer)
self.dialog.timer.start(1000)
self.getExercise()
#self.runing_exercise.change_exercise()
self.runing_exercise.create_vars()
self.addExerciseText()
##\brief Switch exercise
    #\details Inserts a new object from the working_exercise array into the runing_exercise variable
def getExercise(self):
self.runing_exercise=self.working_exercise[random.randint(0,len(self.working_exercise)-1)]
self.runing_exercise.get_statements()
self.runing_exercise.create_vars()
| 48.163265 | 148 | 0.704237 |
79434a6aecd75e0e7e8a9ba8831739a57654529c | 1,681 | py | Python | dataworkspace/dataworkspace/apps/accounts/backends.py | uktrade/jupyterhub-data-auth-admin | 91544f376209a201531f4dbfb8faad1b8ada18c9 | [
"MIT"
] | 1 | 2019-06-10T08:22:56.000Z | 2019-06-10T08:22:56.000Z | dataworkspace/dataworkspace/apps/accounts/backends.py | uktrade/jupyterhub-data-auth-admin | 91544f376209a201531f4dbfb8faad1b8ada18c9 | [
"MIT"
] | 2 | 2019-05-17T13:10:42.000Z | 2019-06-17T10:48:46.000Z | dataworkspace/dataworkspace/apps/accounts/backends.py | uktrade/jupyterhub-data-auth-admin | 91544f376209a201531f4dbfb8faad1b8ada18c9 | [
"MIT"
] | null | null | null | import logging
from datetime import datetime
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth import get_user_model
from sentry_sdk import set_user
from dataworkspace.apps.applications.utils import create_user_from_sso
logger = logging.getLogger("app")
class AuthbrokerBackendUsernameIsEmail(ModelBackend):
def authenticate(self, request, username=None, password=None, **kwargs):
try:
email = request.META["HTTP_SSO_PROFILE_EMAIL"]
contact_email = request.META["HTTP_SSO_PROFILE_CONTACT_EMAIL"]
related_emails = request.META["HTTP_SSO_PROFILE_RELATED_EMAILS"].split(",")
user_id = request.META["HTTP_SSO_PROFILE_USER_ID"]
first_name = request.META["HTTP_SSO_PROFILE_FIRST_NAME"]
last_name = request.META["HTTP_SSO_PROFILE_LAST_NAME"]
except KeyError:
return None
primary_email = contact_email if contact_email else email
emails = [email] + ([contact_email] if contact_email else []) + related_emails
user = create_user_from_sso(
user_id,
primary_email,
emails,
first_name,
last_name,
check_tools_access_if_user_exists=False,
)
set_user({"id": str(user.profile.sso_id), "email": user.email})
if user.profile.first_login is None:
user.profile.first_login = datetime.now()
user.profile.save()
return user
def get_user(self, user_id):
User = get_user_model()
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
| 34.306122 | 87 | 0.663296 |
79434a7c3f5ee83be36a065d57619d0bcfc0d42f | 543 | py | Python | Topics/Sorting/Bubble-Sort/bubble-sort.py | shihab4t/Competitive-Programming | e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be | [
"Unlicense"
] | 3 | 2021-06-15T01:19:23.000Z | 2022-03-16T18:23:53.000Z | Topics/Sorting/Bubble-Sort/bubble-sort.py | shihab4t/Competitive-Programming | e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be | [
"Unlicense"
] | null | null | null | Topics/Sorting/Bubble-Sort/bubble-sort.py | shihab4t/Competitive-Programming | e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be | [
"Unlicense"
] | null | null | null | def bubble_sort(a: list):
"""Optimized Bubble sort that return sorted list with O(n^2) complexity"""
a, n = a.copy(), len(a)
for _ in range(n):
is_swap = False
for i in range(1, n):
if a[i-1] > a[i]:
is_swap = True
a[i-1], a[i] = a[i], a[i-1]
        if not is_swap:  # no swaps in a full pass means the list is already sorted
break
return a
if __name__ == "__main__":
from random import randint
nums = [randint(0, 50) for _ in range(10)]
print(nums)
print(bubble_sort(nums))
print(nums)
| 25.857143 | 78 | 0.521179 |
79434a81d3c7096d8562f149cef18c442fbb9e8b | 1,168 | py | Python | setup.py | jayvdb/py2many | 3c6e902da523ff802a3d44b640889f5f29fc5bda | [
"MIT"
] | 1 | 2021-05-14T00:40:10.000Z | 2021-05-14T00:40:10.000Z | setup.py | jayvdb/py2many | 3c6e902da523ff802a3d44b640889f5f29fc5bda | [
"MIT"
] | 1 | 2021-07-07T05:29:15.000Z | 2021-07-07T05:29:15.000Z | setup.py | jayvdb/py2many | 3c6e902da523ff802a3d44b640889f5f29fc5bda | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from setuptools import setup, find_packages
__version__ = "0.2.1"
install_requires = ["toposort", "astor; python_version<'3.9'"]
setup_requires = []
tests_require = ["pytest", "unittest-expander", "argparse_dataclass"]
with open("README.md") as readme_file:
readme = readme_file.read()
setup(
name="py2many",
version=__version__,
description="Python to CLike language transpiler.",
long_description=readme + "\n\n",
long_description_content_type="text/markdown",
author="Arun Sharma",
python_requires=">=3.8",
url="https://github.com/adsharma/py2many",
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
packages=find_packages(exclude=["docs", "examples", "tests", "tests*"]),
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Software Development",
"Topic :: Utilities",
],
test_suite="tests",
entry_points={
"console_scripts": ["py2many=py2many.cli:main"],
},
)
| 29.2 | 76 | 0.664384 |
79434b1cdb760b619f0d8b25e440790721adc249 | 3,992 | py | Python | p6/PipelineCPU/my_files/test/auto_test.py | t0ush1/ComputerOrganization | 8093949bbd3e48678cea832133e9bf8990bbdf27 | [
"MIT"
] | 2 | 2022-03-06T06:05:24.000Z | 2022-03-10T09:08:08.000Z | p6/PipelineCPU/my_files/test/auto_test.py | t0ush1/ComputerOrganization | 8093949bbd3e48678cea832133e9bf8990bbdf27 | [
"MIT"
] | null | null | null | p6/PipelineCPU/my_files/test/auto_test.py | t0ush1/ComputerOrganization | 8093949bbd3e48678cea832133e9bf8990bbdf27 | [
"MIT"
] | null | null | null | #############################################################
# win10 64bit
# python 3.9.6
#
# all args of func are filepath
#
# tools:
# run Mars
# run ISE
# cmp result
#############################################################
import os
import re
# software path
xilinxPath = "G:\\ISE\\ise\\14.7\\ISE_DS\\ISE\\"
marsPath = "G:\\mars\\Mars_test.jar"
# export code and run mips
def runMars(asm, code, out):
os.system("java -jar " + marsPath + " db nc mc CompactDataAtZero a dump .text HexText " + code + " " + asm)
os.system("java -jar " + marsPath + " " + asm + " 4096 db nc mc CompactDataAtZero > " + out)
# generate prj and tcl file
def initISE(prj):
verilogPath = prj + "my_files\\cpu\\"
prjFilePath = prj + "mips.prj"
tclFilePath = prj + "mips.tcl"
with open(prjFilePath, "w") as prjFile, open(tclFilePath, "w") as tclFile:
for root, dirs, files in os.walk(verilogPath):
for fileName in files:
if re.match(r"[\w]*\.v", fileName):
prjFile.write("Verilog work " + root + "\\" + fileName + "\n")
tclFile.write("run 200us" + "\n" + "exit")
# compile and run verilog
def runISE(prj, code, out):
prjFilePath = prj + "mips.prj"
tclFilePath = prj + "mips.tcl"
exeFilePath = prj + "mips.exe"
logFilePath = prj + "log.txt"
codeFilePath = prj + "code.txt"
with open(code, "r") as codeSrc, open(codeFilePath, "w") as codeDst:
codeDst.write(codeSrc.read())
os.environ['XILINX'] = xilinxPath
os.system(xilinxPath + "bin\\nt64\\fuse -nodebug -prj " + prjFilePath + " -o " + exeFilePath + " mips_txt > " + logFilePath)
os.system(exeFilePath + " -nolog -tclbatch " + tclFilePath + " > " + out)
# compare my and std
def cmp(index, my, std, res):
with open(my, "r") as myFile, open(std, "r") as stdFile, open(res, "a") as out:
out.write("in testpoint" + str(index) + " : \n")
print("in testpoint" + str(index) + " : \n")
myFileText = myFile.read()
myLogs = re.findall("\@[^\n]*", myFileText)
stdLogs = re.findall("\@[^\n]*", stdFile.read())
isAC = True
for i in range(len(stdLogs)):
if i < len(myLogs) and myLogs[i] != stdLogs[i]:
out.write("\tOn Line " + str(i+1) + "\n")
out.write("\tGet\t\t: " + myLogs[i] + "\n")
out.write("\tExpect\t: " + stdLogs[i] + "\n")
print("\tOn Line " + str(i+1))
print("\tGet\t: " + myLogs[i])
print("\tExpect\t: " + stdLogs[i])
isAC = False
break
elif i >= len(myLogs):
out.write("myLogs is too short\n")
print("myLogs is too short")
isAC = False
break
if isAC :
out.write("\tAll Accepted\n")
print("\tAll Accepted")
return isAC
# run auto_test
prjPath = "D:\\study\\CO\\p6\\PipelineCPU\\"
initISE(prjPath)
tot = 101
ac = 0
testdataPath = prjPath + "my_files\\test\\data\\"
cmpResPath = testdataPath + "cmp_res.txt"
if os.path.exists(cmpResPath):
os.remove(cmpResPath)
print("------------------------------------------------------------")
for i in range(0, tot):
testpointPath = testdataPath + "testpoint\\testpoint" + str(i) + ".asm"
codePath = testdataPath + "code\\code" + str(i) + ".txt"
stdAnsPath = testdataPath + "std_ans\\std_ans" + str(i) + ".txt"
testAnsPath = testdataPath + "test_ans\\test_ans" + str(i) + ".txt"
runMars(testpointPath, codePath, stdAnsPath)
runISE(prjPath, codePath, testAnsPath)
if cmp(i, testAnsPath, stdAnsPath, cmpResPath):
ac += 1
# if ac + 3 <= i:
# break
print("------------------------------------------------------------")
if ac == tot:
print("All Killed!!!\n") | 35.017544 | 129 | 0.506263 |
79434b37021a31e2e356eec63b4a98506dc6f6af | 4,293 | py | Python | drf_util/utils.py | RodiZaharadji/drf-util | 16789b870263a5c0c86b6b8870b68a4305690b61 | [
"MIT"
] | null | null | null | drf_util/utils.py | RodiZaharadji/drf-util | 16789b870263a5c0c86b6b8870b68a4305690b61 | [
"MIT"
] | null | null | null | drf_util/utils.py | RodiZaharadji/drf-util | 16789b870263a5c0c86b6b8870b68a4305690b61 | [
"MIT"
] | null | null | null | from typing import Any
from dateutil import parser
from django.conf import settings
def dict_merge(a, b, path=None):
if path is None: path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
dict_merge(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
pass # same leaf value
else:
pass
# a[key] = b[key]
# raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))
else:
a[key] = b[key]
return a
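# Illustrative behaviour (added for clarity, not part of the original module): nested dicts
# are merged, and on conflicting scalar values the value already present in ``a`` is kept.
#   dict_merge({"a": 1, "n": {"x": 1}}, {"a": 2, "n": {"y": 2}, "b": 3})
#   -> {"a": 1, "n": {"x": 1, "y": 2}, "b": 3}   (``a`` is mutated in place and returned)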
def gt(obj: object, path: str, default: Any = None, sep: str = '.') -> Any:
"""
Function that extracts the value from the specified path in obj and returns default if nothing found
:param obj: Parameter in which we are searching for values in
:param path: Path we are trying to search for in our obj
:param default: Default value we return if nothing found in that path
:param sep: Separator used between path values
:return: Value in obj path if it exists or default value
"""
def _dispatch_item(_obj, _key):
if _key == '*':
for item in _obj:
yield item
elif hasattr(_obj, '__getitem__'):
if _key.isdigit():
yield _obj.__getitem__(int(_key))
else:
yield _obj.__getitem__(_key)
else:
yield getattr(_obj, _key)
def _dispatch_list(_gen, _key):
for _obj in _gen:
for item in _dispatch_item(_obj, _key):
yield item
obj = [obj]
for key in path.split(sep):
obj = _dispatch_list(obj, key)
try:
obj = list(obj)
except Exception:
return default
if len(obj) <= 1:
obj = next(iter(obj), default)
return obj
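# Illustrative examples (added for clarity, not part of the original module):
#   gt({"a": {"b": 1}}, "a.b")                          -> 1
#   gt({"a": {"b": 1}}, "a.c", default=0)               -> 0
#   gt({"items": [{"id": 1}, {"id": 2}]}, "items.*.id") -> [1, 2]   ('*' fans out over a list)
#   gt({"items": [{"id": 1}]}, "items.0.id")            -> 1        (digits index into sequences)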
def sf(function, exception=Exception):
try:
return function()
except exception:
pass
def join_url(part_one: str, part_two: str):
return '/'.join([part_one.rstrip('/'), part_two.lstrip('/')])
def st(path, value):
dict_return = {}
parts = path.split('.')
if not path:
return value
key = parts.pop(0)
dict_return[key] = st(".".join(parts), value)
return dict_return
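# Illustrative example (added for clarity): st() builds a nested dict from a dotted path,
# so that gt(st(path, value), path) returns the value again:
#   st("a.b.c", 5) -> {"a": {"b": {"c": 5}}}
#   st("", 5)      -> 5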
def get_object_labels(obj, names=None):
labels = []
iterate = []
if isinstance(obj, dict):
iterate = obj.items()
elif isinstance(obj, list):
iterate = enumerate(obj)
for key, value in iterate:
if isinstance(value, str):
if names:
if key in names:
labels.append(value)
else:
labels.append(value)
else:
labels = labels + get_object_labels(value, names)
return list(set(labels))
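# Illustrative example (added for clarity): string values are collected recursively,
# optionally restricted to the given key names; order is not guaranteed (a set is used).
#   get_object_labels({"title": "A", "tags": [{"name": "x"}, {"name": "y"}]}, names=["name"])
#   -> ["x", "y"] (in any order)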
def fetch_objects(instance, function, select=50):
skip = 0
while True:
objects = list(instance[skip:skip + select])
if len(objects) == 0:
break
skip += select
for obj in objects:
function(obj)
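# Illustrative usage (hypothetical callables/queryset; anything sliceable works):
#   fetch_objects(MyModel.objects.all(), index_document, select=100)
# pulls slices of 100 objects at a time and calls index_document() on each one
# until an empty slice is returned.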
def offset_objects(key, get_function, save_function, storage):
offset = storage.get(key)
while True:
objects, offset = get_function(offset)
if not objects:
break
for object_data in objects:
save_function(object_data['ocid'])
storage.put(key, offset)
def date(item):
try:
return parser.parse(item, ignoretz=not getattr(settings, 'USE_TZ', False))
except TypeError:
return None
def to_dt(items):
for k, item in enumerate(items):
if item:
items[k] = parser.parse(item, ignoretz=not getattr(settings, 'USE_TZ', False))
return items
def min_next(items, min_value=None):
for item in sorted(filter(lambda x: x is not None, items)):
if min_value < item:
return item
return None
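# Illustrative example (added for clarity): returns the smallest item strictly greater than
# min_value, ignoring None entries; min_value should be comparable to the items (the default
# of None would raise a TypeError on comparison).
#   min_next([3, None, 1, 7], min_value=2) -> 3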
def any_value(items: list):
"""
Function that extracts values from a list and checks if they are diff from None,0,{},[],False...
First value that is diff from them is being returned
:param items: List of items that is searched for non-null values
:return: First value that fits the criteria
"""
for item in items:
if item:
return item
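# Minimal self-check sketch for a few of the helpers above (added for illustration; it is not
# part of the original module and only runs when this file is executed directly).
if __name__ == "__main__":
    assert join_url("https://api.example.com/", "/v1/items") == "https://api.example.com/v1/items"
    assert sf(lambda: 1 / 0) is None  # the error is swallowed and None is returned
    assert any_value([None, 0, "", "first", "second"]) == "first"
    print("drf_util.utils sanity examples passed")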
| 24.959302 | 104 | 0.57652 |
79434d207f8622abb5c1141707095a79cca5ad44 | 2,994 | py | Python | app_forum/models.py | azhari33/website | 841a8e49c5357f63dcf6e5dc3511092c234c0e20 | [
"MIT"
] | 35 | 2017-10-07T16:50:05.000Z | 2020-10-26T13:19:27.000Z | app_forum/models.py | azhari33/website | 841a8e49c5357f63dcf6e5dc3511092c234c0e20 | [
"MIT"
] | 24 | 2017-10-11T08:44:33.000Z | 2022-02-10T07:27:26.000Z | app_forum/models.py | azhari33/website | 841a8e49c5357f63dcf6e5dc3511092c234c0e20 | [
"MIT"
] | 28 | 2017-10-07T16:52:01.000Z | 2020-10-02T11:14:22.000Z | from django.db import models
from markdownx.models import MarkdownxField
from app_author.models import Profile
class Category(models.Model):
"""
Category Model
"""
id = models.AutoField(
primary_key=True
)
category_title = models.CharField(
max_length=200,
verbose_name=u'Category Name',
blank=False,
null=False
)
slug = models.SlugField()
class Meta:
verbose_name_plural = "Categories"
def __str__(self):
return str(self.category_title)
def save(self, **kwargs):
if not self.slug:
from djangoid.utils import get_unique_slug
self.slug = get_unique_slug(instance=self, field='category_title')
super(Category, self).save(**kwargs)
def get_absolute_url(self):
"""
Call Category Slug
"""
return 'app_forum:category'
class Forum(models.Model):
"""
Thread Model
"""
class Meta:
verbose_name_plural = "Title"
forum_author = models.ForeignKey(
Profile,
related_name='user_forums',
null=True,
blank=True,
on_delete=models.CASCADE
)
forum_title = models.CharField(
max_length=225,
verbose_name=u'Title',
blank=False,
null=False
)
forum_category = models.ForeignKey(
'Category',
on_delete=models.CASCADE,
verbose_name=u'Category',
)
forum_content = MarkdownxField(
verbose_name=u'Content (Use Markdown)',
)
is_created = models.DateTimeField(
auto_now_add=True,
null=True,
blank=True
)
is_modified = models.DateTimeField(
auto_now=True,
null=True,
blank=True
)
is_hot = models.BooleanField(
default=False
)
is_closed = models.BooleanField(
default=False
)
def __str__(self):
return str(self.forum_title)
def latest_comment_author(self):
return self.forum_comments.latest('is_created').comment_author
def latest_comment_date(self):
return self.forum_comments.latest('is_created').is_created
def get_absolute_url(self):
"""
Call Forum ID
"""
return 'app_forum:forum'
class Comment(models.Model):
"""
Comment Model
"""
class Meta:
verbose_name_plural = "Comment"
forum = models.ForeignKey(
'Forum',
on_delete=models.CASCADE,
related_name='forum_comments'
)
comment_author = models.ForeignKey(
Profile,
related_name='user_comments',
null=True,
blank=True,
on_delete=models.CASCADE
)
comment_content = MarkdownxField(
verbose_name=u'Markdown',
)
is_created = models.DateTimeField(
auto_now_add=True,
)
is_modified = models.DateTimeField(
auto_now=True,
)
def __str__(self):
return self.comment_content
| 20.22973 | 78 | 0.602538 |
79434d53c01522395ad1abd54903758cc2e9017f | 21,091 | py | Python | airbyte-integrations/connectors/source-tiktok-marketing/source_tiktok_marketing/streams.py | aleia-team/airbyte | 7e25fddd6824fa1d2c8c216b294bc21a2baa1331 | [
"MIT"
] | 1 | 2022-02-05T16:49:37.000Z | 2022-02-05T16:49:37.000Z | airbyte-integrations/connectors/source-tiktok-marketing/source_tiktok_marketing/streams.py | aleia-team/airbyte | 7e25fddd6824fa1d2c8c216b294bc21a2baa1331 | [
"MIT"
] | 1 | 2022-02-15T11:14:05.000Z | 2022-02-15T11:14:05.000Z | airbyte-integrations/connectors/source-tiktok-marketing/source_tiktok_marketing/streams.py | aleia-team/airbyte | 7e25fddd6824fa1d2c8c216b294bc21a2baa1331 | [
"MIT"
] | 1 | 2022-03-28T17:22:22.000Z | 2022-03-28T17:22:22.000Z | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import json
from abc import ABC, abstractmethod
from datetime import datetime
from decimal import Decimal
from enum import Enum
from functools import total_ordering
from typing import (Any, Dict, Iterable, List, Mapping, MutableMapping,
Optional, Tuple, TypeVar, Union)
import pendulum
import requests
import pydantic
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources.streams.core import package_name_from_class
from airbyte_cdk.sources.streams.http import HttpStream
from airbyte_cdk.sources.streams.http.auth import NoAuth
from airbyte_cdk.sources.utils.schema_helpers import ResourceSchemaLoader
from airbyte_cdk.sources.utils.transform import (TransformConfig,
TypeTransformer)
# TikTok Initial release date is September 2016
DEFAULT_START_DATE = "2016-09-01"
T = TypeVar("T")
# Hierarchy of classes
# TiktokStream
# ├── ListAdvertiserIdsStream
# └── FullRefreshTiktokStream
# ├── Advertisers
# └── IncrementalTiktokStream
# ├── AdGroups
# ├── Ads
# ├── Campaigns
# └── BasicReports
# ├── AdsReports
# ├── AdvertisersReports
# ├── CampaignsReports
# └── AdGroupsReports
@total_ordering
class JsonUpdatedState(pydantic.BaseModel):
current_stream_state: str
stream: T
def __repr__(self):
"""Overrides print view"""
return str(self.dict())
def dict(self, **kwargs):
"""Overrides default logic.
        A new updated state is only returned once all advertisers have been processed
"""
if not self.stream.is_finished:
return self.current_stream_state
max_updated_at = self.stream.max_cursor_date or ""
return max(max_updated_at, self.current_stream_state)
def __eq__(self, other):
if isinstance(other, JsonUpdatedState):
return self.current_stream_state == other.current_stream_state
return self.current_stream_state == other
def __lt__(self, other):
if isinstance(other, JsonUpdatedState):
return self.current_stream_state < other.current_stream_state
return self.current_stream_state < other
class ReportLevel(str, Enum):
ADVERTISER = "ADVERTISER"
CAMPAIGN = "CAMPAIGN"
ADGROUP = "ADGROUP"
AD = "AD"
class ReportGranularity(str, Enum):
LIFETIME = "LIFETIME"
DAY = "DAY"
HOUR = "HOUR"
@classmethod
def default(cls):
return cls.DAY
class TiktokException(Exception):
"""default exception of custom Tiktok logic"""
class TiktokStream(HttpStream, ABC):
# endpoints can have different list names
response_list_field = "list"
# max value of page
page_size = 1000
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
"""All responses have the similar structure:
{
"message": "<OK or ERROR>",
"code": <code>, # 0 if error else error unique code
"request_id": "<unique_request_id>"
"data": {
"page_info": {
"total_number": <total_item_count>,
"page": <current_page_number>,
"page_size": <page_size>,
"total_page": <total_page_count>
},
"list": [
<list_item>
]
}
}
"""
data = response.json()
if data["code"]:
            raise TiktokException(data)  # the payload carries code, message and request_id
data = data["data"]
if self.response_list_field in data:
data = data[self.response_list_field]
for record in data:
yield record
@property
def url_base(self) -> str:
"""
Docs: https://business-api.tiktok.com/marketing_api/docs?id=1701890920013825
"""
if self.is_sandbox:
return "https://sandbox-ads.tiktok.com/open_api/v1.2/"
return "https://business-api.tiktok.com/open_api/v1.2/"
def next_page_token(self, *args, **kwargs) -> Optional[Mapping[str, Any]]:
# this data without listing
return None
def should_retry(self, response: requests.Response) -> bool:
"""
Once the rate limit is met, the server returns "code": 40100
Docs: https://business-api.tiktok.com/marketing_api/docs?id=1701890997610497
"""
try:
data = response.json()
except Exception:
self.logger.error(f"Incorrect JSON response: {response.text}")
raise
if data["code"] == 40100:
return True
return super().should_retry(response)
def backoff_time(self, response: requests.Response) -> Optional[float]:
"""
        The system enforces a per-second call limit for each developer app. The set limit varies according to the app's call limit level.
"""
# Basic: 10/sec
# Advanced: 20/sec
# Premium: 30/sec
# All apps are set to basic call limit level by default.
# Returns maximum possible delay
return 0.6
class ListAdvertiserIdsStream(TiktokStream):
"""Loading of all possible advertisers"""
primary_key = "advertiser_id"
def __init__(self, advertiser_id: int, app_id: int, secret: str, access_token: str):
super().__init__(authenticator=NoAuth())
self._advertiser_ids = []
# for Sandbox env
self._advertiser_id = advertiser_id
if not self._advertiser_id:
# for Production env
self._secret = secret
self._app_id = app_id
self._access_token = access_token
else:
self._advertiser_ids.append(self._advertiser_id)
@property
def is_sandbox(self) -> bool:
"""
the config parameter advertiser_id is required for Sandbox
"""
# only sandbox has a not empty self._advertiser_id value
return self._advertiser_id > 0
def request_params(
self, stream_state: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None, **kwargs
) -> MutableMapping[str, Any]:
return {"access_token": self._access_token, "secret": self._secret, "app_id": self._app_id}
def path(self, *args, **kwargs) -> str:
return "oauth2/advertiser/get/"
@property
def advertiser_ids(self):
if not self._advertiser_ids:
for advertiser in self.read_records(SyncMode.full_refresh):
self._advertiser_ids.append(advertiser["advertiser_id"])
return self._advertiser_ids
class FullRefreshTiktokStream(TiktokStream, ABC):
primary_key = "id"
fields: List[str] = None
transformer = TypeTransformer(TransformConfig.DefaultSchemaNormalization | TransformConfig.CustomSchemaNormalization)
@transformer.registerCustomTransform
def transform_function(original_value: Any, field_schema: Dict[str, Any]) -> Any:
"""Custom traun"""
if original_value == "-":
return None
elif isinstance(original_value, float):
return Decimal(original_value)
return original_value
def __init__(self, advertiser_id: int, app_id: int, secret: str, start_date: str, **kwargs):
super().__init__(**kwargs)
# convert a start date to TikTok format
# example: "2021-08-24" => "2021-08-24 00:00:00"
self._start_time = pendulum.parse(start_date or DEFAULT_START_DATE).strftime("%Y-%m-%d 00:00:00")
self._advertiser_storage = ListAdvertiserIdsStream(
advertiser_id=advertiser_id, app_id=app_id, secret=secret, access_token=self.authenticator.token
)
self.max_cursor_date = None
self._advertiser_ids = self._advertiser_storage.advertiser_ids
@property
def is_sandbox(self):
return self._advertiser_storage.is_sandbox
@staticmethod
def convert_array_param(arr: List[Union[str, int]]) -> str:
return json.dumps(arr)
@property
def is_finished(self):
return len(self._advertiser_ids) == 0
def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]:
"""Loads all updated tickets after last stream state"""
while self._advertiser_ids:
advertiser_id = self._advertiser_ids.pop(0)
yield {"advertiser_id": advertiser_id}
def request_params(
self,
stream_state: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
stream_slice: Mapping[str, Any] = None,
**kwargs,
) -> MutableMapping[str, Any]:
params = {"page_size": self.page_size}
if self.fields:
params["fields"] = self.convert_array_param(self.fields)
if stream_slice:
params.update(stream_slice)
return params
class IncrementalTiktokStream(FullRefreshTiktokStream, ABC):
cursor_field = "modify_time"
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
"""All responses have the following pagination data:
{
"data": {
"page_info": {
"total_number": < total_item_count >,
"page": < current_page_number >,
"page_size": < page_size >,
"total_page": < total_page_count >
},
...
}
}
"""
page_info = response.json()["data"]["page_info"]
if page_info["page"] < page_info["total_page"]:
return {"page": page_info["page"] + 1}
return None
def request_params(self, next_page_token: Mapping[str, Any] = None, **kwargs) -> MutableMapping[str, Any]:
params = super().request_params(next_page_token=next_page_token, **kwargs)
if next_page_token:
params.update(next_page_token)
return params
def select_cursor_field_value(self, data: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None) -> str:
if not data or not self.cursor_field:
return None
cursor_field_path = self.cursor_field if isinstance(self.cursor_field, list) else [self.cursor_field]
result = data
for key in cursor_field_path:
result = result.get(key)
return result
def parse_response(
self, response: requests.Response, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, **kwargs
) -> Iterable[Mapping]:
"""Additional data filtering"""
state = self.select_cursor_field_value(stream_state) or self._start_time
for record in super().parse_response(response=response, stream_state=stream_state, **kwargs):
updated = self.select_cursor_field_value(record, stream_slice)
if updated is None:
yield record
elif updated <= state:
continue
else:
if not self.max_cursor_date or self.max_cursor_date < updated:
self.max_cursor_date = updated
yield record
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
        # the latest state only needs to be saved once all advertisers have been processed
current_stream_state_value = (self.select_cursor_field_value(current_stream_state)) or ""
        # a JsonUpdatedState object is tied to the current stream and returns a new updated state when needed
if not isinstance(current_stream_state_value, JsonUpdatedState):
current_stream_state_value = JsonUpdatedState(stream=self, current_stream_state=current_stream_state_value)
        # report streams have cursor fields that are nested inside an object
cursor_field_path = self.cursor_field if isinstance(self.cursor_field, list) else [self.cursor_field]
# generate a dict with nested items
# ["key1", "key1"] => {"key1": {"key2": <value>}}
tree_dict = current_stream_state_value
for key in reversed(cursor_field_path):
tree_dict = {key: tree_dict}
return tree_dict
class Advertisers(FullRefreshTiktokStream):
"""Docs: https://ads.tiktok.com/marketing_api/docs?id=1708503202263042"""
def request_params(self, **kwargs) -> MutableMapping[str, Any]:
params = super().request_params(**kwargs)
params["advertiser_ids"] = self.convert_array_param(self._advertiser_ids)
return params
def path(self, *args, **kwargs) -> str:
return "advertiser/info/"
def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]:
"""this stream must work with the default slice logic"""
yield None
class Campaigns(IncrementalTiktokStream):
"""Docs: https://ads.tiktok.com/marketing_api/docs?id=1708582970809346"""
primary_key = "campaign_id"
def path(self, *args, **kwargs) -> str:
return "campaign/get/"
class AdGroups(IncrementalTiktokStream):
"""Docs: https://ads.tiktok.com/marketing_api/docs?id=1708503489590273"""
primary_key = "adgroup_id"
def path(self, *args, **kwargs) -> str:
return "adgroup/get/"
class Ads(IncrementalTiktokStream):
"""Docs: https://ads.tiktok.com/marketing_api/docs?id=1708572923161602"""
primary_key = "ad_id"
def path(self, *args, **kwargs) -> str:
return "ad/get/"
class BasicReports(IncrementalTiktokStream, ABC):
"""Docs: https://ads.tiktok.com/marketing_api/docs?id=1707957200780290"""
primary_key = None
@property
@abstractmethod
def report_level(self) -> ReportLevel:
"""
Returns a necessary level value
"""
def __init__(self, report_granularity: ReportGranularity, **kwargs):
super().__init__(**kwargs)
self.report_granularity = report_granularity
@property
def cursor_field(self):
if self.report_granularity == ReportGranularity.DAY:
return ["dimensions", "stat_time_day"]
if self.report_granularity == ReportGranularity.HOUR:
return ["dimensions", "stat_time_hour"]
return []
@staticmethod
def _get_time_interval(start_date: Union[datetime, str], granularity: ReportGranularity) -> Iterable[Tuple[datetime, datetime]]:
"""Due to time range restrictions based on the level of granularity of reports, we have to chunk API calls in order
to get the desired time range.
Docs: https://ads.tiktok.com/marketing_api/docs?id=1714590313280513
:param start_date - Timestamp from which we should start the report
:param granularity - Level of granularity of the report; one of [HOUR, DAY, LIFETIME]
:return Iterator for pair of start_date and end_date that can be used as request parameters
"""
if isinstance(start_date, str):
start_date = pendulum.parse(start_date)
end_date = pendulum.now()
        # The TikTok API only allows a certain number of days of data per request, based on the reporting granularity
if granularity == ReportGranularity.DAY:
max_interval = 30
elif granularity == ReportGranularity.HOUR:
max_interval = 1
elif granularity == ReportGranularity.LIFETIME:
max_interval = 364
else:
raise ValueError("Unsupported reporting granularity, must be one of DAY, HOUR, LIFETIME")
total_date_diff = end_date - start_date
iterations = total_date_diff.days // max_interval
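        # e.g. a 65-day range at DAY granularity (max_interval=30) gives 65 // 30 = 2, so the loop below yields 3 chunks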
for i in range(iterations + 1):
chunk_start = start_date + pendulum.duration(days=(i * max_interval))
chunk_end = min(chunk_start + pendulum.duration(days=max_interval, seconds=-1), end_date)
yield chunk_start, chunk_end
def _get_reporting_dimensions(self):
result = []
spec_id_dimensions = {
ReportLevel.ADVERTISER: "advertiser_id",
ReportLevel.CAMPAIGN: "campaign_id",
ReportLevel.ADGROUP: "adgroup_id",
ReportLevel.AD: "ad_id",
}
spec_time_dimensions = {ReportGranularity.DAY: "stat_time_day", ReportGranularity.HOUR: "stat_time_hour"}
if self.report_level and self.report_level in spec_id_dimensions:
result.append(spec_id_dimensions[self.report_level])
if self.report_granularity and self.report_granularity in spec_time_dimensions:
result.append(spec_time_dimensions[self.report_granularity])
return result
def _get_metrics(self):
# common metrics for all reporting levels
result = ["spend", "cpc", "cpm", "impressions", "clicks", "ctr", "reach", "cost_per_1000_reached", "frequency"]
if self.report_level == ReportLevel.ADVERTISER and self.report_granularity == ReportGranularity.DAY:
result.extend(["cash_spend", "voucher_spend"])
if self.report_level in (ReportLevel.CAMPAIGN, ReportLevel.ADGROUP, ReportLevel.AD):
result.extend(["campaign_name"])
if self.report_level in (ReportLevel.ADGROUP, ReportLevel.AD):
result.extend(
[
"campaign_id",
"adgroup_name",
"placement",
"tt_app_id",
"tt_app_name",
"mobile_app_id",
"promotion_type",
"dpa_target_audience_type",
]
)
result.extend(
[
"conversion",
"cost_per_conversion",
"conversion_rate",
"real_time_conversion",
"real_time_cost_per_conversion",
"real_time_conversion_rate",
"result",
"cost_per_result",
"result_rate",
"real_time_result",
"real_time_cost_per_result",
"real_time_result_rate",
"secondary_goal_result",
"cost_per_secondary_goal_result",
"secondary_goal_result_rate",
]
)
if self.report_level == ReportLevel.AD:
result.extend(["adgroup_id", "ad_name", "ad_text"])
return result
def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]:
stream_start = self.select_cursor_field_value(stream_state) or self._start_time
for slice in super().stream_slices(**kwargs):
for start_date, end_date in self._get_time_interval(stream_start, self.report_granularity):
slice["start_date"] = start_date.strftime("%Y-%m-%d")
slice["end_date"] = end_date.strftime("%Y-%m-%d")
self.logger.debug(
f'name: {self.name}, advertiser_id: {slice["advertiser_id"]}, slice: {slice["start_date"]} - {slice["end_date"]}'
)
yield slice
def path(self, *args, **kwargs) -> str:
return "reports/integrated/get/"
def request_params(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, **kwargs
) -> MutableMapping[str, Any]:
params = super().request_params(stream_state=stream_state, stream_slice=stream_slice, **kwargs)
params["advertiser_id"] = stream_slice["advertiser_id"]
params["service_type"] = "AUCTION"
params["report_type"] = "BASIC"
params["data_level"] = f"AUCTION_{self.report_level}"
params["dimensions"] = json.dumps(self._get_reporting_dimensions())
params["metrics"] = json.dumps(self._get_metrics())
if self.report_granularity == ReportGranularity.LIFETIME:
params["lifetime"] = "true"
else:
params["start_date"] = stream_slice["start_date"]
params["end_date"] = stream_slice["end_date"]
return params
def get_json_schema(self) -> Mapping[str, Any]:
"""All reports have same schema"""
return ResourceSchemaLoader(package_name_from_class(self.__class__)).get_schema("basic_reports")
def select_cursor_field_value(self, data: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None) -> str:
if stream_slice:
return stream_slice["end_date"]
return super().select_cursor_field_value(data)
class AdsReports(BasicReports):
"""Custom reports for ads"""
report_level = ReportLevel.AD
class AdvertisersReports(BasicReports):
"""Custom reports for advertiser"""
report_level = ReportLevel.ADVERTISER
class CampaignsReports(BasicReports):
"""Custom reports for campaigns"""
report_level = ReportLevel.CAMPAIGN
class AdGroupsReports(BasicReports):
"""Custom reports for adgroups"""
report_level = ReportLevel.ADGROUP
| 36.363793 | 135 | 0.631312 |
79434d8a44ccef6858e6ccb935c347831429d496 | 6,169 | py | Python | plot_combinedfom_selectsamples_custom.py | johnmgregoire/JCAPdatavis | 6d77a510e00acf31de9665828d27ea33aba6ab78 | [
"BSD-3-Clause"
] | null | null | null | plot_combinedfom_selectsamples_custom.py | johnmgregoire/JCAPdatavis | 6d77a510e00acf31de9665828d27ea33aba6ab78 | [
"BSD-3-Clause"
] | null | null | null | plot_combinedfom_selectsamples_custom.py | johnmgregoire/JCAPdatavis | 6d77a510e00acf31de9665828d27ea33aba6ab78 | [
"BSD-3-Clause"
] | null | null | null | import numpy, pylab, os, sys, csv
from echem_plate_fcns import *
from echem_plate_math import *
PyCodePath=os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
sys.path.append(os.path.join(PyCodePath,'ternaryplot'))
from myternaryutility import TernaryPlot
from myquaternaryutility import QuaternaryPlot
pylab.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
dp='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/results/combinedfom.txt'
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/parsedresults/paperfom'
elkeys=['Ni', 'Fe', 'Co', 'Ce']
ellabels=elkeys
compvertsp=numpy.array([[.5, .5, 0, 0], [.5, 0, .5, 0], [0, 0, .1, .9]])
critdistp=.05
compverts=numpy.array([[.5, .37, .13, 0], [.25, 0, .25, .5]])
critdist=.05
betweenbool=True
invertbool=False
pointsize=20
opacity=.6
view_azim=-159
view_elev=18
f=open(dp, mode='r')
dr=csv.DictReader(f, delimiter='\t')
dropd={}
for l in dr:
for kr in l.keys():
k=kr.strip()
if not k in dropd.keys():
dropd[k]=[]
dropd[k]+=[myeval(l[kr].strip())]
for k in dropd.keys():
dropd[k]=numpy.array(dropd[k])
f.close()
dropd['compositions']=numpy.array([dropd[elkey] for elkey in elkeys]).T
comps=numpy.array([dropd[elkey] for elkey in elkeys]).T
gridi=30
comps30=[(a*1./gridi, b*1./gridi, c*1./gridi, (gridi-a-b-c)*1./gridi) for a in numpy.arange(0,1+gridi) for b in numpy.arange(0,1+gridi-a) for c in numpy.arange(0,1+gridi-a-b)]
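# comps30 enumerates every quaternary composition on a 30-step grid; the fourth fraction is the remainder so each tuple sums to 1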
pylab.figure()
#axq=pylab.subplot(111)
stpq=QuaternaryPlot(111, ellabels=ellabels)
cols=stpq.rgb_comp(comps30)
stpq.plotbycolor(comps30, cols, marker='o', markersize=3, alpha=1)
stpq.set_projection(azim=view_azim, elev=view_elev)
pylab.savefig(os.path.join(savefolder, 'QuatPointsAll.png'))
pylab.savefig(os.path.join(savefolder, 'QuatPointsAll.eps'))
pylab.figure()
#axq=pylab.subplot(111)
stpqp=QuaternaryPlot(111, ellabels=ellabels)
selectinds, distfromplane, xyparr, xyp_verts,intriangle=stpqp.filterbydistancefromplane(comps, compvertsp[0], compvertsp[1], compvertsp[2], critdistp, withintriangle=betweenbool, invlogic=invertbool, returnall=True)
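# keep only the samples lying within critdistp of the composition plane spanned by the three vertices in compvertsp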
xyparr=xyparr[selectinds]
cols=stpqp.rgb_comp(comps[selectinds])
stpqp.plotbycolor(comps[selectinds], cols, marker='o', markersize=3, alpha=1)
stpqp.line(compvertsp[0], compvertsp[1], lw=2)
stpqp.line(compvertsp[2], compvertsp[1], lw=2)
stpqp.line(compvertsp[0], compvertsp[2], lw=2)
#stpqp.scatter(comps[selectinds], pointsize
stpqp.set_projection(azim=view_azim, elev=view_elev)
stpqp.label(fontsize=20)
pylab.savefig(os.path.join(savefolder, 'QuatPointsPlane.png'))
pylab.savefig(os.path.join(savefolder, 'QuatPointsPlane.eps'))
pylab.figure()
#axq=pylab.subplot(111)
stpql=QuaternaryPlot(111, ellabels=ellabels)
selectinds, distfromlin, lineparameter=stpql.filterbydistancefromline(comps, compverts[0], compverts[1], critdist, betweenpoints=betweenbool, invlogic=invertbool, returnall=True)
dropd['lineparameter']=lineparameter
dropd['distfromlin']=distfromlin
lineparameter=lineparameter[selectinds]
cols=stpql.rgb_comp(comps[selectinds])
stpql.plotbycolor(comps[selectinds], cols, marker='o', markersize=3, alpha=1)
stpql.line(compverts[0], compverts[1], lw=2)
stpql.line(compvertsp[0], compvertsp[1], lw=1.2)
stpql.line(compvertsp[2], compvertsp[1], lw=1.2)
stpql.line(compvertsp[0], compvertsp[2], lw=1.2)
#stpql.scatter(comps[selectinds], pointsize
stpql.set_projection(azim=view_azim, elev=view_elev)
stpql.label(fontsize=20)
pylab.savefig(os.path.join(savefolder, 'QuatPointsLin.png'))
pylab.savefig(os.path.join(savefolder, 'QuatPointsLin.eps'))
pylab.figure(figsize=(6, 4))
ax=pylab.subplot(111)
for k, c, l, vsh in [('CP5Eave', 'b', '1 mA/cm$^2$', -(.187-.044)), ('CP4Eave', 'g', '10 mA/cm$^2$', -(.187-.044)), ('CP6Eave', 'r', '19 mA/cm$^2$', -(.187-.048))]:
fomselect=(vsh+dropd[k][selectinds])*1000.
stpql.plotfomalonglineparameter(ax, lineparameter, fomselect, compend1=compverts[0], compend2=compverts[1], lineparticks=numpy.linspace(0, 1, 5), ls='none', marker='.', color=c, label=l, labelfmtstr='%.2f', ticklabelkwargdict=dict([('rotation', -20), ('horizontalalignment', 'left')]))
#pylab.legend(loc=3)
pylab.ylim(290, 430)
pylab.text(.4,300,'$J$=1 mA cm$^{-2}$', color='b')
pylab.text(.3,358,'$J$=10 mA cm$^{-2}$', color='g')
pylab.text(.2,416,'$J$=19 mA cm$^{-2}$', color='r')
pylab.ylabel('$\eta$, OER overpotential (mV)')
pylab.subplots_adjust(bottom=.25, left=.15, right=.72)
pylab.savefig(os.path.join(savefolder, 'AllCP.png'))
pylab.savefig(os.path.join(savefolder, 'AllCP.eps'))
pylab.figure(figsize=(6, 4))
ax=pylab.subplot(111)
for k, c, l, vsh in [ ('TafelCPLogExCurrent', 'b', '', 5.)]:
fomselect=(vsh+dropd[k][selectinds])*1.
stpql.plotfomalonglineparameter(ax, lineparameter, fomselect, compend1=compverts[0], compend2=compverts[1], lineparticks=numpy.linspace(0, 1, 5), ls='none', marker='.', color='k', label=l, labelfmtstr='%.2f', ticklabelkwargdict=dict([('rotation', -20), ('horizontalalignment', 'left')]))
pylab.ylabel('Log($J_{\eta=0}$ / mA cm$^{-2}$)')
pylab.subplots_adjust(bottom=.25, left=.15, right=.72)
pylab.savefig(os.path.join(savefolder, 'TafelCPexchange.png'))
pylab.savefig(os.path.join(savefolder, 'TafelCPexchange.eps'))
pylab.figure(figsize=(6, 4))
ax=pylab.subplot(111)
for k, c, l, vsh in [ ('TafelCPSlopeVperdec', 'b', '', 0.)]:
fomselect=(vsh+dropd[k][selectinds])*1000.
stpql.plotfomalonglineparameter(ax, lineparameter, fomselect, compend1=compverts[0], compend2=compverts[1], lineparticks=numpy.linspace(0, 1, 5), ls='none', marker='.', color='k', label=l, labelfmtstr='%.2f', ticklabelkwargdict=dict([('rotation', -20), ('horizontalalignment', 'left')]))
pylab.ylim(40, 95)
pylab.ylabel(r'$\alpha$'+'=d$\eta$/d$Log(J)$ (mV/decade)')
pylab.text(.16,60,'Fig.2a', color='k', ha='center')
pylab.text(.46,44,'Fig.2b', color='k', ha='center')
pylab.text(.91,64,'Fig.2c', color='k', ha='center')
pylab.subplots_adjust(bottom=.33, left=.15, right=.72)
pylab.savefig(os.path.join(savefolder, 'TafelCPmVdecade.png'))
pylab.savefig(os.path.join(savefolder, 'TafelCPmVdecade.eps'))
pylab.show()
| 35.866279 | 291 | 0.714865 |
79434f32db976cadecc0ee3f1361d0dd130bc17e | 23 | py | Python | csimpy/_version.py | dewancse/csimpy | 58c32e40e5d991b4ca98df05e6f61020def475a9 | [
"Apache-2.0"
] | 4 | 2018-10-02T20:20:26.000Z | 2019-07-26T12:57:26.000Z | csimpy/_version.py | dewancse/csimpy | 58c32e40e5d991b4ca98df05e6f61020def475a9 | [
"Apache-2.0"
] | null | null | null | csimpy/_version.py | dewancse/csimpy | 58c32e40e5d991b4ca98df05e6f61020def475a9 | [
"Apache-2.0"
] | 1 | 2020-08-21T02:32:57.000Z | 2020-08-21T02:32:57.000Z | __version__ = '0.0.1a'
| 11.5 | 22 | 0.652174 |
79434fdf09e9dd9b877244adeb3da8eed0b3edd1 | 4,044 | py | Python | regrippy/plugins/env.py | simsor/regrippy | 5a4babedb31fc1ac4699a435265da18cc60dca30 | [
"Apache-2.0"
] | null | null | null | regrippy/plugins/env.py | simsor/regrippy | 5a4babedb31fc1ac4699a435265da18cc60dca30 | [
"Apache-2.0"
] | null | null | null | regrippy/plugins/env.py | simsor/regrippy | 5a4babedb31fc1ac4699a435265da18cc60dca30 | [
"Apache-2.0"
] | null | null | null | from Registry.Registry import RegistryValueNotFoundException
from regrippy import BasePlugin, PluginResult
# Based on the information in libyal/winreg-kb
# https://github.com/libyal/winreg-kb/blob/main/docs/sources/system-keys/Environment-variables.md
class Plugin(BasePlugin):
"""Lists all environment variables"""
__REGHIVE__ = ["SYSTEM", "SOFTWARE", "NTUSER.DAT"]
def run(self):
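        # dispatch to a hive-specific handler: SYSTEM -> Session Manager Environment key,
        # SOFTWARE -> CurrentVersion/ProfileList values, NTUSER.DAT -> the per-user Environment key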
if self.hive_name == "SYSTEM":
yield from self.handle_env_key()
elif self.hive_name == "SOFTWARE":
yield from self.handle_windows_currentversion()
yield from self.handle_windows_nt_currentversion()
yield from self.handle_profilelist()
elif self.hive_name == "NTUSER.DAT":
yield from self.handle_user_env()
def handle_env_key(self):
ccs = self.get_currentcontrolset_path()
k = self.open_key(ccs + "\\Control\\Session Manager\\Environment")
if not k:
return
for v in k.values():
r = PluginResult(key=k, value=v)
r.custom["Name"] = f"%{v.name()}%"
yield r
def handle_windows_currentversion(self):
k = self.open_key("Microsoft\\Windows\\CurrentVersion")
if not k:
return
try:
v = k.value("CommonFilesDir")
r = PluginResult(key=k, value=v)
r.custom["Name"] = "%CommonProgramFiles%"
yield r
except RegistryValueNotFoundException:
pass
try:
v = k.value("CommonFilesDir (x86)")
r = PluginResult(key=k, value=v)
r.custom["Name"] = "%CommonProgramFiles(x86)%"
yield r
except RegistryValueNotFoundException:
pass
try:
v = k.value("CommonW6432Dir")
r = PluginResult(key=k, value=v)
r.custom["Name"] = "%CommonProgramW6432%"
yield r
except RegistryValueNotFoundException:
pass
try:
v = k.value("ProgramFilesDir")
r = PluginResult(key=k, value=v)
r.custom["Name"] = "%ProgramFiles%"
yield r
except RegistryValueNotFoundException:
pass
try:
v = k.value("ProgramFilesDir (x86)")
r = PluginResult(key=k, value=v)
r.custom["Name"] = "%ProgramFiles(x86)%"
yield r
except RegistryValueNotFoundException:
pass
try:
v = k.value("ProgramW6432Dir")
r = PluginResult(key=k, value=v)
r.custom["Name"] = "%ProgramW6432%"
yield r
except RegistryValueNotFoundException:
pass
def handle_windows_nt_currentversion(self):
k = self.open_key("Microsoft\\Windows NT\\CurrentVersion")
if not k:
return
try:
v = k.value("SystemRoot")
r = PluginResult(key=k, value=v)
r.custom["Name"] = "%SystemRoot%"
yield r
except RegistryValueNotFoundException:
pass
def handle_profilelist(self):
k = self.open_key("Microsoft\\Windows NT\\CurrentVersion\\ProfileList")
if not k:
return
try:
v = k.value("ProgramData")
r = PluginResult(key=k, value=v)
r.custom["Name"] = "%ProgramData%"
yield r
except RegistryValueNotFoundException:
pass
try:
v = k.value("Public")
r = PluginResult(key=k, value=v)
r.custom["Name"] = "%Public%"
yield r
except RegistryValueNotFoundException:
pass
def handle_user_env(self):
k = self.open_key("Environment")
if not k:
return
for v in k.values():
r = PluginResult(key=k, value=v)
r.custom["Name"] = f"%{v.name()}%"
yield r
def display_human(self, r: PluginResult):
print(f"{r.custom['Name']}: {r.value_data}")
| 29.955556 | 97 | 0.552423 |
7943501859228f4312b61c03bc0891ca0e5d465d | 449 | py | Python | shortest-word-distance/shortest-word-distance.py | QQuinn03/LeetHub | 51ce21d721f0f524a07ed24266fb5fca473fcaa5 | [
"MIT"
] | null | null | null | shortest-word-distance/shortest-word-distance.py | QQuinn03/LeetHub | 51ce21d721f0f524a07ed24266fb5fca473fcaa5 | [
"MIT"
] | null | null | null | shortest-word-distance/shortest-word-distance.py | QQuinn03/LeetHub | 51ce21d721f0f524a07ed24266fb5fca473fcaa5 | [
"MIT"
] | null | null | null | class Solution:
def shortestDistance(self, wordsDict: List[str], word1: str, word2: str) -> int:
w1 = -1
w2 = -1
res = len(wordsDict)
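        # track the most recent index of each word; len(wordsDict) is an upper bound on any distance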
for idx,val in enumerate(wordsDict):
if word1==val:
w1=idx
elif word2==val:
w2=idx
if w1!=-1 and w2!=-1:
res = min(res,abs(w1-w2))
return res | 29.933333 | 84 | 0.420935 |
794350cf9d817a15e613d3b55766524c044ac97a | 23,725 | py | Python | pypika/tests/test_functions.py | foggel/pypika | 1f6ab34bada982fe632e5828f87bfbac2e601c5f | [
"Apache-2.0"
] | null | null | null | pypika/tests/test_functions.py | foggel/pypika | 1f6ab34bada982fe632e5828f87bfbac2e601c5f | [
"Apache-2.0"
] | null | null | null | pypika/tests/test_functions.py | foggel/pypika | 1f6ab34bada982fe632e5828f87bfbac2e601c5f | [
"Apache-2.0"
] | null | null | null | import unittest
from pypika import (
Case,
CaseException,
DatePart,
Field as F,
Query,
Query as Q,
Schema,
Table as T,
VerticaQuery,
functions as fn,
)
from pypika.enums import Dialects, SqlTypes
__author__ = "Timothy Heys"
__email__ = "[email protected]"
class FunctionTests(unittest.TestCase):
def test_dialect_propagation(self):
func = fn.Function("func", ["a"], ["b"])
self.assertEqual(
"func(ARRAY['a'],ARRAY['b'])", func.get_sql(dialect=Dialects.POSTGRESQL)
)
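        # list arguments are rendered as ARRAY literals when the PostgreSQL dialect is requested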
def test_is_aggregate_None_for_non_aggregate_function_or_function_with_no_aggregate_functions(self):
self.assertIsNone(fn.Coalesce('a', 0).is_aggregate)
self.assertIsNone(fn.Coalesce(fn.NullIf('a', 0), 0).is_aggregate)
def test_is_aggregate_True_for_aggregate_function_or_function_with_aggregate_functions(self):
self.assertTrue(fn.Sum('a').is_aggregate)
self.assertTrue(fn.Coalesce(fn.Avg('a'), 0).is_aggregate)
self.assertTrue(fn.Coalesce(fn.NullIf(fn.Sum('a'), 0), 0).is_aggregate)
class SchemaTests(unittest.TestCase):
def test_schema_no_schema_in_sql_when_none_set(self):
func = fn.Function("my_proc", 1, 2, 3)
self.assertEqual("my_proc(1,2,3)", func.get_sql(quote_char='"'))
def test_schema_included_in_function_sql(self):
a = Schema("a")
func = fn.Function("my_proc", 1, 2, 3, schema=a)
self.assertEqual('"a".my_proc(1,2,3)', func.get_sql(quote_char='"'))
class ArithmeticTests(unittest.TestCase):
t = T("abc")
def test__addition__fields(self):
q1 = Q.from_("abc").select(F("a") + F("b"))
q2 = Q.from_(self.t).select(self.t.a + self.t.b)
self.assertEqual('SELECT "a"+"b" FROM "abc"', str(q1))
self.assertEqual('SELECT "a"+"b" FROM "abc"', str(q2))
def test__addition__number(self):
q1 = Q.from_("abc").select(F("a") + 1)
q2 = Q.from_(self.t).select(self.t.a + 1)
self.assertEqual('SELECT "a"+1 FROM "abc"', str(q1))
self.assertEqual('SELECT "a"+1 FROM "abc"', str(q2))
def test__addition__decimal(self):
q1 = Q.from_("abc").select(F("a") + 1.0)
q2 = Q.from_(self.t).select(self.t.a + 1.0)
self.assertEqual('SELECT "a"+1.0 FROM "abc"', str(q1))
self.assertEqual('SELECT "a"+1.0 FROM "abc"', str(q2))
def test__addition__right(self):
q1 = Q.from_("abc").select(1 + F("a"))
q2 = Q.from_(self.t).select(1 + self.t.a)
self.assertEqual('SELECT 1+"a" FROM "abc"', str(q1))
self.assertEqual('SELECT 1+"a" FROM "abc"', str(q2))
def test__subtraction__fields(self):
q1 = Q.from_("abc").select(F("a") - F("b"))
q2 = Q.from_(self.t).select(self.t.a - self.t.b)
self.assertEqual('SELECT "a"-"b" FROM "abc"', str(q1))
self.assertEqual('SELECT "a"-"b" FROM "abc"', str(q2))
def test__subtraction__number(self):
q1 = Q.from_("abc").select(F("a") - 1)
q2 = Q.from_(self.t).select(self.t.a - 1)
self.assertEqual('SELECT "a"-1 FROM "abc"', str(q1))
self.assertEqual('SELECT "a"-1 FROM "abc"', str(q2))
def test__subtraction__decimal(self):
q1 = Q.from_("abc").select(F("a") - 1.0)
q2 = Q.from_(self.t).select(self.t.a - 1.0)
self.assertEqual('SELECT "a"-1.0 FROM "abc"', str(q1))
self.assertEqual('SELECT "a"-1.0 FROM "abc"', str(q2))
def test__subtraction__right(self):
q1 = Q.from_("abc").select(1 - F("a"))
q2 = Q.from_(self.t).select(1 - self.t.a)
self.assertEqual('SELECT 1-"a" FROM "abc"', str(q1))
self.assertEqual('SELECT 1-"a" FROM "abc"', str(q2))
def test__multiplication__fields(self):
q1 = Q.from_("abc").select(F("a") * F("b"))
q2 = Q.from_(self.t).select(self.t.a * self.t.b)
self.assertEqual('SELECT "a"*"b" FROM "abc"', str(q1))
self.assertEqual('SELECT "a"*"b" FROM "abc"', str(q2))
def test__multiplication__number(self):
q1 = Q.from_("abc").select(F("a") * 1)
q2 = Q.from_(self.t).select(self.t.a * 1)
self.assertEqual('SELECT "a"*1 FROM "abc"', str(q1))
self.assertEqual('SELECT "a"*1 FROM "abc"', str(q2))
def test__multiplication__decimal(self):
q1 = Q.from_("abc").select(F("a") * 1.0)
q2 = Q.from_(self.t).select(self.t.a * 1.0)
self.assertEqual('SELECT "a"*1.0 FROM "abc"', str(q1))
self.assertEqual('SELECT "a"*1.0 FROM "abc"', str(q2))
def test__multiplication__right(self):
q1 = Q.from_("abc").select(1 * F("a"))
q2 = Q.from_(self.t).select(1 * self.t.a)
self.assertEqual('SELECT 1*"a" FROM "abc"', str(q1))
self.assertEqual('SELECT 1*"a" FROM "abc"', str(q2))
def test__division__fields(self):
q1 = Q.from_("abc").select(F("a") / F("b"))
q2 = Q.from_(self.t).select(self.t.a / self.t.b)
self.assertEqual('SELECT "a"/"b" FROM "abc"', str(q1))
self.assertEqual('SELECT "a"/"b" FROM "abc"', str(q2))
def test__division__number(self):
q1 = Q.from_("abc").select(F("a") / 1)
q2 = Q.from_(self.t).select(self.t.a / 1)
self.assertEqual('SELECT "a"/1 FROM "abc"', str(q1))
self.assertEqual('SELECT "a"/1 FROM "abc"', str(q2))
def test__division__decimal(self):
q1 = Q.from_("abc").select(F("a") / 1.0)
q2 = Q.from_(self.t).select(self.t.a / 1.0)
self.assertEqual('SELECT "a"/1.0 FROM "abc"', str(q1))
self.assertEqual('SELECT "a"/1.0 FROM "abc"', str(q2))
def test__division__right(self):
q1 = Q.from_("abc").select(1 / F("a"))
q2 = Q.from_(self.t).select(1 / self.t.a)
self.assertEqual('SELECT 1/"a" FROM "abc"', str(q1))
self.assertEqual('SELECT 1/"a" FROM "abc"', str(q2))
def test__complex_op(self):
q1 = Q.from_("abc").select(2 + 1 / F("a") - 5)
q2 = Q.from_(self.t).select(2 + 1 / self.t.a - 5)
self.assertEqual('SELECT 2+1/"a"-5 FROM "abc"', str(q1))
self.assertEqual('SELECT 2+1/"a"-5 FROM "abc"', str(q2))
def test__complex_op_add_parentheses(self):
q1 = Q.from_("abc").select((F("a") + 1) + (F("b") - 5))
q2 = Q.from_(self.t).select((self.t.a + 1) + (self.t.b - 5))
self.assertEqual('SELECT "a"+1+"b"-5 FROM "abc"', str(q1))
self.assertEqual('SELECT "a"+1+"b"-5 FROM "abc"', str(q2))
def test__complex_op_sub_parentheses(self):
q1 = Q.from_("abc").select((F("a") + 1) - (F("b") - 5))
q2 = Q.from_(self.t).select((self.t.a + 1) - (self.t.b - 5))
self.assertEqual('SELECT "a"+1-"b"-5 FROM "abc"', str(q1))
self.assertEqual('SELECT "a"+1-"b"-5 FROM "abc"', str(q2))
def test__complex_op_mul_parentheses(self):
q1 = Q.from_("abc").select((F("a") + 1) * (F("b") - 5))
q2 = Q.from_(self.t).select((self.t.a + 1) * (self.t.b - 5))
self.assertEqual('SELECT ("a"+1)*("b"-5) FROM "abc"', str(q1))
self.assertEqual('SELECT ("a"+1)*("b"-5) FROM "abc"', str(q2))
def test__complex_op_mul_no_parentheses(self):
q = Q.from_("abc").select(F("a") + 1 * F("b") - 5)
self.assertEqual('SELECT "a"+1*"b"-5 FROM "abc"', str(q))
def test__complex_op_div_parentheses(self):
q1 = Q.from_("abc").select((F("a") + 1) / (F("b") - 5))
q2 = Q.from_(self.t).select((self.t.a + 1) / (self.t.b - 5))
self.assertEqual('SELECT ("a"+1)/("b"-5) FROM "abc"', str(q1))
self.assertEqual('SELECT ("a"+1)/("b"-5) FROM "abc"', str(q2))
def test__complex_op_div_no_parentheses(self):
q = Q.from_("abc").select(F("a") + 1 / F("b") - 5)
self.assertEqual('SELECT "a"+1/"b"-5 FROM "abc"', str(q))
def test__arithmetic_equality(self):
q1 = Q.from_("abc").select(F("a") / 2 == 2)
q2 = Q.from_(self.t).select(self.t.a / 2 == 2)
self.assertEqual('SELECT "a"/2=2 FROM "abc"', str(q1))
self.assertEqual('SELECT "a"/2=2 FROM "abc"', str(q2))
def test__arithmetic_with_function(self):
q1 = Q.from_("abc").select(fn.Sum(F("foo")) + 1)
q2 = Q.from_(self.t).select(fn.Sum(self.t.foo) + 1)
self.assertEqual('SELECT SUM("foo")+1 FROM "abc"', str(q1))
self.assertEqual('SELECT SUM("foo")+1 FROM "abc"', str(q2))
def test__exponent__number(self):
q1 = Q.from_("abc").select(F("a") ** 2)
q2 = Q.from_(self.t).select(self.t.a ** 2)
self.assertEqual('SELECT POW("a",2) FROM "abc"', str(q1))
self.assertEqual('SELECT POW("a",2) FROM "abc"', str(q2))
def test__exponent__decimal(self):
q1 = Q.from_("abc").select(F("a") ** 0.5)
q2 = Q.from_(self.t).select(self.t.a ** 0.5)
self.assertEqual('SELECT POW("a",0.5) FROM "abc"', str(q1))
self.assertEqual('SELECT POW("a",0.5) FROM "abc"', str(q2))
def test__modulus__number(self):
q1 = Q.from_("abc").select(F("a") % 2)
q2 = Q.from_(self.t).select(self.t.a % 2)
self.assertEqual('SELECT MOD("a",2) FROM "abc"', str(q1))
self.assertEqual('SELECT MOD("a",2) FROM "abc"', str(q2))
def test__floor(self):
q1 = Q.from_("abc").select(fn.Floor(F("foo")))
q2 = Q.from_(self.t).select(fn.Floor(self.t.foo))
self.assertEqual('SELECT FLOOR("foo") FROM "abc"', str(q1))
self.assertEqual('SELECT FLOOR("foo") FROM "abc"', str(q2))
class AggregationTests(unittest.TestCase):
def test__count(self):
q = Q.from_("abc").select(fn.Count(F("foo")))
self.assertEqual('SELECT COUNT("foo") FROM "abc"', str(q))
def test__count__star(self):
q = Q.from_("abc").select(fn.Count("*"))
self.assertEqual('SELECT COUNT(*) FROM "abc"', str(q))
def test__sum(self):
q = Q.from_("abc").select(fn.Sum(F("foo")))
self.assertEqual('SELECT SUM("foo") FROM "abc"', str(q))
def test__avg(self):
q = Q.from_("abc").select(fn.Avg(F("foo")))
self.assertEqual('SELECT AVG("foo") FROM "abc"', str(q))
def test__first(self):
q = Q.from_("abc").select(fn.First(F("foo")))
self.assertEqual('SELECT FIRST("foo") FROM "abc"', str(q))
def test__last(self):
q = Q.from_("abc").select(fn.Last(F("foo")))
self.assertEqual('SELECT LAST("foo") FROM "abc"', str(q))
def test__min(self):
q = Q.from_("abc").select(fn.Min(F("foo")))
self.assertEqual('SELECT MIN("foo") FROM "abc"', str(q))
def test__max(self):
q = Q.from_("abc").select(fn.Max(F("foo")))
self.assertEqual('SELECT MAX("foo") FROM "abc"', str(q))
def test__std(self):
q = Q.from_("abc").select(fn.Std(F("foo")))
self.assertEqual('SELECT STD("foo") FROM "abc"', str(q))
def test__stddev(self):
q = Q.from_("abc").select(fn.StdDev(F("foo")))
self.assertEqual('SELECT STDDEV("foo") FROM "abc"', str(q))
def test__approx_percentile(self):
q = Q.from_("abc").select(fn.ApproximatePercentile(F("foo"), 0.5))
self.assertEqual(
'SELECT APPROXIMATE_PERCENTILE("foo" USING PARAMETERS percentile=0.5) FROM "abc"',
str(q),
)
class ConditionTests(unittest.TestCase):
def test__case__raw(self):
q = Q.from_("abc").select(Case().when(F("foo") == 1, "a"))
self.assertEqual('SELECT CASE WHEN "foo"=1 THEN \'a\' END FROM "abc"', str(q))
def test__case__else(self):
q = Q.from_("abc").select(Case().when(F("foo") == 1, "a").else_("b"))
self.assertEqual(
"SELECT CASE WHEN \"foo\"=1 THEN 'a' ELSE 'b' END FROM \"abc\"", str(q)
)
def test__case__field(self):
q = Q.from_("abc").select(Case().when(F("foo") == 1, F("bar")).else_(F("buz")))
self.assertEqual(
'SELECT CASE WHEN "foo"=1 THEN "bar" ELSE "buz" END FROM "abc"', str(q)
)
def test__case__multi(self):
q = Q.from_("abc").select(
Case().when(F("foo") > 0, F("fiz")).when(F("bar") <= 0, F("buz")).else_(1)
)
self.assertEqual(
'SELECT CASE WHEN "foo">0 THEN "fiz" WHEN "bar"<=0 THEN "buz" ELSE 1 END FROM "abc"',
str(q),
)
def test__case__no_cases(self):
with self.assertRaises(CaseException):
q = Q.from_("abc").select(Case())
str(q)
class StringTests(unittest.TestCase):
t = T("abc")
def test__ascii__str(self):
q = Q.select(fn.Ascii("2"))
self.assertEqual("SELECT ASCII('2')", str(q))
def test__ascii__int(self):
q = Q.select(fn.Ascii(2))
self.assertEqual("SELECT ASCII(2)", str(q))
def test__ascii__field(self):
q = Q.from_(self.t).select(fn.Ascii(self.t.foo))
self.assertEqual('SELECT ASCII("foo") FROM "abc"', str(q))
def test__bin__str(self):
q = Q.select(fn.Bin("2"))
self.assertEqual("SELECT BIN('2')", str(q))
def test__bin__int(self):
q = Q.select(fn.Bin(2))
self.assertEqual("SELECT BIN(2)", str(q))
def test__bin__field(self):
q = Q.from_(self.t).select(fn.Bin(self.t.foo))
self.assertEqual('SELECT BIN("foo") FROM "abc"', str(q))
def test__concat__str(self):
q = Q.select(fn.Concat("p", "y", "q", "b"))
self.assertEqual("SELECT CONCAT('p','y','q','b')", str(q))
def test__concat__field(self):
q = Q.from_(self.t).select(fn.Concat(self.t.foo, self.t.bar))
self.assertEqual('SELECT CONCAT("foo","bar") FROM "abc"', str(q))
def test__insert__str(self):
q = Q.select(fn.Insert("Quadratic", 3, 4, "What"))
self.assertEqual("SELECT INSERT('Quadratic',3,4,'What')", str(q))
def test__insert__field(self):
q = Q.from_(self.t).select(fn.Insert(self.t.foo, 3, 4, self.t.bar))
self.assertEqual('SELECT INSERT("foo",3,4,"bar") FROM "abc"', str(q))
def test__lower__str(self):
q = Q.select(fn.Lower("ABC"))
self.assertEqual("SELECT LOWER('ABC')", str(q))
def test__lower__field(self):
q = Q.from_(self.t).select(fn.Lower(self.t.foo))
self.assertEqual('SELECT LOWER("foo") FROM "abc"', str(q))
def test__length__str(self):
q = Q.select(fn.Length("ABC"))
self.assertEqual("SELECT LENGTH('ABC')", str(q))
def test__length__field(self):
q = Q.from_(self.t).select(fn.Length(self.t.foo))
self.assertEqual('SELECT LENGTH("foo") FROM "abc"', str(q))
def test__substring(self):
q = Q.from_(self.t).select(fn.Substring(self.t.foo, 2, 6))
self.assertEqual('SELECT SUBSTRING("foo",2,6) FROM "abc"', str(q))
class SplitPartFunctionTests(unittest.TestCase):
t = T("abc")
def test__split_part(self):
q = VerticaQuery.from_(self.t).select(fn.SplitPart(self.t.foo, "|", 3))
self.assertEqual('SELECT SPLIT_PART("foo",\'|\',3) FROM "abc"', str(q))
class RegexpLikeFunctionTests(unittest.TestCase):
t = T("abc")
def test__regexp_like(self):
q = VerticaQuery.from_(self.t).select(fn.RegexpLike(self.t.foo, "^a", "x"))
self.assertEqual("SELECT REGEXP_LIKE(\"foo\",'^a','x') FROM \"abc\"", str(q))
class CastTests(unittest.TestCase):
t = T("abc")
def test__cast__as(self):
q = Q.from_(self.t).select(fn.Cast(self.t.foo, SqlTypes.UNSIGNED))
self.assertEqual('SELECT CAST("foo" AS UNSIGNED) FROM "abc"', str(q))
def test__cast__signed(self):
q1 = Q.from_(self.t).select(fn.Signed(self.t.foo))
q2 = Q.from_(self.t).select(fn.Cast(self.t.foo, SqlTypes.SIGNED))
self.assertEqual('SELECT CAST("foo" AS SIGNED) FROM "abc"', str(q1))
self.assertEqual('SELECT CAST("foo" AS SIGNED) FROM "abc"', str(q2))
def test__cast__unsigned(self):
q1 = Q.from_(self.t).select(fn.Unsigned(self.t.foo))
q2 = Q.from_(self.t).select(fn.Cast(self.t.foo, SqlTypes.UNSIGNED))
self.assertEqual('SELECT CAST("foo" AS UNSIGNED) FROM "abc"', str(q1))
self.assertEqual('SELECT CAST("foo" AS UNSIGNED) FROM "abc"', str(q2))
def test__cast__date(self):
q1 = Q.from_(self.t).select(fn.Date(self.t.foo))
q2 = Q.from_(self.t).select(fn.Cast(self.t.foo, SqlTypes.DATE))
self.assertEqual('SELECT DATE("foo") FROM "abc"', str(q1))
self.assertEqual('SELECT CAST("foo" AS DATE) FROM "abc"', str(q2))
def test__cast__timestamp(self):
q1 = Q.from_(self.t).select(fn.Timestamp(self.t.foo))
q2 = Q.from_(self.t).select(fn.Cast(self.t.foo, SqlTypes.TIMESTAMP))
self.assertEqual('SELECT TIMESTAMP("foo") FROM "abc"', str(q1))
self.assertEqual('SELECT CAST("foo" AS TIMESTAMP) FROM "abc"', str(q2))
def test__cast__char(self):
q = Q.from_(self.t).select(fn.Cast(self.t.foo, SqlTypes.CHAR))
self.assertEqual('SELECT CAST("foo" AS CHAR) FROM "abc"', str(q))
def test__cast__char_with_arg(self):
q = Q.from_(self.t).select(fn.Cast(self.t.foo, SqlTypes.VARCHAR(24)))
self.assertEqual('SELECT CAST("foo" AS VARCHAR(24)) FROM "abc"', str(q))
def test__cast__varchar(self):
q = Q.from_(self.t).select(fn.Cast(self.t.foo, SqlTypes.VARCHAR))
self.assertEqual('SELECT CAST("foo" AS VARCHAR) FROM "abc"', str(q))
def test__cast__varchar_with_arg(self):
q = Q.from_(self.t).select(fn.Cast(self.t.foo, SqlTypes.VARCHAR(24)))
self.assertEqual('SELECT CAST("foo" AS VARCHAR(24)) FROM "abc"', str(q))
def test__cast__long_varchar(self):
q = Q.from_(self.t).select(fn.Cast(self.t.foo, SqlTypes.LONG_VARCHAR))
self.assertEqual('SELECT CAST("foo" AS LONG VARCHAR) FROM "abc"', str(q))
def test__cast__long_varchar_with_arg(self):
q = Q.from_(self.t).select(fn.Cast(self.t.foo, SqlTypes.LONG_VARCHAR(24)))
self.assertEqual('SELECT CAST("foo" AS LONG VARCHAR(24)) FROM "abc"', str(q))
def test__cast__binary(self):
q = Q.from_(self.t).select(fn.Cast(self.t.foo, SqlTypes.BINARY))
self.assertEqual('SELECT CAST("foo" AS BINARY) FROM "abc"', str(q))
def test__cast__binary_with_arg(self):
q = Q.from_(self.t).select(fn.Cast(self.t.foo, SqlTypes.BINARY(24)))
self.assertEqual('SELECT CAST("foo" AS BINARY(24)) FROM "abc"', str(q))
def test__cast__varbinary(self):
q = Q.from_(self.t).select(fn.Cast(self.t.foo, SqlTypes.VARBINARY))
self.assertEqual('SELECT CAST("foo" AS VARBINARY) FROM "abc"', str(q))
def test__cast__varbinary_with_arg(self):
q = Q.from_(self.t).select(fn.Cast(self.t.foo, SqlTypes.VARBINARY(24)))
self.assertEqual('SELECT CAST("foo" AS VARBINARY(24)) FROM "abc"', str(q))
def test__cast__long_varbinary(self):
q = Q.from_(self.t).select(fn.Cast(self.t.foo, SqlTypes.LONG_VARBINARY))
self.assertEqual('SELECT CAST("foo" AS LONG VARBINARY) FROM "abc"', str(q))
def test__cast__long_varbinary_with_arg(self):
q = Q.from_(self.t).select(fn.Cast(self.t.foo, SqlTypes.LONG_VARBINARY(24)))
self.assertEqual('SELECT CAST("foo" AS LONG VARBINARY(24)) FROM "abc"', str(q))
def test__cast__boolean(self):
q = Q.from_(self.t).select(fn.Cast(self.t.foo, SqlTypes.BOOLEAN))
self.assertEqual('SELECT CAST("foo" AS BOOLEAN) FROM "abc"', str(q))
def test__cast__integer(self):
q = Q.from_(self.t).select(fn.Cast(self.t.foo, SqlTypes.INTEGER))
self.assertEqual('SELECT CAST("foo" AS INTEGER) FROM "abc"', str(q))
def test__cast__float(self):
q = Q.from_(self.t).select(fn.Cast(self.t.foo, SqlTypes.FLOAT))
self.assertEqual('SELECT CAST("foo" AS FLOAT) FROM "abc"', str(q))
def test__cast__numeric(self):
q = Q.from_(self.t).select(fn.Cast(self.t.foo, SqlTypes.NUMERIC))
self.assertEqual('SELECT CAST("foo" AS NUMERIC) FROM "abc"', str(q))
def test__tochar__(self):
q = Q.from_(self.t).select(fn.ToChar(self.t.foo, "SomeFormat"))
self.assertEqual('SELECT TO_CHAR("foo",\'SomeFormat\') FROM "abc"', str(q))
class DateFunctionsTests(unittest.TestCase):
dt = F("dt")
t = T("abc")
def _test_extract_datepart(self, date_part):
q = Q.from_(self.t).select(fn.Extract(date_part, self.t.foo))
self.assertEqual(
'SELECT EXTRACT(%s FROM "foo") FROM "abc"' % date_part.value, str(q)
)
def test_extract_microsecond(self):
self._test_extract_datepart(DatePart.microsecond)
def test_extract_second(self):
self._test_extract_datepart(DatePart.second)
def test_extract_minute(self):
self._test_extract_datepart(DatePart.minute)
def test_extract_hour(self):
self._test_extract_datepart(DatePart.hour)
def test_extract_day(self):
self._test_extract_datepart(DatePart.day)
def test_extract_week(self):
self._test_extract_datepart(DatePart.week)
def test_extract_month(self):
self._test_extract_datepart(DatePart.month)
def test_extract_quarter(self):
self._test_extract_datepart(DatePart.quarter)
def test_extract_year(self):
self._test_extract_datepart(DatePart.year)
def test_timestampadd(self):
a = fn.TimestampAdd("year", 1, "2017-10-01")
self.assertEqual(str(a), "TIMESTAMPADD('year',1,'2017-10-01')")
def test_time_diff(self):
a = fn.TimeDiff("18:00:00", "10:00:00")
self.assertEqual(str(a), "TIMEDIFF('18:00:00','10:00:00')")
def test_date_add(self):
a = fn.DateAdd("year", 1, "2017-10-01")
self.assertEqual(str(a), "DATE_ADD('year',1,'2017-10-01')")
def test_now(self):
query = Query.select(fn.Now())
self.assertEqual("SELECT NOW()", str(query))
def test_utc_timestamp(self):
query = Query.select(fn.UtcTimestamp())
self.assertEqual("SELECT UTC_TIMESTAMP()", str(query))
def test_current_date(self):
query = Query.select(fn.CurDate())
self.assertEqual("SELECT CURRENT_DATE()", str(query))
def test_current_time(self):
query = Query.select(fn.CurTime())
self.assertEqual("SELECT CURRENT_TIME()", str(query))
def test_current_timestamp(self):
query = Query.select(fn.CurTimestamp())
self.assertEqual("SELECT CURRENT_TIMESTAMP", str(query))
def test_current_timestamp_with_alias(self):
query = Query.select(fn.CurTimestamp("ts"))
self.assertEqual('SELECT CURRENT_TIMESTAMP "ts"', str(query))
def test_to_date(self):
q1 = fn.ToDate("2019-06-21", "yyyy-mm-dd")
q2 = Query.from_(self.t).select(fn.ToDate("2019-06-21", "yyyy-mm-dd"))
q3 = Query.from_(self.t).select(fn.ToDate(F("foo"), "yyyy-mm-dd"))
self.assertEqual(str(q1), "TO_DATE('2019-06-21','yyyy-mm-dd')")
self.assertEqual(
str(q2), "SELECT TO_DATE('2019-06-21','yyyy-mm-dd') FROM \"abc\""
)
self.assertEqual(str(q3), 'SELECT TO_DATE("foo",\'yyyy-mm-dd\') FROM "abc"')
class NullFunctionsTests(unittest.TestCase):
def test_isnull(self):
q = Q.from_("abc").select(fn.IsNull(F("foo")))
self.assertEqual('SELECT ISNULL("foo") FROM "abc"', str(q))
def test_coalesce(self):
q = Q.from_("abc").select(fn.Coalesce(F("foo"), 0))
self.assertEqual('SELECT COALESCE("foo",0) FROM "abc"', str(q))
def test_nullif(self):
q = Q.from_("abc").select(fn.NullIf(F("foo"), 0))
self.assertEqual('SELECT NULLIF("foo",0) FROM "abc"', str(q))
def test_nvl(self):
q = Q.from_("abc").select(fn.NVL(F("foo"), 0))
self.assertEqual('SELECT NVL("foo",0) FROM "abc"', str(q))
| 34.78739 | 104 | 0.598736 |
794350f7aee7713dd9fa9d1e83b3764c22b047ff | 1,277 | py | Python | content_gallery/tests/models.py | jenia0jenia/django-content-gallery | e96c6972fc3a0c4c7b95855d9c3e2af030ce01c1 | [
"BSD-3-Clause"
] | 25 | 2017-07-03T13:58:17.000Z | 2020-10-31T11:44:49.000Z | content_gallery/tests/models.py | jenia0jenia/django-content-gallery | e96c6972fc3a0c4c7b95855d9c3e2af030ce01c1 | [
"BSD-3-Clause"
] | 5 | 2017-07-30T20:34:00.000Z | 2020-08-17T18:52:38.000Z | content_gallery/tests/models.py | jenia0jenia/django-content-gallery | e96c6972fc3a0c4c7b95855d9c3e2af030ce01c1 | [
"BSD-3-Clause"
] | 15 | 2017-07-31T09:29:33.000Z | 2020-10-02T08:23:15.000Z | from django.db import models as django_models
from .. import models
class BaseTestModel(django_models.Model):
"""
An abstract base test model class for test objects that
could be related to Images. Contains just one text field
"""
name = django_models.CharField(max_length=100)
def __str__(self):
"""
A str version of the object just returns
the value of its text field
"""
return self.name
class Meta:
abstract = True
class TestModel(models.ContentGalleryMixin, BaseTestModel):
"""
    The main test model. It uses the ContentGalleryMixin unchanged,
    so the model is shown in the list of available models
    in the Image admin.
"""
class AnotherTestModel(models.ContentGalleryMixin, BaseTestModel):
"""
    Another test model. It also uses the ContentGalleryMixin but
    sets 'gallery_visible' to False, so it is still possible
    to attach images to objects of this model, but the model
    does not appear in the available models list in the Image admin.
"""
gallery_visible = False
class WrongTestModel(BaseTestModel):
"""
    A test model that does not use the ContentGalleryMixin.
    It cannot be related to Image objects.
"""
| 27.76087 | 69 | 0.694597 |
794351716c4c507f660e9882904d19e8664fa8d0 | 3,467 | py | Python | tests/modules/test_consolidatingAggregator.py | gulraiz14/Taurus | 3fb886092634fd97f38012dc81fb62b1ed20a145 | [
"Apache-2.0"
] | null | null | null | tests/modules/test_consolidatingAggregator.py | gulraiz14/Taurus | 3fb886092634fd97f38012dc81fb62b1ed20a145 | [
"Apache-2.0"
] | null | null | null | tests/modules/test_consolidatingAggregator.py | gulraiz14/Taurus | 3fb886092634fd97f38012dc81fb62b1ed20a145 | [
"Apache-2.0"
] | null | null | null | from bzt.modules.aggregator import ConsolidatingAggregator, DataPoint, KPISet
from tests import BZTestCase, r
from tests.mocks import MockReader
class TestConsolidatingAggregator(BZTestCase):
def test_mock(self):
# check mock reader
reader = self.get_reader()
reader.buffer_scale_idx = '90.0'
first = list(reader.datapoints())
second = list(reader.datapoints(True))
self.assertEquals([1, 2, 3, 4], [x[DataPoint.TIMESTAMP] for x in first])
self.assertEquals([5, 6], [x[DataPoint.TIMESTAMP] for x in second])
for point in first + second:
val = point[DataPoint.CURRENT]['']
# self.assertGreater(val[KPISet.AVG_RESP_TIME], 0)
def test_merging(self):
dst = DataPoint(0)
src = DataPoint(0)
src[DataPoint.CUMULATIVE].get('', KPISet())
src[DataPoint.CUMULATIVE][''].sum_rt = 0.5
src[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT] = 1
dst.merge_point(src)
self.assertEquals(0.5, dst[DataPoint.CUMULATIVE][''].sum_rt)
self.assertEquals(0.5, dst[DataPoint.CUMULATIVE][''][KPISet.AVG_RESP_TIME])
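        # one sample with sum_rt 0.5 gives an average response time of 0.5 / 1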
src[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT] = 3
dst.merge_point(src)
self.assertEquals(4, dst[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT])
self.assertEquals(1, dst[DataPoint.CUMULATIVE][''].sum_rt)
self.assertEquals(0.25, dst[DataPoint.CUMULATIVE][''][KPISet.AVG_RESP_TIME])
src[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT] = 6
dst.merge_point(src)
self.assertEquals(10, dst[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT])
self.assertEquals(1.5, dst[DataPoint.CUMULATIVE][''].sum_rt)
self.assertEquals(0.15, dst[DataPoint.CUMULATIVE][''][KPISet.AVG_RESP_TIME])
def test_two_executions(self):
# check consolidator
obj = ConsolidatingAggregator()
obj.track_percentiles = [0, 50, 100]
obj.prepare()
underling1 = self.get_reader()
underling2 = self.get_reader()
obj.add_underling(underling1)
obj.add_underling(underling2)
cnt = 0
for _ in range(1, 10):
for point in obj.datapoints():
overall = point[DataPoint.CURRENT]['']
self.assertEquals(2, overall[KPISet.CONCURRENCY])
self.assertGreater(overall[KPISet.PERCENTILES]["100.0"], 0)
self.assertGreater(overall[KPISet.AVG_RESP_TIME], 0)
cnt += 1
self.assertEquals(2, cnt)
def get_reader(self, offset=0):
mock = MockReader()
mock.data.append((1 + offset, "", 1, r(), r(), r(), 200, None, ''))
mock.data.append((2 + offset, "", 1, r(), r(), r(), 200, None, ''))
mock.data.append((2 + offset, "", 1, r(), r(), r(), 200, None, ''))
mock.data.append((3 + offset, "", 1, r(), r(), r(), 200, None, ''))
mock.data.append((3 + offset, "", 1, r(), r(), r(), 200, None, ''))
mock.data.append((4 + offset, "", 1, r(), r(), r(), 200, None, ''))
mock.data.append((4 + offset, "", 1, r(), r(), r(), 200, None, ''))
mock.data.append((6 + offset, "", 1, r(), r(), r(), 200, None, ''))
mock.data.append((6 + offset, "", 1, r(), r(), r(), 200, None, ''))
mock.data.append((6 + offset, "", 1, r(), r(), r(), 200, None, ''))
mock.data.append((5 + offset, "", 1, r(), r(), r(), 200, None, ''))
return mock | 45.025974 | 84 | 0.582925 |
794351a8cb40479d5d0b31ad2a9ba54a90d566de | 297 | py | Python | cms/plugins/text/forms.py | LUKKIEN/django-cms-2.0 | 0600cc1a3f3636a867faf0afe3719539fee36d69 | [
"BSD-3-Clause"
] | null | null | null | cms/plugins/text/forms.py | LUKKIEN/django-cms-2.0 | 0600cc1a3f3636a867faf0afe3719539fee36d69 | [
"BSD-3-Clause"
] | null | null | null | cms/plugins/text/forms.py | LUKKIEN/django-cms-2.0 | 0600cc1a3f3636a867faf0afe3719539fee36d69 | [
"BSD-3-Clause"
] | null | null | null | from django.forms.models import ModelForm
from cms.plugins.text.models import Text
from django import forms
class TextForm(ModelForm):
body_storage = forms.CharField()
class Meta:
model = Text
exclude = ('page', 'position', 'placeholder', 'language', 'plugin_type')
| 24.75 | 80 | 0.693603 |
794352dbd7e4303aef86369b654dc0fb20e7f36a | 708 | py | Python | protlearn/features/tests/test_cksaap.py | tadorfer/ProtClass | da1a01ea9abd3c367b3389dfed683c6a9dfa6afd | [
"MIT"
] | 24 | 2020-09-17T10:35:44.000Z | 2022-03-09T19:19:01.000Z | protlearn/features/tests/test_cksaap.py | tadorfer/ProtClass | da1a01ea9abd3c367b3389dfed683c6a9dfa6afd | [
"MIT"
] | 14 | 2020-08-09T18:23:01.000Z | 2020-11-19T05:48:14.000Z | protlearn/features/tests/test_cksaap.py | tadorfer/ProtClass | da1a01ea9abd3c367b3389dfed683c6a9dfa6afd | [
"MIT"
] | 3 | 2020-03-17T16:43:54.000Z | 2020-08-03T06:10:24.000Z | import pytest
import numpy as np
from ..cksaap import cksaap
import pkg_resources
PATH = pkg_resources.resource_filename(__name__, 'test_data/')
def test_cksaap():
"Test k-spaced amino acid pair composition"
# load data
X_list = open(PATH+'multiple.txt').read().splitlines()
X_err = 'AGT2HT9'
# get cksaap
cksaap_list, desc = cksaap(X_list, k=3, remove_zero_cols=True)
# test cksaap
assert np.array_equal(cksaap_list, np.array([
[0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0],
[1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1]]))
# test ValueError
with pytest.raises(ValueError):
cksaap_error, desc = cksaap(X_err) | 27.230769 | 66 | 0.610169 |
794354284cf499a96a5cc1e64169c6fad04b9768 | 12,837 | py | Python | app/oauth/views/authorize.py | fariszr/app | 932134c2123714cf1d1b7090998fbdf27344cce0 | [
"MIT"
] | 1 | 2020-11-21T11:18:21.000Z | 2020-11-21T11:18:21.000Z | app/oauth/views/authorize.py | fariszr/app | 932134c2123714cf1d1b7090998fbdf27344cce0 | [
"MIT"
] | null | null | null | app/oauth/views/authorize.py | fariszr/app | 932134c2123714cf1d1b7090998fbdf27344cce0 | [
"MIT"
] | null | null | null | from typing import Dict
from urllib.parse import urlparse
from flask import request, render_template, redirect, flash
from flask_login import current_user
from itsdangerous import SignatureExpired
from app.config import EMAIL_DOMAIN
from app.dashboard.views.custom_alias import available_suffixes, signer
from app.extensions import db
from app.jose_utils import make_id_token
from app.log import LOG
from app.models import (
Client,
AuthorizationCode,
ClientUser,
Alias,
RedirectUri,
OauthToken,
DeletedAlias,
CustomDomain,
DomainDeletedAlias,
)
from app.oauth.base import oauth_bp
from app.oauth_models import (
get_response_types,
ResponseType,
Scope,
SUPPORTED_OPENID_FLOWS,
SUPPORTED_OPENID_FLOWS_STR,
response_types_to_str,
)
from app.utils import random_string, encode_url
@oauth_bp.route("/authorize", methods=["GET", "POST"])
def authorize():
"""
    Redirected from the client when the user clicks on "Login with Server".
    This is a GET request with the following fields in the url
- client_id
- (optional) state
- response_type: must be code
"""
oauth_client_id = request.args.get("client_id")
state = request.args.get("state")
scope = request.args.get("scope")
redirect_uri = request.args.get("redirect_uri")
response_mode = request.args.get("response_mode")
nonce = request.args.get("nonce")
try:
response_types: [ResponseType] = get_response_types(request)
except ValueError:
return (
"response_type must be code, token, id_token or certain combination of these."
" Please see /.well-known/openid-configuration to see what response_type are supported ",
400,
)
if set(response_types) not in SUPPORTED_OPENID_FLOWS:
return (
f"SimpleLogin only support the following OIDC flows: {SUPPORTED_OPENID_FLOWS_STR}",
400,
)
if not redirect_uri:
LOG.d("no redirect uri")
return "redirect_uri must be set", 400
client = Client.get_by(oauth_client_id=oauth_client_id)
if not client:
final_redirect_uri = (
f"{redirect_uri}?error=invalid_client_id&client_id={oauth_client_id}"
)
return redirect(final_redirect_uri)
# check if redirect_uri is valid
# allow localhost by default
hostname, scheme = get_host_name_and_scheme(redirect_uri)
if hostname != "localhost" and hostname != "127.0.0.1":
# support custom scheme for mobile app
if scheme == "http":
final_redirect_uri = f"{redirect_uri}?error=http_not_allowed"
return redirect(final_redirect_uri)
if not RedirectUri.get_by(client_id=client.id, uri=redirect_uri):
final_redirect_uri = f"{redirect_uri}?error=unknown_redirect_uri"
return redirect(final_redirect_uri)
# redirect from client website
if request.method == "GET":
if current_user.is_authenticated:
suggested_email, other_emails, email_suffix = None, [], None
suggested_name, other_names = None, []
# user has already allowed this client
client_user: ClientUser = ClientUser.get_by(
client_id=client.id, user_id=current_user.id
)
user_info = {}
if client_user:
LOG.debug("user %s has already allowed client %s", current_user, client)
user_info = client_user.get_user_info()
else:
suggested_email, other_emails = current_user.suggested_emails(
client.name
)
suggested_name, other_names = current_user.suggested_names()
user_custom_domains = [
cd.domain for cd in current_user.verified_custom_domains()
]
# List of (is_custom_domain, alias-suffix, time-signed alias-suffix)
suffixes = available_suffixes(current_user)
return render_template(
"oauth/authorize.html",
Scope=Scope,
EMAIL_DOMAIN=EMAIL_DOMAIN,
**locals(),
)
else:
# after user logs in, redirect user back to this page
return render_template(
"oauth/authorize_nonlogin_user.html",
client=client,
next=request.url,
Scope=Scope,
)
else: # POST - user allows or denies
if request.form.get("button") == "deny":
LOG.debug("User %s denies Client %s", current_user, client)
final_redirect_uri = f"{redirect_uri}?error=deny&state={state}"
return redirect(final_redirect_uri)
LOG.debug("User %s allows Client %s", current_user, client)
client_user = ClientUser.get_by(client_id=client.id, user_id=current_user.id)
# user has already allowed this client, user cannot change information
if client_user:
LOG.d("user %s has already allowed client %s", current_user, client)
else:
alias_prefix = request.form.get("prefix")
signed_suffix = request.form.get("suffix")
alias = None
# user creates a new alias, not using suggested alias
if alias_prefix:
# should never happen as this is checked on the front-end
if not current_user.can_create_new_alias():
raise Exception(f"User {current_user} cannot create custom email")
alias_prefix = alias_prefix.strip().lower().replace(" ", "")
                # assumption: the user clicks the button within the 600-second signature lifetime
try:
alias_suffix = signer.unsign(signed_suffix, max_age=600).decode()
except SignatureExpired:
LOG.warning("Alias creation time expired for %s", current_user)
flash("Alias creation time is expired, please retry", "warning")
return redirect(request.url)
except Exception:
LOG.warning("Alias suffix is tampered, user %s", current_user)
flash("Unknown error, refresh the page", "error")
return redirect(request.url)
user_custom_domains = [
cd.domain for cd in current_user.verified_custom_domains()
]
from app.dashboard.views.custom_alias import verify_prefix_suffix
if verify_prefix_suffix(current_user, alias_prefix, alias_suffix):
full_alias = alias_prefix + alias_suffix
if (
Alias.get_by(email=full_alias)
or DeletedAlias.get_by(email=full_alias)
or DomainDeletedAlias.get_by(email=full_alias)
):
LOG.exception("alias %s already used, very rare!", full_alias)
flash(f"Alias {full_alias} already used", "error")
return redirect(request.url)
else:
alias = Alias.create(
user_id=current_user.id,
email=full_alias,
mailbox_id=current_user.default_mailbox_id,
)
# get the custom_domain_id if alias is created with a custom domain
if alias_suffix.startswith("@"):
alias_domain = alias_suffix[1:]
domain = CustomDomain.get_by(domain=alias_domain)
if domain:
alias.custom_domain_id = domain.id
db.session.flush()
flash(f"Alias {full_alias} has been created", "success")
                # only happens if the request has been "hacked"
else:
flash("something went wrong", "warning")
return redirect(request.url)
# User chooses one of the suggestions
else:
chosen_email = request.form.get("suggested-email")
# todo: add some checks on chosen_email
if chosen_email != current_user.email:
alias = Alias.get_by(email=chosen_email)
if not alias:
alias = Alias.create(
email=chosen_email,
user_id=current_user.id,
mailbox_id=current_user.default_mailbox_id,
)
db.session.flush()
suggested_name = request.form.get("suggested-name")
custom_name = request.form.get("custom-name")
use_default_avatar = request.form.get("avatar-choice") == "default"
client_user = ClientUser.create(
client_id=client.id, user_id=current_user.id
)
if alias:
client_user.alias_id = alias.id
if custom_name:
client_user.name = custom_name
elif suggested_name != current_user.name:
client_user.name = suggested_name
if use_default_avatar:
# use default avatar
LOG.d("use default avatar for user %s client %s", current_user, client)
client_user.default_avatar = True
db.session.flush()
LOG.d("create client-user for client %s, user %s", client, current_user)
redirect_args = {}
if state:
redirect_args["state"] = state
else:
LOG.warning(
"more security reason, state should be added. client %s", client
)
if scope:
redirect_args["scope"] = scope
auth_code = None
if ResponseType.CODE in response_types:
# Create authorization code
auth_code = AuthorizationCode.create(
client_id=client.id,
user_id=current_user.id,
code=random_string(),
scope=scope,
redirect_uri=redirect_uri,
response_type=response_types_to_str(response_types),
)
db.session.add(auth_code)
redirect_args["code"] = auth_code.code
oauth_token = None
if ResponseType.TOKEN in response_types:
# create access-token
oauth_token = OauthToken.create(
client_id=client.id,
user_id=current_user.id,
scope=scope,
redirect_uri=redirect_uri,
access_token=generate_access_token(),
response_type=response_types_to_str(response_types),
)
db.session.add(oauth_token)
redirect_args["access_token"] = oauth_token.access_token
if ResponseType.ID_TOKEN in response_types:
redirect_args["id_token"] = make_id_token(
client_user,
nonce,
oauth_token.access_token if oauth_token else None,
auth_code.code if auth_code else None,
)
db.session.commit()
        # decide whether params should be appended to the url using a fragment (#) or the query string
fragment = False
if response_mode and response_mode == "fragment":
fragment = True
# if response_types contain "token" => implicit flow => should use fragment
# except if client sets explicitly response_mode
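        # tokens returned in the URL fragment are not sent to the server behind the redirect URI, which is why the implicit flow defaults to it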
if not response_mode:
if ResponseType.TOKEN in response_types:
fragment = True
# construct redirect_uri with redirect_args
return redirect(construct_url(redirect_uri, redirect_args, fragment))
def construct_url(url, args: Dict[str, str], fragment: bool = False):
for i, (k, v) in enumerate(args.items()):
# make sure to escape v
v = encode_url(v)
if i == 0:
if fragment:
url += f"#{k}={v}"
else:
url += f"?{k}={v}"
else:
url += f"&{k}={v}"
return url
def generate_access_token() -> str:
"""generate an access-token that does not exist before"""
access_token = random_string(40)
if not OauthToken.get_by(access_token=access_token):
return access_token
# Rerun the function
LOG.warning("access token already exists, generate a new one")
return generate_access_token()
def get_host_name_and_scheme(url: str) -> (str, str):
"""http://localhost:7777?a=b -> (localhost, http) """
url_comp = urlparse(url)
return url_comp.hostname, url_comp.scheme
| 37.425656 | 101 | 0.580432 |
7943550de261dba65c9eeeecd8083b2e686ef73a | 3,458 | py | Python | plugins/network.py | firecat53/py-multistatus | 90e76ec112dd2ea4c464c4aa3b43515edf26d2a2 | [
"MIT"
] | null | null | null | plugins/network.py | firecat53/py-multistatus | 90e76ec112dd2ea4c464c4aa3b43515edf26d2a2 | [
"MIT"
] | null | null | null | plugins/network.py | firecat53/py-multistatus | 90e76ec112dd2ea4c464c4aa3b43515edf26d2a2 | [
"MIT"
] | null | null | null | from .worker import Worker
from psutil import net_io_counters
from subprocess import Popen, PIPE
import socket
class PluginNetwork(Worker):
"""New plugin
"""
def __init__(self, **kwargs):
Worker.__init__(self, **kwargs)
self.old = net_io_counters(pernic=True)
interfaces = self.cfg.network.interfaces.split()
iface_icons = self.cfg.network.iface_icons.split()
self.interfaces = dict(zip(interfaces, iface_icons))
def _round(self, x, base=5):
"""Round number to nearest 10
"""
return int(base * round(float(x) / base))
def _check_net_status(self):
"""Check if network is attached to internet.
"""
try:
# see if we can resolve the host name -- tells us if there is
# a DNS listening
host = socket.gethostbyname(self.cfg.network.url_check)
# connect to the host -- tells us if the host is actually reachable
socket.create_connection((host, 80), 2)
return True
except:
# Try both these a second time before calling the network down
try:
host = socket.gethostbyname(self.cfg.network.url_check)
socket.create_connection((host, 80), 2)
return True
except:
pass
return False
def _get_interface(self):
"""Determine which of the given interfaces is currently up.
"""
res = Popen(["ip", "addr"],
stdout=PIPE).communicate()[0].decode().split('\n')
try:
res = [line for line in res if ' UP ' in line or ',UP,' in line]
return [i for line in res for i in self.interfaces if i in line][0]
except IndexError:
return None
def _check_vpn(self):
"""Determine if VPN is up or down.
"""
if Popen(["pgrep", "openvpn"], stdout=PIPE).communicate()[0]:
return True
else:
return False
def _update_data(self):
interface = self._get_interface()
if interface is not None and self._check_net_status() is True:
self.new = net_io_counters(pernic=True)
old_down = self.old[interface].bytes_recv
old_up = self.old[interface].bytes_sent
new_down = self.new[interface].bytes_recv
new_up = self.new[interface].bytes_sent
up = self._round((new_up - old_up) /
(1024 * int(self.cfg.network.interval)))
down = self._round((new_down - old_down) /
(1024 * int(self.cfg.network.interval)))
self.old = self.new
net_str = "{} {}{:.0f} {}{:.0f}".format(self.interfaces[interface],
self.cfg.network.up_icon,
up,
self.cfg.network.down_icon,
down)
if self._check_vpn():
out = self._color_text(net_str,
fg=self.cfg.network.color_vpn_fg,
bg=self.cfg.network.color_vpn_bg)
else:
out = net_str
else:
out = self._err_text("Network Down")
return (self.__module__, self._out_format(out))
| 36.787234 | 79 | 0.524002 |
794355cf1b3cf44497a3c82c785f1b57f265a9db | 9,277 | py | Python | SUTutorialRoutes.py | soumikmohianuta/PSDoodle | 9edeb40f095beca9ee4016c4938065cd82223b50 | [
"BSD-3-Clause"
] | null | null | null | SUTutorialRoutes.py | soumikmohianuta/PSDoodle | 9edeb40f095beca9ee4016c4938065cd82223b50 | [
"BSD-3-Clause"
] | null | null | null | SUTutorialRoutes.py | soumikmohianuta/PSDoodle | 9edeb40f095beca9ee4016c4938065cd82223b50 | [
"BSD-3-Clause"
] | null | null | null |
from flask import Flask, render_template, request, Blueprint, send_from_directory, session, redirect, url_for, jsonify
import os
from random import randint
import binascii
import time
from similarUI import SimilarUIBOW,similarUIUtility
# from similarUI import SimilarUIBOWTest
from helpers import StrokeParse
import pickle
from mlModule.FastPredict import FastPredict
import pickle
from mlModule.Predict23LSTM import Predictor23LSTM
from RectUtils.RectObj import RectObj
from mlModule import GetPrediction
# Set Folder for model
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
output_directory = os.path.join(APP_ROOT,'My23Records5')
export_dir = os.path.join(APP_ROOT,'My23Records5','tb')
RICO_PATH= os.path.join(APP_ROOT, 'similarUI',"RICO23BOWCount.pkl")
# Load dictionary for searching
pkl_file = open(RICO_PATH, 'rb')
RICO = pickle.load(pkl_file)
pkl_file.close()
TEMPLATE_FOLDER = os.path.join(APP_ROOT, 'templates','Tutorials')
sUTutorialRoutes = Blueprint('SUTutorialRoutes', __name__, template_folder=TEMPLATE_FOLDER)
RICO2 = {18: 1.3078818029580455, 17: 1.139763200847382, 7: 1.6042572583253525, 23: 0.20480255166735367, 13: 1.2705841196816363, 21: 1.2151277497211468, 14: 1.109574534964655, 4: 1.27350305661627, 1: 0.5610761239057094, 8: 1.2898451990888444, 3: 1.1001165287284727, 19: 0.2384449560029641, 22: 1.3393355557525861, 0: 0.9671365739392712, 2: 1.6390691490153984, 15: 0.8551847317189294, 6: 2.3419400282173046, 20: 0.026601131356820077, 9: 1.2291284704809808, 12: 0.6849345254248218, 16: 1.076536962335742, 10: 0.10631666807601393, 5: 0.254524251188198, 11: 0}
# Load model for faster prediction
PREDICTOR = Predictor23LSTM(export_dir,output_directory)
FASTPREDICT = FastPredict(PREDICTOR.classifier,PREDICTOR.example_input_fn)
# Create Json dict from rect to pass it to canvas to sketch.
def rectObjtoJson(rectObj):
dictObj = {'x':str(rectObj.x), 'y':str(rectObj.y),'width':str(rectObj.width),'height':str(rectObj.height),'iconID':str(rectObj.iconID),'elementId':str(rectObj.elementId)}
return dictObj
# Generate token to record the drawing of current session
def generateToken(tokenSize):
byteToken = os.urandom(tokenSize)
hexToken = binascii.hexlify(byteToken)
return hexToken.decode("utf-8")
# Setting page for Tutorials
@sUTutorialRoutes.route('/toolIns/')
def toolIns():
return render_template('UIRetTutorial1.html')
# Setting page for Tutorials
@sUTutorialRoutes.route('/toolIns_1/')
def toolIns_1():
session['ELEMENTID'] = 0
session['RectObjs'] = []
return render_template('UIRetTutorial2.html')
# Setting page for Tutorials
@sUTutorialRoutes.route('/toolIns_2/')
def toolIns_2():
return render_template('UIRetTutorial3.html')
@sUTutorialRoutes.route('/UIRetTutorial/')
def UIRetTutorial():
if 'username' not in session:
session['username'] = generateToken(16)
session['ELEMENTID'] = 0
session['RectObjs'] = []
return render_template('UIRetTutorial4.html')
@sUTutorialRoutes.route('/similarUI/')
def similarUI():
session['ELEMENTID'] = 0
session['RectObjs'] = []
return render_template('SimilarUIRetrieval.html')
# Get prediction while drawing
@sUTutorialRoutes.route('/MidPredictSimilar/', methods=['GET','POST'])
def MidPredictSimilar():
if request.method == 'POST':
canvas_strokes = request.form['save_data']
# start = timeit.default_timer()
if(session['strtTime']==-1):
session['strtTime'] = round(time.monotonic()*1000)
compressStroke,rect = StrokeParse.compressDataForFullUI(canvas_strokes)
if len(compressStroke)==0:
result = "Unchanged"
else:
result =GetPrediction.getFasterTop3Predict(compressStroke, PREDICTOR, FASTPREDICT )
response = jsonify(predictedResult =result)
return response
# Remove last icon from the session and update page accordingly for last tutorial
@sUTutorialRoutes.route('/RemoveLastIconForTest/', methods=['GET', 'POST'])
def RemoveLastIconForTest():
elementID = session['ELEMENTID']
rectObjs = session['RectObjs']
for item in rectObjs:
if (item['elementId'] == str(elementID - 1)):
rectObjs.remove(item)
break
session['RectObjs'] = rectObjs
session['ELEMENTID'] = elementID - 1
if len(rectObjs) == 0:
response = jsonify(similarUI=[])
return response
jsonRectObjs = session['RectObjs']
canvasWidth = int(session['canvas_width'])
canvasHeight = int(session['canvas_height'])
similarUIArray = SimilarUIBOW.findSimilarUIForTest(jsonRectObjs, RICO, canvasWidth, canvasHeight, RICO2)
response = jsonify(similarUI=similarUIArray)
return response
# Find whether there is a text button among the drawings in the session. Used for class select.
@sUTutorialRoutes.route('/FindTextButtonOnClassSelect/', methods=['GET', 'POST'])
def FindTextButtonOnClassSelect():
elementID = session['ELEMENTID']
if request.method == 'POST':
jsonRectObjs = session['RectObjs']
width = request.form['canvas_width']
height = request.form['canvas_height']
hasText = similarUIUtility.isATextButton(jsonRectObjs,int(width),int(height))
session['ELEMENTID'] = elementID + 1
responseResult = "No"
if(hasText):
responseResult= "Yes"
response = jsonify(reponseText=responseResult)
return response
# Find whether there is a text button among the drawings in the session plus the current drawing.
@sUTutorialRoutes.route('/FindTextButton/', methods=['GET', 'POST'])
def FindTextButton():
hasText = False
elementID = session['ELEMENTID']
if request.method == 'POST':
canvas_strokes = request.form['save_data']
compressStroke, rect = StrokeParse.compressDataForFullUI(canvas_strokes)
if len(compressStroke) == 0:
responseResult = "Unchanged"
else:
result = GetPrediction.getFasterTop3Predict(compressStroke, PREDICTOR, FASTPREDICT)
resultID = int(result[session['CurrentClassLabel']][1])
rectObj = RectObj(rect, resultID, elementID)
jsonRectObj = rectObjtoJson(rectObj)
# Maintaining Session for Tracking Elements
jsonRectObjs = session['RectObjs']
jsonRectObjs.append(jsonRectObj)
session['RectObjs'] = jsonRectObjs
width = request.form['canvas_width']
height = request.form['canvas_height']
hasText = similarUIUtility.isATextButton(jsonRectObjs,int(width),int(height))
session['ELEMENTID'] = elementID + 1
responseResult = "No"
if(hasText):
responseResult= "Yes"
response = jsonify(reponseText=responseResult)
return response
# Save drawings in the session for further processing for the first tutorial
@sUTutorialRoutes.route('/DrawSaveForFirstTutorial/', methods=['GET', 'POST'])
def DrawSaveForFirstTutorial():
elementID = session['ELEMENTID']
if request.method == 'POST':
canvas_strokes = request.form['save_data']
compressStroke, rect = StrokeParse.compressDataForFullUI(canvas_strokes)
if len(compressStroke) == 0:
responseResult = "Unchanged"
else:
result = GetPrediction.getFasterTop3Predict(compressStroke, PREDICTOR, FASTPREDICT)
resultID = int(result[session['CurrentClassLabel']][1])
rectObj = RectObj(rect, resultID, elementID)
jsonRectObj = rectObjtoJson(rectObj)
# Maintaining Session for Tracking Elements
jsonRectObjs = session['RectObjs']
jsonRectObjs.append(jsonRectObj)
session['RectObjs'] = jsonRectObjs
responseResult ="Changed"
session['ELEMENTID'] = elementID + 1
response = jsonify(predictedResult=responseResult)
return response
# Save drawings in the session for further processing for the last tutorial
@sUTutorialRoutes.route('/DrawSaveForTest/', methods=['GET', 'POST'])
def DrawSaveForTest():
elementID = session['ELEMENTID']
if request.method == 'POST':
canvas_strokes = request.form['save_data']
similarUIArray=[]
compressStroke, rect = StrokeParse.compressDataForFullUI(canvas_strokes)
if len(compressStroke) == 0:
responseResult = "Unchanged"
else:
result = GetPrediction.getFasterTop3Predict(compressStroke, PREDICTOR, FASTPREDICT)
resultID = int(result[session['CurrentClassLabel']][1])
rectObj = RectObj(rect, resultID, elementID)
jsonRectObj = rectObjtoJson(rectObj)
# Maintaining Session for Tracking Elements
jsonRectObjs = session['RectObjs']
jsonRectObjs.append(jsonRectObj)
session['RectObjs'] = jsonRectObjs
canvasWidth = int(session['canvas_width'])
canvasHeight = int(session['canvas_height'])
similarUIArray = SimilarUIBOW.findSimilarUIForTest(jsonRectObjs, RICO, canvasWidth, canvasHeight, RICO2)
responseResult = "Updated"
session['ELEMENTID'] = elementID + 1
response = jsonify(predictedResult=responseResult, similarUI=similarUIArray)
return response
| 38.334711 | 555 | 0.699041 |
7943567a53929d13279ad637eb046424864dd101 | 1,631 | py | Python | wplay/utils/helpers.py | Neelaksh-Singh/whatsapp-play | ae07d1d1aa7db297853b06bc25cd1e88f471e397 | [
"MIT"
] | 1 | 2020-05-26T13:00:58.000Z | 2020-05-26T13:00:58.000Z | wplay/utils/helpers.py | Neelaksh-Singh/whatsapp-play | ae07d1d1aa7db297853b06bc25cd1e88f471e397 | [
"MIT"
] | null | null | null | wplay/utils/helpers.py | Neelaksh-Singh/whatsapp-play | ae07d1d1aa7db297853b06bc25cd1e88f471e397 | [
"MIT"
] | 1 | 2020-10-31T09:45:38.000Z | 2020-10-31T09:45:38.000Z |
# region IMPORTS
from pathlib import Path
from whaaaaat import style_from_dict, Token
# endregion
# region WEBSITES
websites = {'whatsapp': 'https://web.whatsapp.com/'}
# endregion
# region SELECTOR
whatsapp_selectors_dict = {
'login_area':'#app > div > div > div.landing-header',
'new_chat_button': '#side > header div[role="button"] span[data-icon="chat"]',
'search_contact_input_new_chat': '#app > div > div span > div > span > div div > label > input',
'contact_list_elements_filtered_new_chat': '#app > div > div span > div > span > div div > div div > div div > span > span[title][dir]',
'group_list_elements_filtered_new_chat': '#app > div > div span > div > span > div div > div div > div div > span[title][dir]',
'search_contact_input':'#side > div > div > label > input',
'chat_list_elements_filtered':'#pane-side > div > div > div > div > div > div > div > div > div > span > span[title][dir]',
'target_focused_title': '#main > header div > div > span[title]',
'message_area': '#main > footer div.selectable-text[contenteditable]',
'last_seen':'#main > header > div > div > span[title]'
}
# endregion
# region PATHS
data_folder_path = Path.home() / 'wplay'
logs_path = Path.home() / 'wplay' / 'logs'
user_data_folder_path = Path.home() / 'wplay' / '.userData'
# endregion
# region MENU STYLES
menu_style = style_from_dict({
Token.Separator: '#6C6C6C',
Token.QuestionMark: '#FF9D00 bold',
Token.Selected: '#5F819D',
Token.Pointer: '#FF9D00 bold',
Token.Instruction: '', # default
Token.Answer: '#5F819D bold',
Token.Question: '',
})
# endregion
| 36.244444 | 140 | 0.663397 |
794356998607baf63bb20e5f361de26484541e6d | 6,321 | py | Python | Assistant/constants.py | Brodevil/Androme | 068113d71a54c48f1216e09dbe7046492e6de06c | [
"MIT"
] | 5 | 2021-07-21T08:48:10.000Z | 2022-01-22T15:42:04.000Z | Assistant/constants.py | NVS-OS/Alice | 068113d71a54c48f1216e09dbe7046492e6de06c | [
"MIT"
] | null | null | null | Assistant/constants.py | NVS-OS/Alice | 068113d71a54c48f1216e09dbe7046492e6de06c | [
"MIT"
] | 2 | 2021-07-21T12:32:03.000Z | 2022-01-22T15:42:29.000Z |
from os import environ
import os
from dotenv import load_dotenv
import pyttsx3
import platform
import shutil
import psutil
import string
from Assistant.exts.networks import localInfo, weather, internetConnection # noqa
from Assistant.exts.workWithFiles import contactInfo # noqa
from Assistant.utils.exceptions import EnvFileValueError # noqa
__all__ = [
"Client",
"Contacts",
"ERROR_REPLIES",
"NEGATIVE_REPLIES",
"POSITIVE_REPLIES",
"WELCOME",
"Storage",
]
load_dotenv()
def Storage():
"""Function to get total harder STORAGE as per the drive"""
totalStorage = 0
usedStorage = 0
freeStorage = 0
for i in list(string.ascii_lowercase):
try:
storeInfo = list(map(lambda x: x // 2 ** 30, shutil.disk_usage(f"{i}:\\")))
totalStorage += storeInfo[0]
usedStorage += storeInfo[1]
freeStorage += storeInfo[2]
except Exception:
pass
return totalStorage, usedStorage, freeStorage
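# Illustrative call (numbers are made up): Storage() -> (931, 410, 521), i.e. total,
# used and free space in GiB summed over every drive letter that actually exists.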
storageInfo = Storage()
engine = pyttsx3.init()
LOCAL_INFORMATION = localInfo()
userSystem = platform.uname()
try:
BATTERY = psutil.sensors_battery()
except Exception:
BATTERY = None
class Client:
ASSISTANT_NAME = environ.get("ASSISTANT_NAME", "Alice")
INTRO = f"Hey There! Now me to introduce myself, I am {ASSISTANT_NAME}. A virtual desktop assistant and I'm here to assist you with a verity of tasks as best as I can. 24 Hours a day, seven days a week, Importing all preferences from home interface, system is now initializing!"
ALICE_INFO = (
"I am written in python by Abhinav, My birthday is 21 December of 2020."
)
ALICE_PASSWORD = environ.get("ALICE_PASSWORD", None)
# Author Info
AUTHOR = "Abhinav(Brodevil)"
CONTACT = "[email protected]"
ALICE_GITHUB_REPOSITORY = "https://github.com/Brodevil/Alice"
DISCORD_ID = "Brodevil#5822"
GENDER = environ.get("GENDER")
if GENDER == "male":
GENDER = "Sir"
elif GENDER == "female":
GENDER = "Mam"
else:
raise EnvFileValueError(
"In .env file GENDER= always should be 'male or female!' which will your GENDER"
)
# Client Choice to Alice
VOICES = [engine.id for engine in engine.getProperty("voices")] # noqa
VOICE_RATE = int(environ.get("VoiceRate", 175))
VOICE = int(environ.get("VoiceNumber", 1))
if VOICE > len(VOICES):
raise EnvFileValueError(
f"There are just {len(VOICES)} available in your system and you had choice the {VOICE} number of voice! please Change it in .env file"
)
# Few Computer status
STORAGE = {
"Total": storageInfo[0],
"Used": storageInfo[1],
"Free": storageInfo[2],
} # values are in GB
MEMORY_STATUS = psutil.virtual_memory().percent # Used memory in percentage
CPU_STATUS = psutil.cpu_percent() # cpu uses in percentage
COMPUTER_INFO = {
"System": userSystem.system,
"Node name": userSystem.node,
"Release": userSystem.release,
"Version": userSystem.version,
"Machine": userSystem.machine,
"Processor": userSystem.processor,
}
INTERNET_CONNECTION = internetConnection()
# Few user Info :
MUSIC_DIRECTORY = environ.get(
"MUSIC", r"C:\Users\ADMIN\Music"
) # Music directory should be without space
FAVOURITE_MUSIC = environ.get("FavMusic", None)
APPLICATIONS_SHORTCUTS_PATH = os.getcwd().replace(
"\\Alice", "\Alice\Applications"
) # Application folder where all the using application shortcuts will available to the user
ALICE_PATH = "".join([os.getcwd().split("\\Alice")[0], "\\Alice\\"])
USER_GITHUB = environ.get("GITHUB", "Brodevil")
if BATTERY is not None:
BATTERY_STATUS = BATTERY.percent
BATTERY_PLUGGED = BATTERY.power_plugged
# Networks infos
if LOCAL_INFORMATION is not None and weather() is not None:
CITY = LOCAL_INFORMATION[0]
LOCATION = (
LOCAL_INFORMATION[1]["country"],
LOCAL_INFORMATION[1]["regionName"],
LOCAL_INFORMATION[1]["city"],
)
NETWORK = LOCAL_INFORMATION[1]["isp"]
WEATHER_INFO = weather()
class Contacts:
files = os.listdir(Client.ALICE_PATH)
if "contactinfo.xlsx" in files:
contactsFile = os.path.join(Client.ALICE_PATH, "contactinfo.xlsx")
else:
contactsFile = os.path.join(Client.ALICE_PATH, "Contact.xlsx")
emails = {
name: email[0] for name, email in contactInfo(contactsFile).items()
} # noqa
contactNumber = {
name: contactNumber[1]
for name, contactNumber in contactInfo(contactsFile).items()
} # noqa
ERROR_REPLIES = [
"Please don't do that.",
"You have to stop.",
"Do you mind?",
"In the future, don't do that.",
"That was a mistake.",
"You blew it.",
"You're bad at computers.",
"Are you trying to kill me?",
"Noooooo!!",
"I can't believe you've done this",
]
NEGATIVE_REPLIES = [
"Noooooo!!",
"Nope.",
"I'm sorry Dave, I'm afraid I can't do that.",
"I don't think so.",
"Not gonna happen.",
"Out of the question.",
"Huh? No.",
"Nah.",
"Naw.",
"Not likely.",
"No way, José.",
"Not in a million years.",
"Fat chance.",
"Certainly not.",
"NEGATORY.",
"Nuh-uh.",
"Not in my house!",
]
POSITIVE_REPLIES = [
"Yep.",
"Absolutely!",
"Can do!",
"Affirmative!",
"Yeah okay.",
"Sure.",
"Sure thing!",
"You're the boss!",
"Okay.",
"No problem.",
"I got you.",
"Alright.",
"You got it!",
"ROGER THAT",
"Of course!",
"Aye aye, cap'n!",
"I'll allow it.",
]
WELCOME = [
"Your're most welcome, anything else you did like me to do!",
"I am to please!",
"My pleasure {Client.GENDER}!",
"You're very welcome!",
"I'm just doing my job, But again Your most Welcome!",
"I'm here to help please let me know if you need anything else!",
"Thank you for the thank you, nice to be acknowledged!",
]
| 30.389423 | 283 | 0.606233 |
794356d16cd37836fceccee06674fd88f677711d | 130 | py | Python | lib/python/kuiseros/utils.py | ArctarusLimited/KuiserOS | 8c56124719009355f2b96700659186088b0e9b5e | [
"MIT"
] | 3 | 2021-07-12T16:23:01.000Z | 2021-07-15T10:41:45.000Z | lib/python/kuiseros/utils.py | ArctarusLimited/KuiserOS | 8c56124719009355f2b96700659186088b0e9b5e | [
"MIT"
] | 1 | 2021-06-12T02:17:34.000Z | 2021-06-12T02:17:34.000Z | lib/python/kuiseros/utils.py | ArctarusLimited/arnix | f42c16ea68e9f762dfd344d8bfbf206b9b392793 | [
"MIT"
] | null | null | null |
import logging
import coloredlogs
logger = logging.getLogger("kuiseros")
coloredlogs.install(level=logging.DEBUG, logger=logger)
| 21.666667 | 55 | 0.823077 |
794356f4cd20149982471772b0d6ac6043d087a0 | 5,477 | py | Python | testcases/cloud_user/elb/elb_policy_basics.py | tbeckham/eutester | 1440187150ce284bd87147e71ac7f0fda194b4d9 | [
"BSD-2-Clause"
] | null | null | null | testcases/cloud_user/elb/elb_policy_basics.py | tbeckham/eutester | 1440187150ce284bd87147e71ac7f0fda194b4d9 | [
"BSD-2-Clause"
] | null | null | null | testcases/cloud_user/elb/elb_policy_basics.py | tbeckham/eutester | 1440187150ce284bd87147e71ac7f0fda194b4d9 | [
"BSD-2-Clause"
] | null | null | null |
# Software License Agreement (BSD License)
#
# Copyright (c) 2009-2011, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Tony Beckham [email protected]
#
import time
from eucaops import Eucaops
from eucaops import ELBops
from boto.ec2.elb import LoadBalancer
from eutester.eutestcase import EutesterTestCase
import random
class LoadBalancerPolicy(EutesterTestCase):
def __init__(self, extra_args= None):
self.setuptestcase()
self.setup_parser()
if extra_args:
for arg in extra_args:
self.parser.add_argument(arg)
self.get_args()
# Setup basic eutester object
if self.args.region:
self.tester = ELBops( credpath=self.args.credpath, region=self.args.region)
else:
self.tester = Eucaops( credpath=self.args.credpath, config_file=self.args.config,password=self.args.password)
self.tester.poll_count = 120
### Populate available zones
zones = self.tester.ec2.get_all_zones()
self.zone = random.choice(zones).name
self.load_balancer_port = 80
self.lb_name="test-" + str(int(time.time()))
self.load_balancer = self.tester.create_load_balancer(zones=[self.zone],
name=self.lb_name,
load_balancer_port=self.load_balancer_port)
assert isinstance(self.load_balancer, LoadBalancer)
def Policy_CRUD(self):
"""
This will test creating, retrieving and deleting AppCookieStickiness and LBCookieStickiness policies.
@raise Exception:
"""
self.debug("policy test")
### create policies
lbcookiestickinesspolicy = "Test-LBCookieStickinessPolicy"
self.tester.create_lb_cookie_stickiness_policy(cookie_expiration_period=300,
lb_name=self.lb_name,
policy_name=lbcookiestickinesspolicy)
appcookiestickinesspolicy = "Test-AppCookieStickinessPolicy"
self.tester.create_app_cookie_stickiness_policy(name="test_cookie",
lb_name=self.lb_name,
policy_name=appcookiestickinesspolicy)
### check that the policies were added
self.tester.sleep(2)
policies = self.tester.describe_lb_policies(self.lb_name)
if lbcookiestickinesspolicy not in str(policies):
raise Exception(lbcookiestickinesspolicy + " not created.")
if appcookiestickinesspolicy not in str(policies):
raise Exception(appcookiestickinesspolicy + " not created.")
### now we delete the policies. There are grace periods added between calls
self.tester.sleep(1)
self.tester.delete_lb_policy(lb_name=self.lb_name, policy_name=appcookiestickinesspolicy)
self.tester.sleep(1)
self.tester.delete_lb_policy(lb_name=self.lb_name, policy_name=lbcookiestickinesspolicy)
self.tester.sleep(1)
### check that the policies were deleted
policies = self.tester.describe_lb_policies(self.lb_name)
if lbcookiestickinesspolicy in str(policies):
raise Exception(lbcookiestickinesspolicy + " not deleted.")
if appcookiestickinesspolicy in str(policies):
raise Exception(appcookiestickinesspolicy + " not deleted.")
def clean_method(self):
self.tester.cleanup_artifacts()
#self.debug("done")
if __name__ == "__main__":
testcase = LoadBalancerPolicy()
### Use the list of tests passed from config/command line to determine what subset of tests to run
### or use a predefined list
list = testcase.args.tests or ["Policy_CRUD"]
### Convert test suite methods to EutesterUnitTest objects
unit_list = [ ]
for test in list:
unit_list.append( testcase.create_testunit_by_name(test) )
### Run the EutesterUnitTest objects
result = testcase.run_test_case_list(unit_list,clean_on_exit=True)
exit(result)
| 44.169355 | 121 | 0.678839 |
794357f87ec719f350c0cb68f7df2a0dbd938fb6 | 10,019 | py | Python | beta/tests/tensorflow/test_sanity_sample.py | wei1tang/nncf | dcf966e5ffbdababb0e1b39942c9e10193487cbf | [
"Apache-2.0"
] | null | null | null | beta/tests/tensorflow/test_sanity_sample.py | wei1tang/nncf | dcf966e5ffbdababb0e1b39942c9e10193487cbf | [
"Apache-2.0"
] | null | null | null | beta/tests/tensorflow/test_sanity_sample.py | wei1tang/nncf | dcf966e5ffbdababb0e1b39942c9e10193487cbf | [
"Apache-2.0"
] | null | null | null |
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import os
import tempfile
from operator import itemgetter
import pytest
import tensorflow as tf
from beta.tests.conftest import TEST_ROOT
from beta.tests.tensorflow.helpers import get_coco_dataset_builders
from beta.tests.tensorflow.test_models import SequentialModel, SequentialModelNoInput
from beta.examples.tensorflow.classification import main as cls_main
from beta.examples.tensorflow.object_detection import main as od_main
from beta.examples.tensorflow.common.model_loader import AVAILABLE_MODELS
od_main.get_dataset_builders = get_coco_dataset_builders
AVAILABLE_MODELS.update({
'SequentialModel': SequentialModel,
'SequentialModelNoInput': SequentialModelNoInput
})
class ConfigFactory:
"""Allows to modify config file before test run"""
def __init__(self, base_config, config_path):
self.config = base_config
self.config_path = str(config_path)
def serialize(self):
with open(self.config_path, 'w') as f:
json.dump(self.config, f)
return self.config_path
def __getitem__(self, item):
return self.config[item]
def __setitem__(self, key, value):
self.config[key] = value
def convert_to_argv(args):
return ' '.join(key if val is None else '{} {}'.format(key, val) for key, val in args.items()).split()
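# Example: convert_to_argv({'--mode': 'test', '--batch-size': 256}) == ['--mode', 'test', '--batch-size', '256'],
# and a value of None yields a bare flag, e.g. {'--foo': None} -> ['--foo'].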
SAMPLE_TYPES = ['classification', 'object_detection']
SAMPLES = {
'classification': cls_main.main,
'object_detection': od_main.main,
}
DATASETS = {
'classification': [('cifar10', 'tfds'), ('cifar10', 'tfds'), ('cifar10', 'tfds')],
'object_detection': [('coco2017', 'tfrecords')],
}
CONFIGS = {
'classification': [
TEST_ROOT.joinpath('tensorflow', 'data', 'configs',
'resnet50_cifar10_magnitude_sparsity_int8.json'),
TEST_ROOT.joinpath('tensorflow', 'data', 'configs',
'sequential_model_cifar10_magnitude_sparsity_int8.json'),
TEST_ROOT.joinpath('tensorflow', 'data', 'configs',
'sequential_model_no_input_cifar10_magnitude_sparsity_int8.json')
],
'object_detection': [TEST_ROOT.joinpath('tensorflow', 'data', 'configs',
'retinanet_coco2017_magnitude_sparsity_int8.json')]
}
BATCHSIZE_PER_GPU = {
'classification': [256, 256, 256],
'object_detection': [3, 3],
}
DATASET_PATHS = {
'classification': {
x: lambda dataset_root, dataset_name=x:
os.path.join(dataset_root, dataset_name) if dataset_root else
os.path.join(tempfile.gettempdir(), dataset_name)
for x, _ in DATASETS['classification']
},
'object_detection': {
'coco2017': lambda dataset_root: TEST_ROOT.joinpath('tensorflow', 'data', 'mock_datasets', 'coco2017')
}
}
CONFIG_PARAMS = list()
for i, sample in enumerate(SAMPLE_TYPES):
for idx, tpl in enumerate(list(zip(CONFIGS[sample],
map(itemgetter(0), DATASETS[sample]),
map(itemgetter(1), DATASETS[sample]),
BATCHSIZE_PER_GPU[sample]))):
CONFIG_PARAMS.append((sample,) + tpl + ('{}_{}'.format(i, idx),))
@pytest.fixture(params=CONFIG_PARAMS,
ids=['-'.join([p[0], p[1].name, p[2], p[3], str(p[4])]) for p in CONFIG_PARAMS])
def _config(request, dataset_dir):
sample_type, config_path, dataset_name, dataset_type, batch_size, tid = request.param
dataset_path = DATASET_PATHS[sample_type][dataset_name](dataset_dir)
with config_path.open() as f:
jconfig = json.load(f)
if 'checkpoint_save_dir' in jconfig.keys():
del jconfig['checkpoint_save_dir']
jconfig['dataset'] = dataset_name
jconfig['dataset_type'] = dataset_type
return {
'sample_type': sample_type,
'nncf_config': jconfig,
'model_name': jconfig['model'],
'dataset_path': dataset_path,
'batch_size': batch_size,
'tid': tid
}
@pytest.fixture(scope='module')
def _case_common_dirs(tmp_path_factory):
return {
'checkpoint_save_dir': str(tmp_path_factory.mktemp('models'))
}
def test_model_eval(_config, tmp_path):
config_factory = ConfigFactory(_config['nncf_config'], tmp_path / 'config.json')
args = {
'--mode': 'test',
'--data': _config['dataset_path'],
'--config': config_factory.serialize(),
'--log-dir': tmp_path,
'--batch-size': _config['batch_size']
}
main = SAMPLES[_config['sample_type']]
main(convert_to_argv(args))
@pytest.mark.dependency(name='tf_test_model_train')
def test_model_train(_config, tmp_path, _case_common_dirs):
checkpoint_save_dir = os.path.join(_case_common_dirs['checkpoint_save_dir'], _config['tid'])
config_factory = ConfigFactory(_config['nncf_config'], tmp_path / 'config.json')
args = {
'--mode': 'train',
'--data': _config['dataset_path'],
'--config': config_factory.serialize(),
'--log-dir': tmp_path,
'--batch-size': _config['batch_size'],
'--epochs': 1,
'--checkpoint-save-dir': checkpoint_save_dir
}
main = SAMPLES[_config['sample_type']]
main(convert_to_argv(args))
assert tf.io.gfile.isdir(checkpoint_save_dir)
assert tf.train.latest_checkpoint(checkpoint_save_dir)
@pytest.mark.dependency(depends=['tf_test_model_train'])
def test_trained_model_eval(_config, tmp_path, _case_common_dirs):
config_factory = ConfigFactory(_config['nncf_config'], tmp_path / 'config.json')
ckpt_path = os.path.join(_case_common_dirs['checkpoint_save_dir'], _config['tid'])
args = {
'--mode': 'test',
'--data': _config['dataset_path'],
'--config': config_factory.serialize(),
'--log-dir': tmp_path,
'--batch-size': _config['batch_size'],
'--resume': ckpt_path
}
main = SAMPLES[_config['sample_type']]
main(convert_to_argv(args))
@pytest.mark.dependency(depends=['tf_test_model_train'])
def test_resume(_config, tmp_path, _case_common_dirs):
checkpoint_save_dir = os.path.join(str(tmp_path), 'models')
config_factory = ConfigFactory(_config['nncf_config'], tmp_path / 'config.json')
ckpt_path = os.path.join(_case_common_dirs['checkpoint_save_dir'], _config['tid'])
args = {
'--mode': 'train',
'--data': _config['dataset_path'],
'--config': config_factory.serialize(),
'--log-dir': tmp_path,
'--batch-size': _config['batch_size'],
'--epochs': 2,
'--checkpoint-save-dir': checkpoint_save_dir,
'--resume': ckpt_path
}
main = SAMPLES[_config['sample_type']]
main(convert_to_argv(args))
assert tf.io.gfile.isdir(checkpoint_save_dir)
assert tf.train.latest_checkpoint(checkpoint_save_dir)
@pytest.mark.dependency(depends=['tf_test_model_train'])
def test_trained_model_resume_train_test_export_last_ckpt(_config, tmp_path, _case_common_dirs):
checkpoint_save_dir = os.path.join(str(tmp_path), 'models')
config_factory = ConfigFactory(_config['nncf_config'], tmp_path / 'config.json')
ckpt_path = os.path.join(_case_common_dirs['checkpoint_save_dir'], _config['tid'])
export_path = os.path.join(str(tmp_path), 'model.pb')
args = {
'--mode': 'train test export',
'--data': _config['dataset_path'],
'--config': config_factory.serialize(),
'--log-dir': tmp_path,
'--batch-size': _config['batch_size'],
'--epochs': 2,
'--checkpoint-save-dir': checkpoint_save_dir,
'--resume': ckpt_path,
'--to-frozen-graph': export_path
}
main = SAMPLES[_config['sample_type']]
main(convert_to_argv(args))
assert tf.io.gfile.isdir(checkpoint_save_dir)
assert tf.train.latest_checkpoint(checkpoint_save_dir)
assert os.path.exists(export_path)
FORMATS = [
'frozen-graph',
'saved-model',
'h5'
]
def get_export_model_name(export_format):
model_name = 'model'
if export_format == 'frozen-graph':
model_name = 'model.pb'
elif export_format == 'h5':
model_name = 'model.h5'
return model_name
@pytest.mark.dependency(depends=['tf_test_model_train'])
@pytest.mark.parametrize('export_format', FORMATS, ids=FORMATS)
def test_export_with_resume(_config, tmp_path, export_format, _case_common_dirs):
config_factory = ConfigFactory(_config['nncf_config'], tmp_path / 'config.json')
ckpt_path = os.path.join(_case_common_dirs['checkpoint_save_dir'], _config['tid'])
if export_format == 'saved-model':
compression_config = _config['nncf_config'].get('compression', {})
if isinstance(compression_config, dict):
compression_config = [compression_config]
for config in compression_config:
if config.get('algorithm', '') == 'quantization':
pytest.skip()
export_path = os.path.join(str(tmp_path), get_export_model_name(export_format))
args = {
'--mode': 'export',
'--config': config_factory.serialize(),
'--log-dir': tmp_path,
'--resume': ckpt_path,
'--to-{}'.format(export_format): export_path,
}
main = SAMPLES[_config['sample_type']]
main(convert_to_argv(args))
model_path = os.path.join(export_path, 'saved_model.pb') \
if export_format == 'saved-model' else export_path
assert os.path.exists(model_path)
| 34.548276 | 110 | 0.665935 |
7943584b8765e20ec02be1b3647fcfafb4ad394c | 5,532 | py | Python | dvc/ui/prompt.py | lucasalavapena/dvc | 230eb7087df7f063ded7422af7ae45bd04eb794a | [
"Apache-2.0"
] | null | null | null | dvc/ui/prompt.py | lucasalavapena/dvc | 230eb7087df7f063ded7422af7ae45bd04eb794a | [
"Apache-2.0"
] | null | null | null | dvc/ui/prompt.py | lucasalavapena/dvc | 230eb7087df7f063ded7422af7ae45bd04eb794a | [
"Apache-2.0"
] | null | null | null |
from typing import TYPE_CHECKING, Any, Callable, Optional, Tuple, Union
import rich.prompt
if TYPE_CHECKING:
from rich.console import Console as RichConsole
from . import RichText
TextType = Union["RichText", str]
class InvalidResponse(rich.prompt.InvalidResponse):
pass
class RichInputMixin:
"""Prevents exc message from printing in the same line on Ctrl + D/C."""
@classmethod
def get_input(cls, console: "RichConsole", *args, **kwargs) -> str:
try:
return super().get_input( # type: ignore[misc]
console, *args, **kwargs
)
except (KeyboardInterrupt, EOFError):
console.print()
raise
class Prompt(RichInputMixin, rich.prompt.Prompt):
"""Extended version supports custom validations and omission of values."""
omit_value: str = "n"
def __init__(
self,
*args: Any,
allow_omission: bool = False,
**kwargs: Any,
) -> None:
super().__init__(*args, **kwargs)
self.allow_omission: bool = allow_omission
@classmethod
def ask( # pylint: disable=arguments-differ
cls,
*args: Any,
allow_omission: bool = False,
validator: Optional[
Callable[[str], Union[str, Tuple[str, str]]]
] = None,
**kwargs: Any,
) -> str:
"""Extended to pass validator argument.
Args:
allow_omission: Allows omitting prompts
validator: Validates entered value through prompts
                validator can raise an InvalidResponse, after which
                it'll reprompt the user. It can also return:
                - processed/modified value that will be returned to the user.
- tuple of processed value and a message to show to the user.
Note that the validator is always required to return a value,
even if it does not process/modify it.
"""
default = kwargs.pop("default", None)
stream = kwargs.pop("stream", None)
return cls(*args, allow_omission=allow_omission, **kwargs)(
default=default if default is not None else ...,
stream=stream,
validator=validator,
)
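    # Illustrative usage (the validator below is hypothetical, not part of this module):
    #
    #   def non_empty(value):
    #       if not value.strip():
    #           raise InvalidResponse("[prompt.invalid]Please enter a name.")
    #       return value.strip(), f"Using '{value.strip()}'"
    #
    #   name = Prompt.ask("Project name", validator=non_empty)
    #
    # The validator either raises InvalidResponse (the user is re-prompted) or returns the
    # processed value, optionally as a (value, message) tuple whose message is printed.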
@classmethod
def prompt_(
cls,
*args: Any,
allow_omission: bool = False,
validator: Optional[
Callable[[str], Union[str, Tuple[str, str]]]
] = None,
**kwargs: Any,
) -> Optional[str]:
"""Extends `.ask()` to allow skipping/returning None type."""
value = cls.ask(
*args, allow_omission=allow_omission, validator=validator, **kwargs
)
if allow_omission and value == cls.omit_value:
return None
return value
def __call__(
self,
*args: Any,
validator: Optional[
Callable[[str], Union[str, Tuple[str, str]]]
] = None,
**kwargs: Any,
) -> str:
"""Supports validating response and show warning message."""
# We cannot put this in `process_response` as it does not validate
# `default` values.
while True:
value = super().__call__(*args, **kwargs)
if validator is None or (
self.allow_omission and value == self.omit_value
):
return value
try:
validated = validator(value)
except InvalidResponse as exc:
self.on_validate_error(value, exc)
continue
else:
if isinstance(validated, tuple):
value, message = validated
else:
value, message = validated, ""
if message:
self.console.print(message)
return value
def process_response(self, value: str) -> str:
"""Disallow empty values."""
ret = super().process_response(value)
if not ret:
raise InvalidResponse(
"[prompt.invalid]Response required. Please try again."
)
return ret
def render_default(self, default):
from rich.text import Text
return Text(f"{default!s}", "green")
def make_prompt(self, default):
prompt = self.prompt.copy()
prompt.end = ""
parts = []
if (
default is not ...
and self.show_default
and isinstance(default, (str, self.response_type))
):
_default = self.render_default(default)
parts.append(_default)
if self.allow_omission and parts:
from rich.text import Text
parts.append(Text(f", {self.omit_value} to omit", style="italic"))
if parts:
parts = [" [", *parts, "]"]
for part in parts:
prompt.append(part)
prompt.append(self.prompt_suffix)
return prompt
class Confirm(RichInputMixin, rich.prompt.Confirm):
def make_prompt(self, default):
prompt = self.prompt.copy()
prompt.end = ""
prompt.append(" [")
yes, no = self.choices
for idx, (val, choice) in enumerate(((True, yes), (False, no))):
if idx:
prompt.append("/")
if val == default:
prompt.append(choice.upper(), "green")
else:
prompt.append(choice)
prompt.append("]")
prompt.append(self.prompt_suffix)
return prompt
| 29.741935 | 79 | 0.553326 |
79435877b800dfbf140afd5eeffba64f0a934838 | 943 | py | Python | IntroProPython/listagem/capitulo 08/08.10 - Outra forma de calcular o fatorial.py | SweydAbdul/estudos-python | b052708d0566a0afb9a1c04d035467d45f820879 | [
"MIT"
] | null | null | null | IntroProPython/listagem/capitulo 08/08.10 - Outra forma de calcular o fatorial.py | SweydAbdul/estudos-python | b052708d0566a0afb9a1c04d035467d45f820879 | [
"MIT"
] | null | null | null | IntroProPython/listagem/capitulo 08/08.10 - Outra forma de calcular o fatorial.py | SweydAbdul/estudos-python | b052708d0566a0afb9a1c04d035467d45f820879 | [
"MIT"
] | null | null | null |
##############################################################################
# Part of the book Introdução à Programação com Python
# Author: Nilo Ney Coutinho Menezes
# Editora Novatec (c) 2010-2017
# First edition - November/2010 - ISBN 978-85-7522-250-8
# First reprint - October/2011
# Second reprint - November/2012
# Third reprint - August/2013
# Second edition - June/2014 - ISBN 978-85-7522-408-3
# First reprint - Second edition - May/2015
# Second reprint - Second edition - January/2016
# Third reprint - Second edition - June/2016
# Fourth reprint - Second edition - March/2017
#
# Site: http://python.nilo.pro.br/
#
# File: listagem\capitulo 08\08.10 - Outra forma de calcular o fatorial.py
##############################################################################
def fatorial(n):
fat = 1
x = 1
while x <= n:
fat *= x
x += 1
return fat
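# Worked example: fatorial(5) iterates x = 1..5 and returns 1*2*3*4*5 = 120.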
| 34.925926 | 78 | 0.582185 |
794358e22103c19b2229d8aee80dae664a8724f1 | 8,931 | py | Python | starcli/search.py | AkashD-Developer/starcli | 83a6a835fe7d98727bbecfb68d3d42c2b300203e | [
"MIT"
] | 1 | 2020-10-02T09:55:41.000Z | 2020-10-02T09:55:41.000Z | starcli/search.py | ineelshah/starcli | ab999f90d8d9dde9e4cdd6ceee01a81d2a81b524 | [
"MIT"
] | 3 | 2021-03-29T11:02:23.000Z | 2022-03-14T11:04:12.000Z | starcli/search.py | ineelshah/starcli | ab999f90d8d9dde9e4cdd6ceee01a81d2a81b524 | [
"MIT"
] | null | null | null | """ starcli.search """
# Standard library imports
from datetime import datetime, timedelta
from time import sleep
import logging
from random import randint
import re
# Third party imports
import requests
from click import secho
import colorama
from gtrending import fetch_repos
import http.client
from rich.logging import RichHandler
API_URL = "https://api.github.com/search/repositories"
date_range_map = {"today": "daily", "this-week": "weekly", "this-month": "monthly"}
status_actions = {
"retry": "Failed to retrieve data. Retrying in ",
"invalid": "The server was unable to process the request.",
"unauthorized": "The server did not accept the credentials. See: https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token",
"not_found": "The server indicated no data was found.",
"unsupported": "The request is not supported.",
"unknown": "An unknown error occurred.",
"valid": "The request returned successfully, but an unknown exception occurred.",
}
FORMAT = "%(message)s"
httpclient_logger = logging.getLogger("http.client")
def httpclient_logging_debug(level=logging.DEBUG):
def httpclient_log(*args):
httpclient_logger.log(level, " ".join(args))
http.client.print = httpclient_log
http.client.HTTPConnection.debuglevel = 1
def debug_requests_on():
""" Turn on the logging for requests """
logging.basicConfig(
level=logging.DEBUG,
format=FORMAT,
datefmt="[%Y-%m-%d]",
handlers=[RichHandler()],
)
logger = logging.getLogger(__name__)
from http.client import HTTPConnection
httpclient_logging_debug()
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
def convert_datetime(date, date_format="%Y-%m-%d"):
""" Safely convert a date string to datetime """
try:
# try to turn the string into a date-time object
tmp_date = datetime.strptime(date, date_format)
except ValueError: # ValueError will be thrown if format is invalid
secho(
"Invalid date: " + date + " must be yyyy-mm-dd",
fg="bright_red",
)
return None
return tmp_date
def get_date(date):
""" Finds the date info in a string """
prefix = ""
if any(i in date[0] for i in [">", "=", "<"]):
if "=" in date[1]:
prefix = date[:2]
date = date.strip(prefix)
else:
prefix = date[0]
date = date.strip(prefix)
tmp_date = convert_datetime(date)
if not tmp_date:
return None
return prefix + tmp_date.strftime("%Y-%m-%d")
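# Example behaviour: get_date(">=2021-01-01") -> ">=2021-01-01" (comparison prefix kept),
# get_date("2021-01-01") -> "2021-01-01", and an invalid string returns None after
# convert_datetime() has printed the error message.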
def get_valid_request(url, auth=""):
"""
Provide a URL to submit a GET request for and handle a connection error.
"""
while True:
try:
session = requests.Session()
if auth:
session.auth = (auth.split(":")[0], auth.split(":")[1])
request = session.get(url)
except requests.exceptions.ConnectionError:
secho("Internet connection error...", fg="bright_red")
return None
        if request.status_code not in (200, 202):
handling_code = search_error(request.status_code)
if handling_code == "retry":
for i in range(15, 0, -1):
secho(
f"{status_actions[handling_code]} {i} seconds...",
fg="bright_yellow",
) # Print and update a timer
sleep(1)
elif handling_code in status_actions:
secho(status_actions[handling_code], fg="bright_yellow")
return None
else:
secho("An invalid handling code was returned.", fg="bright_red")
return None
else:
break
return request
def search_error(status_code):
"""
This returns a directive on how to handle a given HTTP status code.
"""
int_status_code = int(
status_code
) # Need to make sure the status code is an integer
http_code_handling = {
"200": "valid",
"202": "valid",
"204": "valid",
"400": "invalid",
"401": "unauthorized",
"403": "retry",
"404": "not_found",
"405": "invalid",
"422": "not_found",
"500": "invalid",
"501": "invalid",
}
try:
return http_code_handling[str(int_status_code)]
except KeyError:
return "unsupported"
def search(
language=None,
created=None,
pushed=None,
stars=">=100",
topics=[],
user=None,
debug=False,
order="desc",
auth="",
):
""" Returns repositories searched from GitHub API """
date_format = "%Y-%m-%d" # date format in iso format
if debug:
debug_requests_on()
logger = logging.getLogger(__name__)
logger.debug("Search: created param:" + created)
logger.debug("Search: order param: " + order)
day_range = 0 - randint(100, 400) # random negative from 100 to 400
if not created: # if created not provided
# creation date: the time now minus a random number of days
# 100 to 400 days - which was stored in day_range
created_str = ">=" + (datetime.utcnow() + timedelta(days=day_range)).strftime(
date_format
)
else: # if created is provided
created_str = get_date(created)
if not created_str:
return None
if not pushed: # if pushed not provided
# pushed date: start, is the time now minus a random number of days
# 100 to 400 days - which was stored in day_range
pushed_str = ">=" + (datetime.utcnow() + timedelta(days=day_range)).strftime(
date_format
)
else: # if pushed is provided
pushed_str = get_date(pushed)
if not pushed_str:
return None
if user:
query = f"user:{user}+"
else:
query = ""
query += f"stars:{stars}+created:{created_str}" # construct query
query += f"+pushed:{pushed_str}" # add pushed info to query
query += f"+language:{language}" if language else "" # add language to query
query += f"".join(["+topic:" + i for i in topics]) # add topics to query
url = f"{API_URL}?q={query}&sort=stars&order={order}" # use query to construct url
if debug:
logger.debug("Search: url:" + url) # print the url when debugging
if debug and auth:
logger.debug("Auth: on")
elif debug:
logger.debug("Auth: off")
request = get_valid_request(url, auth)
if request is None:
return request
return request.json()["items"]
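# Illustrative query construction (dates will differ because created/pushed default to a
# random recent window): search(language="python", stars=">=50", topics=["cli"]) requests
# something like:
#   https://api.github.com/search/repositories?q=stars:>=50+created:>=2020-06-01+pushed:>=2020-06-01+language:python+topic:cli&sort=stars&order=desc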
def search_github_trending(
language=None, spoken_language=None, order="desc", stars=">=10", date_range=None
):
""" Returns trending repositories from github trending page """
if date_range:
gtrending_repo_list = fetch_repos(
language, spoken_language, date_range_map[date_range]
)
else:
gtrending_repo_list = fetch_repos(language, spoken_language)
repositories = []
for gtrending_repo in gtrending_repo_list:
repo_dict = convert_repo_dict(gtrending_repo)
repo_dict["date_range"] = (
str(repo_dict["date_range"]) + " stars " + date_range.replace("-", " ")
if date_range
else None
)
repo_dict["watchers_count"] = -1 # watchers count not available
# filter by number of stars
num = [int(s) for s in re.findall(r"\d+", stars)][0]
if (
("<" in stars and repo_dict["stargazers_count"] < num)
or ("<=" in stars and repo_dict["stargazers_count"] <= num)
or (">" in stars and repo_dict["stargazers_count"] > num)
or (">=" in stars and repo_dict["stargazers_count"] >= num)
):
repositories.append(repo_dict)
if order == "asc":
return sorted(repositories, key=lambda repo: repo["stargazers_count"])
return sorted(repositories, key=lambda repo: repo["stargazers_count"], reverse=True)
def convert_repo_dict(gtrending_repo):
repo_dict = {}
repo_dict["full_name"] = gtrending_repo.get("fullname")
repo_dict["name"] = gtrending_repo.get("name")
repo_dict["html_url"] = gtrending_repo.get("url")
repo_dict["stargazers_count"] = gtrending_repo.get("stars", -1)
repo_dict["forks_count"] = gtrending_repo.get("forks", -1)
repo_dict["language"] = gtrending_repo.get("language")
# gtrending_repo has key `description` and value is empty string if it's empty
repo_dict["description"] = (
gtrending_repo.get("description")
if gtrending_repo.get("description") != ""
else None
)
repo_dict["date_range"] = gtrending_repo.get("currentPeriodStars")
return repo_dict
| 32.125899 | 162 | 0.612473 |
7943591df8cb69c5278ebfe945b0349d3a132cc2 | 200 | py | Python | cartography/intel/aws/ec2/util.py | Relys/cartography | 0f71b3f0246665d5fa065afa2e3dc46c22d6c689 | [
"Apache-2.0"
] | 1 | 2021-03-26T12:00:26.000Z | 2021-03-26T12:00:26.000Z | cartography/intel/aws/ec2/util.py | srics/cartography | 19a06766e304d657d956246179a2bb01a6d9aef6 | [
"Apache-2.0"
] | 1 | 2021-02-23T18:08:04.000Z | 2021-03-31T08:17:23.000Z | cartography/intel/aws/ec2/util.py | srics/cartography | 19a06766e304d657d956246179a2bb01a6d9aef6 | [
"Apache-2.0"
] | 1 | 2021-03-31T17:55:31.000Z | 2021-03-31T17:55:31.000Z |
import botocore.config
# TODO memoize this
def get_botocore_config():
return botocore.config.Config(
read_timeout=360,
retries={
'max_attempts': 10,
},
)
| 16.666667 | 34 | 0.595 |
79435ae2d7523a858e28975e162aa2ee1d09c918 | 17,838 | py | Python | talks/events/views.py | alan-turing-institute/talks.ox | 5e172b7bb7296fcfc2d5c1b5978ec98a6643d90a | [
"Apache-2.0"
] | 5 | 2015-09-03T11:46:07.000Z | 2022-01-12T10:15:50.000Z | talks/events/views.py | alan-turing-institute/talks.ox | 5e172b7bb7296fcfc2d5c1b5978ec98a6643d90a | [
"Apache-2.0"
] | 306 | 2015-01-05T10:16:56.000Z | 2021-06-10T08:00:31.000Z | talks/events/views.py | alan-turing-institute/talks.ox | 5e172b7bb7296fcfc2d5c1b5978ec98a6643d90a | [
"Apache-2.0"
] | 5 | 2016-04-21T10:40:20.000Z | 2021-01-05T09:15:23.000Z |
from __future__ import absolute_import
from __future__ import print_function
import logging
import functools
from datetime import date, timedelta, datetime
from django.urls import reverse
from django.http.response import Http404
from django.shortcuts import render, get_object_or_404, redirect
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .models import Event, EventGroup, Person, TopicItem
from talks.events.models import ROLES_SPEAKER, ROLES_HOST, ROLES_ORGANISER
from talks.events.datasources import TOPICS_DATA_SOURCE, DEPARTMENT_DATA_SOURCE, DEPARTMENT_DESCENDANT_DATA_SOURCE
from talks.users.models import COLLECTION_ROLES_OWNER, COLLECTION_ROLES_EDITOR, COLLECTION_ROLES_READER
from .forms import BrowseEventsForm, BrowseSeriesForm
from talks.api.services import events_search
from talks.api_ox.api import ApiException, OxfordDateResource
logger = logging.getLogger(__name__)
def homepage(request):
HOMEPAGE_YOUR_TALKS_RESULTS_LIMIT = 5
today = date.today()
tomorrow = today + timedelta(days=1)
events = Event.objects.filter(start__gte=today,
start__lt=tomorrow).order_by('start')
event_groups = EventGroup.objects.for_events(events)
conferences = filter(lambda eg: eg.group_type == EventGroup.CONFERENCE,
event_groups)
series = filter(lambda eg: eg.group_type == EventGroup.SEMINAR,
event_groups)
group_no_type = filter(lambda eg: not eg.group_type,
event_groups)
context = {
'events': events,
'event_groups': event_groups,
'conferences': conferences,
'group_no_type': group_no_type,
'series': series
}
if request.tuser:
# Authenticated user
collections = request.tuser.collections.all()
if collections:
user_events = collections[0].get_all_events()
for collection in collections[1:]:
user_events = user_events | collection.get_all_events()
context['collections'] = collections
user_events = user_events.filter(start__gte=today)
if (user_events.count() > HOMEPAGE_YOUR_TALKS_RESULTS_LIMIT):
context['user_events_more_link'] = True
context['user_events'] = user_events[:HOMEPAGE_YOUR_TALKS_RESULTS_LIMIT]
return render(request, 'front.html', context)
def browse_events(request):
modified_request_parameters = request.GET.copy()
modified_request_parameters['subdepartments'] = "false"
if (len(request.GET) == 0) or (len(request.GET) == 1) and request.GET.get('limit_to_collections'):
today = date.today()
modified_request_parameters['start_date'] = today.strftime("%d/%m/%Y")
modified_request_parameters['include_subdepartments'] = True
modified_request_parameters['subdepartments'] = 'true'
elif request.GET.get('include_subdepartments'):
modified_request_parameters['include_subdepartments'] = True
modified_request_parameters['subdepartments'] = 'true'
else:
modified_request_parameters['include_subdepartments'] = False
modified_request_parameters['subdepartments'] = 'false'
browse_events_form = BrowseEventsForm(modified_request_parameters)
count = request.GET.get('count', 20)
page = request.GET.get('page', 1)
if request.GET.get('limit_to_collections'):
modified_request_parameters['limit_to_collections'] = request.tuser.collections.all()
# used to build a URL fragment that does not
# contain "page" so that we can... paginate
args = {'count': count}
for param in ('start_date', 'to', 'venue', 'organising_department', 'include_subdepartments', 'seriesid', 'limit_to_collections'):
if modified_request_parameters.get(param):
args[param] = modified_request_parameters.get(param)
if not modified_request_parameters['start_date']:
return redirect(reverse('browse_events'))
events = events_search(modified_request_parameters)
paginator = Paginator(events, count)
try:
events = paginator.page(page)
except (PageNotAnInteger, EmptyPage):
return redirect(reverse('browse_events'))
grouped_events = group_events(events)
fragment = '&'.join(["{k}={v}".format(k=k, v=v) for k, v in args.items()])
old_query = request.META['QUERY_STRING']
dates_start = old_query.find("start_date=")
dates_end = dates_start + 35
today = date.today()
offset_Sunday = (6 - today.weekday()) % 7 # weekday(): Monday=0 .... Sunday=6
query_params_all = request.GET.copy()
query_params_all['start_date']=str(today)
query_params_all.pop('to', None)
query_params_today = request.GET.copy()
query_params_today['start_date']=str(today)
query_params_today['to']=str(today)
query_params_tomorrow = request.GET.copy()
query_params_tomorrow['start_date']=str(today+timedelta(days=1))
query_params_tomorrow['to']=str(today+timedelta(days=2))
query_params_this_week = request.GET.copy()
query_params_this_week['start_date']=str(today)
query_params_this_week['to']=str(today+timedelta(days=offset_Sunday))
query_params_next_week = request.GET.copy()
query_params_next_week['start_date']=str(today+timedelta(days=offset_Sunday+1))
query_params_next_week['to']=str(today+timedelta(days=offset_Sunday+7))
query_params_next_30days = request.GET.copy()
query_params_next_30days['start_date']=str(today)
query_params_next_30days['to']=str(today+timedelta(days=30))
tab_dates = [
{
'label': 'All',
'href': 'browse?' + query_params_all.urlencode(),
'active': False
}, {
'label': 'Today',
'href': 'browse?' + query_params_today.urlencode(),
'active': False
}, {
'label': 'Tomorrow',
'href': 'browse?' + query_params_tomorrow.urlencode(),
'active': False
}, {
'label': 'This week',
'href': 'browse?' + query_params_this_week.urlencode(),
'active': False
}, {
'label': 'Next week',
'href': 'browse?' + query_params_next_week.urlencode(),
'active': False
}, {
'label': 'Next 30 days',
'href': 'browse?' + query_params_next_30days.urlencode(),
'active': False
}
]
if not old_query:
tab_dates[0]['active'] = True
else:
for tab in tab_dates:
if tab['href'] == 'browse?' + old_query:
tab['active'] = True
date_continued_previous = False
if int(page) != 1:
        # if the date of the first talk of the current page is the same as that of the last talk of the previous page
if list(events)[0].start.date()==list(paginator.page(int(page)-1))[-1].start.date():
date_continued_previous = True
date_continued_next = False
if paginator.num_pages != int(page):
        # if the date of the last talk of the current page is the same as that of the first talk of the next page
if list(events)[-1].start.date()==list(paginator.page(int(page)+1))[0].start.date():
date_continued_next = True
context = {
'events': events,
'grouped_events': grouped_events,
'fragment': fragment,
'browse_events_form': browse_events_form,
'start_date': modified_request_parameters.get('start_date'),
'end_date': modified_request_parameters.get('to'),
'tab_dates': tab_dates,
'date_continued_previous': date_continued_previous,
'date_continued_next': date_continued_next,
}
return render(request, 'events/browse.html', context)
def group_events (events):
grouped_events = {}
event_dates = []
for group_event in events:
hours = datetime.strftime(group_event.start, '%I')
minutes = datetime.strftime(group_event.start, ':%M')
if minutes==":00":
minutes = ""
ampm = datetime.strftime(group_event.start, '%p')
group_event.display_time = group_event.formatted_time
# if there is no oxford_date field, events are search results
# we need to call date_to_oxford_date to create the oxford date
if not group_event.oxford_date:
group_event.oxford_date = date_to_oxford_date(group_event.start)
comps = group_event.oxford_date.components
key = comps['day_name']+ " " +str(comps['day_number'])+ " " +comps['month_long']+ " "
key+= str(comps['year'])+ " ("+ str(comps['week']) + comps['ordinal']+ " Week, " +comps['term_long']+ " Term)"
if key not in grouped_events:
grouped_events[key] = []
event_dates.append(key)
grouped_events[key].append(group_event)
result_events = []
for event_date in event_dates:
result_events.append({"start_date":event_date, "gr_events":grouped_events[event_date]})
return result_events
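# Illustrative output shape (the date here is made up):
# [{'start_date': 'Monday 1 May 2017 (1st Week, Trinity Term)',
#   'gr_events': [<Event>, <Event>]}, ...]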
def date_to_oxford_date(date_str):
func = functools.partial(OxfordDateResource.from_date, date_str)
try:
res = func()
return res
except ApiException:
        logger.warning('Unable to reach API', exc_info=True)
return None
def upcoming_events(request):
today = date.today()
events = Event.objects.filter(start__gte=today).order_by('start')
return _events_list(request, events)
def events_for_year(request, year):
events = Event.objects.filter(start__year=year)
return _events_list(request, events)
def events_for_month(request, year, month):
events = Event.objects.filter(start__year=year,
start__month=month)
return _events_list(request, events)
def events_for_day(request, year, month, day):
events = Event.objects.filter(start__year=year,
start__month=month,
start__day=day)
return _events_list(request, events)
def _events_list(request, events):
context = {'events': events}
return render(request, 'events/events.html', context)
def show_event(request, event_slug):
try:
# TODO depending if user is admin or not,
# we should use Event.published here...
ev = Event.objects.select_related('group').get(slug=event_slug)
except Event.DoesNotExist:
raise Http404
context = {
'event': ev,
'url': request.build_absolute_uri(reverse('show-event', args=[ev.slug])),
'location': ev.api_location,
'speakers': ev.speakers.all(),
'hosts': ev.hosts.all(),
'organisers': ev.organisers.all(),
'editors': ev.editor_set.all(),
}
if request.tuser:
context['editable_collections'] = request.tuser.collections.filter(talksusercollection__role__in=[COLLECTION_ROLES_OWNER, COLLECTION_ROLES_EDITOR]).distinct()
if request.GET.get('format') == 'txt':
return render(request, 'events/event.txt.html', context)
else:
return render(request, 'events/event.html', context)
def list_event_groups(request):
modified_request_parameters = request.GET.copy()
if request.POST.get('seriesslug'):
return redirect('show-event-group', request.POST.get('seriesslug'))
browse_series_form = BrowseSeriesForm(modified_request_parameters)
object_list = EventGroup.objects.all().order_by('title')
context = {
'object_list': object_list,
'browse_events_form': browse_series_form,
}
return render(request, "events/event_group_list.html", context)
def show_event_group(request, event_group_slug):
group = get_object_or_404(EventGroup, slug=event_group_slug)
events = group.events.order_by('start')
show_all = request.GET.get('show_all', False)
if not show_all:
events = events.filter(start__gte=date.today())
grouped_events = group_events(events)
context = {
'event_group': group,
'events': events,
'grouped_events': grouped_events,
'organisers': group.organisers.all(),
'show_all': show_all,
'editors': group.editor_set.all(),
}
if request.tuser:
context['editable_collections'] = request.tuser.collections.filter(talksusercollection__role__in=[COLLECTION_ROLES_OWNER, COLLECTION_ROLES_EDITOR]).distinct()
if request.GET.get('format') == 'txt':
return render(request, 'events/event-group.txt.html', context)
else:
return render(request, 'events/event-group.html', context)
def show_person(request, person_slug):
person = get_object_or_404(Person, slug=person_slug)
events = Event.objects.order_by('start')
host_events = events.filter(personevent__role=ROLES_HOST, personevent__person__slug=person.slug)
speaker_events = events.filter(personevent__role=ROLES_SPEAKER, personevent__person__slug=person.slug)
organiser_events = events.filter(personevent__role=ROLES_ORGANISER, personevent__person__slug=person.slug)
grouped_host_events = group_events(host_events)
grouped_speaker_events = group_events(speaker_events)
grouped_organiser_events = group_events(organiser_events)
context = {
'person': person,
'host_events': host_events,
'speaker_events': speaker_events,
'organiser_events': organiser_events,
'grouped_host_events': grouped_host_events,
'grouped_speaker_events': grouped_speaker_events,
'grouped_organiser_events': grouped_organiser_events,
}
if request.GET.get('format') == 'txt':
return render(request, 'events/person.txt.html', context)
else:
return render(request, 'events/person.html', context)
def show_topic(request):
topic_uri = request.GET.get('uri')
api_topic = TOPICS_DATA_SOURCE.get_object_by_id(topic_uri)
events = Event.objects.filter(topics__uri=topic_uri).order_by('start')
#RB 3/5/16 get filtered by current talks in topic
show_all = request.GET.get('show_all', False)
if not show_all:
events = events.filter(start__gte=date.today())
grouped_events = group_events(events)
context = {
'grouped_events': grouped_events,
'topic': api_topic,
'events': events,
        'show_all': show_all,  # RB 3/5/16 get filtered by current talks in topic
}
if request.GET.get('format') == 'txt':
return render(request, 'events/topic.txt.html', context)
else:
return render(request, 'events/topic.html', context)
def list_topics(request):
topics = TopicItem.objects.distinct()
topics_results = []
for topic in topics.all():
events = Event.objects.filter(topics__uri=topic.uri)
        if len(events) > 0:
api_topic = TOPICS_DATA_SOURCE.get_object_by_id(topic.uri)
if api_topic not in topics_results:
topics_results.append(api_topic)
topics_results.sort(key=lambda topic:topic['prefLabel'])
context = {
'topics': topics_results,
}
return render(request, 'events/topic_list.html', context)
def show_department_organiser(request, org_id):
org = DEPARTMENT_DATA_SOURCE.get_object_by_id(org_id)
events = Event.objects.filter(department_organiser=org_id).order_by('start')
show_all = request.GET.get('show_all', False)
if not show_all:
events = events.filter(start__gte=date.today())
context = {
'org': org,
'events': events,
'department': org_id
}
if request.tuser:
context['editable_collections'] = request.tuser.collections.filter(talksusercollection__role__in=[COLLECTION_ROLES_OWNER, COLLECTION_ROLES_EDITOR]).distinct()
return render(request, 'events/department.html', context)
def show_department_descendant(request, org_id):
org = DEPARTMENT_DATA_SOURCE.get_object_by_id(org_id)
try:
results = DEPARTMENT_DESCENDANT_DATA_SOURCE.get_object_by_id(org_id)
descendants = results['descendants']
sub_orgs = descendants
ids = [o['id'] for o in sub_orgs]
ids.append(results['id']) # Include self
events = Event.objects.filter(department_organiser__in=ids).order_by('start')
except Exception:
print("Error retrieving sub-departments, only showing department")
events = Event.objects.filter(department_organiser=org).order_by('start')
sub_orgs = []
show_all = request.GET.get('show_all', False)
if not show_all:
events = events.filter(start__gte=date.today())
grouped_events = group_events(events)
if org:
if '_links' in org:
if 'parent' in org['_links']:
parent_href = org['_links']['parent'][0]['href']
parent_id = parent_href[parent_href.find("oxpoints"):]
parent = DEPARTMENT_DATA_SOURCE.get_object_by_id(parent_id)
else:
parent = None
else:
parent = None
else:
parent = None
context = {
'org': org,
'sub_orgs': sub_orgs,
'events': events,
'grouped_events': grouped_events,
'parent': parent,
'show_all': show_all,
'todays_date': date.today().strftime("%Y-%m-%d"),
'department': org_id
}
if request.tuser:
context['editable_collections'] = request.tuser.collections.filter(talksusercollection__role__in=[COLLECTION_ROLES_OWNER, COLLECTION_ROLES_EDITOR]).distinct()
if request.GET.get('format') == 'txt':
return render(request, 'events/department.txt.html', context)
else:
return render(request, 'events/department.html', context)
def list_departments(request):
context = {}
return render(request, 'events/department_list.html', context)
| 36.931677 | 166 | 0.668517 |
79435ae9be8efc8b1a5e5d31b930412e428bd99f | 731 | py | Python | python/phonenumbers/shortdata/region_IM.py | nickhargreaves/python-phonenumbers | df203baa68fc307e5c833c015bee2e15386a8dbe | [
"Apache-2.0"
] | null | null | null | python/phonenumbers/shortdata/region_IM.py | nickhargreaves/python-phonenumbers | df203baa68fc307e5c833c015bee2e15386a8dbe | [
"Apache-2.0"
] | null | null | null | python/phonenumbers/shortdata/region_IM.py | nickhargreaves/python-phonenumbers | df203baa68fc307e5c833c015bee2e15386a8dbe | [
"Apache-2.0"
] | 1 | 2020-09-08T14:45:34.000Z | 2020-09-08T14:45:34.000Z | """Auto-generated file, do not edit by hand. IM metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_IM = PhoneMetadata(id='IM', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[189]\\d\\d(?:\\d{2,3})?', possible_length=(3, 5, 6)),
emergency=PhoneNumberDesc(national_number_pattern='999', example_number='999', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='1\\d{2}(?:\\d{3})?|8(?:6444|9887)|999', example_number='150', possible_length=(3, 5, 6)),
sms_services=PhoneNumberDesc(national_number_pattern='8(?:6444|9887)', example_number='86444', possible_length=(5,)),
short_data=True)
| 73.1 | 145 | 0.746922 |
79435bf60733961a6580b77aa941a4f2ccc6bd2b | 6,662 | py | Python | lib/rucio/tests/test_transfer.py | justincc/rucio | 95d81403c835d9f43fc30d328a8e2e388617a369 | [
"Apache-2.0"
] | null | null | null | lib/rucio/tests/test_transfer.py | justincc/rucio | 95d81403c835d9f43fc30d328a8e2e388617a369 | [
"Apache-2.0"
] | null | null | null | lib/rucio/tests/test_transfer.py | justincc/rucio | 95d81403c835d9f43fc30d328a8e2e388617a369 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Radu Carpa <[email protected]>, 2021
import pytest
from rucio.common.exception import NoDistance
from rucio.core.distance import add_distance
from rucio.core.transfer import get_hops
def test_get_hops(rse_factory):
# Build the following topology:
# +------+ +------+ 10 +------+
# | | 40 | +-----------+ |
# | RSE0 | +--------+ RSE1 | | RSE2 +-------------+
# | | | | | +----+ | |
# +------+ | +------+ | +------+ | <missing_cost>
# | | |
# | | |
# | | |
# +------+ | +------+ 10 | +------+ +-+----+
# | +--+ | +------+ | | --20-> | |
# | RSE3 | --10-> | RSE4 | | RSE5 +-->--->--->+ RSE6 |
# | +-->--->--->+ +-----------+ | | |
# +----+-+ +------+ 10 +-+----+ +------+
# | |
# | 50 |
# +----------------------------------+
#
_, rse0_id = rse_factory.make_mock_rse()
_, rse1_id = rse_factory.make_mock_rse()
_, rse2_id = rse_factory.make_mock_rse()
_, rse3_id = rse_factory.make_mock_rse()
_, rse4_id = rse_factory.make_mock_rse()
_, rse5_id = rse_factory.make_mock_rse()
_, rse6_id = rse_factory.make_mock_rse()
all_rses = [rse0_id, rse1_id, rse2_id, rse3_id, rse4_id, rse5_id, rse6_id]
add_distance(rse1_id, rse3_id, ranking=40)
add_distance(rse1_id, rse2_id, ranking=10)
add_distance(rse2_id, rse1_id, ranking=10)
add_distance(rse2_id, rse4_id, ranking=10)
add_distance(rse3_id, rse1_id, ranking=40)
add_distance(rse3_id, rse4_id, ranking=10)
add_distance(rse3_id, rse5_id, ranking=50)
add_distance(rse4_id, rse2_id, ranking=10)
add_distance(rse4_id, rse5_id, ranking=10)
add_distance(rse5_id, rse3_id, ranking=50)
add_distance(rse5_id, rse4_id, ranking=10)
add_distance(rse5_id, rse6_id, ranking=20)
# There must be no paths between an isolated node and other nodes; be it with multipath enabled or disabled
with pytest.raises(NoDistance):
get_hops(source_rse_id=rse0_id, dest_rse_id=rse1_id)
with pytest.raises(NoDistance):
get_hops(source_rse_id=rse1_id, dest_rse_id=rse0_id)
with pytest.raises(NoDistance):
get_hops(source_rse_id=rse0_id, dest_rse_id=rse1_id, include_multihop=True, multihop_rses=all_rses)
with pytest.raises(NoDistance):
get_hops(source_rse_id=rse1_id, dest_rse_id=rse0_id, include_multihop=True, multihop_rses=all_rses)
# A single hop path must be found between two directly connected RSE
[hop] = get_hops(source_rse_id=rse1_id, dest_rse_id=rse2_id)
assert hop['source_rse_id'] == rse1_id
assert hop['dest_rse_id'] == rse2_id
# No path will be found if there is no direct connection and "include_multihop" is not set
with pytest.raises(NoDistance):
get_hops(source_rse_id=rse3_id, dest_rse_id=rse2_id)
# Multihop_rses argument empty (not set), no path will be computed
with pytest.raises(NoDistance):
get_hops(source_rse_id=rse3_id, dest_rse_id=rse2_id, include_multihop=True)
# The shortest multihop path will be computed
[hop1, hop2] = get_hops(source_rse_id=rse3_id, dest_rse_id=rse2_id, include_multihop=True, multihop_rses=all_rses)
assert hop1['source_rse_id'] == rse3_id
assert hop1['dest_rse_id'] == rse4_id
assert hop2['source_rse_id'] == rse4_id
assert hop2['dest_rse_id'] == rse2_id
# multihop_rses doesn't contain the RSE needed for the shortest path. Return a longer path
[hop1, hop2] = get_hops(source_rse_id=rse1_id, dest_rse_id=rse4_id, include_multihop=True, multihop_rses=[rse3_id])
assert hop1['source_rse_id'] == rse1_id
assert hop1['dest_rse_id'] == rse3_id
assert hop2['source_rse_id'] == rse3_id
assert hop2['dest_rse_id'] == rse4_id
# A direct connection is preferred over a multihop one with smaller cost
[hop] = get_hops(source_rse_id=rse3_id, dest_rse_id=rse5_id, include_multihop=True, multihop_rses=all_rses)
assert hop['source_rse_id'] == rse3_id
assert hop['dest_rse_id'] == rse5_id
# A link with cost only in one direction will not be used in the opposite direction
with pytest.raises(NoDistance):
get_hops(source_rse_id=rse6_id, dest_rse_id=rse5_id, include_multihop=True, multihop_rses=all_rses)
[hop1, hop2] = get_hops(source_rse_id=rse4_id, dest_rse_id=rse3_id, include_multihop=True, multihop_rses=all_rses)
assert hop1['source_rse_id'] == rse4_id
assert hop2['source_rse_id'] == rse5_id
assert hop2['dest_rse_id'] == rse3_id
# A longer path is preferred over a shorter one with high intermediate cost
[hop1, hop2, hop3] = get_hops(source_rse_id=rse3_id, dest_rse_id=rse6_id, include_multihop=True, multihop_rses=all_rses)
assert hop1['source_rse_id'] == rse3_id
assert hop2['source_rse_id'] == rse4_id
assert hop3['source_rse_id'] == rse5_id
assert hop3['dest_rse_id'] == rse6_id
# A link with no cost is ignored. Both for direct connection and multihop paths
[hop1, hop2, hop3] = get_hops(source_rse_id=rse2_id, dest_rse_id=rse6_id, include_multihop=True, multihop_rses=all_rses)
assert hop1['source_rse_id'] == rse2_id
assert hop2['source_rse_id'] == rse4_id
assert hop3['source_rse_id'] == rse5_id
assert hop3['dest_rse_id'] == rse6_id
[hop1, hop2, hop3, hop4] = get_hops(source_rse_id=rse1_id, dest_rse_id=rse6_id, include_multihop=True, multihop_rses=all_rses)
assert hop1['source_rse_id'] == rse1_id
assert hop2['source_rse_id'] == rse2_id
assert hop3['source_rse_id'] == rse4_id
assert hop4['source_rse_id'] == rse5_id
assert hop4['dest_rse_id'] == rse6_id
| 47.585714 | 130 | 0.633594 |
79435cb6cfdf8d91644de4d574219d43f2b75b8d | 3,182 | py | Python | ports/esp32/modules/menusys.py | oshwabadge2018/micropython | 1c1584fc37a13a3407da341dd443f6d76644c26d | [
"MIT"
] | 3 | 2018-10-02T02:13:11.000Z | 2018-10-08T20:49:56.000Z | ports/esp32/modules/menusys.py | acamilo/micropython | 533d3d0c56ad0e822cb923ef3ff86f09cd5df118 | [
"MIT"
] | 1 | 2018-08-29T18:16:22.000Z | 2018-08-29T18:16:22.000Z | ports/esp32/modules/menusys.py | acamilo/micropython | 533d3d0c56ad0e822cb923ef3ff86f09cd5df118 | [
"MIT"
] | 2 | 2018-08-29T02:55:32.000Z | 2018-09-23T04:44:21.000Z | import gxgde0213b1
import G_FreeSans24pt7b
import font12
import font16
import font20
import font24
import network
import ubinascii
import urandom
import machine
import time
import os
import imagedata
from machine import Pin, TouchPad
from ohsbadge import epd
from ohsbadge import fb
class Menu:
menuitems = []
cur_opt = None
menutitle = ""
def __init__(self,title):
self.menutitle= title
# Add an item to the menu
def addItem(self,text,function):
i = {'text':text,'function':function}
if self.cur_opt == None:
self.cur_opt = i
self.menuitems.append(i)
def handleKey(self,key):
f = self.cur_opt['function']
if key == "up":
self.move(-1)
elif key == "down":
self.move(1)
elif key == "right":
self.move(8)
elif key == "left":
self.move(-8)
elif key == "launch":
if f != None:
print("Launching %s"%f)
epd.init()
epd.clear_frame(fb)
epd.display_string_at(fb, 0, 0, "Launching", font16, gxgde0213b1.COLORED)
epd.display_string_at(fb, 0, 16, self.cur_opt['text']+" ..", font16, gxgde0213b1.COLORED)
epd.display_frame(fb)
time.sleep(1)
f(self.cur_opt['text'])
epd.clear_frame(fb)
epd.display_string_at(fb, 0, 0, "App Finished!", font16, gxgde0213b1.COLORED)
epd.display_string_at(fb, 0, 16, self.cur_opt['text']+" ..", font16, gxgde0213b1.COLORED)
epd.display_frame(fb)
time.sleep(1)
epd.clear_frame(fb)
epd.display_frame(fb)
epd.initPart()
else:
print("Could not launch '%s' no function attatched!"%self.cur_opt['text'])
def move(self,num):
m = self
try:
m.cur_opt = m.menuitems[m.menuitems.index(m.cur_opt)+num]
except IndexError:
print("invalid menu index")
def drawMenu(self):
epd.clear_frame(fb)
ypos = 20
xpos = 15
ydelta = 12
xdelta = 125
epd.display_string_at(fb, 0, 0, self.menutitle, font16, gxgde0213b1.COLORED)
for i in self.menuitems:
epd.display_string_at(fb, xpos, ypos, i['text'], font12, gxgde0213b1.COLORED)
if i == self.cur_opt:
epd.display_string_at(fb, xpos-14, ypos, "->", font12, gxgde0213b1.COLORED)
ypos +=ydelta
if ypos>(9*12):
ypos = 20
xpos += xdelta
epd.display_frame(fb)
def menuloop(self,up,down,left,right,run,exit):
m = self
epd.clear_frame(fb)
epd.display_frame(fb)
epd.initPart()
m.drawMenu()
touchdelay = 0.01
touchthres = 800
while True:
if up.read()<touchthres:
m.handleKey("up")
while up.read()<touchthres:
time.sleep(touchdelay)
m.drawMenu()
if down.read()<touchthres:
m.handleKey("down")
while down.read()<touchthres:
time.sleep(touchdelay)
m.drawMenu()
if left.read()<touchthres:
m.handleKey("left")
while left.read()<touchthres:
time.sleep(touchdelay)
m.drawMenu()
if right.read()<touchthres:
m.handleKey("right")
while right.read()<touchthres:
time.sleep(touchdelay)
m.drawMenu()
if run.read()<touchthres:
m.handleKey("launch")
while run.read()<touchthres:
time.sleep(touchdelay)
m.drawMenu()
if exit.read()<touchthres:
m.handleKey("right")
while exit.read()<touchthres:
time.sleep(touchdelay)
return
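# Hedged usage sketch (not part of the original module): a badge app could build and run a
# menu roughly as below. The touch pin numbers and the demo_app callback are illustrative
# assumptions only; menuloop() just needs six objects exposing read(), such as
# machine.TouchPad instances.
#
#   def demo_app(name):
#       print("running", name)
#
#   menu = Menu("Main Menu")
#   menu.addItem("Demo", demo_app)
#   up, down, left, right, run, exit_pad = [TouchPad(Pin(n)) for n in (32, 33, 27, 14, 12, 13)]
#   menu.menuloop(up, down, left, right, run, exit_pad)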
| 23.397059 | 93 | 0.662476 |
7943610488028d1fa1fadb466b7cbc4aabce4600 | 1,863 | py | Python | nicos_sinq/amor/gui/panels/cmdbuilder.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_sinq/amor/gui/panels/cmdbuilder.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_sinq/amor/gui/panels/cmdbuilder.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | # -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Michele Brambilla <[email protected]>
#
# *****************************************************************************
from nicos.clients.flowui.panels import get_icon
from nicos.clients.flowui.panels.cmdbuilder import \
CommandPanel as DefaultCommandPanel
from nicos.guisupport.qt import pyqtSlot
from nicos_sinq.amor.gui import uipath
class CommandPanel(DefaultCommandPanel):
ui = f'{uipath}/panels/ui_files/cmdbuilder.ui'
def set_icons(self):
DefaultCommandPanel.set_icons(self)
self.pauseBtn.setIcon(get_icon('stop-24px.svg'))
self.emergencyStopBtn.setIcon(
get_icon('emergency_stop_cross_red-24px.svg')
)
self.pause = False
@pyqtSlot()
def on_pauseBtn_clicked(self):
self.client.tell_action('stop')
@pyqtSlot()
def on_emergencyStopBtn_clicked(self):
self.client.tell_action('emergency')
| 37.26 | 79 | 0.671498 |
794361c9aced6121d0988e77e51b1feaffc263b4 | 614 | py | Python | YouTubeStats/YTStats/migrations/0001_initial.py | Blizek/youtube-stats | ed2971325a702509c9ec4e0d82aae5c49d28ba4c | [
"MIT"
] | 2 | 2021-08-02T11:14:35.000Z | 2021-08-03T13:39:51.000Z | YouTubeStats/YTStats/migrations/0001_initial.py | Blizek/youtube-stats | ed2971325a702509c9ec4e0d82aae5c49d28ba4c | [
"MIT"
] | null | null | null | YouTubeStats/YTStats/migrations/0001_initial.py | Blizek/youtube-stats | ed2971325a702509c9ec4e0d82aae5c49d28ba4c | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-08-04 20:10
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Channel',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('yt_id', models.CharField(max_length=24)),
('name', models.CharField(max_length=60)),
('custom_url', models.CharField(max_length=60)),
],
),
]
| 25.583333 | 117 | 0.571661 |
794362d66c2ed7cf860d83fe358cda2d62244467 | 1,865 | py | Python | examples/dfp/v201805/publisher_query_language_service/get_all_browsers.py | christineyi3898/googleads-python-lib | cd707dc897b93cf1bbb19355f7424e7834e7fb55 | [
"Apache-2.0"
] | 1 | 2019-10-21T04:10:22.000Z | 2019-10-21T04:10:22.000Z | examples/dfp/v201805/publisher_query_language_service/get_all_browsers.py | christineyi3898/googleads-python-lib | cd707dc897b93cf1bbb19355f7424e7834e7fb55 | [
"Apache-2.0"
] | null | null | null | examples/dfp/v201805/publisher_query_language_service/get_all_browsers.py | christineyi3898/googleads-python-lib | cd707dc897b93cf1bbb19355f7424e7834e7fb55 | [
"Apache-2.0"
] | 1 | 2019-10-21T04:10:51.000Z | 2019-10-21T04:10:51.000Z | #!/usr/bin/env python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all browsers available to target from the Browser table.
Other tables include 'Bandwidth_Group', 'Browser_Language',
'Device_Capability', 'Operating_System', etc...
A full list of available criteria tables can be found at
https://developers.google.com/doubleclick-publishers/docs/reference/v201708/PublisherQueryLanguageService
"""
import tempfile
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize a report downloader.
report_downloader = client.GetDataDownloader(version='v201805')
with tempfile.NamedTemporaryFile(
prefix='browser_data_',
suffix='.csv', mode='w', delete=False) as browser_data_file:
browser_pql_query = ('SELECT Id, BrowserName, MajorVersion, MinorVersion '
'FROM Browser '
'ORDER BY BrowserName ASC')
# Downloads the response from PQL select statement to the specified file
report_downloader.DownloadPqlResultToCsv(
browser_pql_query, browser_data_file)
print 'Saved browser data to... %s' % browser_data_file.name
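# Hedged variant (not in the original example): the module docstring notes that other
# criteria tables such as 'Operating_System' can be queried the same way. The sketch below
# shows the shape of such a query; the column names are assumptions and should be checked
# against the PQL reference before use.
def download_operating_systems(client, output_file):
  """Sketch: dump the Operating_System PQL table to the given writable file object."""
  report_downloader = client.GetDataDownloader(version='v201805')
  os_pql_query = ('SELECT Id, OperatingSystemName '
                  'FROM Operating_System '
                  'ORDER BY OperatingSystemName ASC')
  report_downloader.DownloadPqlResultToCsv(os_pql_query, output_file)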
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| 33.303571 | 105 | 0.742627 |
7943636d8c0b15dc7105c752a7cc15f38aa1b75e | 13,751 | py | Python | dynamicgem/embedding/dynAERNN.py | Sujit-O/dyngem | a879bf362d1e9409faa4e1186c345337ad6d0189 | [
"MIT"
] | null | null | null | dynamicgem/embedding/dynAERNN.py | Sujit-O/dyngem | a879bf362d1e9409faa4e1186c345337ad6d0189 | [
"MIT"
] | null | null | null | dynamicgem/embedding/dynAERNN.py | Sujit-O/dyngem | a879bf362d1e9409faa4e1186c345337ad6d0189 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Input, Subtract
from keras.models import Model, model_from_json
from keras.optimizers import SGD, Adam
from keras.callbacks import TensorBoard, EarlyStopping
from keras import backend as KBack
import tensorflow as tf
from time import time
from dynamicgem.embedding.dynamic_graph_embedding import DynamicGraphEmbedding
from dynamicgem.utils.dnn_utils import *
class DynAERNN(DynamicGraphEmbedding):
"""Dynamic AutoEncoder with Recurrent Neural Network
    dyngraph2vecAERNN (DynAERNN) is a dynamic graph embedding algorithm
    that combines an autoencoder with a recurrent neural network
    to embed temporally evolving graphs.
Args:
d (int): dimension of the embedding
beta (float): penalty parameter in matrix B of 2nd order objective
n_prev_graphs (int): Lookback (number of previous graphs to be considered) for the dynamic graph embedding
nu1 (float): L1-reg hyperparameter
nu2 (float): L2-reg hyperparameter
K (float): number of hidden layers in encoder/decoder
rho (float): bounding ratio for number of units in consecutive layers (< 1)
        n_aeunits (list): List of embedding dimensions for the autoencoder layers
        n_lstmunits (list): List of embedding dimensions for the LSTM layers
n_iter (int): number of sgd iterations for first embedding (const)
xeta (float): sgd step size parameter
n_batch (int): minibatch size for SGD
modelfile (str): Files containing previous encoder and decoder models
weightfile (str): Files containing previous encoder and decoder weights
Examples:
>>> from dynamicgem.embedding.dynAERNN import DynAERNN
>>> from dynamicgem.graph_generation import dynamic_SBM_graph
>>> node_num = 1000
>>> community_num = 2
>>> node_change_num = 10
>>> length =5
>>> dynamic_sbm_series = dynamic_SBM_graph.get_community_diminish_series_v2(node_num,
community_num,
length,
1,
node_change_num)
>>> embedding = DynAERNN(d=dim_emb,
beta=5,
n_prev_graphs=lookback,
nu1=1e-6,
nu2=1e-6,
n_units=[500, 300, ],
rho=0.3,
n_iter=epochs,
xeta=args.learningrate,
n_batch=args.batch,
modelfile=['./intermediate/enc_model.json', './intermediate/dec_model.json'],
weightfile=['./intermediate/enc_weights.hdf5', './intermediate/dec_weights.hdf5'],
savefilesuffix="testing")
>>> graphs = [g[0] for g in dynamic_sbm_series]
>>> embs = []
>>> for temp_var in range(length):
>>> emb, _ = embedding.learn_embeddings(graphs[temp_var])
>>> embs.append(emb)
"""
def __init__(self, d, *hyper_dict, **kwargs):
self._d = d
hyper_params = {
'method_name': 'dynAERNN',
'actfn': 'relu',
'modelfile': None,
'weightfile': None,
'savefilesuffix': None
}
hyper_params.update(kwargs)
for key in hyper_params.keys():
self.__setattr__('_%s' % key, hyper_params[key])
for dictionary in hyper_dict:
for key in dictionary:
self.__setattr__('_%s' % key, dictionary[key])
def get_method_name(self):
"""Function to return the method name.
Returns:
String: Name of the method.
"""
return self._method_name
def get_method_summary(self):
"""Function to return the summary of the algorithm.
Returns:
String: Method summary
"""
return '%s_%d' % (self._method_name, self._d)
def learn_embeddings(self, graphs):
"""Learns the embedding of the nodes.
Attributes:
graph (Object): Networkx Graph Object
Returns:
List: Node embeddings and time taken by the algorithm
"""
self._node_num = graphs[0].number_of_nodes()
t1 = time()
###################################
# TensorFlow wizardry
config = tf.ConfigProto()
# Don't pre-allocate memory; allocate as-needed
config.gpu_options.allow_growth = True
# Only allow a total of half the GPU memory to be allocated
config.gpu_options.per_process_gpu_memory_fraction = 0.2
# Create a session to pass the above configuration
# sess=tf.Session(config=config)
# Create a tensorflow debugger wrapper
# sess = tf_debug.LocalCLIDebugWrapperSession(sess)
# Create a session with the above options specified.
KBack.tensorflow_backend.set_session(tf.Session(config=config))
# KBack.tensorflow_backend.set_session(sess)
###################################
# Generate encoder, decoder and autoencoder
self._num_iter = self._n_iter
self._aeencoders = [None] * self._n_prev_graphs
for i in range(self._n_prev_graphs):
self._aeencoders[i] = get_encoder_dynaernn(
self._node_num,
self._d,
self._n_aeunits,
self._nu1,
self._nu2,
self._actfn
)
self._aeencoders[i].name = "ae_encoder_%d" % i
self._lstmencoder = get_lstm_encoder(
self._d,
self._n_prev_graphs,
self._d,
self._n_lstmunits,
self._actfn,
None,
None,
None,
False
)
self._lstmencoder.name = "lstm_encoder"
self._aedecoder = get_decoder_dynaernn(
self._node_num,
self._d,
self._n_aeunits,
self._nu1,
self._nu2,
self._actfn
)
self._aedecoder.name = "decoder"
self._autoencoder = get_aelstm_autoencoder(
self._aeencoders,
self._lstmencoder,
self._aedecoder
)
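        # The combined model wired up above chains one dense encoder per lookback step,
        # feeds their d-dimensional outputs through the LSTM encoder, and decodes the
        # final LSTM state back to an adjacency row: its input is the concatenation of
        # n_prev_graphs adjacency rows and its outputs are [reconstruction, embedding].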
# Initialize self._model
# Input
x_in = Input(
shape=(self._n_prev_graphs * self._node_num,),
name='x_in'
)
x_pred = Input(
shape=(self._node_num,),
name='x_pred'
)
[x_hat, y] = self._autoencoder(x_in)
# Outputs
x_diff = Subtract()([x_hat, x_pred])
# Objectives
def weighted_mse_x(y_true, y_pred):
''' Hack: This fn doesn't accept additional arguments.
We use y_true to pass them.
y_pred: Contains x_hat - x_pred
y_true: Contains b
'''
return KBack.sum(
KBack.square(y_pred * y_true[:, 0:self._node_num]),
axis=-1
)
# Model
self._model = Model(input=[x_in, x_pred], output=x_diff)
sgd = SGD(lr=self._xeta, decay=1e-5, momentum=0.99, nesterov=True)
adam = Adam(lr=self._xeta, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
# self._model.compile(optimizer=sgd, loss=weighted_mse_x)
self._model.compile(optimizer=adam, loss=weighted_mse_x)
# tensorboard = TensorBoard(log_dir="logs/{}".format(time()))
early_stop = EarlyStopping(monitor='val_loss', patience=10, verbose=1)
history = self._model.fit_generator(
generator=batch_generator_dynaernn(
graphs,
self._beta,
self._n_batch,
self._n_prev_graphs,
True
),
nb_epoch=self._num_iter,
samples_per_epoch=(
graphs[0].number_of_nodes() * self._n_prev_graphs
) // self._n_batch,
verbose=1
# callbacks=[tensorboard]
)
loss = history.history['loss']
# Get embedding for all points
if loss[0] == np.inf or np.isnan(loss[0]):
print('Model diverged. Assigning random embeddings')
self._Y = np.random.randn(self._node_num, self._d)
else:
self._Y, self._next_adj = model_batch_predictor_dynaernn(
self._autoencoder,
graphs[len(graphs) - self._n_prev_graphs:],
self._n_batch
)
t2 = time()
# Save the autoencoder and its weights
if self._weightfile is not None:
pass
# saveweights(self._encoder, self._weightfile[0])
# saveweights(self._decoder, self._weightfile[1])
if self._modelfile is not None:
pass
# savemodel(self._encoder, self._modelfile[0])
# savemodel(self._decoder, self._modelfile[1])
if self._savefilesuffix is not None:
pass
# saveweights(self._encoder,
# 'encoder_weights_' + self._savefilesuffix + '.hdf5')
# saveweights(self._decoder,
# 'decoder_weights_' + self._savefilesuffix + '.hdf5')
# savemodel(self._encoder,
# 'encoder_model_' + self._savefilesuffix + '.json')
# savemodel(self._decoder,
# 'decoder_model_' + self._savefilesuffix + '.json')
# # Save the embedding
# np.savetxt('embedding_' + self._savefilesuffix + '.txt',
# self._Y)
# np.savetxt('next_pred_' + self._savefilesuffix + '.txt',
# self._next_adj)
# sess.close()
return self._Y, (t2 - t1)
def get_embeddings(self):
"""Function to return the embeddings"""
return self._Y
def get_edge_weight(self, i, j, embed=None, filesuffix=None):
"""Function to get edge weight.
Attributes:
i (int): source node for the edge.
j (int): target node for the edge.
embed (Matrix): Embedding values of all the nodes.
filesuffix (str): File suffix to be used to load the embedding.
Returns:
Float: Weight of the given edge.
"""
if embed is None:
if filesuffix is None:
embed = self._Y
else:
embed = np.loadtxt('embedding_' + filesuffix + '.txt')
if i == j:
return 0
else:
S_hat = self.get_reconst_from_embed(embed[(i, j), :], filesuffix)
return (S_hat[i, j] + S_hat[j, i]) / 2
def get_reconstructed_adj(self, embed=None, node_l=None, filesuffix=None):
"""Function to reconstruct the adjacency list for the given node.
Attributes:
node_l (int): node for which the adjacency list will be created.
embed (Matrix): Embedding values of all the nodes.
filesuffix (str): File suffix to be used to load the embedding.
Returns:
List : Adjacency list of the given node.
"""
if embed is None:
if filesuffix is None:
embed = self._Y
else:
embed = np.loadtxt('embedding_' + filesuffix + '.txt')
S_hat = self.get_reconst_from_embed(embed, filesuffix)
return graphify(S_hat)
def get_reconst_from_embed(self, embed, filesuffix=None):
"""Function to reconstruct the graph from the embedding.
Attributes:
node_l (int): node for which the adjacency list will be created.
embed (Matrix): Embedding values of all the nodes.
filesuffix (str): File suffix to be used to load the embedding.
Returns:
            List: Reconstructed graph for the given nodes.
"""
if filesuffix is None:
return self._decoder.predict(embed, batch_size=self._n_batch)
else:
try:
decoder = model_from_json(open('./intermediate/decoder_model_' + filesuffix + '.json').read())
except:
print('Error reading file: {0}. Cannot load previous model'.format(
'decoder_model_' + filesuffix + '.json'))
exit()
try:
decoder.load_weights('./intermediate/decoder_weights_' + filesuffix + '.hdf5')
except:
print('Error reading file: {0}. Cannot load previous weights'.format(
'decoder_weights_' + filesuffix + '.hdf5'))
exit()
return decoder.predict(embed, batch_size=self._n_batch)
def predict_next_adj(self, node_l=None):
"""Function to predict the next adjacency for the given node.
Attributes:
node_l (int): node for which the adjacency list will be created.
Returns:
            List: Reconstructed adjacency list.
"""
if node_l is not None:
return self._next_adj[node_l]
else:
return self._next_adj
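# Hedged usage sketch (not part of the original module, variable names are illustrative):
# after learn_embeddings() has been run on a window of snapshots, the learned embedding and
# the predicted next adjacency matrix can be pulled from the same object:
#
#   embedding = DynAERNN(d=dim_emb, beta=5, n_prev_graphs=lookback, nu1=1e-6, nu2=1e-6, ...)
#   emb, t = embedding.learn_embeddings(graphs)
#   next_adj = embedding.predict_next_adj()   # prediction for the snapshot after the window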
| 38.844633 | 114 | 0.547887 |
7943647a8cccac44f557272ada20491bcf3fab88 | 5,249 | py | Python | tests/unitary/LiquidityGaugeV3_1/test_set_rewards_deposit.py | caterpillar1219/hundred-dao | de95e4ade1b50ec64b59e44a53a324a12c4e29df | [
"MIT"
] | null | null | null | tests/unitary/LiquidityGaugeV3_1/test_set_rewards_deposit.py | caterpillar1219/hundred-dao | de95e4ade1b50ec64b59e44a53a324a12c4e29df | [
"MIT"
] | null | null | null | tests/unitary/LiquidityGaugeV3_1/test_set_rewards_deposit.py | caterpillar1219/hundred-dao | de95e4ade1b50ec64b59e44a53a324a12c4e29df | [
"MIT"
] | null | null | null | import brownie
import pytest
from brownie import ZERO_ADDRESS
from tests.conftest import approx
REWARD = 10 ** 20
WEEK = 7 * 86400
LP_AMOUNT = 10 ** 18
@pytest.fixture(scope="module")
def reward_contract_2(CurveRewards, mock_lp_token, accounts, coin_a):
contract = CurveRewards.deploy(mock_lp_token, coin_a, {"from": accounts[0]})
contract.setRewardDistribution(accounts[0], {"from": accounts[0]})
yield contract
@pytest.fixture(scope="module", autouse=True)
def initial_setup(gauge_v3_1, mock_lp_token, alice, reward_contract, coin_reward):
mock_lp_token.approve(gauge_v3_1, 2 ** 256 - 1, {"from": alice})
gauge_v3_1.deposit(1, {"from": alice})
sigs = [
reward_contract.stake.signature[2:],
reward_contract.withdraw.signature[2:],
reward_contract.getReward.signature[2:],
]
sigs = f"0x{sigs[0]}{sigs[1]}{sigs[2]}{'00' * 20}"
gauge_v3_1.set_rewards(reward_contract, sigs, [coin_reward] + [ZERO_ADDRESS] * 7, {"from": alice})
gauge_v3_1.withdraw(1, {"from": alice})
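# Note on the sigs packing used above and in the tests below: the value passed to
# set_rewards() is a 32-byte hex string holding the 4-byte selectors of the reward
# contract's stake, withdraw and getReward functions back to back (3 * 4 bytes),
# right-padded with 20 zero bytes, which is what the f"0x{...}{'00' * 20}" construction
# produces. Several tests below substitute '00' * 4 for the stake/withdraw selectors;
# judging by the balance assertions, that keeps the LP tokens in the gauge instead of
# forwarding them to the reward contract.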
def test_unset_no_totalsupply(alice, coin_reward, reward_contract, gauge_v3_1, mock_lp_token):
gauge_v3_1.set_rewards(ZERO_ADDRESS, "0x00", [coin_reward] + [ZERO_ADDRESS] * 7, {"from": alice})
assert mock_lp_token.allowance(gauge_v3_1, reward_contract) == 0
assert gauge_v3_1.reward_contract() == ZERO_ADDRESS
assert [gauge_v3_1.reward_tokens(i) for i in range(8)] == [coin_reward] + [ZERO_ADDRESS] * 7
def test_unset_with_totalsupply(alice, coin_reward, reward_contract, gauge_v3_1, mock_lp_token):
gauge_v3_1.deposit(LP_AMOUNT, {"from": alice})
gauge_v3_1.set_rewards(ZERO_ADDRESS, "0x00", [coin_reward] + [ZERO_ADDRESS] * 7, {"from": alice})
assert mock_lp_token.allowance(gauge_v3_1, reward_contract) == 0
assert mock_lp_token.balanceOf(gauge_v3_1) == LP_AMOUNT
assert gauge_v3_1.reward_contract() == ZERO_ADDRESS
assert [gauge_v3_1.reward_tokens(i) for i in range(8)] == [coin_reward] + [ZERO_ADDRESS] * 7
def test_unsetting_claims(alice, chain, coin_reward, reward_contract, gauge_v3_1):
gauge_v3_1.deposit(LP_AMOUNT, {"from": alice})
coin_reward._mint_for_testing(reward_contract, REWARD)
reward_contract.notifyRewardAmount(REWARD, {"from": alice})
chain.sleep(WEEK)
gauge_v3_1.set_rewards(ZERO_ADDRESS, "0x00", [coin_reward] + [ZERO_ADDRESS] * 7, {"from": alice})
reward = coin_reward.balanceOf(gauge_v3_1)
assert reward <= REWARD
assert approx(REWARD, reward, 1.001 / WEEK)
def test_modify_no_deposit_no_ts(reward_contract_2, alice, gauge_v3_1, coin_a, coin_reward):
sigs = f"0x{'00' * 4}{'00' * 4}{reward_contract_2.getReward.signature[2:]}{'00' * 20}"
gauge_v3_1.set_rewards(
reward_contract_2, sigs, [coin_reward, coin_a] + [ZERO_ADDRESS] * 6, {"from": alice}
)
assert gauge_v3_1.reward_contract() == reward_contract_2
assert [gauge_v3_1.reward_tokens(i) for i in range(3)] == [coin_reward, coin_a, ZERO_ADDRESS]
def test_modify_no_deposit(
reward_contract, reward_contract_2, alice, gauge_v3_1, chain, coin_a, coin_reward, mock_lp_token
):
gauge_v3_1.deposit(LP_AMOUNT, {"from": alice})
coin_reward._mint_for_testing(reward_contract, REWARD)
reward_contract.notifyRewardAmount(REWARD, {"from": alice})
chain.sleep(86400)
sigs = f"0x{'00' * 4}{'00' * 4}{reward_contract_2.getReward.signature[2:]}{'00' * 20}"
gauge_v3_1.set_rewards(
reward_contract_2, sigs, [coin_reward, coin_a] + [ZERO_ADDRESS] * 6, {"from": alice}
)
assert mock_lp_token.balanceOf(gauge_v3_1) == LP_AMOUNT
assert gauge_v3_1.reward_contract() == reward_contract_2
assert [gauge_v3_1.reward_tokens(i) for i in range(3)] == [coin_reward, coin_a, ZERO_ADDRESS]
assert coin_reward.balanceOf(gauge_v3_1) > 0
def test_modify_deposit(
reward_contract, reward_contract_2, alice, gauge_v3_1, chain, coin_a, coin_reward, mock_lp_token
):
gauge_v3_1.deposit(LP_AMOUNT, {"from": alice})
coin_reward._mint_for_testing(reward_contract, REWARD)
reward_contract.notifyRewardAmount(REWARD, {"from": alice})
chain.sleep(86400)
sigs = [
reward_contract.stake.signature[2:],
reward_contract.withdraw.signature[2:],
reward_contract.getReward.signature[2:],
]
sigs = f"0x{sigs[0]}{sigs[1]}{sigs[2]}{'00' * 20}"
gauge_v3_1.set_rewards(
reward_contract_2, sigs, [coin_reward, coin_a] + [ZERO_ADDRESS] * 6, {"from": alice}
)
assert mock_lp_token.balanceOf(reward_contract_2) == LP_AMOUNT
assert gauge_v3_1.reward_contract() == reward_contract_2
assert [gauge_v3_1.reward_tokens(i) for i in range(3)] == [coin_reward, coin_a, ZERO_ADDRESS]
assert coin_reward.balanceOf(gauge_v3_1) > 0
def test_modify_deposit_no_ts(reward_contract_2, alice, gauge_v3_1, coin_a, coin_reward):
sigs = [
reward_contract_2.stake.signature[2:],
reward_contract_2.withdraw.signature[2:],
reward_contract_2.getReward.signature[2:],
]
sigs = f"0x{sigs[0]}{sigs[1]}{sigs[2]}{'00' * 20}"
with brownie.reverts("dev: zero total supply"):
gauge_v3_1.set_rewards(
reward_contract_2, sigs, [coin_reward, coin_a] + [ZERO_ADDRESS] * 6, {"from": alice}
)
| 39.765152 | 102 | 0.710802 |