ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M)
---|---|---|
py | 1a318fed43e20f30b34043d7559eb04c92481caa | import logging
from typing import Any, MutableMapping, Optional
from cloudformation_cli_python_lib import (
Action,
HandlerErrorCode,
OperationStatus,
ProgressEvent,
Resource,
SessionProxy,
)
from datadog_api_client.v1 import ApiException
from datadog_api_client.v1.api.monitors_api import MonitorsApi
from datadog_api_client.v1.model.monitor import Monitor as ApiMonitor
from datadog_api_client.v1.model.monitor_options import MonitorOptions as ApiMonitorOptions
from datadog_api_client.v1.model.monitor_threshold_window_options import \
MonitorThresholdWindowOptions as ApiMonitorThresholdWindows
from datadog_api_client.v1.model.monitor_thresholds import MonitorThresholds as ApiMonitorThresholds
from datadog_api_client.v1.model.monitor_type import MonitorType as ApiMonitorType
from datadog_api_client.v1.model.monitor_update_request import MonitorUpdateRequest as ApiMonitorUpdateRequest
from datadog_cloudformation_common.api_clients import v1_client
from datadog_cloudformation_common.utils import http_to_handler_error_code
from .models import (
Creator,
MonitorOptions,
MonitorThresholdWindows,
MonitorThresholds,
ResourceHandlerRequest,
ResourceModel,
TypeConfigurationModel,
)
from .version import __version__
# Use this logger to forward log messages to CloudWatch Logs.
LOG = logging.getLogger(__name__)
TYPE_NAME = "Datadog::Monitors::Monitor"
TELEMETRY_TYPE_NAME = "monitors-monitor"
resource = Resource(TYPE_NAME, ResourceModel, TypeConfigurationModel)
test_entrypoint = resource.test_entrypoint
@resource.handler(Action.READ)
def read_handler(
session: Optional[SessionProxy],
request: ResourceHandlerRequest,
callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
LOG.info("Starting %s Read Handler", TYPE_NAME)
model = request.desiredResourceState
type_configuration = request.typeConfiguration
with v1_client(
type_configuration.DatadogCredentials.ApiKey,
type_configuration.DatadogCredentials.ApplicationKey,
type_configuration.DatadogCredentials.ApiURL,
TELEMETRY_TYPE_NAME,
__version__,
) as api_client:
api_instance = MonitorsApi(api_client)
monitor_id = model.Id
if monitor_id is None:
return ProgressEvent(
status=OperationStatus.FAILED,
resourceModel=model,
message=f"Error getting monitor: monitor does not exist",
errorCode=HandlerErrorCode.NotFound,
)
try:
monitor = api_instance.get_monitor(monitor_id)
except ApiException as e:
LOG.exception("Exception when calling MonitorsApi->get_monitor: %s\n", e)
return ProgressEvent(
status=OperationStatus.FAILED,
resourceModel=model,
message=f"Error getting monitor: {e}",
errorCode=http_to_handler_error_code(e.status),
)
model.Created = monitor.created.isoformat()
model.Modified = monitor.modified.isoformat()
model.Message = monitor.message
model.Name = monitor.name
model.Tags = monitor.tags
model.Priority = monitor.priority
model.Query = monitor.query
model.Multi = monitor.multi
if monitor.deleted:
model.Deleted = monitor.deleted.isoformat()
if not (
(model.Type == "query alert" and monitor.type.value == "metric alert") or
(model.Type == "metric alert" and monitor.type.value == "query alert")
):
# metric alert and query alert are interchangeable, so don't update from one to the other
model.Type = monitor.type.value
if monitor.creator:
model.Creator = Creator(Name=monitor.creator.name, Email=monitor.creator.email, Handle=monitor.creator.handle)
# Add hasattr checks for options since not all of them are applicable to all monitor types, so some attributes
# might not always be present
options = monitor.options if hasattr(monitor, "options") else None
if options:
model.Options = MonitorOptions(
EnableLogsSample=options.enable_logs_sample if hasattr(options, "enable_logs_sample") else None,
EscalationMessage=options.escalation_message if hasattr(options, "escalation_message") else None,
EvaluationDelay=options.evaluation_delay if hasattr(options, "evaluation_delay") else None,
IncludeTags=options.include_tags if hasattr(options, "include_tags") else None,
Locked=options.locked if hasattr(options, "locked") else None,
MinLocationFailed=options.min_location_failed if hasattr(options, "min_location_failed") else None,
NewHostDelay=options.new_host_delay if hasattr(options, "new_host_delay") else None,
NoDataTimeframe=options.no_data_timeframe if hasattr(options, "no_data_timeframe") else None,
NotifyAudit=options.notify_audit if hasattr(options, "notify_audit") else None,
NotifyNoData=options.notify_no_data if hasattr(options, "notify_no_data") else None,
RenotifyInterval=options.renotify_interval if hasattr(options, "renotify_interval") else None,
RequireFullWindow=options.require_full_window if hasattr(options, "require_full_window") else None,
SyntheticsCheckID=options.synthetics_check_id if hasattr(options, "synthetics_check_id") else None,
Thresholds=None,
ThresholdWindows=None,
TimeoutH=options.timeout_h if hasattr(options, "timeout_h") else None,
)
thresholds = options.thresholds if hasattr(options, "thresholds") else None
if thresholds:
model.Options.Thresholds = MonitorThresholds(
Critical=thresholds.critical if hasattr(thresholds, "critical") else None,
CriticalRecovery=thresholds.critical_recovery if hasattr(thresholds, "critical_recovery") else None,
Warning=thresholds.warning if hasattr(thresholds, "warning") else None,
WarningRecovery=thresholds.warning_recovery if hasattr(thresholds, "warning_recovery") else None,
OK=thresholds.ok if hasattr(thresholds, "ok") else None,
)
tw = options.threshold_windows if hasattr(options, "threshold_windows") else None
if tw:
model.Options.ThresholdWindows = MonitorThresholdWindows(
TriggerWindow=tw.trigger_window if hasattr(tw, "trigger_window") else None,
RecoveryWindow=tw.recovery_window if hasattr(tw, "recovery_window") else None,
)
model.Id = monitor.id
return ProgressEvent(
status=OperationStatus.SUCCESS,
resourceModel=model,
)
@resource.handler(Action.UPDATE)
def update_handler(
session: Optional[SessionProxy],
request: ResourceHandlerRequest,
callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
LOG.info("Starting %s Update Handler", TYPE_NAME)
model = request.desiredResourceState
type_configuration = request.typeConfiguration
monitor = ApiMonitorUpdateRequest()
monitor.query = model.Query
monitor.type = ApiMonitorType(model.Type)
if model.Message is not None:
monitor.message = model.Message
if model.Name is not None:
monitor.name = model.Name
if model.Tags is not None:
monitor.tags = model.Tags
if model.Priority is not None:
monitor.priority = model.Priority
options = build_monitor_options_from_model(model)
if options:
monitor.options = options
with v1_client(
type_configuration.DatadogCredentials.ApiKey,
type_configuration.DatadogCredentials.ApplicationKey,
type_configuration.DatadogCredentials.ApiURL,
TELEMETRY_TYPE_NAME,
__version__,
) as api_client:
api_instance = MonitorsApi(api_client)
try:
api_instance.update_monitor(model.Id, monitor)
except ApiException as e:
LOG.exception("Exception when calling MonitorsApi->update_monitor: %s\n", e)
return ProgressEvent(
status=OperationStatus.FAILED,
resourceModel=model,
message=f"Error updating monitor: {e}",
errorCode=http_to_handler_error_code(e.status),
)
return read_handler(session, request, callback_context)
@resource.handler(Action.DELETE)
def delete_handler(
session: Optional[SessionProxy],
request: ResourceHandlerRequest,
callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
LOG.info("Starting %s Delete Handler", TYPE_NAME)
model = request.desiredResourceState
type_configuration = request.typeConfiguration
with v1_client(
type_configuration.DatadogCredentials.ApiKey,
type_configuration.DatadogCredentials.ApplicationKey,
type_configuration.DatadogCredentials.ApiURL,
TELEMETRY_TYPE_NAME,
__version__,
) as api_client:
api_instance = MonitorsApi(api_client)
try:
api_instance.delete_monitor(model.Id)
except ApiException as e:
LOG.exception("Exception when calling MonitorsApi->delete_monitor: %s\n", e)
return ProgressEvent(
status=OperationStatus.FAILED,
resourceModel=model,
message=f"Error deleting monitor: {e}",
errorCode=http_to_handler_error_code(e.status),
)
return ProgressEvent(
status=OperationStatus.SUCCESS,
resourceModel=None,
)
@resource.handler(Action.CREATE)
def create_handler(
session: Optional[SessionProxy],
request: ResourceHandlerRequest,
callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
LOG.info("Starting %s Create Handler", TYPE_NAME)
model = request.desiredResourceState
type_configuration = request.typeConfiguration
monitor = ApiMonitor(model.Query, ApiMonitorType(model.Type))
if model.Message is not None:
monitor.message = model.Message
if model.Name is not None:
monitor.name = model.Name
if model.Tags is not None:
monitor.tags = model.Tags
if model.Priority is not None:
monitor.priority = model.Priority
options = build_monitor_options_from_model(model)
if options:
monitor.options = options
with v1_client(
type_configuration.DatadogCredentials.ApiKey,
type_configuration.DatadogCredentials.ApplicationKey,
type_configuration.DatadogCredentials.ApiURL,
TELEMETRY_TYPE_NAME,
__version__,
) as api_client:
api_instance = MonitorsApi(api_client)
try:
monitor_resp = api_instance.create_monitor(monitor)
except ApiException as e:
LOG.exception("Exception when calling MonitorsApi->create_monitor: %s\n", e)
return ProgressEvent(
status=OperationStatus.FAILED,
resourceModel=model,
message=f"Error creating monitor: {e}",
errorCode=http_to_handler_error_code(e.status),
)
model.Id = monitor_resp.id
return read_handler(session, request, callback_context)
def build_monitor_options_from_model(model: ResourceModel) -> Optional[ApiMonitorOptions]:
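# Translate the resource model's Options block into a Datadog API options object;
# nullable fields are copied directly, non-nullable ones only when they are set.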
options = None
if model.Options:
options = ApiMonitorOptions()
# Nullable attributes
options.evaluation_delay = model.Options.EvaluationDelay
options.min_location_failed = model.Options.MinLocationFailed
options.new_host_delay = model.Options.NewHostDelay
options.no_data_timeframe = model.Options.NoDataTimeframe
options.synthetics_check_id = model.Options.SyntheticsCheckID
options.timeout_h = model.Options.TimeoutH
options.renotify_interval = model.Options.RenotifyInterval
# Non nullable
if model.Options.EnableLogsSample is not None:
options.enable_logs_sample = model.Options.EnableLogsSample
if model.Options.EscalationMessage is not None:
options.escalation_message = model.Options.EscalationMessage
if model.Options.IncludeTags is not None:
options.include_tags = model.Options.IncludeTags
if model.Options.Locked is not None:
options.locked = model.Options.Locked
if model.Options.NotifyAudit is not None:
options.notify_audit = model.Options.NotifyAudit
if model.Options.NotifyNoData is not None:
options.notify_no_data = model.Options.NotifyNoData
if model.Options.RequireFullWindow is not None:
options.require_full_window = model.Options.RequireFullWindow
if model.Options.Thresholds is not None:
options.thresholds = ApiMonitorThresholds()
if model.Options.Thresholds.Critical is not None:
options.thresholds.critical = model.Options.Thresholds.Critical
if model.Options.Thresholds.CriticalRecovery is not None:
options.thresholds.critical_recovery = model.Options.Thresholds.CriticalRecovery
if model.Options.Thresholds.Warning is not None:
options.thresholds.warning = model.Options.Thresholds.Warning
if model.Options.Thresholds.WarningRecovery is not None:
options.thresholds.warning_recovery = model.Options.Thresholds.WarningRecovery
if model.Options.Thresholds.OK is not None:
options.thresholds.ok = model.Options.Thresholds.OK
if model.Options.ThresholdWindows is not None:
options.threshold_windows = ApiMonitorThresholdWindows()
options.threshold_windows.trigger_window = model.Options.ThresholdWindows.TriggerWindow
options.threshold_windows.recovery_window = model.Options.ThresholdWindows.RecoveryWindow
return options
|
py | 1a3191e60c802a9958fde4a70b2fac7f7d64b1a7 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy  # the deprecated flask.ext namespace was removed in Flask 1.0
__author__ = 'Lesko'
# Documentation is like sex.
# When it's good, it's very good.
# When it's bad, it's better than nothing.
# When it lies to you, it may be a while before you realize something's wrong.
app = Flask(__name__)
app.config.from_object('real_config')
db = SQLAlchemy(app)
from app import models
from app.views import views
# Add custom jinja filters
import custom_jinja_filters
app.jinja_env.filters["format_datetime"] = custom_jinja_filters.format_datetime
|
py | 1a319225fc16cfe2182e4d3492d53d6fd232ee54 | from floodsystem.flood import stations_highest_rel_level
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.datafetcher import fetch_latest_water_level_data, fetch_station_data
stations = build_station_list()
N=10
update_water_levels(stations)
stations_high_threat = stations_highest_rel_level(stations, N)
for station in stations_high_threat:
print(station[0].name, station[1])
if __name__ == "__main__":
print("*** Task 2C: CUED Part IA Flood Warning System ***")
|
py | 1a3193e4588a67b88a04b8ba863445f918df7788 | # placeholder definition for an access pattern object, can be passed as input
class PatternConfig:
def __init__(self,
exp_name="default", #name or ID
benchmark_name="test", #if this is a specific benchmark, include here
read_freq=-1, #number of reads/s
total_reads=-1, #total number of reads, can compute either way
read_size=8, #size/read in bytes
write_freq=-1, #number of writes/s
total_writes=-1, #total number of writes, can compute either way
write_size=8, #size/write in bytes
workingset=1, #total working set size in MB
total_ins=-1 #total number of ins in benchmark
):
#load all the parameters into the pattern class
#everything that defines the access pattern should be in this class
self.exp_name = exp_name
self.benchmark_name = benchmark_name
self.read_freq = read_freq
self.total_reads = total_reads
self.read_size = read_size
self.write_freq = write_freq
self.total_writes = total_writes
self.write_size = write_size
self.workingset = workingset
self.total_ins = total_ins
benchmarks = [ #collection of benchmarks from Tufts IISWC paper
PatternConfig(benchmark_name="bzip2",
total_reads=4.3e9,
read_size=4,
total_writes=1.47e9,
write_size=4,
workingset=(2505.38e3/1024./1024.),
total_ins=1404973
),
PatternConfig(benchmark_name="GemsFDTD",
total_reads=1.3e9,
read_size=4,
total_writes=0.7e9,
write_size=4,
workingset=(76576.59e3/1024./1024.),
total_ins=475257
),
PatternConfig(benchmark_name="tonto",
total_reads=1.1e9,
read_size=4,
total_writes=0.47e9,
write_size=4,
workingset=(5.59e3/1024./1024.),
total_ins=490533
),
PatternConfig(benchmark_name="leela",
total_reads=6.01e9,
read_size=4,
total_writes=2.35e9,
write_size=4,
workingset=(1.59e3/1024./1024.),
total_ins=42211
),
PatternConfig(benchmark_name="exchange2",
total_reads=62.28e9,
read_size=4,
total_writes=42.89e9,
write_size=4,
workingset=(0.64e3/1024./1024.),
total_ins=417088
),
PatternConfig(benchmark_name="deepsjeng",
total_reads=9.36e9,
read_size=4,
total_writes=4.43e9,
write_size=4,
workingset=(4.79e3/1024./1024.),
total_ins=71720506
),
PatternConfig(benchmark_name="vips",
total_reads=1.91e9,
read_size=4,
total_writes=0.68e9,
write_size=4,
workingset=(1107.19e3/1024./1024.),
total_ins=3949419070
),
PatternConfig(benchmark_name="x264",
total_reads=18.07e9,
read_size=4,
total_writes=2.84e9,
write_size=4,
workingset=(1585.49e3/1024./1024.),
total_ins=229607
),
PatternConfig(benchmark_name="cg",
total_reads=0.73e9,
read_size=4,
total_writes=0.04e9,
write_size=4,
workingset=(1015.43e3/1024./1024.),
total_ins=1942892619
),
PatternConfig(benchmark_name="ep",
total_reads=1.25e9,
read_size=4,
total_writes=0.54e9,
write_size=4,
workingset=(0.84e3/1024./1024.),
total_ins=7051878902
),
PatternConfig(benchmark_name="ft",
total_reads=0.28e9,
read_size=4,
total_writes=0.27e9,
write_size=4,
workingset=(342.64e3/1024./1024.),
total_ins=1416746823
),
PatternConfig(benchmark_name="is",
total_reads=0.12e9,
read_size=4,
total_writes=0.06e9,
write_size=4,
workingset=(1228.86e3/1024./1024.),
total_ins=298507496
),
PatternConfig(benchmark_name="lu",
total_reads=17.84e9,
read_size=4,
total_writes=3.99e9,
write_size=4,
workingset=(289.46e3/1024./1024.),
total_ins=29482003362
),
PatternConfig(benchmark_name="mg",
total_reads=0.76e9,
read_size=4,
total_writes=0.16e9,
write_size=4,
workingset=(4249.78e3/1024./1024.),
total_ins=1308033184
),
PatternConfig(benchmark_name="sp",
total_reads=9.23e9,
read_size=4,
total_writes=4.12e9,
write_size=4,
workingset=(556.75e3/1024./1024.),
total_ins=30840210911
),
PatternConfig(benchmark_name="ua",
total_reads=9.97e9,
read_size=4,
total_writes=5.85e9,
write_size=4,
workingset=(362.45e3/1024./1024.),
total_ins=19361069980
),
]
|
py | 1a319489a34044ffe9e1abe1ea0f87fb5530b208 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
OMAS plot examples
==================
This example loads some data from S3, augments the ODS with pressure information, and generates some plots
"""
from matplotlib import pyplot
from omas import *
# load some data from S3
ods = load_omas_s3('OMFITprofiles_sample', user='omas_shared')
# augment ODS with pressure information
ods.physics_core_profiles_pressures()
# omas plot for pressures
ods.plot_core_profiles_pressures()
pyplot.show()
# omas plot for core profiles
ods.plot_core_profiles_summary()
pyplot.show()
# omas plot for equilibrium
omas_plot.equilibrium_summary(ods, linewidth=1, label='my equilibrium')
pyplot.show()
# omas plot for transport fluxes
ods = ODS().sample(5)
ods.plot_core_transport_fluxes()
pyplot.show()
|
py | 1a3194dc7c2ad27d373926dc05ea74be8635acbd | import datetime
import re
from io import BytesIO
from unittest.mock import create_autospec, call, Mock
import pytest
from sap.aibus.dar.client.base_client import BaseClient
from sap.aibus.dar.client.data_manager_client import DataManagerClient
from sap.aibus.dar.client.exceptions import ModelAlreadyExists, DARHTTPException
from sap.aibus.dar.client.util.credentials import (
StaticCredentialsSource,
CredentialsSource,
)
from sap.aibus.dar.client.workflow.model import ModelCreator
from sap.aibus.dar.client.model_manager_client import ModelManagerClient
from tests.sap.aibus.dar.client.test_data_manager_client import (
AbstractDARClientConstruction,
)
@pytest.fixture
def csv_data_stream():
csv = """
manufacturer,description,category,subcategory
me,"simple è test, records",A,AA
me,"übrigens ein Beispiel, records",A,AA
me,"un po' di testo",A,AA
me,"какой-то текст",A,AA
me,"du texte",A,AA
me,"一些文字",A,AA
me,"कुछ पाठ",A,AA
me,"κάποιο κείμενο",A,AA
me,"кейбір мәтін",A,AA
me,"iu teksto",A,AA
"""
data_stream = BytesIO(csv.strip().encode("utf-8"))
return data_stream
@pytest.fixture()
def create_model():
create_model = ModelCreator.construct_from_jwt("https://abcd/", token="54321")
create_model.data_manager_client = create_autospec(DataManagerClient, instance=True)
create_model.model_manager_client = create_autospec(
ModelManagerClient, instance=True
)
return create_model
@pytest.fixture()
def model_resource():
return {
"jobId": "522de4e6-2609-4972-8f75-61e9262b86de",
"name": "my-model",
"createdAt": "2018-08-31T11:45:54+00:00",
"validationResult": {
"accuracy": 0.9,
"f1Score": 0.9,
"precision": 0.9,
"recall": 0.9,
},
}
a_timestamp = datetime.datetime(
2011, 11, 4, 0, 5, 23, 283000, tzinfo=datetime.timezone.utc
)
class TestModelCreatorClientConstruction(AbstractDARClientConstruction):
# Tests are in base class
clazz = ModelCreator
def test_constructor(self):
dar_url = "https://aiservices-dar.cfapps.xxx.hana.ondemand.com/"
source = StaticCredentialsSource("1234")
client = self.clazz(dar_url, source)
for embedded_client in [
client.data_manager_client,
client.model_manager_client,
]:
assert embedded_client.credentials_source == source
def test_create_from_jwt(self):
# Override and change assertions to look into embedded clients.
jwt = "12345"
client = self.clazz.construct_from_jwt(self.dar_url, jwt)
for embedded_client in [
client.data_manager_client,
client.model_manager_client,
]:
assert isinstance(
embedded_client.credentials_source, StaticCredentialsSource
)
assert embedded_client.credentials_source.token() == jwt
assert embedded_client.session.base_url == self.dar_url[:-1]
def _assert_fields_initialized(self, client):
assert isinstance(client.data_manager_client, DataManagerClient)
assert isinstance(client.model_manager_client, ModelManagerClient)
for embedded_client in [
client.data_manager_client,
client.model_manager_client,
]:
assert (
embedded_client.session.base_url
== "https://aiservices-dar.cfapps.xxx.hana.ondemand.com"
)
assert isinstance(embedded_client.credentials_source, CredentialsSource)
class TestModelCreator:
def test_is_subclass_of_base_client(self):
# Should have all the nice construction methods
assert issubclass(ModelCreator, BaseClient)
def test_format_dataset_name(self):
formatted = ModelCreator.format_dataset_name("my-model")
assert re.match(r"my-model-(\w|-)+", formatted)
assert len(formatted) <= 255
# returns a different name for the same input on the next call
formatted_2 = ModelCreator.format_dataset_name("my-model")
assert formatted != formatted_2
assert len(formatted_2) <= 255
def test_format_dataset_name_excessive_length_is_truncated(self):
input_str = "a" * 300
formatted = ModelCreator.format_dataset_name(input_str)
assert len(formatted) == 255
uuid_len = 37
# First part is still all a's
assert formatted[:-uuid_len] == input_str[0 : 255 - uuid_len]
def test_create_model(self, csv_data_stream, create_model, model_resource):
# inputs
# model_name: str,
model_template_id = "d7810207-ca31-4d4d-9b5a-841a644fd81f"
dataset_schema = {
"features": [
{"label": "manufacturer", "type": "CATEGORY"},
{"label": "description", "type": "TEXT"},
],
"labels": [
{"label": "category", "type": "CATEGORY"},
{"label": "subcategory", "type": "CATEGORY"},
],
"name": "test",
}
new_dataset_schema_id = "3689fc17-5394-46ba-8757-39a36b570e6e"
dataset_schema_created = dict(dataset_schema.items())
dataset_schema_created["id"] = new_dataset_schema_id
dataset_schema_created["createdAt"] = a_timestamp.isoformat()
model_name = "my-model"
dataset_name = model_name + "-123"
new_dataset_id = "915f16d7-48b0-438b-aca8-048f855ac627"
dataset_created = {
"createdAt": a_timestamp.isoformat(),
"id": new_dataset_id,
"name": dataset_name,
"status": "SUCCEEDED",
"validationMessage": "",
"datasetSchemaId": new_dataset_schema_id,
}
create_model.format_dataset_name = Mock(return_value=dataset_name)
create_model.data_manager_client.create_dataset_schema.return_value = (
dataset_schema_created
)
create_model.data_manager_client.create_dataset.return_value = dataset_created
dm = create_model.data_manager_client
mm = create_model.model_manager_client
mm.read_model_by_name.side_effect = [
DARHTTPException(url="https://abcd/", response=Mock(status_code=404)),
model_resource,
]
# act
result = create_model.create(
data_stream=csv_data_stream,
model_template_id=model_template_id,
dataset_schema=dataset_schema,
model_name=model_name,
)
assert result == model_resource
# Expected calls
expected_create_dataset_schema = call(dataset_schema)
assert dm.create_dataset_schema.call_args_list == [
expected_create_dataset_schema
]
expected_dataset_name = dataset_name
expected_create_dataset = call(
dataset_name=expected_dataset_name,
dataset_schema_id=dataset_schema_created["id"],
)
assert dm.create_dataset.call_args_list == [expected_create_dataset]
expected_call_to_upload_and_validate = call(
dataset_id=dataset_created["id"], data_stream=csv_data_stream
)
assert dm.upload_data_and_validate.call_args_list == [
expected_call_to_upload_and_validate
]
expected_call_to_create_job_and_wait = call(
model_name=model_name,
dataset_id=new_dataset_id,
model_template_id=model_template_id,
)
assert mm.create_job_and_wait.call_args_list == [
expected_call_to_create_job_and_wait
]
expected_call_to_read_model_by_name = call(model_name=model_name)
assert mm.read_model_by_name.call_args_list == [
expected_call_to_read_model_by_name,
expected_call_to_read_model_by_name,
]
def test_create_model_checks_for_existing_model(self, create_model, model_resource):
"""
If the model already exists, this should be an error.
"""
model_name = "my-model"
create_model.model_manager_client.read_model_by_name.return_value = (
model_resource
)
with pytest.raises(ModelAlreadyExists) as context:
create_model.create(
data_stream=Mock(),
model_template_id=Mock(),
dataset_schema=Mock(),
model_name=model_name,
)
assert "Model 'my-model' already exists" in str(context.value)
assert create_model.model_manager_client.read_model_by_name.call_args_list == [
call(model_name=model_name)
]
def test_create_model_forwards_exception(self, create_model, model_resource):
"""
If ModelManagerClient.read_model_by_name raises a 404 in the initial check,
this means that the model is not there and execution and proceed. This is
tested in test_create_model above.
For all other status code, the exception should be re-raised as is.
This is tested here.
"""
model_name = "my-model"
exc = DARHTTPException(url="https://abcd/", response=Mock(status_code=429))
create_model.model_manager_client.read_model_by_name.side_effect = exc
with pytest.raises(DARHTTPException) as context:
create_model.create(
data_stream=Mock(),
model_template_id=Mock(),
dataset_schema=Mock(),
model_name=model_name,
)
assert context.value == exc
assert create_model.model_manager_client.read_model_by_name.call_args_list == [
call(model_name=model_name)
]
|
py | 1a3195ebb70311ad8e990f611c2eff64406d8a24 | import os
import sys
THIS_FOLDER = os.path.dirname(os.path.abspath(__file__)) + "/.."
sys.path.insert(1, THIS_FOLDER)
from vyxal.lexer import *
from vyxal.parse import *
from vyxal.structure import *
def fully_parse(program: str) -> list[Structure]:
"""
Essentially, wrap tokenise(program) in parse
Parameters
----------
program : str
The program to tokenise and then parse
Returns
-------
list[Structure]
Quite literally parse(tokenise(program))
"""
return parse(tokenise(program)) # see what I mean?
def test_basic():
assert str(fully_parse("1 1+")) == str(
[
GenericStatement([Token(TokenType.NUMBER, "1")]),
GenericStatement([Token(TokenType.NUMBER, "1")]),
GenericStatement([Token(TokenType.GENERAL, "+")]),
]
)
assert str(fully_parse("`Hello, World!`")) == str(
[GenericStatement([Token(TokenType.STRING, "Hello, World!")])]
)
def test_fizzbuzz():
assert str(fully_parse("₁ƛ₍₃₅kF½*∑∴")) == str(
[
GenericStatement([Token(TokenType.GENERAL, "₁")]),
Lambda(
"1",
[
DyadicModifier(
"₍",
GenericStatement([Token(TokenType.GENERAL, "₃")]),
GenericStatement([Token(TokenType.GENERAL, "₅")]),
),
GenericStatement([Token(TokenType.GENERAL, "kF")]),
GenericStatement([Token(TokenType.GENERAL, "½")]),
GenericStatement([Token(TokenType.GENERAL, "*")]),
GenericStatement([Token(TokenType.GENERAL, "∑")]),
GenericStatement([Token(TokenType.GENERAL, "∴")]),
],
),
GenericStatement([Token(TokenType.GENERAL, "M")]),
]
)
def test_modifiers():
assert str(fully_parse("⁽*r")) == str(
[
Lambda("1", [GenericStatement([Token(TokenType.GENERAL, "*")])]),
GenericStatement([Token(TokenType.GENERAL, "r")]),
]
)
assert str(fully_parse("vv+")) == str(
[
MonadicModifier(
"v",
MonadicModifier(
"v", GenericStatement([Token(TokenType.GENERAL, "+")])
),
)
]
)
assert str(fully_parse("‡₌*ġḭd†")) == str(
[
Lambda(
"1",
[
DyadicModifier(
"₌",
GenericStatement([Token(TokenType.GENERAL, "*")]),
GenericStatement([Token(TokenType.GENERAL, "ġ")]),
),
GenericStatement([Token(TokenType.GENERAL, "ḭ")]),
],
),
GenericStatement([Token(TokenType.GENERAL, "d")]),
GenericStatement([Token(TokenType.GENERAL, "†")]),
]
)
def test_structures():
assert str(fully_parse("[1 1+|`nice`")) == str(
[
IfStatement(
[
GenericStatement([Token(TokenType.NUMBER, "1")]),
GenericStatement([Token(TokenType.NUMBER, "1")]),
GenericStatement([Token(TokenType.GENERAL, "+")]),
],
[GenericStatement([Token(TokenType.STRING, "nice")])],
)
]
)
assert str(fully_parse("1 10r(i|n2*,")) == str(
[
GenericStatement([Token(TokenType.NUMBER, "1")]),
GenericStatement([Token(TokenType.NUMBER, "10")]),
GenericStatement([Token(TokenType.GENERAL, "r")]),
ForLoop(
["i"],
[
GenericStatement([Token(TokenType.GENERAL, "n")]),
GenericStatement([Token(TokenType.NUMBER, "2")]),
GenericStatement([Token(TokenType.GENERAL, "*")]),
GenericStatement([Token(TokenType.GENERAL, ",")]),
],
),
]
)
assert str(fully_parse("@triple:1|3*;")) == str(
[
FunctionDef(
"triple",
["1"],
[
GenericStatement([Token(TokenType.NUMBER, "3")]),
GenericStatement([Token(TokenType.GENERAL, "*")]),
],
)
]
)
assert str(fully_parse("(code‛|c")) == str(
[
ForLoop(
[],
[
GenericStatement([Token(TokenType.GENERAL, "c")]),
GenericStatement([Token(TokenType.GENERAL, "o")]),
GenericStatement([Token(TokenType.GENERAL, "d")]),
GenericStatement([Token(TokenType.GENERAL, "e")]),
GenericStatement([Token(TokenType.STRING, "|c")]),
],
)
]
)
|
py | 1a31963f9f6d60906c4bfdcbd4a06da2fb000041 | from os import getenv
from dotenv import load_dotenv
load_dotenv()
UNSPLASH_ACCESS_KEY = getenv('UNSPLASH_ACCESS_KEY')
FLICKR_KEY = getenv('FLICKR_KEY')
FLICKR_SECRET = getenv('FLICKR_SECRET')
ALBUM_FONTS = [
'Comforter Brush',
'Praise',
'Dancing Script',
'Estonia',
]
ARTIST_FONTS = [
'Bebas Neue',
'Road Rage',
'Comfortaa',
'Lobster',
'Patua One',
]
|
py | 1a3197095c402eb68659aa61c0d443e9950479e4 | # -*- coding: utf-8 -*-
#
# Review Board Codebase documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 12 02:10:34 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# Set this up to parse Django-driven code.
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.insert(0, os.path.dirname(__file__))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reviewboard.settings')
import reviewboard
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Review Board Codebase'
copyright = u'2009-2010, Christian Hammond'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join([str(i) for i in reviewboard.__version_info__[:2]])
# The full version, including alpha/beta/rc tags.
release = reviewboard.get_version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'ReviewBoardCodebasedoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'ReviewBoardCodebase.tex', ur'Review Board Codebase Documentation',
ur'Christian Hammond', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/dev': None}
|
py | 1a319a990f4b622b1f5e4180a42e428d88ba70c1 | import asyncio
import sys
import time
from datetime import datetime
from decimal import Decimal
from typing import Callable, List, Optional, Tuple, Dict
import aiohttp
from peas.cmds.units import units
from peas.rpc.wallet_rpc_client import WalletRpcClient
from peas.server.start_wallet import SERVICE_NAME
from peas.util.bech32m import encode_puzzle_hash
from peas.util.byte_types import hexstr_to_bytes
from peas.util.config import load_config
from peas.util.default_root import DEFAULT_ROOT_PATH
from peas.util.ints import uint16, uint64
from peas.wallet.transaction_record import TransactionRecord
from peas.wallet.util.wallet_types import WalletType
def print_transaction(tx: TransactionRecord, verbose: bool, name) -> None:
if verbose:
print(tx)
else:
peas_amount = Decimal(int(tx.amount)) / units["peas"]
to_address = encode_puzzle_hash(tx.to_puzzle_hash, name)
print(f"Transaction {tx.name}")
print(f"Status: {'Confirmed' if tx.confirmed else ('In mempool' if tx.is_in_mempool() else 'Pending')}")
print(f"Amount: {peas_amount} {name}")
print(f"To address: {to_address}")
print("Created at:", datetime.fromtimestamp(tx.created_at_time).strftime("%Y-%m-%d %H:%M:%S"))
print("")
async def get_transaction(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args["id"]
transaction_id = hexstr_to_bytes(args["tx_id"])
config = load_config(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME)
name = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"]
tx: TransactionRecord = await wallet_client.get_transaction(wallet_id, transaction_id=transaction_id)
print_transaction(tx, verbose=(args["verbose"] > 0), name=name)
async def get_transactions(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args["id"]
txs: List[TransactionRecord] = await wallet_client.get_transactions(wallet_id)
config = load_config(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME)
name = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"]
if len(txs) == 0:
print("There are no transactions to this address")
offset = args["offset"]
num_per_screen = 5
for i in range(offset, len(txs), num_per_screen):
for j in range(0, num_per_screen):
if i + j >= len(txs):
break
print_transaction(txs[i + j], verbose=(args["verbose"] > 0), name=name)
if i + num_per_screen >= len(txs):
return None
print("Press q to quit, or c to continue")
while True:
entered_key = sys.stdin.read(1)
if entered_key == "q":
return None
elif entered_key == "c":
break
def check_unusual_transaction(amount: Decimal, fee: Decimal):
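# A transaction is treated as unusual when the fee is at least as large as the amount being sent.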
return fee >= amount
async def send(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args["id"]
amount = Decimal(args["amount"])
fee = Decimal(args["fee"])
address = args["address"]
override = args["override"]
if not override and check_unusual_transaction(amount, fee):
print(
f"A transaction of amount {amount} and fee {fee} is unusual.\n"
f"Pass in --override if you are sure you mean to do this."
)
return
print("Submitting transaction...")
final_amount = uint64(int(amount * units["peas"]))
final_fee = uint64(int(fee * units["peas"]))
res = await wallet_client.send_transaction(wallet_id, final_amount, address, final_fee)
tx_id = res.name
start = time.time()
while time.time() - start < 10:
await asyncio.sleep(0.1)
tx = await wallet_client.get_transaction(wallet_id, tx_id)
if len(tx.sent_to) > 0:
print(f"Transaction submitted to nodes: {tx.sent_to}")
print(f"Do peas wallet get_transaction -f {fingerprint} -tx 0x{tx_id} to get status")
return None
print("Transaction not yet submitted to nodes")
print(f"Do 'peas wallet get_transaction -f {fingerprint} -tx 0x{tx_id}' to get status")
async def get_address(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args["id"]
res = await wallet_client.get_next_address(wallet_id, False)
print(res)
async def delete_unconfirmed_transactions(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args["id"]
await wallet_client.delete_unconfirmed_transactions(wallet_id)
print(f"Successfully deleted all unconfirmed transactions for wallet id {wallet_id} on key {fingerprint}")
def wallet_coin_unit(typ: WalletType, address_prefix: str) -> Tuple[str, int]:
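# Map a wallet type to the address prefix and unit divisor used when formatting its balance.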
if typ == WalletType.COLOURED_COIN:
return "", units["colouredcoin"]
if typ in [WalletType.STANDARD_WALLET, WalletType.POOLING_WALLET, WalletType.MULTI_SIG, WalletType.RATE_LIMITED]:
return address_prefix, units["peas"]
return "", units["mojo"]
def print_balance(amount: int, scale: int, address_prefix: str) -> str:
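# Render the raw mojo amount in the wallet's display unit, appending the raw value when a scaling factor applies.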
ret = f"{amount/scale} {address_prefix} "
if scale > 1:
ret += f"({amount} mojo)"
return ret
async def print_balances(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
summaries_response = await wallet_client.get_wallets()
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
address_prefix = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"]
print(f"Wallet height: {await wallet_client.get_height_info()}")
print(f"Sync status: {'Synced' if (await wallet_client.get_synced()) else 'Not synced'}")
print(f"Balances, fingerprint: {fingerprint}")
for summary in summaries_response:
wallet_id = summary["id"]
balances = await wallet_client.get_wallet_balance(wallet_id)
typ = WalletType(int(summary["type"]))
address_prefix, scale = wallet_coin_unit(typ, address_prefix)
print(f"Wallet ID {wallet_id} type {typ.name} {summary['name']}")
print(f" -Total Balance: {print_balance(balances['confirmed_wallet_balance'], scale, address_prefix)}")
print(
f" -Pending Total Balance: {print_balance(balances['unconfirmed_wallet_balance'], scale, address_prefix)}"
)
print(f" -Spendable: {print_balance(balances['spendable_balance'], scale, address_prefix)}")
async def get_wallet(wallet_client: WalletRpcClient, fingerprint: int = None) -> Optional[Tuple[WalletRpcClient, int]]:
if fingerprint is not None:
fingerprints = [fingerprint]
else:
fingerprints = await wallet_client.get_public_keys()
if len(fingerprints) == 0:
print("No keys loaded. Run 'peas keys generate' or import a key")
return None
if len(fingerprints) == 1:
fingerprint = fingerprints[0]
if fingerprint is not None:
log_in_response = await wallet_client.log_in(fingerprint)
else:
print("Choose wallet key:")
for i, fp in enumerate(fingerprints):
print(f"{i+1}) {fp}")
val = None
while val is None:
val = input("Enter a number to pick or q to quit: ")
if val == "q":
return None
if not val.isdigit():
val = None
else:
index = int(val) - 1
if index >= len(fingerprints):
print("Invalid value")
val = None
continue
else:
fingerprint = fingerprints[index]
assert fingerprint is not None
log_in_response = await wallet_client.log_in(fingerprint)
if log_in_response["success"] is False:
if log_in_response["error"] == "not_initialized":
use_cloud = True
if "backup_path" in log_in_response:
path = log_in_response["backup_path"]
print(f"Backup file from backup.peas.net downloaded and written to: {path}")
val = input("Do you want to use this file to restore from backup? (Y/N) ")
if val.lower() == "y":
log_in_response = await wallet_client.log_in_and_restore(fingerprint, path)
else:
use_cloud = False
if "backup_path" not in log_in_response or use_cloud is False:
if use_cloud is True:
val = input(
"No online backup file found,\n Press S to skip restore from backup"
"\n Press F to use your own backup file: "
)
else:
val = input(
"Cloud backup declined,\n Press S to skip restore from backup"
"\n Press F to use your own backup file: "
)
if val.lower() == "s":
log_in_response = await wallet_client.log_in_and_skip(fingerprint)
elif val.lower() == "f":
val = input("Please provide the full path to your backup file: ")
log_in_response = await wallet_client.log_in_and_restore(fingerprint, val)
if "success" not in log_in_response or log_in_response["success"] is False:
if "error" in log_in_response:
error = log_in_response["error"]
print(f"Error: {log_in_response[error]}")
return None
return wallet_client, fingerprint
async def execute_with_wallet(
wallet_rpc_port: Optional[int], fingerprint: int, extra_params: Dict, function: Callable
) -> None:
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if wallet_rpc_port is None:
wallet_rpc_port = config["wallet"]["rpc_port"]
wallet_client = await WalletRpcClient.create(self_hostname, uint16(wallet_rpc_port), DEFAULT_ROOT_PATH, config)
wallet_client_f = await get_wallet(wallet_client, fingerprint=fingerprint)
if wallet_client_f is None:
wallet_client.close()
await wallet_client.await_closed()
return None
wallet_client, fingerprint = wallet_client_f
await function(extra_params, wallet_client, fingerprint)
except KeyboardInterrupt:
pass
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(
f"Connection error. Check if the wallet is running at {wallet_rpc_port}. "
"You can run the wallet via:\n\tchia start wallet"
)
else:
print(f"Exception from 'wallet' {e}")
wallet_client.close()
await wallet_client.await_closed()
|
py | 1a319c8c5cb08736f076ed0846fa427e7f750763 | __author__ = 'marble_xu'
import os
import json
from abc import abstractmethod
import pygame as pg
from . import constants as c
class State():
def __init__(self):
self.start_time = 0.0
self.current_time = 0.0
self.done = False
self.next = None
self.persist = {}
@abstractmethod
def startup(self, current_time, persist):
'''abstract method'''
def cleanup(self):
self.done = False
return self.persist
@abstractmethod
def update(self, surface, keys, current_time):
'''abstract method'''
class Control():
def __init__(self):
self.screen = pg.display.get_surface()
self.done = False
self.clock = pg.time.Clock()
self.fps = 60
self.keys = pg.key.get_pressed()
self.mouse_pos = None
self.mouse_click = [False, False] # value:[left mouse click, right mouse click]
self.current_time = 0.0
self.state_dict = {}
self.state_name = None
self.state = None
self.game_info = {c.CURRENT_TIME:0.0,
c.LEVEL_NUM:c.START_LEVEL_NUM}
def setup_states(self, state_dict, start_state):
self.state_dict = state_dict
self.state_name = start_state
self.state = self.state_dict[self.state_name]
self.state.startup(self.current_time, self.game_info)
def update(self):
self.current_time = pg.time.get_ticks()
if self.state.done:
self.flip_state()
self.state.update(self.screen, self.current_time, self.mouse_pos, self.mouse_click)
self.mouse_pos = None
self.mouse_click[0] = False
self.mouse_click[1] = False
def flip_state(self):
previous, self.state_name = self.state_name, self.state.next
persist = self.state.cleanup()
self.state = self.state_dict[self.state_name]
self.state.startup(self.current_time, persist)
def event_loop(self):
for event in pg.event.get():
if event.type == pg.QUIT:
self.done = True
elif event.type == pg.KEYDOWN:
self.keys = pg.key.get_pressed()
elif event.type == pg.KEYUP:
self.keys = pg.key.get_pressed()
elif event.type == pg.MOUSEBUTTONDOWN:
self.mouse_pos = pg.mouse.get_pos()
self.mouse_click[0], _, self.mouse_click[1] = pg.mouse.get_pressed()
print('pos:', self.mouse_pos, ' mouse:', self.mouse_click)
def main(self):
while not self.done:
self.event_loop()
self.update()
pg.display.update()
self.clock.tick(self.fps)
print('game over')
def get_image(sheet, x, y, width, height, colorkey=c.BLACK, scale=1):
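# Copy a width x height region at (x, y) from the sprite sheet, make colorkey pixels transparent, and scale the result.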
image = pg.Surface([width, height])
rect = image.get_rect()
image.blit(sheet, (0, 0), (x, y, width, height))
image.set_colorkey(colorkey)
image = pg.transform.scale(image,
(int(rect.width*scale),
int(rect.height*scale)))
return image
def load_image_frames(directory, image_name, colorkey, accept):
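# Gather every frame named '<image_name>_<index>' in the directory and return them ordered by index.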
frame_list = []
tmp = {}
# image_name is "Peashooter", pic name is 'Peashooter_1', get the index 1
index_start = len(image_name) + 1
frame_num = 0
for pic in os.listdir(directory):
name, ext = os.path.splitext(pic)
if ext.lower() in accept:
index = int(name[index_start:])
img = pg.image.load(os.path.join(directory, pic))
if img.get_alpha():
img = img.convert_alpha()
else:
img = img.convert()
img.set_colorkey(colorkey)
tmp[index] = img
frame_num += 1
for i in range(frame_num):
frame_list.append(tmp[i])
return frame_list
def load_all_gfx(directory, colorkey=c.WHITE, accept=('.png', '.jpg', '.bmp', '.gif')):
graphics = {}
for name1 in os.listdir(directory):
# subfolders under the folder resources\graphics
dir1 = os.path.join(directory, name1)
if os.path.isdir(dir1):
for name2 in os.listdir(dir1):
dir2 = os.path.join(dir1, name2)
if os.path.isdir(dir2):
# e.g. subfolders under the folder resources\graphics\Zombies
for name3 in os.listdir(dir2):
dir3 = os.path.join(dir2, name3)
# e.g. subfolders or pics under the folder resources\graphics\Zombies\ConeheadZombie
if os.path.isdir(dir3):
# e.g. it's the folder resources\graphics\Zombies\ConeheadZombie\ConeheadZombieAttack
image_name, _ = os.path.splitext(name3)
graphics[image_name] = load_image_frames(dir3, image_name, colorkey, accept)
else:
# e.g. pics under the folder resources\graphics\Plants\Peashooter
image_name, _ = os.path.splitext(name2)
graphics[image_name] = load_image_frames(dir2, image_name, colorkey, accept)
break
else:
# e.g. pics under the folder resources\graphics\Screen
name, ext = os.path.splitext(name2)
if ext.lower() in accept:
img = pg.image.load(dir2)
if img.get_alpha():
img = img.convert_alpha()
else:
img = img.convert()
img.set_colorkey(colorkey)
graphics[name] = img
return graphics
def loadZombieImageRect():
file_path = os.path.join('source', 'data', 'entity', 'zombie.json')
f = open(file_path)
data = json.load(f)
f.close()
return data[c.ZOMBIE_IMAGE_RECT]
def loadPlantImageRect():
file_path = os.path.join('source', 'data', 'entity', 'plant.json')
f = open(file_path)
data = json.load(f)
f.close()
return data[c.PLANT_IMAGE_RECT]
pg.init()
pg.display.set_caption(c.ORIGINAL_CAPTION)
SCREEN = pg.display.set_mode(c.SCREEN_SIZE)
GFX = load_all_gfx(os.path.join("resources","graphics"))
ZOMBIE_RECT = loadZombieImageRect()
PLANT_RECT = loadPlantImageRect()
|
py | 1a319e427f87af1d094eb6666894b81f7cc6f4b2 | from datetime import date
from silverstrike.models import Account, Split, Transaction
def create_transaction(title, src, dst, amount, type, date=date.today(), category=None):
t = Transaction.objects.create(title=title, date=date, transaction_type=type,
src=src, dst=dst, amount=amount)
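# Record both sides of the transfer: a negative split on the source account and a matching positive split on the destination.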
Split.objects.bulk_create([
Split(title=title, account=src, opposing_account=dst,
amount=-amount, transaction=t, date=date, category=category),
Split(title=title, account=dst, opposing_account=src,
amount=amount, transaction=t, date=date, category=category)])
return t
def create_account(name, account_type=Account.AccountType.PERSONAL):
return Account.objects.create(name=name, account_type=account_type)
|
py | 1a319e8f5ba9b01bdd4c2984b082fc280e65e3b4 | from PyBall.models import BaseModel
class Metric(BaseModel):
_fields = {
'group': {'default_value': None, 'field_type': str},
'name': {'default_value': None, 'field_type': str},
'unit': {'default_value': None, 'field_type': str},
'metricId': {'default_value': None, 'field_type': int},
}
|
py | 1a319f558ac25c16a0681e378a4b8caa7f491ed8 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that show how to use various Dataproc
operators to manage a cluster and submit jobs.
"""
import os
from datetime import datetime
from airflow import models
from airflow.providers.google.cloud.operators.dataproc import (
DataprocCreateClusterOperator,
DataprocCreateWorkflowTemplateOperator,
DataprocDeleteClusterOperator,
DataprocInstantiateWorkflowTemplateOperator,
DataprocSubmitJobOperator,
DataprocUpdateClusterOperator,
)
from airflow.providers.google.cloud.sensors.dataproc import DataprocJobSensor
PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "an-id")
CLUSTER_NAME = os.environ.get("GCP_DATAPROC_CLUSTER_NAME", "example-cluster")
REGION = os.environ.get("GCP_LOCATION", "europe-west1")
ZONE = os.environ.get("GCP_REGION", "europe-west1-b")
BUCKET = os.environ.get("GCP_DATAPROC_BUCKET", "dataproc-system-tests")
OUTPUT_FOLDER = "wordcount"
OUTPUT_PATH = f"gs://{BUCKET}/{OUTPUT_FOLDER}/"
PYSPARK_MAIN = os.environ.get("PYSPARK_MAIN", "hello_world.py")
PYSPARK_URI = f"gs://{BUCKET}/{PYSPARK_MAIN}"
SPARKR_MAIN = os.environ.get("SPARKR_MAIN", "hello_world.R")
SPARKR_URI = f"gs://{BUCKET}/{SPARKR_MAIN}"
# Cluster definition
# [START how_to_cloud_dataproc_create_cluster]
CLUSTER_CONFIG = {
"master_config": {
"num_instances": 1,
"machine_type_uri": "n1-standard-4",
"disk_config": {"boot_disk_type": "pd-standard", "boot_disk_size_gb": 1024},
},
"worker_config": {
"num_instances": 2,
"machine_type_uri": "n1-standard-4",
"disk_config": {"boot_disk_type": "pd-standard", "boot_disk_size_gb": 1024},
},
}
# [END how_to_cloud_dataproc_create_cluster]
# Update options
# [START how_to_cloud_dataproc_updatemask_cluster_operator]
CLUSTER_UPDATE = {
"config": {"worker_config": {"num_instances": 3}, "secondary_worker_config": {"num_instances": 3}}
}
UPDATE_MASK = {
"paths": ["config.worker_config.num_instances", "config.secondary_worker_config.num_instances"]
}
# [END how_to_cloud_dataproc_updatemask_cluster_operator]
TIMEOUT = {"seconds": 1 * 24 * 60 * 60}
# Jobs definitions
# [START how_to_cloud_dataproc_pig_config]
PIG_JOB = {
"reference": {"project_id": PROJECT_ID},
"placement": {"cluster_name": CLUSTER_NAME},
"pig_job": {"query_list": {"queries": ["define sin HiveUDF('sin');"]}},
}
# [END how_to_cloud_dataproc_pig_config]
# [START how_to_cloud_dataproc_sparksql_config]
SPARK_SQL_JOB = {
"reference": {"project_id": PROJECT_ID},
"placement": {"cluster_name": CLUSTER_NAME},
"spark_sql_job": {"query_list": {"queries": ["SHOW DATABASES;"]}},
}
# [END how_to_cloud_dataproc_sparksql_config]
# [START how_to_cloud_dataproc_spark_config]
SPARK_JOB = {
"reference": {"project_id": PROJECT_ID},
"placement": {"cluster_name": CLUSTER_NAME},
"spark_job": {
"jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
"main_class": "org.apache.spark.examples.SparkPi",
},
}
# [END how_to_cloud_dataproc_spark_config]
# [START how_to_cloud_dataproc_pyspark_config]
PYSPARK_JOB = {
"reference": {"project_id": PROJECT_ID},
"placement": {"cluster_name": CLUSTER_NAME},
"pyspark_job": {"main_python_file_uri": PYSPARK_URI},
}
# [END how_to_cloud_dataproc_pyspark_config]
# [START how_to_cloud_dataproc_sparkr_config]
SPARKR_JOB = {
"reference": {"project_id": PROJECT_ID},
"placement": {"cluster_name": CLUSTER_NAME},
"spark_r_job": {"main_r_file_uri": SPARKR_URI},
}
# [END how_to_cloud_dataproc_sparkr_config]
# [START how_to_cloud_dataproc_hive_config]
HIVE_JOB = {
"reference": {"project_id": PROJECT_ID},
"placement": {"cluster_name": CLUSTER_NAME},
"hive_job": {"query_list": {"queries": ["SHOW DATABASES;"]}},
}
# [END how_to_cloud_dataproc_hive_config]
# [START how_to_cloud_dataproc_hadoop_config]
HADOOP_JOB = {
"reference": {"project_id": PROJECT_ID},
"placement": {"cluster_name": CLUSTER_NAME},
"hadoop_job": {
"main_jar_file_uri": "file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar",
"args": ["wordcount", "gs://pub/shakespeare/rose.txt", OUTPUT_PATH],
},
}
# [END how_to_cloud_dataproc_hadoop_config]
WORKFLOW_NAME = "airflow-dataproc-test"
WORKFLOW_TEMPLATE = {
"id": WORKFLOW_NAME,
"placement": {
"managed_cluster": {
"cluster_name": CLUSTER_NAME,
"config": CLUSTER_CONFIG,
}
},
"jobs": [{"step_id": "pig_job_1", "pig_job": PIG_JOB["pig_job"]}],
}
with models.DAG(
"example_gcp_dataproc",
schedule_interval='@once',
start_date=datetime(2021, 1, 1),
catchup=False,
) as dag:
# [START how_to_cloud_dataproc_create_cluster_operator]
create_cluster = DataprocCreateClusterOperator(
task_id="create_cluster",
project_id=PROJECT_ID,
cluster_config=CLUSTER_CONFIG,
region=REGION,
cluster_name=CLUSTER_NAME,
)
# [END how_to_cloud_dataproc_create_cluster_operator]
# [START how_to_cloud_dataproc_update_cluster_operator]
scale_cluster = DataprocUpdateClusterOperator(
task_id="scale_cluster",
cluster_name=CLUSTER_NAME,
cluster=CLUSTER_UPDATE,
update_mask=UPDATE_MASK,
graceful_decommission_timeout=TIMEOUT,
project_id=PROJECT_ID,
region=REGION,
)
# [END how_to_cloud_dataproc_update_cluster_operator]
# [START how_to_cloud_dataproc_create_workflow_template]
create_workflow_template = DataprocCreateWorkflowTemplateOperator(
task_id="create_workflow_template",
template=WORKFLOW_TEMPLATE,
project_id=PROJECT_ID,
region=REGION,
)
# [END how_to_cloud_dataproc_create_workflow_template]
# [START how_to_cloud_dataproc_trigger_workflow_template]
trigger_workflow = DataprocInstantiateWorkflowTemplateOperator(
task_id="trigger_workflow", region=REGION, project_id=PROJECT_ID, template_id=WORKFLOW_NAME
)
# [END how_to_cloud_dataproc_trigger_workflow_template]
pig_task = DataprocSubmitJobOperator(
task_id="pig_task", job=PIG_JOB, region=REGION, project_id=PROJECT_ID
)
spark_sql_task = DataprocSubmitJobOperator(
task_id="spark_sql_task", job=SPARK_SQL_JOB, region=REGION, project_id=PROJECT_ID
)
spark_task = DataprocSubmitJobOperator(
task_id="spark_task", job=SPARK_JOB, region=REGION, project_id=PROJECT_ID
)
# [START cloud_dataproc_async_submit_sensor]
spark_task_async = DataprocSubmitJobOperator(
task_id="spark_task_async", job=SPARK_JOB, region=REGION, project_id=PROJECT_ID, asynchronous=True
)
spark_task_async_sensor = DataprocJobSensor(
task_id='spark_task_async_sensor_task',
region=REGION,
project_id=PROJECT_ID,
dataproc_job_id=spark_task_async.output,
poke_interval=10,
)
# [END cloud_dataproc_async_submit_sensor]
# [START how_to_cloud_dataproc_submit_job_to_cluster_operator]
pyspark_task = DataprocSubmitJobOperator(
task_id="pyspark_task", job=PYSPARK_JOB, region=REGION, project_id=PROJECT_ID
)
# [END how_to_cloud_dataproc_submit_job_to_cluster_operator]
sparkr_task = DataprocSubmitJobOperator(
task_id="sparkr_task", job=SPARKR_JOB, region=REGION, project_id=PROJECT_ID
)
hive_task = DataprocSubmitJobOperator(
task_id="hive_task", job=HIVE_JOB, region=REGION, project_id=PROJECT_ID
)
hadoop_task = DataprocSubmitJobOperator(
task_id="hadoop_task", job=HADOOP_JOB, region=REGION, project_id=PROJECT_ID
)
# [START how_to_cloud_dataproc_delete_cluster_operator]
delete_cluster = DataprocDeleteClusterOperator(
task_id="delete_cluster", project_id=PROJECT_ID, cluster_name=CLUSTER_NAME, region=REGION
)
# [END how_to_cloud_dataproc_delete_cluster_operator]
create_cluster >> scale_cluster
scale_cluster >> create_workflow_template >> trigger_workflow >> delete_cluster
scale_cluster >> hive_task >> delete_cluster
scale_cluster >> pig_task >> delete_cluster
scale_cluster >> spark_sql_task >> delete_cluster
scale_cluster >> spark_task >> delete_cluster
scale_cluster >> spark_task_async
spark_task_async_sensor >> delete_cluster
scale_cluster >> pyspark_task >> delete_cluster
scale_cluster >> sparkr_task >> delete_cluster
scale_cluster >> hadoop_task >> delete_cluster
# Task dependency created via `XComArgs`:
# spark_task_async >> spark_task_async_sensor
|
py | 1a31a12c44deccc0f336d6af44167a1c6d9a6790 | # Generated by Django 3.2.12 on 2022-04-27 20:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0007_StudentProxyModel'),
]
operations = [
migrations.AddField(
model_name='user',
name='github_username',
field=models.CharField(blank=True, db_index=True, default='', max_length=256),
),
migrations.AddField(
model_name='user',
name='linkedin_username',
field=models.CharField(blank=True, db_index=True, default='', max_length=256),
),
migrations.AddConstraint(
model_name='user',
constraint=models.UniqueConstraint(condition=models.Q(('github_username', ''), _negated=True), fields=('github_username',), name='unique_github_username'),
),
migrations.AddConstraint(
model_name='user',
constraint=models.UniqueConstraint(condition=models.Q(('linkedin_username', ''), _negated=True), fields=('linkedin_username',), name='unique_linkedin_username'),
),
]
|
py | 1a31a1fa6617e041b3f4e1eaa1cb5f9af9335108 | # -*- coding: utf-8 -*-
'''
Provide external pillar data from RethinkDB
.. versionadded:: 2018.3.0
:depends: rethinkdb (on the salt-master)
salt master rethinkdb configuration
===================================
These variables must be configured in your master configuration file.
* ``rethinkdb.host`` - The RethinkDB server. Defaults to ``'salt'``
* ``rethinkdb.port`` - The port the RethinkDB server listens on.
Defaults to ``'28015'``
* ``rethinkdb.database`` - The database to connect to.
Defaults to ``'salt'``
* ``rethinkdb.username`` - The username for connecting to RethinkDB.
Defaults to ``''``
* ``rethinkdb.password`` - The password for connecting to RethinkDB.
Defaults to ``''``
salt-master ext_pillar configuration
====================================
The ext_pillar function arguments are given in single line dictionary notation.
.. code-block:: yaml
ext_pillar:
- rethinkdb: {table: ext_pillar, id_field: minion_id, field: pillar_root, pillar_key: external_pillar}
In the example above the following happens.
* The salt-master will look for external pillars in the 'ext_pillar' table
on the RethinkDB host
* The minion id will be matched against the 'minion_id' field
* Pillars will be retrieved from the nested field 'pillar_root'
* Found pillars will be merged inside a key called 'external_pillar'
Module Documentation
====================
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libraries
import logging
# Import 3rd party libraries
try:
import rethinkdb
HAS_RETHINKDB = True
except ImportError:
HAS_RETHINKDB = False
__virtualname__ = 'rethinkdb'
__opts__ = {
'rethinkdb.host': 'salt',
'rethinkdb.port': '28015',
'rethinkdb.database': 'salt',
'rethinkdb.username': None,
'rethinkdb.password': None
}
def __virtual__():
if not HAS_RETHINKDB:
return False
return True
# Configure logging
log = logging.getLogger(__name__)
def ext_pillar(minion_id,
pillar,
table='pillar',
id_field=None,
field=None,
pillar_key=None):
'''
Collect minion external pillars from a RethinkDB database
Arguments:
* `table`: The RethinkDB table containing external pillar information.
Defaults to ``'pillar'``
* `id_field`: Field in document containing the minion id.
If blank then we assume the table index matches minion ids
* `field`: Specific field in the document used for pillar data, if blank
then the entire document will be used
* `pillar_key`: The salt-master will nest found external pillars under
this key before merging into the minion pillars. If blank, external
pillars will be merged at top level
'''
host = __opts__['rethinkdb.host']
port = __opts__['rethinkdb.port']
database = __opts__['rethinkdb.database']
username = __opts__['rethinkdb.username']
password = __opts__['rethinkdb.password']
log.debug('Connecting to %s:%s as user \'%s\' for RethinkDB ext_pillar',
host, port, username)
# Connect to the database
conn = rethinkdb.connect(host=host,
port=port,
db=database,
user=username,
password=password)
data = None
try:
if id_field:
log.debug('ext_pillar.rethinkdb: looking up pillar. '
'table: %s, field: %s, minion: %s',
table, id_field, minion_id)
if field:
data = rethinkdb.table(table).filter(
{id_field: minion_id}).pluck(field).run(conn)
else:
data = rethinkdb.table(table).filter(
{id_field: minion_id}).run(conn)
else:
log.debug('ext_pillar.rethinkdb: looking up pillar. '
'table: %s, field: id, minion: %s',
table, minion_id)
if field:
data = rethinkdb.table(table).get(minion_id).pluck(field).run(
conn)
else:
data = rethinkdb.table(table).get(minion_id).run(conn)
finally:
if conn.is_open():
conn.close()
if data.items:
# Return nothing if multiple documents are found for a minion
if len(data.items) > 1:
log.error('ext_pillar.rethinkdb: ambiguous documents found for '
'minion %s', minion_id)
return {}
else:
result = data.items.pop()
if pillar_key:
return {pillar_key: result}
return result
else:
# No document found in the database
log.debug('ext_pillar.rethinkdb: no document found')
return {}
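# Illustrative example (an assumed document shape, not taken from the Salt docs): for the
# single-line ext_pillar configuration shown in the module docstring above, a matching
# document in the 'ext_pillar' table would look roughly like:
#
#   {"minion_id": "web01", "pillar_root": {"app": {"listen_port": 8080}}}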
|
py | 1a31a23f86a579c13b6439bbb8164dd5f5021b3e | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_config import cfg
CONF = cfg.CONF
class FilterTests(object):
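    # Mixin providing filter/list test helpers; the concrete test case is expected to
    # supply self.identity_api and self.assignment_api.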
# Provide support for checking if a batch of list items all
# exist within a contiguous range in a total list
def _match_with_list(self, this_batch, total_list,
batch_size=None,
list_start=None, list_end=None):
if batch_size is None:
batch_size = len(this_batch)
if list_start is None:
list_start = 0
if list_end is None:
list_end = len(total_list)
for batch_item in range(0, batch_size):
found = False
for list_item in range(list_start, list_end):
if this_batch[batch_item]['id'] == total_list[list_item]['id']:
found = True
self.assertTrue(found)
def _create_entity(self, entity_type):
f = getattr(self.identity_api, 'create_%s' % entity_type, None)
if f is None:
f = getattr(self.assignment_api, 'create_%s' % entity_type)
return f
def _delete_entity(self, entity_type):
f = getattr(self.identity_api, 'delete_%s' % entity_type, None)
if f is None:
f = getattr(self.assignment_api, 'delete_%s' % entity_type)
return f
def _list_entities(self, entity_type):
f = getattr(self.identity_api, 'list_%ss' % entity_type, None)
if f is None:
f = getattr(self.assignment_api, 'list_%ss' % entity_type)
return f
def _create_one_entity(self, entity_type, domain_id, name):
new_entity = {'name': name,
'domain_id': domain_id}
if entity_type in ['user', 'group']:
# The manager layer creates the ID for users and groups
new_entity = self._create_entity(entity_type)(new_entity)
else:
new_entity['id'] = '0000' + uuid.uuid4().hex
self._create_entity(entity_type)(new_entity['id'], new_entity)
return new_entity
def _create_test_data(self, entity_type, number, domain_id=None,
name_dict=None):
"""Create entity test data
:param entity_type: type of entity to create, e.g. 'user', group' etc.
:param number: number of entities to create,
:param domain_id: if not defined, all users will be created in the
default domain.
:param name_dict: optional dict containing entity number and name pairs
"""
entity_list = []
if domain_id is None:
domain_id = CONF.identity.default_domain_id
name_dict = name_dict or {}
for x in range(number):
# If this index has a name defined in the name_dict, then use it
name = name_dict.get(x, uuid.uuid4().hex)
new_entity = self._create_one_entity(entity_type, domain_id, name)
entity_list.append(new_entity)
return entity_list
def _delete_test_data(self, entity_type, entity_list):
for entity in entity_list:
self._delete_entity(entity_type)(entity['id'])
|
py | 1a31a34a3be78c220b0ec87af77ced209111f14e | import numpy as np
import pickle
import os
import time
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from PIL import Image
class ImageWriter(object):
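    # Writes unnormalized copies of dataset images to <data_dir>/<dataset>_by_id/image-<id>.png
    # so that individual examples can be inspected by image id.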
def __init__(self, data_dir, dataset, unnormalizer):
self.data_dir = data_dir
self.dataset = dataset
self.unnormalizer = unnormalizer
self.init_data()
def init_data(self):
if not os.path.exists(self.data_dir):
os.mkdir(self.data_dir)
self.output_dir = os.path.join(self.data_dir, "{}_by_id".format(self.dataset))
print(self.output_dir)
if not os.path.exists(self.output_dir):
os.mkdir(self.output_dir)
def write_partition(self, partition):
to_pil = torchvision.transforms.ToPILImage()
for elem in partition:
img_tensor = elem[0].cpu()
unnormalized = self.unnormalizer(img_tensor)
img = to_pil(unnormalized)
img_id = elem[2]
img_file = os.path.join(self.output_dir, "image-{}.png".format(img_id))
img.save(img_file, 'PNG')
class ProbabilityByImageLogger(object):
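    # Accumulates, per image id, the selection probabilities, backward/forward selection
    # decisions, and losses seen during training; write() pickles the probabilities and
    # selection decisions under <pickle_dir>/probabilities_by_image/.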
def __init__(self, pickle_dir, pickle_prefix, max_num_images=None):
self.pickle_dir = pickle_dir
self.pickle_prefix = pickle_prefix
self.init_data()
self.max_num_images = max_num_images
self.probabilities = {}
self.backward_selects = {}
self.forward_selects = {}
self.losses = {}
def next_epoch(self):
self.write()
def init_data(self):
        # Set up pickle paths for the per-image probabilities, selects, and losses
data_pickle_dir = os.path.join(self.pickle_dir, "probabilities_by_image")
self.probabilities_pickle_file = os.path.join(data_pickle_dir,
"{}_probabilities".format(self.pickle_prefix))
self.backward_selects_pickle_file = os.path.join(data_pickle_dir,
"{}_selects".format(self.pickle_prefix))
self.forward_selects_pickle_file = os.path.join(data_pickle_dir,
"{}_forwardselects".format(self.pickle_prefix))
self.losses_pickle_file = os.path.join(data_pickle_dir,
"{}_losses".format(self.pickle_prefix))
        # Create the output pickle directory if needed
if not os.path.exists(data_pickle_dir):
os.mkdir(data_pickle_dir)
def update_data(self, image_ids, probabilities, backward_selects, forward_selects, losses):
for image_id, probability in zip(image_ids, probabilities):
if image_id not in self.probabilities.keys():
if self.max_num_images:
if image_id >= self.max_num_images:
continue
self.probabilities[image_id] = []
self.probabilities[image_id].append(probability)
for image_id, is_selected in zip(image_ids, backward_selects):
if image_id not in self.backward_selects.keys():
if self.max_num_images:
if image_id >= self.max_num_images:
continue
self.backward_selects[image_id] = []
self.backward_selects[image_id].append(int(is_selected))
for image_id, is_selected in zip(image_ids, forward_selects):
if image_id not in self.forward_selects.keys():
if self.max_num_images:
if image_id >= self.max_num_images:
continue
self.forward_selects[image_id] = []
self.forward_selects[image_id].append(int(is_selected))
for image_id, loss in zip(image_ids, losses):
if image_id not in self.losses.keys():
if self.max_num_images:
if image_id >= self.max_num_images:
continue
self.losses[image_id] = []
self.losses[image_id].append(loss)
def handle_backward_batch(self, batch):
ids = [em.example.image_id for em in batch]
probabilities = [em.example.get_sp(False) for em in batch]
backward_selects = [em.example.get_select(False) for em in batch]
forward_selects = [em.example.get_select(True) for em in batch]
losses = [em.example.loss for em in batch]
self.update_data(ids, probabilities, backward_selects, forward_selects, losses)
def write(self):
latest_file = "{}.pickle".format(self.probabilities_pickle_file)
with open(latest_file, "wb") as handle:
print(latest_file)
pickle.dump(self.probabilities, handle, protocol=pickle.HIGHEST_PROTOCOL)
latest_file = "{}.pickle".format(self.backward_selects_pickle_file)
with open(latest_file, "wb") as handle:
print(latest_file)
pickle.dump(self.backward_selects, handle, protocol=pickle.HIGHEST_PROTOCOL)
latest_file = "{}.pickle".format(self.forward_selects_pickle_file)
with open(latest_file, "wb") as handle:
print(latest_file)
pickle.dump(self.forward_selects, handle, protocol=pickle.HIGHEST_PROTOCOL)
class ImageIdHistLogger(object):
def __init__(self, pickle_dir, pickle_prefix, num_images, log_interval):
self.current_epoch = 0
self.pickle_dir = pickle_dir
self.pickle_prefix = pickle_prefix
self.log_interval = log_interval
self.init_data(num_images)
def next_epoch(self):
self.write()
self.current_epoch += 1
def init_data(self, num_images):
# Store frequency of each image getting backpropped
keys = range(num_images)
self.data = dict(zip(keys, [0] * len(keys)))
data_pickle_dir = os.path.join(self.pickle_dir, "image_id_hist")
self.data_pickle_file = os.path.join(data_pickle_dir,
"{}_images_hist".format(self.pickle_prefix))
# Make images hist pickle path
if not os.path.exists(data_pickle_dir):
os.mkdir(data_pickle_dir)
def update_data(self, image_ids):
for chosen_id in image_ids:
self.data[chosen_id] += 1
def handle_backward_batch(self, batch):
ids = [em.example.image_id.item() for em in batch if em.example.select]
self.update_data(ids)
def write(self):
latest_file = "{}.pickle".format(self.data_pickle_file)
with open(latest_file, "wb") as handle:
print(latest_file)
pickle.dump(self.data, handle, protocol=pickle.HIGHEST_PROTOCOL)
epoch_file = "{}.epoch_{}.pickle".format(self.data_pickle_file,
self.current_epoch)
if self.current_epoch % self.log_interval == 0:
with open(epoch_file, "wb") as handle:
print(epoch_file)
pickle.dump(self.data, handle, protocol=pickle.HIGHEST_PROTOCOL)
class LossesByEpochLogger(object):
def __init__(self, pickle_dir, pickle_prefix, log_frequency):
self.current_epoch = 0
self.pickle_dir = pickle_dir
self.log_frequency = log_frequency
self.pickle_prefix = pickle_prefix
self.init_data()
def next_epoch(self):
self.write()
self.current_epoch += 1
self.data = []
def init_data(self):
        # Store the losses observed during the current epoch
self.data = []
data_pickle_dir = os.path.join(self.pickle_dir, "losses")
self.data_pickle_file = os.path.join(data_pickle_dir,
"{}_losses".format(self.pickle_prefix))
        # Create the output pickle directory if needed
if not os.path.exists(data_pickle_dir):
os.mkdir(data_pickle_dir)
def update_data(self, losses):
self.data += losses
def handle_backward_batch(self, batch):
losses = [em.example.loss.item() for em in batch]
self.update_data(losses)
def write(self):
epoch_file = "{}.epoch_{}.pickle".format(self.data_pickle_file,
self.current_epoch)
if self.current_epoch % self.log_frequency == 0:
with open(epoch_file, "wb") as handle:
print(epoch_file)
pickle.dump(self.data, handle, protocol=pickle.HIGHEST_PROTOCOL)
class LossesByImageLogger(object):
def __init__(self, pickle_dir, pickle_prefix, max_num_images=None):
self.pickle_dir = pickle_dir
self.pickle_prefix = pickle_prefix
self.init_data()
self.max_num_images = max_num_images
self.data = {}
def next_epoch(self):
self.write()
def init_data(self):
        # Store the losses observed for each image
data_pickle_dir = os.path.join(self.pickle_dir, "losses_by_image")
self.data_pickle_file = os.path.join(data_pickle_dir,
"{}_losses".format(self.pickle_prefix))
        # Create the output pickle directory if needed
if not os.path.exists(data_pickle_dir):
os.mkdir(data_pickle_dir)
def update_data(self, image_ids, losses):
for image_id, loss in zip(image_ids, losses):
if image_id not in self.data.keys():
if self.max_num_images:
if image_id >= self.max_num_images:
continue
self.data[image_id] = []
self.data[image_id].append(loss)
def handle_backward_batch(self, batch):
ids = [em.example.image_id for em in batch]
losses = [em.example.loss for em in batch]
self.update_data(ids, losses)
def write(self):
latest_file = "{}.pickle".format(self.data_pickle_file)
with open(latest_file, "wb") as handle:
print(latest_file)
pickle.dump(self.data, handle, protocol=pickle.HIGHEST_PROTOCOL)
class VariancesByImageLogger(object):
def __init__(self, pickle_dir, pickle_prefix, max_num_images=None):
self.pickle_dir = pickle_dir
self.pickle_prefix = pickle_prefix
self.init_data()
self.max_num_images = max_num_images
self.data = {}
def next_epoch(self):
self.write()
def init_data(self):
        # Store the losses observed for each image (variances are computed on write)
data_pickle_dir = os.path.join(self.pickle_dir, "variance_by_image")
self.data_pickle_file = os.path.join(data_pickle_dir,
"{}_variances".format(self.pickle_prefix))
        # Create the output pickle directory if needed
if not os.path.exists(data_pickle_dir):
os.mkdir(data_pickle_dir)
def update_data(self, image_ids, losses):
for image_id, loss in zip(image_ids, losses):
if image_id not in self.data.keys():
if self.max_num_images:
if image_id >= self.max_num_images:
continue
self.data[image_id] = []
self.data[image_id].append(loss)
def handle_backward_batch(self, batch):
ids = [em.example.image_id for em in batch]
losses = [em.example.loss for em in batch]
self.update_data(ids, losses)
def write(self):
variance = {}
for image_id in self.data.keys():
variance[image_id] = np.var(self.data[image_id])
latest_file = "{}.pickle".format(self.data_pickle_file)
with open(latest_file, "wb") as handle:
print(latest_file)
pickle.dump(variance, handle, protocol=pickle.HIGHEST_PROTOCOL)
class VariancesByEpochLogger(object):
def __init__(self, pickle_dir, pickle_prefix, log_frequency):
self.current_epoch = 0
self.pickle_dir = pickle_dir
self.log_frequency = log_frequency
self.pickle_prefix = pickle_prefix
self.init_data()
def next_epoch(self):
self.write()
self.current_epoch += 1
self.data = []
def init_data(self):
        # Store the per-batch loss variances observed during the current epoch
self.data = []
data_pickle_dir = os.path.join(self.pickle_dir, "variance_by_epoch")
self.data_pickle_file = os.path.join(data_pickle_dir,
"{}_variances".format(self.pickle_prefix))
        # Create the output pickle directory if needed
if not os.path.exists(data_pickle_dir):
os.mkdir(data_pickle_dir)
def update_data(self, variance):
self.data += [variance]
def handle_backward_batch(self, batch):
losses = [em.example.loss.item() for em in batch]
variance = np.var(losses)
self.update_data(variance)
def write(self):
epoch_file = "{}.epoch_{}.pickle".format(self.data_pickle_file,
self.current_epoch)
if self.current_epoch % self.log_frequency == 0:
with open(epoch_file, "wb") as handle:
print(epoch_file)
pickle.dump(self.data, handle, protocol=pickle.HIGHEST_PROTOCOL)
class VariancesByAverageProbabilityByImageLogger(object):
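    # Tracks every loss and selection probability per image id and, on write(), pickles
    # {image_id: (average selection probability, loss variance)} pairs.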
def __init__(self, pickle_dir, pickle_prefix, max_num_images=None):
self.pickle_dir = pickle_dir
self.pickle_prefix = pickle_prefix
self.init_data()
self.max_num_images = max_num_images
self.data = {"losses": {}, "probabilities": {}}
def next_epoch(self):
self.write()
def init_data(self):
        # Store the losses and selection probabilities observed for each image
data_pickle_dir = os.path.join(self.pickle_dir, "variance_by_avg_prob")
self.data_pickle_file = os.path.join(data_pickle_dir,
"{}_variances".format(self.pickle_prefix))
        # Create the output pickle directory if needed
if not os.path.exists(data_pickle_dir):
os.mkdir(data_pickle_dir)
def update_data(self, image_ids, probabilities, losses):
for image_id, prob, loss in zip(image_ids, probabilities, losses):
if image_id not in self.data["losses"].keys():
if self.max_num_images:
if image_id >= self.max_num_images:
continue
self.data["losses"][image_id] = []
self.data["probabilities"][image_id] = []
self.data["losses"][image_id].append(loss)
self.data["probabilities"][image_id].append(prob)
def handle_backward_batch(self, batch):
ids = [em.example.image_id for em in batch]
losses = [em.example.loss for em in batch]
probabilities = [em.example.select_probability for em in batch]
self.update_data(ids, probabilities, losses)
def write(self):
out = {}
for image_id in self.data["losses"].keys():
var = np.var(self.data["losses"][image_id])
avg_prob = np.average(self.data["probabilities"][image_id])
out[image_id] = (avg_prob, var)
latest_file = "{}.pickle".format(self.data_pickle_file)
with open(latest_file, "wb") as handle:
print(latest_file)
pickle.dump(out, handle, protocol=pickle.HIGHEST_PROTOCOL)
class Logger(object):
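    # Aggregates global and per-partition counters (examples backpropped, skipped, and
    # forwarded) and prints a 'train_debug' line every log_interval backward batches.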
def __init__(self, log_interval=1, epoch=0, num_backpropped=0, num_skipped=0, num_skipped_fp=0, num_forwards=0, start_time_seconds=None):
self.current_epoch = epoch
self.current_batch = 0
self.log_interval = log_interval
self.global_num_backpropped = num_backpropped
self.global_num_skipped = num_skipped
self.global_num_skipped_fp = num_skipped_fp
        self.global_num_forwards = num_forwards
self.partition_loss = 0
self.partition_backpropped_loss = 0
self.partition_num_backpropped = 0
self.partition_num_skipped = 0
self.partition_num_correct = 0
self.debug = False
if start_time_seconds is None:
self.start_time_seconds = time.time()
else:
self.start_time_seconds = start_time_seconds
def next_epoch(self):
self.current_epoch += 1
@property
def partition_seen(self):
return self.partition_num_backpropped + self.partition_num_skipped
@property
def average_partition_loss(self):
return self.partition_loss / float(self.partition_seen)
@property
def average_partition_backpropped_loss(self):
return self.partition_backpropped_loss / float(self.partition_num_backpropped)
@property
def partition_accuracy(self):
return 100. * self.partition_num_correct / self.partition_seen
@property
def train_debug(self):
return 'train_debug,{},{},{},{},{:.6f},{},{:.6f},{:4f}'.format(
self.current_epoch,
self.global_num_backpropped,
self.global_num_skipped,
self.global_num_skipped_fp,
self.average_partition_backpropped_loss,
self.global_num_forwards,
self.partition_accuracy,
time.time() - self.start_time_seconds)
def next_partition(self):
self.partition_loss = 0
self.partition_backpropped_loss = 0
self.partition_num_backpropped = 0
self.partition_num_skipped = 0
self.partition_num_correct = 0
def handle_forward_batch(self, batch):
# Populate batch_stats
        # self.partition_loss += sum([em.example.loss for em in batch])
num_skipped_fp = sum([int(not em.example.forward_select) for em in batch])
self.global_num_skipped_fp += num_skipped_fp
self.global_num_forwards += sum([int(em.example.forward_select) for em in batch])
def handle_backward_batch(self, batch):
self.current_batch += 1
num_backpropped = sum([int(em.example.select) for em in batch])
num_skipped = sum([int(not em.example.select) for em in batch])
self.global_num_backpropped += num_backpropped
self.global_num_skipped += num_skipped
if self.debug:
self.partition_num_backpropped += num_backpropped
self.partition_num_skipped += num_skipped
self.partition_backpropped_loss += sum([em.example.backpropped_loss
for em in batch
if em.example.backpropped_loss])
chosen = [em for em in batch if em.example.select]
self.partition_num_correct += sum([1 for em in chosen if em.example.correct])
self.write()
def write(self):
if self.current_batch % self.log_interval == 0:
print(self.train_debug)
|
py | 1a31a43149f6200eacb469f6da5159ed00aa95a2 | from bolinette import types, data
from bolinette.data import ext, mapping
from bolinette.data.defaults.entities import Role
@ext.model("role")
class RoleModel(data.Model[Role]):
id = types.defs.Column(types.db.Integer, primary_key=True)
name = types.defs.Column(
types.db.String, unique=True, nullable=False, entity_key=True
)
def payloads(self):
yield [mapping.Column(self.name, required=True)]
def responses(self):
yield [mapping.Column(self.name)]
yield "complete", [
mapping.Column(self.name),
mapping.List(mapping.Definition("user"), key="users"),
]
|
py | 1a31a535e1341605c1e381ab96a134023e21db77 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class FetchOptions(Package):
"""Mock package with fetch_options."""
homepage = "http://www.fetch-options-example.com"
url = 'https://example.com/some/tarball-1.0.tar.gz'
fetch_options = {'timeout': 42, 'cookie': 'foobar'}
timeout = {'timeout': 65}
cookie = {'cookie': 'baz'}
version('1.2', 'abc12', fetch_options=cookie)
version('1.1', 'abc11', fetch_options=timeout)
version('1.0', 'abc10')
|
py | 1a31a6eebd02a1e312035417b22205fd65a4de08 | import Doberman
class ValveControlNode(Doberman.ControlNode):
"""
A logic node to control a nitrogen level valve, based on a levelmeter and a control valve,
with optional inhibits from a vacuum or a scale
"""
def process(self, package):
liquid_level = package['liquid_level']
fill_rate = package['liquid_level_rate']
valve_status = package['valve_state']
low_level = self.config['liquid_level_low']
high_level = self.config['liquid_level_high']
min_fill_rate = self.config['min_fill_rate']
max_fill_time = self.config['max_fill_time']
max_iso_vac = self.config.get('max_iso_vac', -1)
min_scale = self.config.get('min_scale', -1)
vac_is_good = max_iso_vac == -1 or package.get('iso_vac_pressure', 0) < max_iso_vac
scale_is_good = min_scale == -1 or package.get('scale_weight', 0) < min_scale
some_value = 10000 # FIXME
if liquid_level < low_level:
if valve_status == 0:
# valve is closed, level is too low
if vac_is_good and scale_is_good:
# open the valve
self.set_output(1)
self.valve_opened = package['time']
self.logger.info('Scheduling valve opening')
else:
self.logger.info('Would love to open the valve but either the scale or vac is out of range')
else:
# valve is open, check to see for how long
if hasattr(self, 'valve_opened'):
if (dt := (package['time']-self.valve_opened)) > some_value:
# filling too slowly! Something fishy
# TODO something reasonable
pass
else:
# probably still waiting for the pipes to chill
pass
else:
# we don't have a self.valve_opened, valve was probably opened by something else
# TODO how to handle?
pass
elif low_level < liquid_level < high_level:
if valve_status == 1:
if hasattr(self, 'valve_opened'):
if (dt := (package['time']-self.valve_opened)) > max_fill_time:
# filling too long!
# TODO something reasonable
self.logger.critical(f'Valve has been open for {dt/60:.1f} minutes without reaching full, something wrong?')
else:
if fill_rate < min_fill_rate and dt > some_value:
# filling too slowly! Something fishy
# TODO something reasonable
pass
else:
fill_pct = (liquid_level - low_level)/(high_level - low_level)
self.logger.debug(f'Valve has been open for {int(dt//60)}m{int(dt%60)}s, filling at {fill_rate:.1f} ({fill_pct:.1f}%)')
else:
# we don't have a self.valve_opened, valve was probably opened by something else
# TODO how to handle?
pass
else:
# valve is closed, we're in "normal" conditions
pass
else:
# liquid level > high
if valve_status == 1:
# reached FULL
self.set_output(0)
self.logger.info('Scheduling valve closing')
else:
# valve is closed
pass
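# A rough sketch of the config keys this node reads (the values below are made-up
# placeholders, not recommendations):
#
#   {
#       "liquid_level_low": 20, "liquid_level_high": 80,   # levelmeter units
#       "min_fill_rate": 0.1, "max_fill_time": 1800,       # fill rate and seconds
#       "max_iso_vac": 1e-4, "min_scale": 50,              # optional inhibits; -1 disables
#   }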
|
py | 1a31a8f5863a86eb8d319300f7a27f54d199a016 | #
# Copyright (c) 2018 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
from functools import partial
import attr
from plugincode.scan import ScanPlugin
from plugincode.scan import scan_impl
from scancode import CommandLineOption
from scancode import MISC_GROUP
from scancode import SCAN_OPTIONS_GROUP
from scancode import SCAN_GROUP
from scancode.api import DEJACODE_LICENSE_URL
def reindex_licenses(ctx, param, value):
if not value or ctx.resilient_parsing:
return
# TODO: check for temp file configuration and use that for the cache!!!
from licensedcode.cache import get_cached_index
import click
click.echo('Checking and rebuilding the license index...')
get_cached_index(check_consistency=True,)
click.echo('Done.')
ctx.exit(0)
@scan_impl
class LicenseScanner(ScanPlugin):
"""
Scan a Resource for licenses.
"""
resource_attributes = OrderedDict([
('licenses', attr.ib(default=attr.Factory(list))),
('license_expressions', attr.ib(default=attr.Factory(list))),
])
sort_order = 2
options = [
CommandLineOption(('-l', '--license'),
is_flag=True,
help='Scan <input> for licenses.',
help_group=SCAN_GROUP,
sort_order=10),
CommandLineOption(('--license-score',),
type=int, default=0, show_default=True,
required_options=['license'],
help='Do not return license matches with a score lower than this score. '
'A number between 0 and 100.',
help_group=SCAN_OPTIONS_GROUP),
CommandLineOption(('--license-text',),
is_flag=True,
required_options=['license'],
help='Include the detected licenses matched text.',
help_group=SCAN_OPTIONS_GROUP),
CommandLineOption(('--license-text-diagnostics',),
is_flag=True,
required_options=['license_text'],
help='In the matched license text, include diagnostic highlights '
'surrounding with square brackets [] words that are not matched.',
help_group=SCAN_OPTIONS_GROUP),
CommandLineOption(('--license-url-template',),
default=DEJACODE_LICENSE_URL, show_default=True,
required_options=['license'],
help='Set the template URL used for the license reference URLs. '
'Curly braces ({}) are replaced by the license key.',
help_group=SCAN_OPTIONS_GROUP),
CommandLineOption(('--license-diag',),
# not yet supported in Click 6.7 but added in CommandLineOption
hidden=True,
is_flag=True,
required_options=['license'],
help='(DEPRECATED: this is always included by default now). '
'Include diagnostic information in license scan results.',
help_group=SCAN_OPTIONS_GROUP),
CommandLineOption(
('--reindex-licenses',),
is_flag=True, is_eager=True,
callback=reindex_licenses,
help='Check the license index cache and reindex if needed and exit.',
help_group=MISC_GROUP)
]
def is_enabled(self, license, **kwargs): # NOQA
return license
def setup(self, **kwargs):
"""
This is a cache warmup such that child process inherit from this.
"""
from licensedcode.cache import get_index
get_index(return_value=False)
def get_scanner(self, license_score=0, license_text=False,
license_text_diagnostics=False,
license_url_template=DEJACODE_LICENSE_URL,
**kwargs):
from scancode.api import get_licenses
return partial(get_licenses,
min_score=license_score,
include_text=license_text,
license_text_diagnostics=license_text_diagnostics,
license_url_template=license_url_template
)
|
py | 1a31a9b32e0cb4abac221f438542a0fd55e55081 | import os
import sys
# Try to mute and then load TensorFlow and Keras
# Muting seems to not work lately on Linux in any way
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
stdin = sys.stdin
sys.stdin = open(os.devnull, 'w')
stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
from keras.callbacks import Callback
sys.stdin = stdin
sys.stderr = stderr
# Custom TensorBoard class that lets us keep a single writer across multiple .fit() calls
# It also allows us to easily log additional data
# Dramatically decreases the amount of data saved into TensorBoard logs and the write time (it appends to one file)
class TensorBoard(Callback):
# Set initial step and writer (we want one log file for all .fit() calls)
def __init__(self, log_dir):
self.step = 1
self.log_dir = log_dir
self.writer = tf.summary.FileWriter(self.log_dir)
# Saves logs with our step number (otherwise every .fit() will start writing from 0th step)
def on_epoch_end(self, epoch, logs=None):
self.update_stats(self.step, **logs)
# Custom method for saving own (and also internal) metrics (can be called externally)
def update_stats(self, step, **stats):
self._write_logs(stats, step)
# More or less the same writer as in Keras' Tensorboard callback
# Physically writes to the log files
def _write_logs(self, logs, index):
for name, value in logs.items():
if name in ['batch', 'size']:
continue
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
summary_value.tag = name
self.writer.add_summary(summary, index)
self.writer.flush()
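# A minimal usage sketch (illustrative; assumes a compiled Keras `model` and training
# arrays `x` and `y` already exist, and the log directory name is arbitrary):
#
#   tensorboard = TensorBoard(log_dir="logs/my-run")
#   for episode in range(10):
#       model.fit(x, y, verbose=0, callbacks=[tensorboard])
#       tensorboard.update_stats(tensorboard.step, reward_avg=0.0)  # extra custom metrics
#       tensorboard.step += 1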
|
py | 1a31a9d946b5cb3f9d1ec0a83e14532bcc908c0d | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import OrderNotFound
class bitflyer (Exchange):
def describe(self):
return self.deep_extend(super(bitflyer, self).describe(), {
'id': 'bitflyer',
'name': 'bitFlyer',
'countries': 'JP',
'version': 'v1',
'rateLimit': 1000, # their nonce-timestamp is in seconds...
'has': {
'CORS': False,
'withdraw': True,
'fetchMyTrades': True,
'fetchOrders': True,
'fetchOrder': True,
'fetchOpenOrders': 'emulated',
'fetchClosedOrders': 'emulated',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/28051642-56154182-660e-11e7-9b0d-6042d1e6edd8.jpg',
'api': 'https://api.bitflyer.jp',
'www': 'https://bitflyer.jp',
'doc': 'https://bitflyer.jp/API',
},
'api': {
'public': {
'get': [
'getmarkets/usa', # new(wip)
'getmarkets/eu', # new(wip)
'getmarkets', # or 'markets'
'getboard', # ...
'getticker',
'getexecutions',
'gethealth',
'getchats',
],
},
'private': {
'get': [
'getpermissions',
'getbalance',
'getcollateral',
'getcollateralaccounts',
'getaddresses',
'getcoinins',
'getcoinouts',
'getbankaccounts',
'getdeposits',
'getwithdrawals',
'getchildorders',
'getparentorders',
'getparentorder',
'getexecutions',
'getpositions',
'gettradingcommission',
],
'post': [
'sendcoin',
'withdraw',
'sendchildorder',
'cancelchildorder',
'sendparentorder',
'cancelparentorder',
'cancelallchildorders',
],
},
},
'fees': {
'trading': {
'maker': 0.25 / 100,
'taker': 0.25 / 100,
},
},
})
async def fetch_markets(self):
jp_markets = await self.publicGetGetmarkets()
us_markets = await self.publicGetGetmarketsUsa()
eu_markets = await self.publicGetGetmarketsEu()
markets = self.array_concat(jp_markets, us_markets)
markets = self.array_concat(markets, eu_markets)
result = []
for p in range(0, len(markets)):
market = markets[p]
id = market['product_code']
currencies = id.split('_')
base = None
quote = None
symbol = id
numCurrencies = len(currencies)
if numCurrencies == 1:
base = symbol[0:3]
quote = symbol[3:6]
elif numCurrencies == 2:
base = currencies[0]
quote = currencies[1]
symbol = base + '/' + quote
else:
base = currencies[1]
quote = currencies[2]
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privateGetGetbalance()
balances = {}
for b in range(0, len(response)):
account = response[b]
currency = account['currency_code']
balances[currency] = account
result = {'info': response}
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
currency = currencies[i]
account = self.account()
if currency in balances:
account['total'] = balances[currency]['amount']
account['free'] = balances[currency]['available']
account['used'] = account['total'] - account['free']
result[currency] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
orderbook = await self.publicGetGetboard(self.extend({
'product_code': self.market_id(symbol),
}, params))
return self.parse_order_book(orderbook, None, 'bids', 'asks', 'price', 'size')
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
ticker = await self.publicGetGetticker(self.extend({
'product_code': self.market_id(symbol),
}, params))
timestamp = self.parse8601(ticker['timestamp'])
last = float(ticker['ltp'])
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': float(ticker['best_bid']),
'bidVolume': None,
'ask': float(ticker['best_ask']),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': float(ticker['volume_by_product']),
'quoteVolume': None,
'info': ticker,
}
def parse_trade(self, trade, market=None):
side = None
order = None
if 'side' in trade:
if trade['side']:
side = trade['side'].lower()
id = side + '_child_order_acceptance_id'
if id in trade:
order = trade[id]
timestamp = self.parse8601(trade['exec_date'])
return {
'id': str(trade['id']),
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'order': order,
'type': None,
'side': side,
'price': trade['price'],
'amount': trade['size'],
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetGetexecutions(self.extend({
'product_code': market['id'],
}, params))
return self.parse_trades(response, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
order = {
'product_code': self.market_id(symbol),
'child_order_type': type.upper(),
'side': side.upper(),
'price': price,
'size': amount,
}
result = await self.privatePostSendchildorder(self.extend(order, params))
# {"status": - 200, "error_message": "Insufficient funds", "data": null}
return {
'info': result,
'id': result['child_order_acceptance_id'],
}
async def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ExchangeError(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
return await self.privatePostCancelchildorder(self.extend({
'product_code': self.market_id(symbol),
'child_order_acceptance_id': id,
}, params))
def parse_order_status(self, status):
statuses = {
'ACTIVE': 'open',
'COMPLETED': 'closed',
'CANCELED': 'canceled',
'EXPIRED': 'canceled',
'REJECTED': 'canceled',
}
if status in statuses:
return statuses[status]
return status.lower()
def parse_order(self, order, market=None):
timestamp = self.parse8601(order['child_order_date'])
amount = self.safe_float(order, 'size')
remaining = self.safe_float(order, 'outstanding_size')
filled = self.safe_float(order, 'executed_size')
price = self.safe_float(order, 'price')
cost = price * filled
status = self.parse_order_status(order['child_order_state'])
type = order['child_order_type'].lower()
side = order['side'].lower()
symbol = None
if market is None:
marketId = self.safe_string(order, 'product_code')
if marketId is not None:
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if market is not None:
symbol = market['symbol']
fee = None
feeCost = self.safe_float(order, 'total_commission')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': None,
'rate': None,
}
return {
'id': order['child_order_acceptance_id'],
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'fee': fee,
}
async def fetch_orders(self, symbol=None, since=None, limit=100, params={}):
if symbol is None:
raise ExchangeError(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'product_code': market['id'],
'count': limit,
}
response = await self.privateGetGetchildorders(self.extend(request, params))
orders = self.parse_orders(response, market, since, limit)
if symbol:
orders = self.filter_by(orders, 'symbol', symbol)
return orders
async def fetch_open_orders(self, symbol=None, since=None, limit=100, params={}):
params['child_order_state'] = 'ACTIVE'
return self.fetch_orders(symbol, since, limit, params)
async def fetch_closed_orders(self, symbol=None, since=None, limit=100, params={}):
params['child_order_state'] = 'COMPLETED'
return self.fetch_orders(symbol, since, limit, params)
async def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ExchangeError(self.id + ' fetchOrder() requires a symbol argument')
orders = await self.fetch_orders(symbol)
ordersById = self.index_by(orders, 'id')
if id in ordersById:
return ordersById[id]
raise OrderNotFound(self.id + ' No order found with id ' + id)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ExchangeError(self.id + ' fetchMyTrades requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'product_code': market['id'],
}
if limit:
request['count'] = limit
response = await self.privateGetGetexecutions(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
if code != 'JPY' and code != 'USD' and code != 'EUR':
raise ExchangeError(self.id + ' allows withdrawing JPY, USD, EUR only, ' + code + ' is not supported')
currency = self.currency(code)
response = await self.privatePostWithdraw(self.extend({
'currency_code': currency['id'],
'amount': amount,
# 'bank_account_id': 1234,
}, params))
return {
'info': response,
'id': response['message_id'],
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
request = '/' + self.version + '/'
if api == 'private':
request += 'me/'
request += path
if method == 'GET':
if params:
request += '?' + self.urlencode(params)
url = self.urls['api'] + request
if api == 'private':
self.check_required_credentials()
nonce = str(self.nonce())
auth = ''.join([nonce, method, request])
if params:
if method != 'GET':
body = self.json(params)
auth += body
headers = {
'ACCESS-KEY': self.apiKey,
'ACCESS-TIMESTAMP': nonce,
'ACCESS-SIGN': self.hmac(self.encode(auth), self.encode(self.secret)),
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
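# A minimal usage sketch (illustrative only; it uses the public API, so no credentials
# are needed, but it does require network access):
#
#   import asyncio
#
#   async def main():
#       exchange = bitflyer()
#       try:
#           print(await exchange.fetch_ticker('BTC/JPY'))
#       finally:
#           await exchange.close()
#
#   asyncio.get_event_loop().run_until_complete(main())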
|
py | 1a31a9e8940eb34657c13f5375fb8bf54e7feb35 | def longestPalindrome(s: str) -> str:
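    # Brute force with an early cutoff: for each start index, only candidate substrings
    # longer than the best palindrome found so far are tested with isPalin (O(n^3) worst case).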
max_length = 0
start = 0
for i in range(len(s)):
for j in range(i + max_length, len(s)):
length = j - i + 1
if i + length > len(s): break
if length > max_length and isPalin(s, i, j + 1):
start = i
max_length = length
return s[start:start+max_length] if max_length < len(s) else s
def isPalin(s: str, start: int, end: int) -> bool:
for i in range(int((end - start) / 2)):
if s[start + i] != s[end-i-1]:
return False
return True |
py | 1a31ab3b885b75aac7644ba03fc82b4ef1ffca1c | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=unused-import
from unittest import TestCase
class TestImport(TestCase):
def test_import_init(self):
"""
Test that the metrics root module has the right symbols
"""
try:
from opentelemetry.sdk._metrics import ( # noqa: F401
Counter,
Histogram,
Meter,
MeterProvider,
ObservableCounter,
ObservableGauge,
ObservableUpDownCounter,
UpDownCounter,
)
except Exception as error:
self.fail(f"Unexpected error {error} was raised")
def test_import_export(self):
"""
Test that the metrics export module has the right symbols
"""
try:
from opentelemetry.sdk._metrics.export import ( # noqa: F401
AggregationTemporality,
ConsoleMetricExporter,
Gauge,
Histogram,
InMemoryMetricReader,
Metric,
MetricExporter,
MetricExportResult,
MetricReader,
PeriodicExportingMetricReader,
PointT,
Sum,
)
except Exception as error:
self.fail(f"Unexpected error {error} was raised")
def test_import_view(self):
"""
Test that the metrics view module has the right symbols
"""
try:
from opentelemetry.sdk._metrics.view import ( # noqa: F401
Aggregation,
DefaultAggregation,
DropAggregation,
ExplicitBucketHistogramAggregation,
LastValueAggregation,
SumAggregation,
View,
)
except Exception as error:
self.fail(f"Unexpected error {error} was raised")
|
py | 1a31ac36c4315bafda84635c0d40733b3653e435 | """\
Lisp generator functions for wxCheckBox objects
@copyright: 2002-2004 D. H. aka crazyinsomniac on sourceforge
@copyright: 2014-2016 Carsten Grohmann
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
from . import checkbox_base
import common
import wcodegen
class LispCheckBoxGenerator(wcodegen.LispWidgetCodeWriter,
checkbox_base.CheckBoxMixin):
tmpl = '(setf %(name)s (%(klass)s_Create %(parent)s %(id)s %(label)s -1 -1 -1 -1 %(style)s))\n'
tmpl_set3statevalue = '(%(klass)s_Set3StateValue %(name)s %(value_3state)s)\n'
def _prepare_tmpl_content(self, obj):
super(LispCheckBoxGenerator, self)._prepare_tmpl_content(obj)
self._prepare_checkbox_content(obj)
def get_code(self, obj):
init_lines, prop_lines, layout_lines = \
super(LispCheckBoxGenerator, self).get_code(obj)
self._get_checkbox_code(prop_lines)
return init_lines, prop_lines, layout_lines
def initialize():
klass = 'wxCheckBox'
common.class_names['EditCheckBox'] = klass
common.register('lisp', klass, LispCheckBoxGenerator(klass))
|
py | 1a31ac600fb9beab9128ff9bfe927647712167c8 |
from south.db import db
from django.db import models
from apps.feed_import.models import *
class Migration:
def forwards(self, orm):
"Write your forwards migration here"
def backwards(self, orm):
"Write your backwards migration here"
models = {
}
complete_apps = ['feed_import']
|
py | 1a31ac9293aba43a3465b09260ebc81b6d7dc1df | from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
setup(
name='monitor',
version='0.1.0',
description='Monitor component of BIGSEA Asperathos framework',
url='',
author='Igor Natanael, Roberto Nascimento Jr.',
author_email='',
license='Apache 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: Apache 2.0',
'Programming Language :: Python :: 2.7',
],
keywords='webservice monitoring monasca asperathos bigsea',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=['flask'],
entry_points={
'console_scripts': [
'monitor=monitor.cli.main:main',
],
},
)
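# A minimal sketch of how this package would typically be installed and launched
# (illustrative; assumes the sources are in the current directory):
#
#   pip install .
#   monitor        # console script defined by the entry_points above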
|
py | 1a31ad8689536b3767bb3196f3a59baea10b39f3 | from app.deckbuilder_app import run
if __name__ == "__main__":
run()
|
py | 1a31ad936898c95d031774017397df80fd0dd673 | import unittest
import os, sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
import autosar
### BEGIN TEST DATA
def apply_test_data(ws):
package=ws.createPackage("DataType", role="DataType")
package.createSubPackage("DataTypeSemantics", role="CompuMethod")
package.createSubPackage("DataTypeUnits", role="Unit")
package.createBooleanDataType('Boolean')
package.createIntegerDataType('SInt8', -128, 127)
package.createIntegerDataType('SInt16', -32768, 32767)
package.createIntegerDataType('SInt32', -2147483648, 2147483647)
package.createIntegerDataType('UInt8', 0, 255)
package.createIntegerDataType('UInt16', 0, 65535)
package.createIntegerDataType('UInt32', 0, 4294967295)
package.createRealDataType('Float', None, None, minValType='INFINITE', maxValType='INFINITE')
package.createRealDataType('Double', None, None, minValType='INFINITE', maxValType='INFINITE', hasNaN=True, encoding='DOUBLE')
package.createIntegerDataType('ButtonStatus_T', valueTable=['ButtonStatus_Released','ButtonStatus_Pressed','ButtonStatus_Error','ButtonStatus_NotAvailable'])
valueTableList = [
'VehicleModeInternal_Off',
'VehicleModeInternal_Accessory',
'VehicleModeInternal_Run',
'VehicleModeInternal_Crank',
'VehicleModeInternal_Spare1',
'VehicleModeInternal_Spare2',
'VehicleModeInternal_Error',
'VehicleModeInternal_NotAvailable'
]
package.createIntegerDataType('VehicleModeInternal_T', valueTable=valueTableList)
package.createIntegerDataType('BspApi_DigitalId_T', 0, 255, offset=0, scaling=1/1, forceFloatScaling=True, unit='Id')
package.createIntegerDataType('BspApi_DigitalState_T', valueTable=['BspApi_DigitalState_Inactive','BspApi_DigitalState_Active','BspApi_DigitalState_Error','BspApi_DigitalState_NotAvailable'])
package=ws.createPackage("Constant", role="Constant")
package.createConstant('ButtonStatus_IV', 'ButtonStatus_T', 3)
package.createConstant('VehicleModeInternal_IV', 'VehicleModeInternal_T', 7)
package=ws.createPackage("PortInterface", role="PortInterface")
package.createSenderReceiverInterface("EcuM_CurrentMode", modeGroups=autosar.ModeGroup("currentMode", "/ModeDclrGroup/EcuM_Mode"), isService=True, adminData={"SDG_GID": "edve:BSWM", "SD": "EcuM"})
package.createSenderReceiverInterface("ButtonStatus_I", autosar.DataElement('ButtonStatus', 'ButtonStatus_T'))
package.createSenderReceiverInterface("VehicleModeInternal_I", autosar.DataElement('VehicleModeInternal', 'VehicleModeInternal_T'))
portInterface=package.createClientServerInterface("BspApi_I", ["GetDiscreteInput", "SetDiscreteOutput"], autosar.ApplicationError("E_NOT_OK", 1), isService=True)
portInterface["GetDiscreteInput"].createInArgument("inputId", "BspApi_DigitalId_T")
portInterface["GetDiscreteInput"].createOutArgument("inputValue", "BspApi_DigitalState_T")
portInterface["SetDiscreteOutput"].createInArgument("outputId", "BspApi_DigitalId_T")
portInterface["SetDiscreteOutput"].createInArgument("outputValue", "BspApi_DigitalState_T")
portInterface["SetDiscreteOutput"].possibleErrors = "E_NOT_OK"
package=ws.createPackage("ModeDclrGroup", role="ModeDclrGroup")
package.createModeDeclarationGroup("EcuM_Mode", ["POST_RUN", "RUN", "SHUTDOWN", "SLEEP", "STARTUP", "WAKE_SLEEP"], "STARTUP", adminData={"SDG_GID": "edve:BSWM", "SD": "EcuM"})
package=ws.createPackage("ComponentType", role="ComponentType")
swc = package.createApplicationSoftwareComponent('SteeringWheelButtonReader')
swc.createProvidePort('SWS_PushButtonStatus_Back', 'ButtonStatus_I', initValueRef='ButtonStatus_IV')
swc.createProvidePort('SWS_PushButtonStatus_Down', 'ButtonStatus_I', initValueRef='ButtonStatus_IV')
swc.createProvidePort('SWS_PushButtonStatus_Enter', 'ButtonStatus_I', initValueRef='ButtonStatus_IV')
swc.createProvidePort('SWS_PushButtonStatus_Home', 'ButtonStatus_I', initValueRef='ButtonStatus_IV')
swc.createProvidePort('SWS_PushButtonStatus_Left', 'ButtonStatus_I', initValueRef='ButtonStatus_IV')
swc.createProvidePort('SWS_PushButtonStatus_Right', 'ButtonStatus_I', initValueRef='ButtonStatus_IV')
swc.createProvidePort('SWS_PushButtonStatus_Up', 'ButtonStatus_I', initValueRef='ButtonStatus_IV')
swc.createRequirePort('EcuM_CurrentMode', 'EcuM_CurrentMode')
swc.createRequirePort('VehicleModeInternal', 'VehicleModeInternal_I', initValueRef='VehicleModeInternal_IV')
swc.createRequirePort('BspApi', 'BspApi_I')
portAccessList = [
'SWS_PushButtonStatus_Back',
'SWS_PushButtonStatus_Down',
'SWS_PushButtonStatus_Enter',
'SWS_PushButtonStatus_Home',
'SWS_PushButtonStatus_Left',
'SWS_PushButtonStatus_Right',
'SWS_PushButtonStatus_Up'
]
swc.behavior.createRunnable('SteeringWheelButtonReader_Init', portAccess=portAccessList)
portAccessList = [
'SWS_PushButtonStatus_Back',
'SWS_PushButtonStatus_Down',
'SWS_PushButtonStatus_Enter',
'SWS_PushButtonStatus_Home',
'SWS_PushButtonStatus_Left',
'SWS_PushButtonStatus_Right',
'SWS_PushButtonStatus_Up'
]
swc.behavior.createRunnable('SteeringWheelButtonReader_Exit', portAccess=portAccessList)
portAccessList = [
'VehicleModeInternal',
'SWS_PushButtonStatus_Back',
'SWS_PushButtonStatus_Down',
'SWS_PushButtonStatus_Enter',
'SWS_PushButtonStatus_Home',
'SWS_PushButtonStatus_Left',
'SWS_PushButtonStatus_Right',
'SWS_PushButtonStatus_Up',
'BspApi/GetDiscreteInput'
]
swc.behavior.createRunnable('SteeringWheelButtonReader_Run', portAccess=portAccessList)
swc.behavior.createTimingEvent('SteeringWheelButtonReader_Run', period=10)
swc.behavior.createModeSwitchEvent('SteeringWheelButtonReader_Init', 'EcuM_CurrentMode/RUN')
### END TEST DATA
class TestPartition(unittest.TestCase):
def test_addComponent(self):
ws = autosar.workspace()
apply_test_data(ws)
partition = autosar.rte.Partition()
partition.addComponent(ws.find('/ComponentType/SteeringWheelButtonReader'))
self.assertEqual(len(partition.components), 1)
def test_unconnected(self):
ws = autosar.workspace()
apply_test_data(ws)
partition = autosar.rte.Partition()
partition.addComponent(ws.find('/ComponentType/SteeringWheelButtonReader'))
self.assertEqual(len(partition.components), 1)
unconnected = list(partition.unconnectedPorts())
self.assertEqual(len(unconnected), 10)
if __name__ == '__main__':
unittest.main() |
py | 1a31ae591c565203a8dbd9b0de816b698e7d114f | # API v1
import logging
from django.conf import settings
from django.contrib import auth, messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.views.generic import FormView
from oauth2_provider.contrib.rest_framework import OAuth2Authentication, TokenHasScope
from rest_framework import mixins, status, viewsets
from rest_framework.decorators import action
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from apps.authentication.models import Email
from apps.authentication.models import OnlineUser as User
from apps.inventory.models import Item
from apps.payment.models import PaymentTransaction
from apps.payment.transaction_constants import TransactionSource
from apps.shop.forms import SetRFIDForm
from apps.shop.models import MagicToken, OrderLine
from apps.shop.serializers import (
ItemSerializer,
OrderLineSerializer,
TransactionSerializer,
UserOrderLineSerializer,
UserSerializer,
)
from apps.shop.utils import create_magic_token
class OrderLineViewSet(viewsets.GenericViewSet, mixins.CreateModelMixin):
queryset = OrderLine.objects.all()
serializer_class = OrderLineSerializer
authentication_classes = [OAuth2Authentication]
permission_classes = [TokenHasScope]
required_scopes = ["shop.readwrite"]
@action(detail=False, url_path="userorders")
def user_orders(self, request):
"""
        Endpoint for fetching a user's order history
Intended for the nibble kiosk
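        A hypothetical request (the router prefix is not defined in this file,
        so it is left as a placeholder; only 'userorders' and the 'pk' query
        parameter come from this view):
            GET /<router-prefix>/userorders/?pk=42
        This returns at most the 50 most recent order lines for user 42,
        serialized with UserOrderLineSerializer.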
"""
pk = self.request.query_params.get("pk")
if not pk:
return Response(
"Request must include a 'pk' query parameter where 'pk' is the users user id",
status=status.HTTP_400_BAD_REQUEST,
)
user = get_object_or_404(User, pk=pk)
# Only include the latest purchases
amount = 50
orders = OrderLine.objects.filter(user=user).order_by("-datetime")[:amount]
serializer = UserOrderLineSerializer(orders, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class TransactionViewSet(viewsets.GenericViewSet, mixins.CreateModelMixin):
queryset = PaymentTransaction.objects.all()
serializer_class = TransactionSerializer
authentication_classes = [OAuth2Authentication]
permission_classes = [TokenHasScope]
required_scopes = ["shop.readwrite"]
def perform_create(self, serializer):
"""
        Transactions created by this view are only allowed to handle cash additions.
"""
serializer.save(source=TransactionSource.CASH)
class UserOrderViewSet(
viewsets.GenericViewSet, mixins.ListModelMixin, mixins.RetrieveModelMixin
):
serializer_class = UserOrderLineSerializer
permission_classes = (IsAuthenticated,)
def get_queryset(self):
return OrderLine.objects.filter(user=self.request.user)
class UserViewSet(
viewsets.GenericViewSet, mixins.RetrieveModelMixin, mixins.ListModelMixin, APIView
):
queryset = User.objects.all()
serializer_class = UserSerializer
authentication_classes = [OAuth2Authentication]
permission_classes = [TokenHasScope]
required_scopes = ["shop.readwrite"]
filterset_fields = ("rfid",)
class InventoryViewSet(
viewsets.GenericViewSet, mixins.RetrieveModelMixin, mixins.ListModelMixin
):
queryset = Item.objects.filter(available=True).order_by("pk")
serializer_class = ItemSerializer
permission_classes = (AllowAny,)
pagination_class = None
class SetRFIDView(APIView):
authentication_classes = [OAuth2Authentication]
permission_classes = [TokenHasScope]
required_scopes = ["shop.readwrite"]
def post(self, request):
username = request.data.get("username", "").lower()
password = request.data.get("password", "")
request_magic_link = request.data.get("magic_link", False)
send_magic_link_email = request.data.get("send_email", True)
if not username:
return Response(
"Missing authentication details", status=status.HTTP_400_BAD_REQUEST
)
if "@" in username:
email = Email.objects.filter(email=username)
if email:
username = email[0].user.username
user = auth.authenticate(username=username, password=password)
rfid = request.data.get("rfid", "")
if not rfid:
return Response(
"Missing RFID from request payload", status=status.HTTP_400_BAD_REQUEST
)
if user and rfid:
if user.rfid == rfid:
return Response("OK", status=status.HTTP_200_OK)
user.rfid = rfid
user.save()
return Response("OK", status=status.HTTP_200_OK)
if not user and username and rfid and request_magic_link:
onlineuser = None
try:
onlineuser = User.objects.get(username=username)
except User.DoesNotExist:
return Response(
"User does not exist", status=status.HTTP_400_BAD_REQUEST
)
magic_token = create_magic_token(
onlineuser, rfid, send_token_by_email=send_magic_link_email
)
data = {
"token": str(magic_token.token),
"url": "{}{}".format(
settings.BASE_URL,
reverse("shop_set_rfid", args=[str(magic_token.token)]),
),
}
return Response(data=data, status=status.HTTP_201_CREATED)
return Response("Invalid user credentials", status=status.HTTP_400_BAD_REQUEST)
@method_decorator(login_required, name="dispatch")
class SetRFIDWebView(FormView):
form_class = SetRFIDForm
template_name = "shop/set_rfid.html"
success_url = reverse_lazy("home")
def get(self, request, token="", *args, **kwargs):
get_object_or_404(MagicToken, token=token)
return super().get(request, token, *args, **kwargs)
def get_context_data(self, **kwargs):
kwargs["current_rfid"] = self.request.user.rfid
kwargs["token"] = self.kwargs.get("token")
return super().get_context_data(**kwargs)
def get_initial(self):
initial = super().get_initial()
initial["rfid"] = MagicToken.objects.get(token=self.kwargs.get("token")).data
return initial
def post(self, request, token="", *args, **kwargs):
logger = logging.getLogger(__name__)
form = self.get_form()
if not form.is_valid():
return self.form_invalid(form)
if not token:
form.add_error("Det finnes ingen token i denne forespørselen.")
return self.form_invalid(form)
magictoken = None
try:
magictoken = MagicToken.objects.get(token=token)
except MagicToken.DoesNotExist:
form.add_error("Tokenet du prøver å bruke eksisterer ikke.")
return self.form_invalid(form)
old_rfid = magictoken.user.rfid
magictoken.user.rfid = magictoken.data
magictoken.user.save()
logger.debug(
'{authed_user} updated RFID for {user} (from "{old}" to "{new}").'.format(
authed_user=self.request.user,
user=magictoken.user,
old=old_rfid,
new=magictoken.data,
)
)
magictoken.delete()
messages.success(request, "Oppdaterte RFID for {}".format(magictoken.user))
return self.form_valid(form)
|
py | 1a31af198fd0b5c175f1e035b96011beb1af43e7 | import sys
import os
sys.path.append('/spug/spug_api')
import random
import string
from public import db
from config import BASE_DIR
from apps.account.models import User
import apps.configuration.models
import apps.deploy.models
import apps.assets.models
import apps.schedule.models
import apps.setting.models
# init database
db.drop_all()
db.create_all()
with open(os.path.join(BASE_DIR, 'libs', 'sql', 'permissions.sql'), 'r') as f:
line = f.readline()
while line:
if line.startswith('INSERT INTO'):
db.engine.execute(line.strip())
line = f.readline()
# create default admin
username = 'admin'
password = 'spug'
User(username=username, password=password, nickname='Administrator', is_supper=True).save()
print('*' * 80)
print('Database name: ' + (os.getenv('MYSQL_DATABASE') or 'spug'))
print('Database username: ' + (os.getenv('MYSQL_USER') or 'spuguser'))
print('Database password: ' + (os.getenv('MYSQL_PASSWORD') or 'spugpwd'))
print('Login web site account: %s %s' % (username, password))
print('*' * 80)
|
py | 1a31b0dede10b517f37e0c7c06459a528cae967d | # from https://github.com/ajbrock/BigGAN-PyTorch (MIT license)
# some modifications in class Generator and G_D
# new class "Unet_Discriminator" based on original class "Discriminator"
import numpy as np
import math
import functools
import torch
import torch.nn as nn
from torch.nn import init
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter as P
from unetgan import layers
from unetgan import utils
import copy
from matplotlib import pyplot as plt
# Architectures for G
# Attention is passed in in the format '32_64' to mean applying an attention
# block at both resolutions 32x32 and 64x64.
def G_arch(ch=64, attention='64', ksize='333333', dilation='111111'):
arch = {}
arch[256] = {'in_channels': [ch * item for item in [16, 16, 8, 8, 4, 2]],
'out_channels': [ch * item for item in [16, 8, 8, 4, 2, 1]],
'upsample': [True] * 6,
'resolution': [8, 16, 32, 64, 128, 256],
'attention': {2 ** i: (2 ** i in [int(item) for item in attention.split('_')])
for i in range(3, 9)}}
arch[128] = {'in_channels': [ch * item for item in [16, 16, 8, 4, 2]],
'out_channels': [ch * item for item in [16, 8, 4, 2, 1]],
'upsample': [True] * 5,
'resolution': [8, 16, 32, 64, 128],
'attention': {2 ** i: (2 ** i in [int(item) for item in attention.split('_')])
for i in range(3, 8)}}
return arch
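# Illustrative note (derived from the dict above, not part of the original
# file): with the default ch=64, G_arch(64, '64')[128] gives
#   in_channels  = [1024, 1024, 512, 256, 128]
#   out_channels = [1024,  512, 256, 128,  64]
# and an 'attention' map that is True only at key 64, i.e. a single attention
# block is inserted once the feature map reaches 64x64.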
class Generator(nn.Module):
def __init__(self, G_ch=64, dim_z=128, bottom_width=4, resolution=128,
G_kernel_size=3, G_attn='64', n_classes=1000,
num_G_SVs=1, num_G_SV_itrs=1,
G_shared=True, shared_dim=0, hier=False,
cross_replica=False, mybn=False,
G_activation=nn.ReLU(inplace=False),
G_lr=5e-5, G_B1=0.0, G_B2=0.999, adam_eps=1e-8,
BN_eps=1e-5, SN_eps=1e-12, G_mixed_precision=False, G_fp16=False,
G_init='ortho', skip_init=False, no_optim=False,
G_param='SN', norm_style='bn',
**kwargs):
super(Generator, self).__init__()
        # Channel width multiplier
self.ch = G_ch
# Dimensionality of the latent space
self.dim_z = dim_z
# The initial spatial dimensions
self.bottom_width = bottom_width
# Resolution of the output
self.resolution = resolution
# Kernel size?
self.kernel_size = G_kernel_size
# Attention?
self.attention = G_attn
# number of classes, for use in categorical conditional generation
self.n_classes = n_classes
# Use shared embeddings?
self.G_shared = G_shared
# Dimensionality of the shared embedding? Unused if not using G_shared
self.shared_dim = shared_dim if shared_dim > 0 else dim_z
# Hierarchical latent space?
self.hier = hier
# Cross replica batchnorm?
self.cross_replica = cross_replica
# Use my batchnorm?
self.mybn = mybn
# nonlinearity for residual blocks
self.activation = G_activation
# Initialization style
self.init = G_init
# Parameterization style
self.G_param = G_param
# Normalization style
self.norm_style = norm_style
# Epsilon for BatchNorm?
self.BN_eps = BN_eps
# Epsilon for Spectral Norm?
self.SN_eps = SN_eps
# fp16?
self.fp16 = G_fp16
# Architecture dict
self.arch = G_arch(self.ch, self.attention)[resolution]
self.unconditional = kwargs["unconditional"]
# If using hierarchical latents, adjust z
if self.hier:
# Number of places z slots into
self.num_slots = len(self.arch['in_channels']) + 1
self.z_chunk_size = (self.dim_z // self.num_slots)
if not self.unconditional:
self.dim_z = self.z_chunk_size * self.num_slots
else:
self.num_slots = 1
self.z_chunk_size = 0
# Which convs, batchnorms, and linear layers to use
if self.G_param == 'SN':
self.which_conv = functools.partial(layers.SNConv2d,
kernel_size=3, padding=1,
num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
eps=self.SN_eps)
self.which_linear = functools.partial(layers.SNLinear,
num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
eps=self.SN_eps)
else:
self.which_conv = functools.partial(nn.Conv2d, kernel_size=3, padding=1)
self.which_linear = nn.Linear
# We use a non-spectral-normed embedding here regardless;
# For some reason applying SN to G's embedding seems to randomly cripple G
self.which_embedding = nn.Embedding
if self.unconditional:
bn_linear = nn.Linear
input_size = self.dim_z + (self.shared_dim if self.G_shared else 0)
else:
bn_linear = (functools.partial(self.which_linear, bias=False) if self.G_shared
else self.which_embedding)
input_size = (self.shared_dim + self.z_chunk_size if self.G_shared
else self.n_classes)
self.which_bn = functools.partial(layers.ccbn,
which_linear=bn_linear,
cross_replica=self.cross_replica,
mybn=self.mybn,
input_size=input_size,
norm_style=self.norm_style,
eps=self.BN_eps,
self_modulation=self.unconditional)
# Prepare model
# If not using shared embeddings, self.shared is just a passthrough
self.shared = (self.which_embedding(n_classes, self.shared_dim) if G_shared
else layers.identity())
# First linear layer
if self.unconditional:
self.linear = self.which_linear(self.dim_z, self.arch['in_channels'][0] * (self.bottom_width ** 2))
else:
self.linear = self.which_linear(self.dim_z // self.num_slots,
self.arch['in_channels'][0] * (self.bottom_width ** 2))
# self.blocks is a doubly-nested list of modules, the outer loop intended
# to be over blocks at a given resolution (resblocks and/or self-attention)
# while the inner loop is over a given block
self.blocks = []
for index in range(len(self.arch['out_channels'])):
self.blocks += [[layers.GBlock(in_channels=self.arch['in_channels'][index],
out_channels=self.arch['out_channels'][index],
which_conv=self.which_conv,
which_bn=self.which_bn,
activation=self.activation,
upsample=(functools.partial(F.interpolate, scale_factor=2)
if self.arch['upsample'][index] else None))]]
# If attention on this block, attach it to the end
if self.arch['attention'][self.arch['resolution'][index]]:
print('Adding attention layer in G at resolution %d' % self.arch['resolution'][index])
self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index], self.which_conv)]
# Turn self.blocks into a ModuleList so that it's all properly registered.
self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])
# output layer: batchnorm-relu-conv.
# Consider using a non-spectral conv here
self.output_layer = nn.Sequential(layers.bn(self.arch['out_channels'][-1],
cross_replica=self.cross_replica,
mybn=self.mybn),
self.activation,
self.which_conv(self.arch['out_channels'][-1], 3))
# Initialize weights. Optionally skip init for testing.
if not skip_init:
self.init_weights()
# Set up optimizer
# If this is an EMA copy, no need for an optim, so just return now
if no_optim:
return
self.lr, self.B1, self.B2, self.adam_eps = G_lr, G_B1, G_B2, adam_eps
if G_mixed_precision:
print('Using fp16 adam in G...')
import utils
self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,
betas=(self.B1, self.B2), weight_decay=0,
eps=self.adam_eps)
else:
self.optim = optim.Adam(params=self.parameters(), lr=self.lr,
betas=(self.B1, self.B2), weight_decay=0,
eps=self.adam_eps)
# LR scheduling, left here for forward compatibility
# self.lr_sched = {'itr' : 0}# if self.progressive else {}
# self.j = 0
# Initialize
def init_weights(self):
self.param_count = 0
for module in self.modules():
if (isinstance(module, nn.Conv2d)
or isinstance(module, nn.Linear)
or isinstance(module, nn.Embedding)):
if self.init == 'ortho':
init.orthogonal_(module.weight)
elif self.init == 'N02':
init.normal_(module.weight, 0, 0.02)
elif self.init in ['glorot', 'xavier']:
init.xavier_uniform_(module.weight)
else:
print('Init style not recognized...')
self.param_count += sum([p.data.nelement() for p in module.parameters()])
        print("Param count for G's initialized parameters: %d" % self.param_count)
# Note on this forward function: we pass in a y vector which has
# already been passed through G.shared to enable easy class-wise
# interpolation later. If we passed in the one-hot and then ran it through
# G.shared in this forward function, it would be harder to handle.
def forward(self, z, y):
# If hierarchical, concatenate zs and ys
if self.hier:
# faces
if self.unconditional:
ys = [z for _ in range(self.num_slots)]
else:
zs = torch.split(z, self.z_chunk_size, 1)
z = zs[0]
ys = [torch.cat([y, item], 1) for item in zs[1:]]
else:
if self.unconditional:
ys = [None] * len(self.blocks)
else:
ys = [y] * len(self.blocks)
# First linear layer
h = self.linear(z)
# Reshape
h = h.view(h.size(0), -1, self.bottom_width, self.bottom_width)
# Loop over blocks
for index, blocklist in enumerate(self.blocks):
# Second inner loop in case block has multiple layers
for block in blocklist:
h = block(h, ys[index])
# Apply batchnorm-relu-conv-tanh at output
return torch.tanh(self.output_layer(h))
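# Hedged usage sketch (not in the original source): per the note above, callers
# embed the class labels themselves before calling forward, e.g.
#   z = torch.randn(batch_size, G.dim_z)
#   y = torch.randint(0, G.n_classes, (batch_size,))
#   fake = G(z, G.shared(y))
# which matches how G_D.forward below calls self.G(z, self.G.shared(gy)).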
# Discriminator architecture, same paradigm as G's above
def D_arch(ch=64, attention='64', ksize='333333', dilation='111111'):
arch = {}
arch[256] = {'in_channels': [3] + [ch * item for item in [1, 2, 4, 8, 8, 16]],
'out_channels': [item * ch for item in [1, 2, 4, 8, 8, 16, 16]],
'downsample': [True] * 6 + [False],
'resolution': [128, 64, 32, 16, 8, 4, 4],
'attention': {2 ** i: 2 ** i in [int(item) for item in attention.split('_')]
for i in range(2, 8)}}
arch[128] = {'in_channels': [3] + [ch * item for item in [1, 2, 4, 8, 16]],
'out_channels': [item * ch for item in [1, 2, 4, 8, 16, 16]],
'downsample': [True] * 5 + [False],
'resolution': [64, 32, 16, 8, 4, 4],
'attention': {2 ** i: 2 ** i in [int(item) for item in attention.split('_')]
for i in range(2, 8)}}
return arch
def D_unet_arch(ch=64, attention='64', ksize='333333', dilation='111111', out_channel_multiplier=1):
arch = {}
n = 2
ocm = out_channel_multiplier
# covers bigger perceptual fields
arch[128] = {'in_channels': [3] + [ch * item for item in [1, 2, 4, 8, 16, 8 * n, 4 * 2, 2 * 2, 1 * 2, 1]],
'out_channels': [item * ch for item in [1, 2, 4, 8, 16, 8, 4, 2, 1, 1]],
'downsample': [True] * 5 + [False] * 5,
'upsample': [False] * 5 + [True] * 5,
'resolution': [64, 32, 16, 8, 4, 8, 16, 32, 64, 128],
'attention': {2 ** i: 2 ** i in [int(item) for item in attention.split('_')]
for i in range(2, 11)}}
arch[256] = {'in_channels': [3] + [ch * item for item in [1, 2, 4, 8, 8, 16, 8 * 2, 8 * 2, 4 * 2, 2 * 2, 1 * 2, 1]],
'out_channels': [item * ch for item in [1, 2, 4, 8, 8, 16, 8, 8, 4, 2, 1, 1]],
'downsample': [True] * 6 + [False] * 6,
'upsample': [False] * 6 + [True] * 6,
'resolution': [128, 64, 32, 16, 8, 4, 8, 16, 32, 64, 128, 256],
'attention': {2 ** i: 2 ** i in [int(item) for item in attention.split('_')]
for i in range(2, 13)}}
return arch
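# Illustrative note (derived from the dicts above and Unet_Discriminator.forward
# below, not part of the original file): for resolution 128 the decoder blocks
# at indices 6-9 get their input concatenated with the encoder features saved
# as residual_features[4]..[1], which is why those 'in_channels' entries carry
# the *2 factor (8*n, 4*2, 2*2, 1*2).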
class Unet_Discriminator(nn.Module):
def __init__(self, D_ch=64, D_wide=True, resolution=128,
D_kernel_size=3, D_attn='64', n_classes=1000,
num_D_SVs=1, num_D_SV_itrs=1, D_activation=nn.ReLU(inplace=False),
D_lr=2e-4, D_B1=0.0, D_B2=0.999, adam_eps=1e-8,
SN_eps=1e-12, output_dim=1, D_mixed_precision=False, D_fp16=False,
D_init='ortho', skip_init=False, D_param='SN', decoder_skip_connection=True, **kwargs):
super(Unet_Discriminator, self).__init__()
# Width multiplier
self.ch = D_ch
# Use Wide D as in BigGAN and SA-GAN or skinny D as in SN-GAN?
self.D_wide = D_wide
# Resolution
self.resolution = resolution
# Kernel size
self.kernel_size = D_kernel_size
# Attention?
self.attention = D_attn
# Number of classes
self.n_classes = n_classes
# Activation
self.activation = D_activation
# Initialization style
self.init = D_init
# Parameterization style
self.D_param = D_param
# Epsilon for Spectral Norm?
self.SN_eps = SN_eps
# Fp16?
self.fp16 = D_fp16
if self.resolution == 128:
self.save_features = [0, 1, 2, 3, 4]
elif self.resolution == 256:
self.save_features = [0, 1, 2, 3, 4, 5]
self.out_channel_multiplier = 1 # 4
# Architecture
self.arch = D_unet_arch(self.ch, self.attention, out_channel_multiplier=self.out_channel_multiplier)[resolution]
self.unconditional = kwargs["unconditional"]
# Which convs, batchnorms, and linear layers to use
# No option to turn off SN in D right now
if self.D_param == 'SN':
self.which_conv = functools.partial(layers.SNConv2d,
kernel_size=3, padding=1,
num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
eps=self.SN_eps)
self.which_linear = functools.partial(layers.SNLinear,
num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
eps=self.SN_eps)
self.which_embedding = functools.partial(layers.SNEmbedding,
num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
eps=self.SN_eps)
# Prepare model
# self.blocks is a doubly-nested list of modules, the outer loop intended
# to be over blocks at a given resolution (resblocks and/or self-attention)
self.blocks = []
for index in range(len(self.arch['out_channels'])):
if self.arch["downsample"][index]:
self.blocks += [[layers.DBlock(in_channels=self.arch['in_channels'][index],
out_channels=self.arch['out_channels'][index],
which_conv=self.which_conv,
wide=self.D_wide,
activation=self.activation,
preactivation=(index > 0),
downsample=(
nn.AvgPool2d(2) if self.arch['downsample'][index] else None))]]
elif self.arch["upsample"][index]:
upsample_function = (
functools.partial(F.interpolate, scale_factor=2, mode="nearest") # mode=nearest is default
if self.arch['upsample'][index] else None)
self.blocks += [[layers.GBlock2(in_channels=self.arch['in_channels'][index],
out_channels=self.arch['out_channels'][index],
which_conv=self.which_conv,
# which_bn=self.which_bn,
activation=self.activation,
upsample=upsample_function, skip_connection=True)]]
# If attention on this block, attach it to the end
attention_condition = index < 5
if self.arch['attention'][self.arch['resolution'][index]] and attention_condition: # index < 5
print('Adding attention layer in D at resolution %d' % self.arch['resolution'][index])
print("index = ", index)
self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index],
self.which_conv)]
# Turn self.blocks into a ModuleList so that it's all properly registered.
self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])
last_layer = nn.Conv2d(self.ch * self.out_channel_multiplier, 1, kernel_size=1)
self.blocks.append(last_layer)
#
# Linear output layer. The output dimension is typically 1, but may be
# larger if we're e.g. turning this into a VAE with an inference output
self.linear = self.which_linear(self.arch['out_channels'][-1], output_dim)
self.linear_middle = self.which_linear(16 * self.ch, output_dim)
# Embedding for projection discrimination
# if not kwargs["agnostic_unet"] and not kwargs["unconditional"]:
# self.embed = self.which_embedding(self.n_classes, self.arch['out_channels'][-1]+extra)
if not kwargs["unconditional"]:
self.embed_middle = self.which_embedding(self.n_classes, 16 * self.ch)
self.embed = self.which_embedding(self.n_classes, self.arch['out_channels'][-1])
# Initialize weights
if not skip_init:
self.init_weights()
###
print("_____params______")
for name, param in self.named_parameters():
print(name, param.size())
# Set up optimizer
self.lr, self.B1, self.B2, self.adam_eps = D_lr, D_B1, D_B2, adam_eps
if D_mixed_precision:
print('Using fp16 adam in D...')
import utils
self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,
betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
else:
self.optim = optim.Adam(params=self.parameters(), lr=self.lr,
betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
# LR scheduling, left here for forward compatibility
# self.lr_sched = {'itr' : 0}# if self.progressive else {}
# self.j = 0
# Initialize
def init_weights(self):
self.param_count = 0
for module in self.modules():
if (isinstance(module, nn.Conv2d)
or isinstance(module, nn.Linear)
or isinstance(module, nn.Embedding)):
if self.init == 'ortho':
init.orthogonal_(module.weight)
elif self.init == 'N02':
init.normal_(module.weight, 0, 0.02)
elif self.init in ['glorot', 'xavier']:
init.xavier_uniform_(module.weight)
else:
print('Init style not recognized...')
self.param_count += sum([p.data.nelement() for p in module.parameters()])
        print("Param count for D's initialized parameters: %d" % self.param_count)
def forward(self, x, y=None):
# Stick x into h for cleaner for loops without flow control
h = x
residual_features = []
residual_features.append(x)
# Loop over blocks
for index, blocklist in enumerate(self.blocks[:-1]):
if self.resolution == 128:
if index == 6:
h = torch.cat((h, residual_features[4]), dim=1)
elif index == 7:
h = torch.cat((h, residual_features[3]), dim=1)
elif index == 8: #
h = torch.cat((h, residual_features[2]), dim=1)
elif index == 9: #
h = torch.cat((h, residual_features[1]), dim=1)
if self.resolution == 256:
if index == 7:
h = torch.cat((h, residual_features[5]), dim=1)
elif index == 8:
h = torch.cat((h, residual_features[4]), dim=1)
elif index == 9: #
h = torch.cat((h, residual_features[3]), dim=1)
elif index == 10: #
h = torch.cat((h, residual_features[2]), dim=1)
elif index == 11:
h = torch.cat((h, residual_features[1]), dim=1)
for block in blocklist:
h = block(h)
if index in self.save_features[:-1]:
residual_features.append(h)
if index == self.save_features[-1]:
# Apply global sum pooling as in SN-GAN
h_ = torch.sum(self.activation(h), [2, 3])
# Get initial class-unconditional output
bottleneck_out = self.linear_middle(h_)
# Get projection of final featureset onto class vectors and add to evidence
if self.unconditional:
projection = 0
else:
# this is the bottleneck classifier c
emb_mid = self.embed_middle(y)
projection = torch.sum(emb_mid * h_, 1, keepdim=True)
bottleneck_out = bottleneck_out + projection
out = self.blocks[-1](h)
if self.unconditional:
proj = 0
else:
emb = self.embed(y)
emb = emb.view(emb.size(0), emb.size(1), 1, 1).expand_as(h)
proj = torch.sum(emb * h, 1, keepdim=True)
################
out = out + proj
out = out.view(out.size(0), 1, self.resolution, self.resolution)
return out, bottleneck_out
# Parallelized G_D to minimize cross-gpu communication
# Without this, Generator outputs would get all-gathered and then rebroadcast.
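# Hedged sketch of the expected wrapping (an assumption, not shown in this
# file): a training script would typically do
#   GD = G_D(G, D, config)
#   GD = nn.DataParallel(GD)  # each replica runs G and D on its own shard
# so generator outputs stay on their device instead of being gathered first.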
class G_D(nn.Module):
def __init__(self, G, D, config):
super(G_D, self).__init__()
self.G = G
self.D = D
self.config = config
def forward(self, z, gy, x=None, dy=None, train_G=False, return_G_z=False,
split_D=False, dw1=[], dw2=[], reference_x=None, mixup=False, mixup_only=False, target_map=None):
if mixup:
gy = dy
            # why? so the mixup samples consist of the same class
# If training G, enable grad tape
with torch.set_grad_enabled(train_G):
G_z = self.G(z, self.G.shared(gy))
# Cast as necessary
if self.G.fp16 and not self.D.fp16:
G_z = G_z.float()
if self.D.fp16 and not self.G.fp16:
G_z = G_z.half()
if mixup:
initial_x_size = x.size(0)
mixed = target_map * x + (1 - target_map) * G_z
mixed_y = dy
if not mixup_only:
# we get here in the cutmix cons extra case
D_input = torch.cat([G_z, x], 0) if x is not None else G_z
D_class = torch.cat([gy, dy], 0) if dy is not None else gy
dmap = torch.tensor([])
if mixup:
# we get here in the cutmix "consistency loss and augmentation" case, if "mixup" is true for the current round (depends on p mixup)
D_input = torch.cat([D_input, mixed], 0)
if self.config["dataset"] != "coco_animals":
D_class = torch.cat([D_class.float(), mixed_y.float()], 0)
else:
D_class = torch.cat([D_class.long(), mixed_y.long()], 0)
else:
# not reached in cutmix "consistency loss and augmentation"
D_input = mixed
D_class = mixed_y
dmap = torch.tensor([])
del G_z
del x
G_z = None
x = None
D_out, D_middle = self.D(D_input, D_class)
del D_input
del D_class
if x is not None:
if not mixup:
out = torch.split(D_out, [G_z.shape[0], x.shape[0]]) # D_fake, D_real
else:
out = torch.split(D_out, [G_z.shape[0], x.shape[0], mixed.shape[0]]) # D_fake, D_real, D_mixed
out = out + (G_z,)
if mixup:
out = out + (mixed,)
if not mixup:
D_middle = torch.split(D_middle, [G_z.shape[0], x.shape[0]]) # D_middle_fake, D_middle_real
else:
D_middle = torch.split(D_middle, [G_z.shape[0], x.shape[0], mixed.shape[0]])
out = out + D_middle
###return target map as well
if mixup:
out = out + (target_map,)
return out
else:
            # in mixup you arrive here
out = (D_out,)
if return_G_z:
out = out + (G_z,)
if mixup_only:
out = out + (mixed,)
out = out + (D_middle,)
##return target map as well
if mixup:
out = out + (target_map,)
return out
|
py | 1a31b133a67385896713aeeccba70e05424bea00 | import factory
from factory import Faker, fuzzy
from users.models import Profile, User
CONTACT_LANGUAGE_CHOICES = ["fi", "sv", "en"]
class UserFactory(factory.django.DjangoModelFactory):
class Meta:
model = User
first_name = Faker("first_name")
last_name = Faker("last_name")
email = Faker("email")
class ProfileFactory(factory.django.DjangoModelFactory):
class Meta:
model = Profile
id = factory.Faker("uuid4")
first_name = Faker("first_name")
last_name = Faker("last_name")
email = Faker("email")
phone_number = Faker("phone_number")
street_address = Faker("street_address")
city = Faker("city")
postal_code = Faker("postcode")
date_of_birth = Faker("date_of_birth", minimum_age=17, maximum_age=99)
contact_language = fuzzy.FuzzyChoice(list(CONTACT_LANGUAGE_CHOICES))
user = factory.SubFactory(UserFactory)
|
py | 1a31b2b99e9c980a24e47becfb9860295751e8c0 | import agate
import decimal
import unittest
from unittest import mock
import dbt.flags as flags
from dbt.task.debug import DebugTask
from dbt.adapters.base.query_headers import MacroQueryStringSetter
from dbt.adapters.postgres import PostgresAdapter
from dbt.adapters.postgres import Plugin as PostgresPlugin
from dbt.contracts.files import FileHash
from dbt.contracts.graph.manifest import ManifestStateCheck
from dbt.clients import agate_helper
from dbt.exceptions import ValidationException, DbtConfigError
from dbt.logger import GLOBAL_LOGGER as logger # noqa
from psycopg2 import extensions as psycopg2_extensions
from psycopg2 import DatabaseError
from .utils import config_from_parts_or_dicts, inject_adapter, mock_connection, TestAdapterConversions, load_internal_manifest_macros, clear_plugin
class TestPostgresAdapter(unittest.TestCase):
def setUp(self):
project_cfg = {
'name': 'X',
'version': '0.1',
'profile': 'test',
'project-root': '/tmp/dbt/does-not-exist',
'config-version': 2,
}
profile_cfg = {
'outputs': {
'test': {
'type': 'postgres',
'dbname': 'postgres',
'user': 'root',
'host': 'thishostshouldnotexist',
'pass': 'password',
'port': 5432,
'schema': 'public',
}
},
'target': 'test'
}
self.config = config_from_parts_or_dicts(project_cfg, profile_cfg)
self._adapter = None
@property
def adapter(self):
if self._adapter is None:
self._adapter = PostgresAdapter(self.config)
inject_adapter(self._adapter, PostgresPlugin)
return self._adapter
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_acquire_connection_validations(self, psycopg2):
try:
connection = self.adapter.acquire_connection('dummy')
except ValidationException as e:
self.fail('got ValidationException: {}'.format(str(e)))
except BaseException as e:
self.fail('acquiring connection failed with unknown exception: {}'
.format(str(e)))
self.assertEqual(connection.type, 'postgres')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once()
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_acquire_connection(self, psycopg2):
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
self.assertEqual(connection.state, 'open')
self.assertNotEqual(connection.handle, None)
psycopg2.connect.assert_called_once()
def test_cancel_open_connections_empty(self):
self.assertEqual(len(list(self.adapter.cancel_open_connections())), 0)
def test_cancel_open_connections_master(self):
key = self.adapter.connections.get_thread_identifier()
self.adapter.connections.thread_connections[key] = mock_connection('master')
self.assertEqual(len(list(self.adapter.cancel_open_connections())), 0)
def test_cancel_open_connections_single(self):
master = mock_connection('master')
model = mock_connection('model')
key = self.adapter.connections.get_thread_identifier()
model.handle.get_backend_pid.return_value = 42
self.adapter.connections.thread_connections.update({
key: master,
1: model,
})
with mock.patch.object(self.adapter.connections, 'add_query') as add_query:
query_result = mock.MagicMock()
add_query.return_value = (None, query_result)
self.assertEqual(len(list(self.adapter.cancel_open_connections())), 1)
add_query.assert_called_once_with('select pg_terminate_backend(42)')
master.handle.get_backend_pid.assert_not_called()
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_default_connect_timeout(self, psycopg2):
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='postgres',
user='root',
host='thishostshouldnotexist',
password='password',
port=5432,
connect_timeout=10,
application_name='dbt')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_changed_connect_timeout(self, psycopg2):
self.config.credentials = self.config.credentials.replace(connect_timeout=30)
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='postgres',
user='root',
host='thishostshouldnotexist',
password='password',
port=5432,
connect_timeout=30,
application_name='dbt')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_default_keepalive(self, psycopg2):
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='postgres',
user='root',
host='thishostshouldnotexist',
password='password',
port=5432,
connect_timeout=10,
application_name='dbt')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_changed_keepalive(self, psycopg2):
self.config.credentials = self.config.credentials.replace(keepalives_idle=256)
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='postgres',
user='root',
host='thishostshouldnotexist',
password='password',
port=5432,
connect_timeout=10,
keepalives_idle=256,
application_name='dbt')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_default_application_name(self, psycopg2):
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='postgres',
user='root',
host='thishostshouldnotexist',
password='password',
port=5432,
connect_timeout=10,
application_name='dbt')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_changed_application_name(self, psycopg2):
self.config.credentials = self.config.credentials.replace(application_name='myapp')
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='postgres',
user='root',
host='thishostshouldnotexist',
password='password',
port=5432,
connect_timeout=10,
application_name='myapp')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_role(self, psycopg2):
self.config.credentials = self.config.credentials.replace(role='somerole')
connection = self.adapter.acquire_connection('dummy')
cursor = connection.handle.cursor()
cursor.execute.assert_called_once_with('set role somerole')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_search_path(self, psycopg2):
self.config.credentials = self.config.credentials.replace(search_path="test")
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='postgres',
user='root',
host='thishostshouldnotexist',
password='password',
port=5432,
connect_timeout=10,
application_name='dbt',
options="-c search_path=test")
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_sslmode(self, psycopg2):
self.config.credentials = self.config.credentials.replace(sslmode="require")
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='postgres',
user='root',
host='thishostshouldnotexist',
password='password',
port=5432,
connect_timeout=10,
sslmode="require",
application_name='dbt')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_ssl_parameters(self, psycopg2):
self.config.credentials = self.config.credentials.replace(sslmode="verify-ca")
self.config.credentials = self.config.credentials.replace(sslcert="service.crt")
self.config.credentials = self.config.credentials.replace(sslkey="service.key")
self.config.credentials = self.config.credentials.replace(sslrootcert="ca.crt")
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='postgres',
user='root',
host='thishostshouldnotexist',
password='password',
port=5432,
connect_timeout=10,
sslmode="verify-ca",
sslcert="service.crt",
sslkey="service.key",
sslrootcert="ca.crt",
application_name='dbt')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_schema_with_space(self, psycopg2):
self.config.credentials = self.config.credentials.replace(search_path="test test")
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='postgres',
user='root',
host='thishostshouldnotexist',
password='password',
port=5432,
connect_timeout=10,
application_name='dbt',
options="-c search_path=test\ test")
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_set_zero_keepalive(self, psycopg2):
self.config.credentials = self.config.credentials.replace(keepalives_idle=0)
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='postgres',
user='root',
host='thishostshouldnotexist',
password='password',
port=5432,
connect_timeout=10,
application_name='dbt')
@mock.patch.object(PostgresAdapter, 'execute_macro')
@mock.patch.object(PostgresAdapter, '_get_catalog_schemas')
def test_get_catalog_various_schemas(self, mock_get_schemas, mock_execute):
column_names = ['table_database', 'table_schema', 'table_name']
rows = [
('dbt', 'foo', 'bar'),
('dbt', 'FOO', 'baz'),
('dbt', None, 'bar'),
('dbt', 'quux', 'bar'),
('dbt', 'skip', 'bar'),
]
mock_execute.return_value = agate.Table(rows=rows,
column_names=column_names)
mock_get_schemas.return_value.items.return_value = [(mock.MagicMock(database='dbt'), {'foo', 'FOO', 'quux'})]
mock_manifest = mock.MagicMock()
mock_manifest.get_used_schemas.return_value = {('dbt', 'foo'),
('dbt', 'quux')}
catalog, exceptions = self.adapter.get_catalog(mock_manifest)
self.assertEqual(
set(map(tuple, catalog)),
{('dbt', 'foo', 'bar'), ('dbt', 'FOO', 'baz'), ('dbt', 'quux', 'bar')}
)
self.assertEqual(exceptions, [])
class TestConnectingPostgresAdapter(unittest.TestCase):
def setUp(self):
self.target_dict = {
'type': 'postgres',
'dbname': 'postgres',
'user': 'root',
'host': 'thishostshouldnotexist',
'pass': 'password',
'port': 5432,
'schema': 'public'
}
profile_cfg = {
'outputs': {
'test': self.target_dict,
},
'target': 'test'
}
project_cfg = {
'name': 'X',
'version': '0.1',
'profile': 'test',
'project-root': '/tmp/dbt/does-not-exist',
'quoting': {
'identifier': False,
'schema': True,
},
'config-version': 2,
}
self.config = config_from_parts_or_dicts(project_cfg, profile_cfg)
self.handle = mock.MagicMock(spec=psycopg2_extensions.connection)
self.cursor = self.handle.cursor.return_value
self.mock_execute = self.cursor.execute
self.patcher = mock.patch('dbt.adapters.postgres.connections.psycopg2')
self.psycopg2 = self.patcher.start()
# Create the Manifest.state_check patcher
@mock.patch('dbt.parser.manifest.ManifestLoader.build_manifest_state_check')
def _mock_state_check(self):
config = self.root_project
all_projects = self.all_projects
return ManifestStateCheck(
vars_hash=FileHash.from_contents('vars'),
project_hashes={name: FileHash.from_contents(name) for name in all_projects},
profile_hash=FileHash.from_contents('profile'),
)
self.load_state_check = mock.patch('dbt.parser.manifest.ManifestLoader.build_manifest_state_check')
self.mock_state_check = self.load_state_check.start()
self.mock_state_check.side_effect = _mock_state_check
self.psycopg2.connect.return_value = self.handle
self.adapter = PostgresAdapter(self.config)
self.adapter._macro_manifest_lazy = load_internal_manifest_macros(self.config)
self.adapter.connections.query_header = MacroQueryStringSetter(self.config, self.adapter._macro_manifest_lazy)
self.qh_patch = mock.patch.object(self.adapter.connections.query_header, 'add')
self.mock_query_header_add = self.qh_patch.start()
self.mock_query_header_add.side_effect = lambda q: '/* dbt */\n{}'.format(q)
self.adapter.acquire_connection()
inject_adapter(self.adapter, PostgresPlugin)
def tearDown(self):
# we want a unique self.handle every time.
self.adapter.cleanup_connections()
self.qh_patch.stop()
self.patcher.stop()
self.load_state_check.stop()
clear_plugin(PostgresPlugin)
def test_quoting_on_drop_schema(self):
relation = self.adapter.Relation.create(
database='postgres', schema='test_schema',
quote_policy=self.adapter.config.quoting,
)
self.adapter.drop_schema(relation)
self.mock_execute.assert_has_calls([
mock.call('/* dbt */\ndrop schema if exists "test_schema" cascade', None)
])
def test_quoting_on_drop(self):
relation = self.adapter.Relation.create(
database='postgres',
schema='test_schema',
identifier='test_table',
type='table',
quote_policy=self.adapter.config.quoting,
)
self.adapter.drop_relation(relation)
self.mock_execute.assert_has_calls([
mock.call('/* dbt */\ndrop table if exists "postgres"."test_schema".test_table cascade', None)
])
def test_quoting_on_truncate(self):
relation = self.adapter.Relation.create(
database='postgres',
schema='test_schema',
identifier='test_table',
type='table',
quote_policy=self.adapter.config.quoting,
)
self.adapter.truncate_relation(relation)
self.mock_execute.assert_has_calls([
mock.call('/* dbt */\ntruncate table "postgres"."test_schema".test_table', None)
])
def test_quoting_on_rename(self):
from_relation = self.adapter.Relation.create(
database='postgres',
schema='test_schema',
identifier='table_a',
type='table',
quote_policy=self.adapter.config.quoting,
)
to_relation = self.adapter.Relation.create(
database='postgres',
schema='test_schema',
identifier='table_b',
type='table',
quote_policy=self.adapter.config.quoting,
)
self.adapter.rename_relation(
from_relation=from_relation,
to_relation=to_relation
)
self.mock_execute.assert_has_calls([
mock.call('/* dbt */\nalter table "postgres"."test_schema".table_a rename to table_b', None)
])
def test_debug_connection_ok(self):
DebugTask.validate_connection(self.target_dict)
self.mock_execute.assert_has_calls([
mock.call('/* dbt */\nselect 1 as id', None)
])
def test_debug_connection_fail_nopass(self):
del self.target_dict['pass']
with self.assertRaises(DbtConfigError):
DebugTask.validate_connection(self.target_dict)
def test_connection_fail_select(self):
self.mock_execute.side_effect = DatabaseError()
with self.assertRaises(DbtConfigError):
DebugTask.validate_connection(self.target_dict)
self.mock_execute.assert_has_calls([
mock.call('/* dbt */\nselect 1 as id', None)
])
def test_dbname_verification_is_case_insensitive(self):
# Override adapter settings from setUp()
self.target_dict['dbname'] = 'Postgres'
profile_cfg = {
'outputs': {
'test': self.target_dict,
},
'target': 'test'
}
project_cfg = {
'name': 'X',
'version': '0.1',
'profile': 'test',
'project-root': '/tmp/dbt/does-not-exist',
'quoting': {
'identifier': False,
'schema': True,
},
'config-version': 2,
}
self.config = config_from_parts_or_dicts(project_cfg, profile_cfg)
self.adapter.cleanup_connections()
self._adapter = PostgresAdapter(self.config)
self.adapter.verify_database('postgres')
class TestPostgresFilterCatalog(unittest.TestCase):
def test__catalog_filter_table(self):
manifest = mock.MagicMock()
manifest.get_used_schemas.return_value = [['a', 'B'], ['a', '1234']]
column_names = ['table_name', 'table_database', 'table_schema', 'something']
rows = [
['foo', 'a', 'b', '1234'], # include
['foo', 'a', '1234', '1234'], # include, w/ table schema as str
['foo', 'c', 'B', '1234'], # skip
['1234', 'A', 'B', '1234'], # include, w/ table name as str
]
table = agate.Table(
rows, column_names, agate_helper.DEFAULT_TYPE_TESTER
)
result = PostgresAdapter._catalog_filter_table(table, manifest)
assert len(result) == 3
for row in result.rows:
assert isinstance(row['table_schema'], str)
assert isinstance(row['table_database'], str)
assert isinstance(row['table_name'], str)
assert isinstance(row['something'], decimal.Decimal)
class TestPostgresAdapterConversions(TestAdapterConversions):
def test_convert_text_type(self):
rows = [
['', 'a1', 'stringval1'],
['', 'a2', 'stringvalasdfasdfasdfa'],
['', 'a3', 'stringval3'],
]
agate_table = self._make_table_of(rows, agate.Text)
expected = ['text', 'text', 'text']
for col_idx, expect in enumerate(expected):
assert PostgresAdapter.convert_text_type(agate_table, col_idx) == expect
def test_convert_number_type(self):
rows = [
['', '23.98', '-1'],
['', '12.78', '-2'],
['', '79.41', '-3'],
]
agate_table = self._make_table_of(rows, agate.Number)
expected = ['integer', 'float8', 'integer']
for col_idx, expect in enumerate(expected):
assert PostgresAdapter.convert_number_type(agate_table, col_idx) == expect
def test_convert_boolean_type(self):
rows = [
['', 'false', 'true'],
['', 'false', 'false'],
['', 'false', 'true'],
]
agate_table = self._make_table_of(rows, agate.Boolean)
expected = ['boolean', 'boolean', 'boolean']
for col_idx, expect in enumerate(expected):
assert PostgresAdapter.convert_boolean_type(agate_table, col_idx) == expect
def test_convert_datetime_type(self):
rows = [
['', '20190101T01:01:01Z', '2019-01-01 01:01:01'],
['', '20190102T01:01:01Z', '2019-01-01 01:01:01'],
['', '20190103T01:01:01Z', '2019-01-01 01:01:01'],
]
agate_table = self._make_table_of(rows, [agate.DateTime, agate_helper.ISODateTime, agate.DateTime])
expected = ['timestamp without time zone', 'timestamp without time zone', 'timestamp without time zone']
for col_idx, expect in enumerate(expected):
assert PostgresAdapter.convert_datetime_type(agate_table, col_idx) == expect
def test_convert_date_type(self):
rows = [
['', '2019-01-01', '2019-01-04'],
['', '2019-01-02', '2019-01-04'],
['', '2019-01-03', '2019-01-04'],
]
agate_table = self._make_table_of(rows, agate.Date)
expected = ['date', 'date', 'date']
for col_idx, expect in enumerate(expected):
assert PostgresAdapter.convert_date_type(agate_table, col_idx) == expect
def test_convert_time_type(self):
# dbt's default type testers actually don't have a TimeDelta at all.
agate.TimeDelta
rows = [
['', '120s', '10s'],
['', '3m', '11s'],
['', '1h', '12s'],
]
agate_table = self._make_table_of(rows, agate.TimeDelta)
expected = ['time', 'time', 'time']
for col_idx, expect in enumerate(expected):
assert PostgresAdapter.convert_time_type(agate_table, col_idx) == expect
|
py | 1a31b3538bdc37601f647927e4d5b5a74f168edd | """ Authentication library
======================
A base authentication & authorization module.
Includes the base class BaseAuth.
Authentication and authorization in NIPAP
-----------------------------------------
NIPAP offers basic authentication with two different backends, a simple
two-level authorization model and a trust-system for simplifying system
integration.
Readonly users are only authorized to run queries which do not modify any
data in the database. No further granularity of access control is offered at
this point.
Trusted users can perform operations which will be logged as performed by
another user. This feature is meant for system integration, for example to
be used by a NIPAP client which have its own means of authentication users;
say for example a web application supporting the NTLM single sign-on
feature. By letting the web application use a trusted account to
authenticate against the NIPAP service, it can specify the username of the
end-user, so that audit logs will be written with the correct information.
Without the trusted-bit, all queries performed by end-users through this
system would look like they were performed by the system itself.
The NIPAP auth system also has a concept of authoritative source. The
authoritative source is a string which simply defines what system is the
authoritative source of data for a prefix. Well-behaved clients SHOULD
present a warning to the user when trying to alter a prefix with an
authoritative source different than the system itself, as other system might
depend on the information being unchanged. This is however, by no means
enforced by the NIPAP service.
Authentication backends
-----------------------
Two authentication backends are shipped with NIPAP:
* LdapAuth - authenticates users against an LDAP server
* SqliteAuth - authenticates users against a local SQLite-database
The authentication classes presented here are used both in the NIPAP web UI
and in the XML-RPC backend. So far only the SqliteAuth backend supports
trusted and readonly users.
What authentication backend to use can be specified by suffixing the
username with @`backend`, where `backend` is set in the configuration file.
If not defined, a (configurable) default backend is used.
Authentication options
----------------------
With each NIPAP query authentication options can be specified. The
authentication options are passed as a dict with the following keys taken
into account:
* :attr:`authoritative_source` - Authoritative source for the query.
* :attr:`username` - Username to impersonate, requires authentication as \
trusted user.
* :attr:`full_name` - Full name of impersonated user.
* :attr:`readonly` - True for read-only users
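A hedged usage sketch (the backend suffix 'local' and all credentials are made
up; the option keys are the ones listed above), for example:
    fact = AuthFactory()
    auth = fact.get_auth('webui@local', 'secret', 'nipap-web',
        auth_options={'username': 'jdoe', 'full_name': 'Jane Doe'})
    if not auth.authenticate():
        raise AuthError('authentication failed')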
Classes
-------
"""
import logging
from datetime import datetime, timedelta
import hashlib
from nipapconfig import NipapConfig
# Used by auth modules
import sqlite3
import ldap
import string
import random
class AuthFactory:
""" An factory for authentication backends.
"""
_logger = None
_config = None
_auth_cache = {}
_backends = {}
def __init__(self):
""" Constructor.
"""
# Initialize stuff.
self._config = NipapConfig()
self._logger = logging.getLogger(self.__class__.__name__)
self._init_backends()
def _init_backends(self):
""" Initialize auth backends.
"""
# fetch auth backends from config file
self._backends = {}
for section in self._config.sections():
# does the section define an auth backend?
section_components = section.rsplit('.', 1)
if section_components[0] == 'auth.backends':
auth_backend = section_components[1]
self._backends[auth_backend] = eval(self._config.get(section, 'type'))
self._logger.debug("Registered auth backends %s" % str(self._backends))
def reload(self):
""" Reload AuthFactory.
"""
self._auth_cache = {}
self._init_backends()
def get_auth(self, username, password, authoritative_source, auth_options={}):
""" Returns an authentication object.
Examines the auth backend given after the '@' in the username and
returns a suitable instance of a subclass of the BaseAuth class.
* `username` [string]
Username to authenticate as.
* `password` [string]
Password to authenticate with.
* `authoritative_source` [string]
Authoritative source of the query.
* `auth_options` [dict]
A dict which, if authenticated as a trusted user, can override
`username` and `authoritative_source`.
"""
# validate arguments
if (authoritative_source is None):
raise AuthError("Missing authoritative_source.")
# remove invalid cache entries
rem = list()
for key in self._auth_cache:
if self._auth_cache[key]['valid_until'] < datetime.utcnow():
rem.append(key)
for key in rem:
del(self._auth_cache[key])
user_authbackend = username.rsplit('@', 1)
# Find out what auth backend to use.
# If no auth backend was specified in username, use default
backend = ""
if len(user_authbackend) == 1:
backend = self._config.get('auth', 'default_backend')
self._logger.debug("Using default auth backend %s" % backend)
else:
backend = user_authbackend[1]
# do we have a cached instance?
auth_str = ( str(username) + str(password) + str(authoritative_source)
+ str(auth_options) )
if auth_str in self._auth_cache:
self._logger.debug('found cached auth object for user %s' % username)
return self._auth_cache[auth_str]['auth_object']
# Create auth object
try:
auth = self._backends[backend](backend, user_authbackend[0], password, authoritative_source, auth_options)
except KeyError:
raise AuthError("Invalid auth backend '%s' specified" %
str(backend))
# save auth object to cache
self._auth_cache[auth_str] = {
'valid_until': datetime.utcnow() + timedelta(seconds=self._config.getint('auth', 'auth_cache_timeout')),
'auth_object': auth
}
return auth
class BaseAuth:
""" A base authentication class.
All authentication modules should extend this class.
"""
username = None
password = None
authenticated_as = None
full_name = None
authoritative_source = None
auth_backend = None
trusted = None
readonly = None
_logger = None
_auth_options = None
_cfg = None
def __init__(self, username, password, authoritative_source, auth_backend, auth_options={}):
""" Constructor.
            Note that the instance variables are not set by the constructor but
by the :func:`authenticate` method. Therefore, run the
:func:`authenticate`-method before trying to access those
variables!
* `username` [string]
Username to authenticate as.
* `password` [string]
Password to authenticate with.
* `authoritative_source` [string]
Authoritative source of the query.
* `auth_backend` [string]
Name of authentication backend.
* `auth_options` [dict]
A dict which, if authenticated as a trusted user, can override
`username` and `authoritative_source`.
"""
self._logger = logging.getLogger(self.__class__.__name__)
self._cfg = NipapConfig()
self.username = username
self.password = password
self.auth_backend = auth_backend
self.authoritative_source = authoritative_source
self._auth_options = auth_options
def authenticate(self):
""" Verify authentication.
            Returns True/False depending on whether the authentication
succeeded or not.
"""
return False
def authorize(self):
""" Verify authorization.
Check if a user is authorized to perform a specific operation.
"""
return False
class LdapAuth(BaseAuth):
""" An authentication and authorization class for LDAP auth.
"""
_ldap_uri = None
_ldap_basedn = None
_ldap_conn = None
_authenticated = None
def __init__(self, name, username, password, authoritative_source, auth_options={}):
""" Constructor.
            Note that the instance variables are not set by the constructor but
by the :func:`authenticate` method. Therefore, run the
:func:`authenticate`-method before trying to access those
variables!
* `name` [string]
Name of auth backend.
* `username` [string]
Username to authenticate as.
* `password` [string]
Password to authenticate with.
* `authoritative_source` [string]
Authoritative source of the query.
* `auth_options` [dict]
A dict which, if authenticated as a trusted user, can override
`username` and `authoritative_source`.
"""
BaseAuth.__init__(self, username, password, authoritative_source, name, auth_options)
self._ldap_uri = self._cfg.get('auth.backends.' + self.auth_backend, 'uri')
self._ldap_basedn = self._cfg.get('auth.backends.' + self.auth_backend, 'basedn')
self._logger.debug('Creating LdapAuth instance')
self._logger.debug('LDAP URI: ' + self._ldap_uri)
self._ldap_conn = ldap.initialize(self._ldap_uri)
def authenticate(self):
""" Verify authentication.
            Returns True/False depending on whether the authentication
succeeded or not.
"""
# if authentication has been performed, return last result
if self._authenticated is not None:
return self._authenticated
try:
self._ldap_conn.simple_bind_s('uid=' + self.username + ',' + self._ldap_basedn, self.password)
except ldap.SERVER_DOWN as exc:
raise AuthError('Could not connect to LDAP server')
except (ldap.INVALID_CREDENTIALS, ldap.INVALID_DN_SYNTAX,
ldap.UNWILLING_TO_PERFORM) as exc:
# Auth failed
self._logger.debug('erroneous password for user %s' % self.username)
self._authenticated = False
return self._authenticated
# auth succeeded
self.authenticated_as = self.username
self._authenticated = True
self.trusted = False
self.readonly = False
try:
res = self._ldap_conn.search_s(self._ldap_basedn, ldap.SCOPE_SUBTREE, 'uid=' + self.username, ['cn'])
self.full_name = res[0][1]['cn'][0]
except:
self.full_name = ''
self._logger.debug('successfully authenticated as %s, username %s' % (self.authenticated_as, self.username))
return self._authenticated
class SqliteAuth(BaseAuth):
""" An authentication and authorization class for local auth.
"""
_db_conn = None
_db_curs = None
_authenticated = None
def __init__(self, name, username, password, authoritative_source, auth_options={}):
""" Constructor.
            Note that the instance variables are not set by the constructor but
by the :func:`authenticate` method. Therefore, run the
:func:`authenticate`-method before trying to access those
variables!
* `name` [string]
Name of auth backend.
* `username` [string]
Username to authenticate as.
* `password` [string]
Password to authenticate with.
* `authoritative_source` [string]
Authoritative source of the query.
* `auth_options` [dict]
A dict which, if authenticated as a trusted user, can override
`username` and `authoritative_source`.
If the user database and tables are not found, they are created.
"""
BaseAuth.__init__(self, username, password, authoritative_source, name, auth_options)
self._logger.debug('Creating SqliteAuth instance')
# connect to database
try:
self._db_conn = sqlite3.connect(self._cfg.get('auth.backends.' + self.auth_backend, 'db_path'), check_same_thread = False)
self._db_conn.row_factory = sqlite3.Row
self._db_curs = self._db_conn.cursor()
except sqlite3.Error as exc:
self._logger.error('Could not open user database: %s' % str(exc))
raise AuthError(str(exc))
def _latest_db_version(self):
""" Check if database is of the latest version
            Fairly stupid function that simply checks for the existence of columns.
"""
# make sure that user table exists
sql_verify_table = '''SELECT * FROM sqlite_master
WHERE type = 'table' AND name = 'user' '''
self._db_curs.execute(sql_verify_table)
if len(self._db_curs.fetchall()) < 1:
raise AuthSqliteError("No 'user' table.")
for column in ('username', 'pwd_salt', 'pwd_hash', 'full_name',
'trusted', 'readonly'):
sql = "SELECT %s FROM user" % column
try:
self._db_curs.execute(sql)
            except sqlite3.Error:
                # a missing column (or other schema problem) means we are not up to date
                return False
return True
def _create_database(self):
""" Set up database
Creates tables required for the authentication module.
"""
self._logger.info('creating user database')
sql = '''CREATE TABLE IF NOT EXISTS user (
username NOT NULL PRIMARY KEY,
pwd_salt NOT NULL,
pwd_hash NOT NULL,
full_name,
trusted NOT NULL DEFAULT 0,
readonly NOT NULL DEFAULT 0
)'''
self._db_curs.execute(sql)
self._db_conn.commit()
def _upgrade_database(self):
""" Upgrade database to latest version
            This is a fairly primitive function that does not inspect what the
            database actually looks like but just blindly runs commands.
"""
self._logger.info('upgrading user database')
# add readonly column
try:
sql = '''ALTER TABLE user ADD COLUMN readonly NOT NULL DEFAULT 0'''
self._db_curs.execute(sql)
        except sqlite3.OperationalError:
            # the column most likely already exists
            pass
self._db_conn.commit()
def authenticate(self):
""" Verify authentication.
            Returns True/False depending on whether the authentication
succeeded or not.
"""
# if authentication has been performed, return last result
if self._authenticated is not None:
return self._authenticated
self._logger.debug('Trying to authenticate as user \'%s\'' % self.username)
user = self.get_user(self.username)
# Was user found?
if user is None:
self._logger.debug('unknown user %s' % self.username)
self._authenticated = False
return self._authenticated
# verify password
if self._gen_hash(self.password, user['pwd_salt']) != user['pwd_hash']:
# Auth failed
self._logger.debug('erroneous password for user %s' % self.username)
self._authenticated = False
return self._authenticated
# auth succeeded
self.authenticated_as = self.username
self._authenticated = True
self.trusted = bool(user['trusted'])
self.readonly = bool(user['readonly'])
if self.trusted:
# user can impersonate other users
            # this also means that an authoritative_source and full_name can be specified
if 'username' in self._auth_options:
self.username = self._auth_options['username']
# TODO: b0rk out if full_name is supplied and username not?
if 'full_name' in self._auth_options:
self.full_name = self._auth_options['full_name']
if 'authoritative_source' in self._auth_options:
self.authoritative_source = self._auth_options['authoritative_source']
if 'readonly' in self._auth_options:
self.readonly = self._auth_options['readonly']
else:
self.full_name = user['full_name']
self._logger.debug('successfully authenticated as %s, username %s, full_name %s, readonly %s' % (self.authenticated_as, self.username, self.full_name, str(self.readonly)))
return self._authenticated
def get_user(self, username):
""" Fetch the user from the database
The function will return None if the user is not found
"""
sql = '''SELECT * FROM user WHERE username = ?'''
self._db_curs.execute(sql, (username, ))
user = self._db_curs.fetchone()
return user
def add_user(self, username, password, full_name=None, trusted=False, readonly=False):
""" Add user to SQLite database.
* `username` [string]
Username of new user.
* `password` [string]
Password of new user.
* `full_name` [string]
Full name of new user.
* `trusted` [boolean]
Whether the new user should be trusted or not.
* `readonly` [boolean]
                Whether the new user is read-only or not.
"""
# generate salt
char_set = string.ascii_letters + string.digits
salt = ''.join(random.choice(char_set) for x in range(8))
sql = '''INSERT INTO user
(username, pwd_salt, pwd_hash, full_name, trusted, readonly)
VALUES
(?, ?, ?, ?, ?, ?)'''
try:
self._db_curs.execute(sql, (username, salt,
self._gen_hash(password, salt), full_name, trusted, readonly))
self._db_conn.commit()
except (sqlite3.OperationalError, sqlite3.IntegrityError) as error:
raise AuthError(error)
def remove_user(self, username):
""" Remove user from the SQLite database.
* `username` [string]
Username of user to remove.
"""
sql = '''DELETE FROM user WHERE username = ?'''
try:
self._db_curs.execute(sql, (username, ))
self._db_conn.commit()
except (sqlite3.OperationalError, sqlite3.IntegrityError) as error:
raise AuthError(error)
return self._db_curs.rowcount
def list_users(self):
""" List all users.
"""
sql = "SELECT * FROM user ORDER BY username"
self._db_curs.execute(sql)
users = list()
for row in self._db_curs:
users.append(dict(row))
return users
def _gen_hash(self, password, salt):
""" Generate password hash.
"""
        # generate hash; encode to bytes so this also works on Python 3
        h = hashlib.sha1()
        h.update(salt.encode('utf-8'))
        h.update(password.encode('utf-8'))
return h.hexdigest()
class AuthError(Exception):
""" General auth exception.
"""
error_code = 1500
class AuthenticationFailed(AuthError):
""" Authentication failed.
"""
error_code = 1510
class AuthorizationFailed(AuthError):
""" Authorization failed.
"""
error_code = 1520
class AuthSqliteError(AuthError):
""" Problem with the Sqlite database
"""
|
py | 1a31b35d015607bd7e72a873fbe1f425857d5178 | # Attributes vocab http://linked.data.gov.au/def/tern-cv/dd085299-ae86-4371-ae15-61dfa432f924
ATTRIBUTE_TYPES = [
"http://linked.data.gov.au/def/tern-cv/a7609534-b988-43ba-940c-c216d9c05f59",
"http://linked.data.gov.au/def/tern-cv/02dde7af-0ec4-46b0-9c48-ffd26736ac9e",
"http://linked.data.gov.au/def/tern-cv/039f87e5-ffd9-4676-b126-c74844d2e095",
"http://linked.data.gov.au/def/tern-cv/06265baf-9125-4bd1-9aca-1ee94395de52",
"http://linked.data.gov.au/def/tern-cv/08035bb8-d972-4110-b3e9-1939bc972ace",
"http://linked.data.gov.au/def/tern-cv/082c84fe-d923-4162-9b73-294b7a8a2dda",
"http://linked.data.gov.au/def/tern-cv/0ed27ce3-1fa4-477f-b36c-5cd64d8b6ed0",
"http://linked.data.gov.au/def/tern-cv/11f6e3d7-78e4-4665-9b84-a93b56f12fab",
"http://linked.data.gov.au/def/tern-cv/15fdb5ff-f316-4c4f-a438-eb2854479a4c",
"http://linked.data.gov.au/def/tern-cv/1a283759-58e3-45bb-ab70-6f0bd82038fc",
"http://linked.data.gov.au/def/tern-cv/1c15747b-aec7-41e1-a490-c0b317ae441a",
"http://linked.data.gov.au/def/tern-cv/1cf8e48e-266d-4431-a778-9b80347c40f6",
"http://linked.data.gov.au/def/tern-cv/20049a27-4f90-45ee-8708-4b9457ccf7b3",
"http://linked.data.gov.au/def/tern-cv/20d4e292-adc1-4585-8f81-c2a0e92eacab",
"http://linked.data.gov.au/def/tern-cv/226e3bbe-7640-4bc6-872f-0542802880a1",
"http://linked.data.gov.au/def/tern-cv/263d7143-e88d-4105-83e7-00d75035a2b3",
"http://linked.data.gov.au/def/tern-cv/2ad7e552-5bca-45cf-805a-10ba5a98862a",
"http://linked.data.gov.au/def/tern-cv/2f9cdbf8-7973-45f7-9a31-1a53017b67a3",
"http://linked.data.gov.au/def/tern-cv/360063cc-4bff-455e-8c8e-9562ff57994f",
"http://linked.data.gov.au/def/tern-cv/37012c72-03e1-4e3f-af80-9594aade1f3d",
"http://linked.data.gov.au/def/tern-cv/376efe60-0550-42b4-b90b-110015806dc7",
"http://linked.data.gov.au/def/tern-cv/3d64b224-02d5-48d4-bdf5-6caba81cfc64",
"http://linked.data.gov.au/def/tern-cv/3e1eaba4-c58e-42cf-b75a-8124a0bc2e9b",
"http://linked.data.gov.au/def/tern-cv/3f7355ff-03cb-4fc7-81db-219ddfba14f5",
"http://linked.data.gov.au/def/tern-cv/3f92e928-d5ba-43bf-84ac-a45e678e694f",
"http://linked.data.gov.au/def/tern-cv/43ed01cc-0089-4a6b-8754-0990dd0f697b",
"http://linked.data.gov.au/def/tern-cv/45a86abc-43c7-4a30-ac73-fc8d62538140",
"http://linked.data.gov.au/def/tern-cv/46d2e568-72cd-4df3-b0ed-4ebdaf771e4c",
"http://linked.data.gov.au/def/tern-cv/54e40f12-8c13-495a-9f8d-838d78faa5a7",
"http://linked.data.gov.au/def/tern-cv/56195246-ec5d-4050-a1c6-af786fbec715",
"http://linked.data.gov.au/def/tern-cv/5968433f-6748-40ff-9cdb-6a138cb0378d",
"http://linked.data.gov.au/def/tern-cv/5acbfe4f-19e0-456c-89db-961013e6cd7c",
"http://linked.data.gov.au/def/tern-cv/5fb736ae-63ff-4943-bad7-cbf292e70b1b",
"http://linked.data.gov.au/def/tern-cv/61311a0e-5b09-406c-9f64-b0637271ecf2",
"http://linked.data.gov.au/def/tern-cv/6882ac67-b3e5-4195-a0c9-330c7bf14625",
"http://linked.data.gov.au/def/tern-cv/6bd73c25-98f6-4227-ab5d-a3bab18b7a8d",
"http://linked.data.gov.au/def/tern-cv/6d351831-45da-403c-919b-1cee41950488",
"http://linked.data.gov.au/def/tern-cv/70aa04d4-1bd2-479f-b430-925e5bc07aad",
"http://linked.data.gov.au/def/tern-cv/73241f84-d579-4193-a57f-4776dabf96c0",
"http://linked.data.gov.au/def/tern-cv/7455b778-fe96-4d3a-906f-3ed1faae8055",
"http://linked.data.gov.au/def/tern-cv/755b1456-b76f-4d54-8690-10e41e25c5a7",
"http://linked.data.gov.au/def/tern-cv/76806e0f-61a2-447c-8177-7e666637e23a",
"http://linked.data.gov.au/def/tern-cv/800c04ac-9406-4655-92cd-5142764948bc",
"http://linked.data.gov.au/def/tern-cv/8080742b-6855-4e27-812b-671d4dfe7e0f",
"http://linked.data.gov.au/def/tern-cv/889dfc31-5b1c-48c0-8bc7-e12f13d63891",
"http://linked.data.gov.au/def/tern-cv/8afbe0ce-7f7c-4a2f-bdfe-e78fe8b2dd72",
"http://linked.data.gov.au/def/tern-cv/8b57540f-e050-4aec-9d05-46ab64c69c37",
"http://linked.data.gov.au/def/tern-cv/8b7b7346-4317-4bcc-9ff9-24dbf8e9c637",
"http://linked.data.gov.au/def/tern-cv/8e7dfefe-e3ee-40ac-9024-ede48922bee6",
"http://linked.data.gov.au/def/tern-cv/95a8a081-abed-4932-83de-47e0b2bea0cc",
"http://linked.data.gov.au/def/tern-cv/98d2cc31-354d-4e95-a7cd-218040188023",
"http://linked.data.gov.au/def/tern-cv/9b2ab960-da97-473a-81af-d50ab6041739",
"http://linked.data.gov.au/def/tern-cv/9f8cc716-bc8c-40c4-be6b-6270813e94f3",
"http://linked.data.gov.au/def/tern-cv/a37fd6b6-d041-451a-8ac6-4083a1be26c5",
"http://linked.data.gov.au/def/tern-cv/a4132e35-686a-4dc3-9a77-b9a00b3753f9",
"http://linked.data.gov.au/def/tern-cv/a421a75f-c695-4d7f-b361-335b86b15201",
"http://linked.data.gov.au/def/tern-cv/a685171d-2f9d-45b6-b870-1d918ea45148",
"http://linked.data.gov.au/def/tern-cv/ac9570b4-3aaa-4a4f-b9b9-770cb0ef8c2e",
"http://linked.data.gov.au/def/tern-cv/acc89d4d-cbcf-48a6-9de5-54c33eb7d140",
"http://linked.data.gov.au/def/tern-cv/aeb3149f-9354-4a0b-829a-acbaaa416a1f",
"http://linked.data.gov.au/def/tern-cv/b15c4b8e-208e-4ab0-b470-0aa56b72ab38",
"http://linked.data.gov.au/def/tern-cv/b564f403-1b7d-4c54-b0c3-ca3fbabeed89",
"http://linked.data.gov.au/def/tern-cv/b736e783-39b6-4408-9561-91561aa53a1d",
"http://linked.data.gov.au/def/tern-cv/ba619127-7393-4d55-a025-3d94a844d419",
"http://linked.data.gov.au/def/tern-cv/ba9c2502-5d10-4fb9-8c6b-9f654a3dc46d",
"http://linked.data.gov.au/def/tern-cv/bb61ac09-00de-493d-9ef8-eb81fad72724",
"http://linked.data.gov.au/def/tern-cv/c230dd97-3af1-43cf-bd27-ffcf4979d3a1",
"http://linked.data.gov.au/def/tern-cv/c5b8b894-b5ee-49bf-be93-52491afe73a9",
"http://linked.data.gov.au/def/tern-cv/c60e774a-8cb7-4002-b3f1-abb1a47369bb",
"http://linked.data.gov.au/def/tern-cv/c6bd6ddd-77d8-4693-b528-67e8893dbeb7",
"http://linked.data.gov.au/def/tern-cv/cc229ff9-8cca-4ddd-8f01-e1e3fb5892af",
"http://linked.data.gov.au/def/tern-cv/ccba73b3-0527-4a56-a711-e5628a7d0dbd",
"http://linked.data.gov.au/def/tern-cv/d0580fd1-848b-4cc8-92fb-8d455e4fe083",
"http://linked.data.gov.au/def/tern-cv/d1092494-ca45-41de-8f08-9fb9d2142fb9",
"http://linked.data.gov.au/def/tern-cv/d82a71d7-677a-45fe-968f-6dad4b8e8488",
"http://linked.data.gov.au/def/tern-cv/da5ef737-f328-404b-8cc8-2fad54cac397",
"http://linked.data.gov.au/def/tern-cv/dbf4aae7-4179-486b-b181-4f7942f657db",
"http://linked.data.gov.au/def/tern-cv/dbff3449-2ec9-4722-a953-87c78da86f74",
"http://linked.data.gov.au/def/tern-cv/dd34fc05-012d-4092-9324-a4cbcf29bf0a",
"http://linked.data.gov.au/def/tern-cv/e1021d25-7d76-4faa-bfcc-fb6d225eb3ec",
"http://linked.data.gov.au/def/tern-cv/e4109f0f-4129-480d-a99a-21c9cbb88e13",
"http://linked.data.gov.au/def/tern-cv/e66d3943-4d28-4125-a1f0-fb5b1d72171d",
"http://linked.data.gov.au/def/tern-cv/e926b2ca-2688-486c-aa28-435f91c3c110",
"http://linked.data.gov.au/def/tern-cv/ebe5f483-e279-4153-a6d9-2a0dd82e6ea2",
"http://linked.data.gov.au/def/tern-cv/f23851f3-4eac-43ec-b9ce-f40cc9df9a74",
"http://linked.data.gov.au/def/tern-cv/f5a2a443-c78f-410d-a794-16ca1e23d0ad",
"http://linked.data.gov.au/def/tern-cv/f5b14209-f890-4d6e-a10f-fee5b1688ed6",
"http://linked.data.gov.au/def/tern-cv/f5c125b1-81c2-448c-81e6-7ff6a2facd07",
"http://linked.data.gov.au/def/tern-cv/f81aa91e-5f57-4e49-bc6e-4d821d1f9de2",
"http://linked.data.gov.au/def/tern-cv/f82d406b-96ee-41d4-8f84-18e3e52fe530",
"http://linked.data.gov.au/def/tern-cv/335a84cd-01af-49cb-8532-acf71dc1d980",
"http://linked.data.gov.au/def/tern-cv/e5953626-5c62-4baa-b8e5-36a7e6660611",
"http://linked.data.gov.au/def/tern-cv/5a13a61f-a43f-40cf-bc3f-3e0cc2e64ce1",
]
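# The list above is a flat vocabulary of concept IRIs, so validation is a plain
# membership test (sketch):
#   iri = "http://linked.data.gov.au/def/tern-cv/a7609534-b988-43ba-940c-c216d9c05f59"
#   assert iri in ATTRIBUTE_TYPES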
|
py | 1a31b3cbf78f07dd9cca6b20f9cdf08f6b6868bc | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 5000
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
one_target_per_seq=False,
n_seq_per_batch=16,
subsample_target=2,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
input_padding=8,
lag=0,
reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
loss_function=partial(scaled_cost3, ignore_inactive=False, loss_func=mdn_nll),
updates_func=momentum,
learning_rate=5e-3,
learning_rate_changes_by_iteration={
50: 1e-3,
200: 5e-4,
400: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
auto_reshape=False,
plotter=MDNPlotter
)
"""
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
12345678901234567890
"""
def exp_a(name):
global source
    # re-create the data source here so `source` (declared global above) is defined
    source_dict_copy = deepcopy(source_dict)
    source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 1024
NUM_FILTERS = 50
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_length': 10,
'stride': 2,
'nonlinearity': rectify,
'W': Normal(std=1/sqrt(source.seq_length))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': N,
'W': Normal(std=1/sqrt(N * NUM_FILTERS)),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': N,
'W': Normal(std=1/sqrt(N)),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': source.output_shape()[1] * source.output_shape()[2],
'W': Normal(std=1/sqrt(N)),
'nonlinearity': rectify
},
{
'type': ReshapeLayer,
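            # (batch * time, appliances): 16 sequences per batch x 256 output
            # timesteps (seq_length 512 subsampled by 2) x 5 target appliances,
            # matching the settings in source_dict above.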
'shape': (16 * 256, 5)
},
# {
# 'type': DenseLayer,
# 'num_units': source.output_shape()[1] * source.output_shape()[2],
# 'W': Normal(std=1/sqrt(N)),
# 'nonlinearity': T.nnet.softplus
# }
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 1,
'nonlinearity_mu': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
|
py | 1a31b3e01f8b6e2855e59fe2bc51da8af57b31c3 | from setuptools import setup
setup(
name='git-lint-branch',
version='0.0',
py_modules=['git_lint_branch'],
install_requires=[
'typer',
'pygit2',
'spacy',
'colorama',
],
entry_points='''
[console_scripts]
git-lint-branch=git_lint_branch.main:app
''',
)
|
py | 1a31b57844690dc8df93f50ec2a42c28eacef564 | # -*- coding: utf-8 -*-
# Copyright 2021, CS GROUP - France, https://www.csgroup.eu/
#
# This file is part of EODAG project
# https://www.github.com/CS-SI/EODAG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Explicitly import here everything you want to use from the eodag package
isort:skip_file
"""
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")))
from eodag import EODataAccessGateway, api, config, setup_logging
from eodag.api.core import DEFAULT_ITEMS_PER_PAGE, DEFAULT_MAX_ITEMS_PER_PAGE
from eodag.api.product import EOProduct
from eodag.api.product.drivers import DRIVERS
from eodag.api.product.drivers.base import NoDriver
from eodag.api.product.metadata_mapping import format_metadata
from eodag.api.search_result import SearchResult
from eodag.cli import download, eodag, list_pt, search_crunch
from eodag.config import load_default_config, merge_configs
from eodag.plugins.authentication.base import Authentication
from eodag.plugins.crunch.filter_date import FilterDate
from eodag.plugins.crunch.filter_latest_tpl_name import FilterLatestByName
from eodag.plugins.crunch.filter_property import FilterProperty
from eodag.plugins.crunch.filter_overlap import FilterOverlap
from eodag.plugins.download.aws import AwsDownload
from eodag.plugins.download.base import Download
from eodag.plugins.download.http import HTTPDownload
from eodag.plugins.manager import PluginManager
from eodag.plugins.search.base import Search
from eodag.rest import server as eodag_http_server
from eodag.rest.utils import eodag_api, get_date
from eodag.utils import (
get_geometry_from_various,
get_timestamp,
makedirs,
merge_mappings,
path_to_uri,
ProgressCallback,
uri_to_path,
DownloadedCallback,
)
from eodag.utils.exceptions import (
AddressNotFound,
AuthenticationError,
DownloadError,
MisconfiguredError,
NoMatchingProductType,
PluginImplementationError,
UnsupportedDatasetAddressScheme,
UnsupportedProvider,
ValidationError,
)
from eodag.utils.stac_reader import fetch_stac_items
from tests import TESTS_DOWNLOAD_PATH, TEST_RESOURCES_PATH
|
py | 1a31b62c79343fef5bb65bb0ed067320f4bfd2a7 | # -*- coding:utf-8 -*-
import logging
from os.path import dirname as pdir
from os.path import join as pjoin
from pony.orm import Database
ROOT_PATH = pdir(pdir(__file__))
DB_PATH = pjoin(ROOT_PATH, 'db', 'project.db')
logging.basicConfig(
filename=pjoin(ROOT_PATH, 'db', 'project.log'),
format='[%(asctime)-15s] [%(processName)s:%(process)d] %(name)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
level=logging.INFO
)
db = Database('sqlite', DB_PATH, create_db=True)
protocols = pjoin(ROOT_PATH, 'protocols')
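# Typical follow-up elsewhere in the project (sketch only; the entity below is
# hypothetical and not defined in this module):
#   from pony.orm import Required
#   class Protocol(db.Entity):
#       name = Required(str)
#   db.generate_mapping(create_tables=True)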
|
py | 1a31b6e4817c4ee0307800ca57755dd9718af791 | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
import numbers
from botocore.utils import parse_timestamp
from botocore.docs.utils import escape_controls
from botocore.compat import six
class SharedExampleDocumenter(object):
def document_shared_example(self, example, prefix, section,
operation_model):
"""Documents a single shared example based on its definition.
:param example: The model of the example
:param prefix: The prefix to use in the method example.
:param section: The section to write to.
:param operation_model: The model of the operation used in the example
"""
section.style.new_paragraph()
section.write(example.get('description'))
section.style.new_line()
self.document_input(section, example, prefix,
operation_model.input_shape)
self.document_output(section, example, operation_model.output_shape)
def document_input(self, section, example, prefix, shape):
input_section = section.add_new_section('input')
input_section.style.start_codeblock()
if prefix is not None:
input_section.write(prefix)
params = example.get('input', {})
comments = example.get('comments')
if comments:
comments = comments.get('input')
param_section = input_section.add_new_section('parameters')
self._document_params(param_section, params, comments, [], shape)
closing_section = input_section.add_new_section('input-close')
closing_section.style.new_line()
closing_section.style.new_line()
closing_section.write('print(response)')
closing_section.style.end_codeblock()
def document_output(self, section, example, shape):
output_section = section.add_new_section('output')
output_section.style.new_line()
output_section.write('Expected Output:')
output_section.style.new_line()
output_section.style.start_codeblock()
params = example.get('output', {})
# There might not be an output, but we will return metadata anyway
params['ResponseMetadata'] = {"...": "..."}
comments = example.get('comments')
if comments:
comments = comments.get('output')
self._document_dict(output_section, params, comments, [], shape, True)
closing_section = output_section.add_new_section('output-close')
closing_section.style.end_codeblock()
def _document(self, section, value, comments, path, shape):
"""
:param section: The section to add the docs to.
:param value: The input / output values representing the parameters that
are included in the example.
:param comments: The dictionary containing all the comments to be
applied to the example.
:param path: A list describing where the documenter is in traversing the
parameters. This is used to find the equivalent location
            in the comments dictionary.
        :param shape: The botocore shape (model) for ``value``, if available,
            used to decide how nested structures and timestamps are rendered.
"""
if isinstance(value, dict):
self._document_dict(section, value, comments, path, shape)
elif isinstance(value, list):
self._document_list(section, value, comments, path, shape)
elif isinstance(value, numbers.Number):
self._document_number(section, value, path)
elif shape and shape.type_name == 'timestamp':
self._document_datetime(section, value, path)
else:
self._document_str(section, value, path)
def _document_dict(self, section, value, comments, path, shape,
top_level=False):
dict_section = section.add_new_section('dict-value')
self._start_nested_value(dict_section, '{')
for key, val in value.items():
path.append('.%s' % key)
item_section = dict_section.add_new_section(key)
item_section.style.new_line()
item_comment = self._get_comment(path, comments)
if item_comment:
item_section.write(item_comment)
item_section.style.new_line()
item_section.write("'%s': " % key)
# Shape could be none if there is no output besides ResponseMetadata
item_shape = None
if shape:
if shape.type_name == 'structure':
item_shape = shape.members.get(key)
elif shape.type_name == 'map':
item_shape = shape.value
self._document(item_section, val, comments, path, item_shape)
path.pop()
dict_section_end = dict_section.add_new_section('ending-brace')
self._end_nested_value(dict_section_end, '}')
if not top_level:
dict_section_end.write(',')
def _document_params(self, section, value, comments, path, shape):
param_section = section.add_new_section('param-values')
self._start_nested_value(param_section, '(')
for key, val in value.items():
path.append('.%s' % key)
item_section = param_section.add_new_section(key)
item_section.style.new_line()
item_comment = self._get_comment(path, comments)
if item_comment:
item_section.write(item_comment)
item_section.style.new_line()
item_section.write(key + '=')
# Shape could be none if there are no input parameters
item_shape = None
if shape:
item_shape = shape.members.get(key)
self._document(item_section, val, comments, path, item_shape)
path.pop()
param_section_end = param_section.add_new_section('ending-parenthesis')
self._end_nested_value(param_section_end, ')')
def _document_list(self, section, value, comments, path, shape):
list_section = section.add_new_section('list-section')
self._start_nested_value(list_section, '[')
item_shape = shape.member
for index, val in enumerate(value):
item_section = list_section.add_new_section(index)
item_section.style.new_line()
path.append('[%s]' % index)
item_comment = self._get_comment(path, comments)
if item_comment:
item_section.write(item_comment)
item_section.style.new_line()
self._document(item_section, val, comments, path, item_shape)
path.pop()
list_section_end = list_section.add_new_section('ending-bracket')
self._end_nested_value(list_section_end, '],')
def _document_str(self, section, value, path):
# We do the string conversion because this might accept a type that
# we don't specifically address.
safe_value = escape_controls(value)
section.write(u"'%s'," % six.text_type(safe_value))
def _document_number(self, section, value, path):
section.write("%s," % str(value))
def _document_datetime(self, section, value, path):
datetime_tuple = parse_timestamp(value).timetuple()
datetime_str = str(datetime_tuple[0])
for i in range(1, len(datetime_tuple)):
datetime_str += ", " + str(datetime_tuple[i])
section.write("datetime(%s)," % datetime_str)
def _get_comment(self, path, comments):
key = re.sub(r'^\.', '', ''.join(path))
if comments and key in comments:
return '# ' + comments[key]
else:
return ''
def _start_nested_value(self, section, start):
section.write(start)
section.style.indent()
section.style.indent()
def _end_nested_value(self, section, end):
section.style.dedent()
section.style.dedent()
section.style.new_line()
section.write(end)
def document_shared_examples(section, operation_model, example_prefix,
shared_examples):
"""Documents the shared examples
:param section: The section to write to.
:param operation_model: The model of the operation.
:param example_prefix: The prefix to use in the method example.
:param shared_examples: The shared JSON examples from the model.
"""
container_section = section.add_new_section('shared-examples')
container_section.style.new_paragraph()
container_section.style.bold('Examples')
documenter = SharedExampleDocumenter()
for example in shared_examples:
documenter.document_shared_example(
example=example,
section=container_section.add_new_section(example['id']),
prefix=example_prefix,
operation_model=operation_model
)
|
py | 1a31b76bf722b967f4deeecd3e141170fb49bd06 | # coding: utf-8
"""
CRM cards
Allows an app to extend the CRM UI by surfacing custom cards in the sidebar of record pages. These cards are defined up-front as part of app configuration, then populated by external data fetch requests when the record page is accessed by a user. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.extensions.cards.configuration import Configuration
class ObjectToken(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {"name": "str", "label": "str", "data_type": "str", "value": "str"}
attribute_map = {"name": "name", "label": "label", "data_type": "dataType", "value": "value"}
def __init__(self, name=None, label=None, data_type=None, value=None, local_vars_configuration=None): # noqa: E501
"""ObjectToken - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._label = None
self._data_type = None
self._value = None
self.discriminator = None
if name is not None:
self.name = name
if label is not None:
self.label = label
if data_type is not None:
self.data_type = data_type
self.value = value
@property
def name(self):
"""Gets the name of this ObjectToken. # noqa: E501
:return: The name of this ObjectToken. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ObjectToken.
:param name: The name of this ObjectToken. # noqa: E501
:type: str
"""
self._name = name
@property
def label(self):
"""Gets the label of this ObjectToken. # noqa: E501
:return: The label of this ObjectToken. # noqa: E501
:rtype: str
"""
return self._label
@label.setter
def label(self, label):
"""Sets the label of this ObjectToken.
:param label: The label of this ObjectToken. # noqa: E501
:type: str
"""
self._label = label
@property
def data_type(self):
"""Gets the data_type of this ObjectToken. # noqa: E501
:return: The data_type of this ObjectToken. # noqa: E501
:rtype: str
"""
return self._data_type
@data_type.setter
def data_type(self, data_type):
"""Sets the data_type of this ObjectToken.
:param data_type: The data_type of this ObjectToken. # noqa: E501
:type: str
"""
allowed_values = ["BOOLEAN", "CURRENCY", "DATE", "DATETIME", "EMAIL", "LINK", "NUMERIC", "STRING", "STATUS"] # noqa: E501
if self.local_vars_configuration.client_side_validation and data_type not in allowed_values: # noqa: E501
raise ValueError("Invalid value for `data_type` ({0}), must be one of {1}".format(data_type, allowed_values)) # noqa: E501
self._data_type = data_type
@property
def value(self):
"""Gets the value of this ObjectToken. # noqa: E501
:return: The value of this ObjectToken. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this ObjectToken.
:param value: The value of this ObjectToken. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and value is None: # noqa: E501
raise ValueError("Invalid value for `value`, must not be `None`") # noqa: E501
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ObjectToken):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ObjectToken):
return True
return self.to_dict() != other.to_dict()
|
py | 1a31b7e50f0900047074d1e1b0c12bd777939494 | """Utilities for including Python state in TensorFlow checkpoints."""
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import python_state as core_python_state
# pylint: disable=g-import-not-at-top
try:
# In Python 2.x, use the faster string buffering option.
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
# pylint: enable=g-import-not-at-top
class NumpyState(base.Trackable):
"""A trackable object whose NumPy array attributes are saved/restored.
Example usage:
```python
arrays = tf.contrib.checkpoint.NumpyState()
checkpoint = tf.train.Checkpoint(numpy_arrays=arrays)
arrays.x = numpy.zeros([3, 4])
save_path = checkpoint.save("/tmp/ckpt")
arrays.x[1, 1] = 4.
checkpoint.restore(save_path)
assert (arrays.x == numpy.zeros([3, 4])).all()
second_checkpoint = tf.train.Checkpoint(
numpy_arrays=tf.contrib.checkpoint.NumpyState())
# Attributes of NumpyState objects are created automatically by restore()
second_checkpoint.restore(save_path)
assert (second_checkpoint.numpy_arrays.x == numpy.zeros([3, 4])).all()
```
Note that `NumpyState` objects re-create the attributes of the previously
saved object on `restore()`. This is in contrast to TensorFlow variables, for
which a `Variable` object must be created and assigned to an attribute.
This snippet works both when graph building and when executing eagerly. On
save, the NumPy array(s) are fed as strings to be saved in the checkpoint (via
a placeholder when graph building, or as a string constant when executing
eagerly). When restoring they skip the TensorFlow graph entirely, and so no
restore ops need be run. This means that restoration always happens eagerly,
rather than waiting for `checkpoint.restore(...).run_restore_ops()` like
TensorFlow variables when graph building.
"""
def _lookup_dependency(self, name):
"""Create placeholder NumPy arrays for to-be-restored attributes.
Typically `_lookup_dependency` is used to check by name whether a dependency
exists. We cheat slightly by creating a trackable object for `name` if
we don't already have one, giving us attribute re-creation behavior when
loading a checkpoint.
Args:
name: The name of the dependency being checked.
Returns:
An existing dependency if one exists, or a new `_NumpyWrapper` placeholder
dependency (which will generally be restored immediately).
"""
value = super(NumpyState, self)._lookup_dependency(name)
if value is None:
value = _NumpyWrapper(numpy.array([]))
new_reference = base.TrackableReference(name=name, ref=value)
self._unconditional_checkpoint_dependencies.append(new_reference)
self._unconditional_dependency_names[name] = value
super(NumpyState, self).__setattr__(name, value)
return value
def __getattribute__(self, name):
"""Un-wrap `_NumpyWrapper` objects when accessing attributes."""
value = super(NumpyState, self).__getattribute__(name)
if isinstance(value, _NumpyWrapper):
return value.array
return value
def __setattr__(self, name, value):
"""Automatically wrap NumPy arrays assigned to attributes."""
# TODO(allenl): Consider supporting lists/tuples, either ad-hoc or by making
# ndarrays trackable natively and using standard trackable list
# tracking.
if isinstance(value, (numpy.ndarray, numpy.generic)):
try:
existing = super(NumpyState, self).__getattribute__(name)
existing.array = value
return
except AttributeError:
value = _NumpyWrapper(value)
self._track_trackable(value, name=name, overwrite=True)
elif (name not in ("_self_setattr_tracking", "_self_update_uid")
and getattr(self, "_self_setattr_tracking", True)):
# Mixing restore()-created attributes with user-added trackable
# objects is tricky, since we can't use the `_lookup_dependency` trick to
# re-create attributes (we might accidentally steal the restoration for
# another trackable object). For now `NumpyState` objects must be
# leaf nodes. Theoretically we could add some extra arguments to
# `_lookup_dependency` to figure out whether we should create a NumPy
# array for the attribute or not.
raise NotImplementedError(
("Assigned %s to the %s property of %s, which is not a NumPy array. "
"Currently mixing NumPy arrays and other trackable objects is "
"not supported. File a feature request if this limitation bothers "
"you.")
% (value, name, self))
super(NumpyState, self).__setattr__(name, value)
class _NumpyWrapper(core_python_state.PythonState):
"""Wraps a NumPy array for storage in an object-based checkpoint."""
def __init__(self, array):
"""Specify a NumPy array to wrap.
Args:
array: The NumPy array to save and restore (may be overwritten).
"""
self.array = array
def serialize(self):
"""Callback to serialize the array."""
string_file = BytesIO()
try:
numpy.save(string_file, self.array, allow_pickle=False)
serialized = string_file.getvalue()
finally:
string_file.close()
return serialized
def deserialize(self, string_value):
"""Callback to deserialize the array."""
string_file = BytesIO(string_value)
try:
self.array = numpy.load(string_file, allow_pickle=False)
finally:
string_file.close()
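# Round-trip sketch (illustrative only, using just the classes defined above):
#   wrapper = _NumpyWrapper(numpy.arange(6).reshape(2, 3))
#   blob = wrapper.serialize()       # bytes in .npy format
#   wrapper.deserialize(blob)        # restores wrapper.array from those bytes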
|
py | 1a31b8ce7720217c468173fe78a2a4191284c045 | from typing import List, Optional
import aiosqlite
from chiadoge.types.blockchain_format.coin import Coin
from chiadoge.types.blockchain_format.sized_bytes import bytes32
from chiadoge.types.coin_record import CoinRecord
from chiadoge.types.full_block import FullBlock
from chiadoge.util.db_wrapper import DBWrapper
from chiadoge.util.ints import uint32, uint64
from chiadoge.util.lru_cache import LRUCache
class CoinStore:
"""
This object handles CoinRecords in DB.
A cache is maintained for quicker access to recent coins.
"""
coin_record_db: aiosqlite.Connection
coin_record_cache: LRUCache
cache_size: uint32
db_wrapper: DBWrapper
@classmethod
async def create(cls, db_wrapper: DBWrapper, cache_size: uint32 = uint32(60000)):
self = cls()
self.cache_size = cache_size
self.db_wrapper = db_wrapper
self.coin_record_db = db_wrapper.db
await self.coin_record_db.execute("pragma journal_mode=wal")
await self.coin_record_db.execute("pragma synchronous=2")
await self.coin_record_db.execute(
(
"CREATE TABLE IF NOT EXISTS coin_record("
"coin_name text PRIMARY KEY,"
" confirmed_index bigint,"
" spent_index bigint,"
" spent int,"
" coinbase int,"
" puzzle_hash text,"
" coin_parent text,"
" amount blob,"
" timestamp bigint)"
)
)
# Useful for reorg lookups
await self.coin_record_db.execute(
"CREATE INDEX IF NOT EXISTS coin_confirmed_index on coin_record(confirmed_index)"
)
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_spent_index on coin_record(spent_index)")
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_spent on coin_record(spent)")
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_puzzle_hash on coin_record(puzzle_hash)")
await self.coin_record_db.commit()
self.coin_record_cache = LRUCache(cache_size)
return self
async def new_block(self, block: FullBlock, tx_additions: List[Coin], tx_removals: List[bytes32]):
"""
        Only called for blocks which are transaction blocks (and thus have rewards and transactions)
"""
if block.is_transaction_block() is False:
return None
assert block.foliage_transaction_block is not None
for coin in tx_additions:
record: CoinRecord = CoinRecord(
coin,
block.height,
uint32(0),
False,
False,
block.foliage_transaction_block.timestamp,
)
await self._add_coin_record(record, False)
included_reward_coins = block.get_included_reward_coins()
if block.height == 0:
assert len(included_reward_coins) == 0
else:
assert len(included_reward_coins) >= 2
for coin in included_reward_coins:
reward_coin_r: CoinRecord = CoinRecord(
coin,
block.height,
uint32(0),
False,
True,
block.foliage_transaction_block.timestamp,
)
await self._add_coin_record(reward_coin_r, False)
total_amount_spent: int = 0
for coin_name in tx_removals:
total_amount_spent += await self._set_spent(coin_name, block.height)
# Sanity check, already checked in block_body_validation
assert sum([a.amount for a in tx_additions]) <= total_amount_spent
# Checks DB and DiffStores for CoinRecord with coin_name and returns it
async def get_coin_record(self, coin_name: bytes32) -> Optional[CoinRecord]:
cached = self.coin_record_cache.get(coin_name)
if cached is not None:
return cached
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE coin_name=?", (coin_name.hex(),))
row = await cursor.fetchone()
await cursor.close()
if row is not None:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
record = CoinRecord(coin, row[1], row[2], row[3], row[4], row[8])
self.coin_record_cache.put(record.coin.name(), record)
return record
return None
async def get_coins_added_at_height(self, height: uint32) -> List[CoinRecord]:
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE confirmed_index=?", (height,))
rows = await cursor.fetchall()
await cursor.close()
coins = []
for row in rows:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coins.append(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return coins
async def get_coins_removed_at_height(self, height: uint32) -> List[CoinRecord]:
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE spent_index=?", (height,))
rows = await cursor.fetchall()
await cursor.close()
coins = []
for row in rows:
spent: bool = bool(row[3])
if spent:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coin_record = CoinRecord(coin, row[1], row[2], spent, row[4], row[8])
coins.append(coin_record)
return coins
# Checks DB and DiffStores for CoinRecords with puzzle_hash and returns them
async def get_coin_records_by_puzzle_hash(
self,
include_spent_coins: bool,
puzzle_hash: bytes32,
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinRecord]:
coins = set()
cursor = await self.coin_record_db.execute(
f"SELECT * from coin_record WHERE puzzle_hash=? AND confirmed_index>=? AND confirmed_index<? "
f"{'' if include_spent_coins else 'AND spent=0'}",
(puzzle_hash.hex(), start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coins.add(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return list(coins)
async def get_coin_records_by_puzzle_hashes(
self,
include_spent_coins: bool,
puzzle_hashes: List[bytes32],
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinRecord]:
if len(puzzle_hashes) == 0:
return []
coins = set()
puzzle_hashes_db = tuple([ph.hex() for ph in puzzle_hashes])
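        # Build one "?" placeholder per puzzle hash (e.g. 3 hashes -> "?,?,?") so
        # the hex strings can be bound safely as parameters in the query below.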
cursor = await self.coin_record_db.execute(
f'SELECT * from coin_record WHERE puzzle_hash in ({"?," * (len(puzzle_hashes_db) - 1)}?) '
f"AND confirmed_index>=? AND confirmed_index<? "
f"{'' if include_spent_coins else 'AND spent=0'}",
puzzle_hashes_db + (start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coins.add(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return list(coins)
async def rollback_to_block(self, block_index: int):
"""
Note that block_index can be negative, in which case everything is rolled back
"""
# Update memory cache
        delete_queue: List[bytes32] = []
for coin_name, coin_record in list(self.coin_record_cache.cache.items()):
if int(coin_record.spent_block_index) > block_index:
new_record = CoinRecord(
coin_record.coin,
coin_record.confirmed_block_index,
uint32(0),
False,
coin_record.coinbase,
coin_record.timestamp,
)
self.coin_record_cache.put(coin_record.coin.name(), new_record)
if int(coin_record.confirmed_block_index) > block_index:
delete_queue.append(coin_name)
for coin_name in delete_queue:
self.coin_record_cache.remove(coin_name)
# Delete from storage
c1 = await self.coin_record_db.execute("DELETE FROM coin_record WHERE confirmed_index>?", (block_index,))
await c1.close()
c2 = await self.coin_record_db.execute(
"UPDATE coin_record SET spent_index = 0, spent = 0 WHERE spent_index>?",
(block_index,),
)
await c2.close()
# Store CoinRecord in DB and ram cache
async def _add_coin_record(self, record: CoinRecord, allow_replace: bool) -> None:
if self.coin_record_cache.get(record.coin.name()) is not None:
self.coin_record_cache.remove(record.coin.name())
cursor = await self.coin_record_db.execute(
f"INSERT {'OR REPLACE ' if allow_replace else ''}INTO coin_record VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)",
(
record.coin.name().hex(),
record.confirmed_block_index,
record.spent_block_index,
int(record.spent),
int(record.coinbase),
str(record.coin.puzzle_hash.hex()),
str(record.coin.parent_coin_info.hex()),
bytes(record.coin.amount),
record.timestamp,
),
)
await cursor.close()
# Update coin_record to be spent in DB
async def _set_spent(self, coin_name: bytes32, index: uint32) -> uint64:
current: Optional[CoinRecord] = await self.get_coin_record(coin_name)
if current is None:
raise ValueError(f"Cannot spend a coin that does not exist in db: {coin_name}")
assert not current.spent # Redundant sanity check, already checked in block_body_validation
spent: CoinRecord = CoinRecord(
current.coin,
current.confirmed_block_index,
index,
True,
current.coinbase,
current.timestamp,
) # type: ignore # noqa
await self._add_coin_record(spent, True)
return current.coin.amount
|
py | 1a31b9e5f10ea987feb8ecf3c21a3f4b24de5e1a | from django.db import models
from django import forms
from django.contrib.admin import widgets
import datetime
# Create your models here.
class Proje(models.Model):
proje_id = models.AutoField(primary_key = True)
proje_adi = models.CharField(max_length = 50)
proje_baslama_tarih = models.DateField('Başlangıç Tarihi')
proje_teslim_tarih = models.DateField('Bitiş Tarihi')
class Calisan(models.Model):
calisan_id = models.AutoField(primary_key = True)
calisan_adi = models.CharField(max_length = 10)
calisan_soyadi = models.CharField(max_length = 10)
calisan_yetki = models.CharField(max_length = 5)
calisan_sifre = models.CharField(max_length = 10)
calisan_email = models.CharField(max_length = 30)
class Gorev(models.Model):
gorev_id = models.AutoField(primary_key = True)
gorev_adi = models.CharField(max_length = 10)
gorev_alan = models.ForeignKey(Calisan, on_delete = models.CASCADE, related_name = 'gorev_alan')
gorev_veren = models.ForeignKey(Calisan, on_delete = models.CASCADE, related_name = 'gorev_veren')
gorev_teslim_tarih = models.DateField('Son Teslim Tarihi')
proje = models.ForeignKey(Proje, on_delete = models.CASCADE)
gorev_durum = models.BooleanField(default=False)
class ProjeForm(forms.ModelForm):
class Meta:
model = Proje
fields = ['proje_adi','proje_baslama_tarih', 'proje_teslim_tarih']
def __init__(self, *args, **kwargs):
super(ProjeForm, self).__init__(*args, **kwargs)
self.fields['proje_adi'].widget = widgets.AdminTextInputWidget()
self.fields['proje_baslama_tarih'].widget = widgets.AdminDateWidget()
self.fields['proje_teslim_tarih'].widget = widgets.AdminDateWidget()
class CalisanForm(forms.ModelForm):
class Meta:
model = Calisan
fields = ['calisan_adi','calisan_soyadi','calisan_email']
def __init__(self, *args, **kwargs):
super(CalisanForm, self).__init__(*args, **kwargs)
self.fields['calisan_adi'].widget = widgets.AdminTextInputWidget()
self.fields['calisan_soyadi'].widget = widgets.AdminTextInputWidget()
self.fields['calisan_email'].widget = widgets.AdminEmailInputWidget()
class GorevForm(forms.Form):
gorev_alan = forms.ModelChoiceField(queryset = Calisan.objects.all().values_list('calisan_adi',flat = True))
gorev_veren = forms.ModelChoiceField(queryset = Calisan.objects.filter(calisan_yetki = 'admin').values_list('calisan_adi',flat = True))
proje = forms.ModelChoiceField(queryset = Proje.objects.all().values_list('proje_adi',flat = True))
class LoginForm(forms.Form):
email = forms.CharField(label = "Email")
password = forms.CharField(label = "Şifre", widget = forms.PasswordInput)
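# View-side usage sketch (hypothetical view code, not part of this module):
#   form = ProjeForm(request.POST or None)
#   if form.is_valid():
#       form.save()   # ModelForm, so this creates a Proje row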
|
py | 1a31baedd5d4874e35f81e2c7f027abde7c835a9 | # coding: utf-8
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.1.2-pre.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import unittest
import openapi_client
from openapi_client.model.string_parameter_definition import StringParameterDefinition
class TestStringParameterDefinition(unittest.TestCase):
"""StringParameterDefinition unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def test_StringParameterDefinition(self):
"""Test StringParameterDefinition"""
# FIXME: construct object with mandatory attributes with example values
# model = StringParameterDefinition() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a31bb6294818851d643aa63fe3cae2579282484 | from __future__ import absolute_import
import os, itertools, json, numpy, pickle
from ann_benchmarks.plotting.metrics import all_metrics as metrics
import matplotlib.pyplot as plt
def create_pointset(data, xn, yn):
xm, ym = (metrics[xn], metrics[yn])
rev = ym["worst"] < 0
data.sort(key=lambda t: t[-1], reverse=rev) # sort by y coordinate
axs, ays, als = [], [], []
# Generate Pareto frontier
xs, ys, ls = [], [], []
last_x = xm["worst"]
comparator = \
(lambda xv, lx: xv > lx) if last_x < 0 else (lambda xv, lx: xv < lx)
for algo, algo_name, xv, yv in data:
if not xv or not yv:
continue
axs.append(xv)
ays.append(yv)
als.append(algo_name)
if comparator(xv, last_x):
last_x = xv
xs.append(xv)
ys.append(yv)
ls.append(algo_name)
return xs, ys, ls, axs, ays, als
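# Input/output sketch (illustrative values; "k-nn" and "qps" are assumed to be
# keys of the metrics dict imported above):
#   data = [("annoy", "annoy n_trees=100", 0.91, 1200.0), ...]
#   xs, ys, ls, axs, ays, als = create_pointset(data, "k-nn", "qps")
#   # xs/ys/ls hold the Pareto frontier; axs/ays/als hold all points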
def compute_metrics(true_nn_distances, res, metric_1, metric_2):
all_results = {}
for i, (definition, run) in enumerate(res):
algo = definition.algorithm
algo_name = run.attrs['name']
# cache distances to avoid access to hdf5 file
run_distances = list(run['distances'])
metric_1_value = metrics[metric_1]['function'](true_nn_distances, run_distances, run.attrs)
metric_2_value = metrics[metric_2]['function'](true_nn_distances, run_distances, run.attrs)
print('%3d: %80s %12.3f %12.3f' % (i, algo_name, metric_1_value, metric_2_value))
all_results.setdefault(algo, []).append((algo, algo_name, metric_1_value, metric_2_value))
return all_results
def compute_all_metrics(true_nn_distances, run, algo):
algo_name = run.attrs["name"]
print('--')
print(algo_name)
results = {}
# cache distances to avoid access to hdf5 file
run_distances = list(run["distances"])
run_attrs = dict(run.attrs)
for name, metric in metrics.items():
v = metric["function"](true_nn_distances, run_distances, run_attrs)
results[name] = v
if v:
print('%s: %g' % (name, v))
return (algo, algo_name, results)
def generate_n_colors(n):
vs = numpy.linspace(0.4, 1.0, 7)
colors = [(.9, .4, .4, 1.)]
def euclidean(a, b):
return sum((x-y)**2 for x, y in zip(a, b))
while len(colors) < n:
new_color = max(itertools.product(vs, vs, vs), key=lambda a: min(euclidean(a, b) for b in colors))
colors.append(new_color + (1.,))
return colors
def create_linestyles(unique_algorithms):
colors = dict(zip(unique_algorithms, generate_n_colors(len(unique_algorithms))))
linestyles = dict((algo, ['--', '-.', '-', ':'][i%4]) for i, algo in enumerate(unique_algorithms))
markerstyles = dict((algo, ['+', '<', 'o', '*', 'x'][i%5]) for i, algo in enumerate(unique_algorithms))
faded = dict((algo, (r, g, b, 0.3)) for algo, (r, g, b, a) in colors.items())
return dict((algo, (colors[algo], faded[algo], linestyles[algo], markerstyles[algo])) for algo in unique_algorithms)
def get_up_down(metric):
if metric["worst"] == float("inf"):
return "down"
return "up"
def get_left_right(metric):
if metric["worst"] == float("inf"):
return "left"
return "right"
def get_plot_label(xm, ym):
return "%(xlabel)s-%(ylabel)s tradeoff - %(updown)s and to the %(leftright)s is better" % {
"xlabel" : xm["description"], "ylabel" : ym["description"], "updown" : get_up_down(ym), "leftright" : get_left_right(xm) }
|
py | 1a31bbabc94bc3a2fddab00d2fe4daa1895e53eb | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: ydb/public/api/grpc/ydb_coordination_v1.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from ydb.public.api.protos import ydb_coordination_pb2 as ydb_dot_public_dot_api_dot_protos_dot_ydb__coordination__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='ydb/public/api/grpc/ydb_coordination_v1.proto',
package='Ydb.Coordination.V1',
syntax='proto3',
serialized_options=b'\n\036com.yandex.ydb.coordination.v1B\020CoordinationGrpcP\001',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n-ydb/public/api/grpc/ydb_coordination_v1.proto\x12\x13Ydb.Coordination.V1\x1a,ydb/public/api/protos/ydb_coordination.proto2\xca\x03\n\x13\x43oordinationService\x12R\n\x07Session\x12 .Ydb.Coordination.SessionRequest\x1a!.Ydb.Coordination.SessionResponse(\x01\x30\x01\x12W\n\nCreateNode\x12#.Ydb.Coordination.CreateNodeRequest\x1a$.Ydb.Coordination.CreateNodeResponse\x12T\n\tAlterNode\x12\".Ydb.Coordination.AlterNodeRequest\x1a#.Ydb.Coordination.AlterNodeResponse\x12Q\n\x08\x44ropNode\x12!.Ydb.Coordination.DropNodeRequest\x1a\".Ydb.Coordination.DropNodeResponse\x12]\n\x0c\x44\x65scribeNode\x12%.Ydb.Coordination.DescribeNodeRequest\x1a&.Ydb.Coordination.DescribeNodeResponseB4\n\x1e\x63om.yandex.ydb.coordination.v1B\x10\x43oordinationGrpcP\x01\x62\x06proto3'
,
dependencies=[ydb_dot_public_dot_api_dot_protos_dot_ydb__coordination__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR._options = None
_COORDINATIONSERVICE = _descriptor.ServiceDescriptor(
name='CoordinationService',
full_name='Ydb.Coordination.V1.CoordinationService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=117,
serialized_end=575,
methods=[
_descriptor.MethodDescriptor(
name='Session',
full_name='Ydb.Coordination.V1.CoordinationService.Session',
index=0,
containing_service=None,
input_type=ydb_dot_public_dot_api_dot_protos_dot_ydb__coordination__pb2._SESSIONREQUEST,
output_type=ydb_dot_public_dot_api_dot_protos_dot_ydb__coordination__pb2._SESSIONRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='CreateNode',
full_name='Ydb.Coordination.V1.CoordinationService.CreateNode',
index=1,
containing_service=None,
input_type=ydb_dot_public_dot_api_dot_protos_dot_ydb__coordination__pb2._CREATENODEREQUEST,
output_type=ydb_dot_public_dot_api_dot_protos_dot_ydb__coordination__pb2._CREATENODERESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='AlterNode',
full_name='Ydb.Coordination.V1.CoordinationService.AlterNode',
index=2,
containing_service=None,
input_type=ydb_dot_public_dot_api_dot_protos_dot_ydb__coordination__pb2._ALTERNODEREQUEST,
output_type=ydb_dot_public_dot_api_dot_protos_dot_ydb__coordination__pb2._ALTERNODERESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='DropNode',
full_name='Ydb.Coordination.V1.CoordinationService.DropNode',
index=3,
containing_service=None,
input_type=ydb_dot_public_dot_api_dot_protos_dot_ydb__coordination__pb2._DROPNODEREQUEST,
output_type=ydb_dot_public_dot_api_dot_protos_dot_ydb__coordination__pb2._DROPNODERESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='DescribeNode',
full_name='Ydb.Coordination.V1.CoordinationService.DescribeNode',
index=4,
containing_service=None,
input_type=ydb_dot_public_dot_api_dot_protos_dot_ydb__coordination__pb2._DESCRIBENODEREQUEST,
output_type=ydb_dot_public_dot_api_dot_protos_dot_ydb__coordination__pb2._DESCRIBENODERESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_COORDINATIONSERVICE)
DESCRIPTOR.services_by_name['CoordinationService'] = _COORDINATIONSERVICE
# @@protoc_insertion_point(module_scope)
|
py | 1a31bc5394b6f37482463e581c96f41107bd034f | import unittest
import tohil
class TestMethods(unittest.TestCase):
def test_convert1(self):
"""exercise tohil.convert with no to= and with to=str"""
self.assertEqual(tohil.convert(10), "10")
self.assertEqual(tohil.convert(10, to=str), "10")
self.assertEqual(tohil.convert("10"), "10")
self.assertEqual(tohil.convert("10", to=str), "10")
def test_convert2(self):
"""exercise tohil.convert and to=int and to=float"""
self.assertEqual(tohil.convert("10", to=int), 10)
self.assertEqual(tohil.convert("10", to=float), 10.0)
def test_convert3(self):
"""exercise tohil.convert to=bool"""
self.assertEqual(tohil.convert(True, to=bool), True)
self.assertEqual(tohil.convert("t", to=bool), True)
self.assertEqual(tohil.convert("1", to=bool), True)
self.assertEqual(tohil.convert(1, to=bool), True)
self.assertEqual(tohil.convert(False, to=bool), False)
self.assertEqual(tohil.convert("f", to=bool), False)
self.assertEqual(tohil.convert("0", to=bool), False)
self.assertEqual(tohil.convert(0, to=bool), False)
def test_convert4(self):
"""exercise tohil.convert to=list"""
self.assertEqual(tohil.convert("1 2 3 4 5", to=list), ["1", "2", "3", "4", "5"])
def test_convert5(self):
"""exercise tohil.convert and to=dict"""
self.assertEqual(
tohil.convert("a 1 b 2 c 3 d 4", to=dict),
{"a": "1", "b": "2", "c": "3", "d": "4"},
)
def test_convert6(self):
"""exercise tohil.convert and to=tuple"""
self.assertEqual(
tohil.convert("a 1 b 2 c 3 d 4", to=tuple),
("a", "1", "b", "2", "c", "3", "d", "4"),
)
def test_convert7(self):
"""exercise tohil.convert and to=set"""
self.assertEqual(
sorted(tohil.convert("1 2 3 4 5 6 6", to=set)),
["1", "2", "3", "4", "5", "6"],
)
def test_convert8(self):
"""exercise tohil.convert and to=tohil.tclobj"""
self.assertEqual(
repr(tohil.convert("1 2 3", to=tohil.tclobj)), "<tohil.tclobj: '1 2 3'>"
)
if __name__ == "__main__":
unittest.main()
|
py | 1a31bc78de1a8e4a503b150313310e5726cfad7f | from starflyer import Handler, redirect, asjson, AttributeMapper
from camper import BaseForm, db, BaseHandler, is_admin, logged_in, ensure_barcamp
from wtforms import *
from sfext.babel import T
from .base import BarcampBaseHandler, LocationNotFound
import uuid
class ParticipantDataEditForm(BaseForm):
"""form for defining a pareticipant data form"""
# base data
title = TextField(T("Name of field"), [validators.Length(max=50), validators.Required()],
description = T('the name of the field to be shown in the form, e.g. "t-shirt size"'),
)
description = TextAreaField(T("Description"),
description = T('please describe what the user should enter in this field.'),
)
fieldtype = RadioField(T("field type"), [validators.Required()],
choices=[
('checkbox',T('a yes/no field')),
('textfield',T('1 line of text')),
('textarea',T('multiple lines of text')),
('select',T('select one choice out of many'))],
        description = T('please choose between a one-line text field and a multi-line text area'),
)
choices = TextAreaField(T("Choices"),
description = T('please put each choice on a separate line.'),
)
required = BooleanField(T("field required?"),
description = T('If you enable this then the user cannot register before this field has been filled in.'),
)
class ParticipantsDataEditView(BarcampBaseHandler):
"""let the user define the participant data form fields"""
template = "admin/participants_data_edit.html"
@ensure_barcamp()
@logged_in()
@is_admin()
def get(self, slug = None):
"""render the view"""
form = ParticipantDataEditForm(self.request.form, config = self.config)
registration_form = self.barcamp.registration_form
if self.request.method == 'POST' and form.validate():
f = form.data
f['name'] = unicode(uuid.uuid4())
# clean up choices
new_choices = []
for c in f['choices'].split("\n"):
choice = c.strip()
if choice:
new_choices.append((choice, choice)) # value and name are the same
f['choices'] = new_choices
self.barcamp.registration_form.append(f)
self.barcamp.save()
return redirect(self.url_for("barcamps.registration_form_editor", slug = self.barcamp.slug))
return self.render(
view = self.barcamp_view,
barcamp = self.barcamp,
title = self.barcamp.name,
form = form,
fields = self.barcamp.registration_form,
**self.barcamp
)
post = get
@ensure_barcamp()
@logged_in()
@is_admin()
def delete(self, slug = None):
"""delete a form entry"""
idx = self.request.args.get("idx", None)
rf = self.barcamp.registration_form
if idx is not None and int(idx) < len(rf) and int(idx) >= 0:
del self.barcamp.registration_form[int(idx)]
self.barcamp.save()
return redirect(self.url_for("barcamps.registration_form_editor", slug = self.barcamp.slug))
|
py | 1a31bddf9178b9adce8d22d44e502379135bb23d | # -*- coding: utf-8 -*-
"""Compound ZIP file plugin related functions and classes for testing."""
import zipfile
from plaso.containers import sessions
from plaso.storage.fake import writer as fake_writer
from tests.parsers import test_lib
class CompoundZIPPluginTestCase(test_lib.ParserTestCase):
"""Compound ZIP file plugin test case."""
def _ParseZIPFileWithPlugin(
self, path_segments, plugin, knowledge_base_values=None):
"""Parses a file as a ZIP file and returns an event generator.
This method will first test if a ZIP file contains the required paths
using plugin.CheckRequiredPaths() and then extracts events using
plugin.Process().
Args:
path_segments (list[str]): path segments inside the test data directory.
plugin (CompoundZIPPlugin): compound ZIP file plugin.
knowledge_base_values (Optional[dict[str, object]]): knowledge base
values.
Returns:
FakeStorageWriter: storage writer.
"""
session = sessions.Session()
storage_writer = fake_writer.FakeStorageWriter(session)
storage_writer.Open()
file_entry = self._GetTestFileEntry(path_segments)
parser_mediator = self._CreateParserMediator(
storage_writer, file_entry=file_entry,
knowledge_base_values=knowledge_base_values)
file_object = file_entry.GetFileObject()
try:
zip_file = zipfile.ZipFile(file_object, 'r', allowZip64=True)
required_paths_exist = plugin.CheckRequiredPaths(zip_file)
self.assertTrue(required_paths_exist)
plugin.Process(parser_mediator, zip_file=zip_file)
zip_file.close()
finally:
file_object.close()
return storage_writer
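# Illustrative sketch of a concrete plugin test built on this helper (the plugin
# module, class and archive name are hypothetical):
#
#   class MyCompoundZIPPluginTest(CompoundZIPPluginTestCase):
#
#     def testProcess(self):
#       plugin = my_plugin.MyCompoundZIPPlugin()
#       storage_writer = self._ParseZIPFileWithPlugin(['archive.zip'], plugin)
#       self.assertIsNotNone(storage_writer)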
|
py | 1a31bf34db44bf2fe72a65cea1c1cfa060df18d4 | #predicting-house-prices.py
#Day 6: Multiple Linear Regression: Predicting House Prices
#Intro to Statistics
#By derekhh
#Apr 2, 2016
from sklearn import linear_model
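# Expected stdin layout (illustrative, matching how the values are read below):
#   F N               <- number of features and number of training rows
#   x1 ... xF price   <- N training rows: F feature values then the observed price
#   T                 <- number of query rows
#   x1 ... xF         <- T rows of feature values to predict prices for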
f, n = input().split()
f = int(f)
n = int(n)
clf = linear_model.LinearRegression()
x_train = []
y_train = []
for i in range(n):
    tmp = [float(v) for v in input().split()]  # one training row: features then price (v avoids shadowing n)
x_train.append(tmp[0: len(tmp) - 1])
y_train.append(tmp[len(tmp) - 1])
clf.fit(x_train, y_train)
x_test = []
n = int(input())
for i in range(n):
    tmp = [float(v) for v in input().split()]  # one query row of feature values
x_test.append(tmp)
y_test = clf.predict(x_test)
for y in y_test:
print(y) |
py | 1a31bf5ad3819a451fa858838d6d4cdd866ca7eb | """
Django settings for backend project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ["SECRET_KEY"]
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ["*",]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'corsheaders',
'core',
'posts',
'categories',
'tags',
'profiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME':os.environ["POSTGRES_DB"],
'USER':os.environ["POSTGRES_USER"],
'PASSWORD':os.environ["POSTGRES_PASSWORD"],
'HOST': 'postgres',
'PORT': '',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
STATIC_ROOT = os.path.join(BASE_DIR, "static_serve/")
# REST Framework
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES':(
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES':(
'rest_framework.permissions.IsAuthenticatedOrReadOnly',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 12
}
CORS_ORIGIN_ALLOW_ALL = True
# CORS_ALLOW_CREDENTIALS = True
# CORS_ORIGIN_WHITELIST = [
# '*',
# ]
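# This settings module pulls its secrets from the environment (SECRET_KEY and the
# POSTGRES_* values used above). An illustrative local invocation:
#   export SECRET_KEY=dev-only-secret
#   export POSTGRES_DB=app POSTGRES_USER=app POSTGRES_PASSWORD=app
#   python manage.py runserver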
|
py | 1a31c076aba6e6aabae06381fd0d774ce6e9537e | """
Apicurio Registry API [v2]
Apicurio Registry is a datastore for standard event schemas and API designs. Apicurio Registry enables developers to manage and share the structure of their data using a REST interface. For example, client applications can dynamically push or pull the latest updates to or from the registry without needing to redeploy. Apicurio Registry also enables developers to create rules that govern how registry content can evolve over time. For example, this includes rules for content validation and version compatibility. The Apicurio Registry REST API enables client applications to manage the artifacts in the registry. This API provides create, read, update, and delete operations for schema and API artifacts, rules, versions, and metadata. The supported artifact types include: - Apache Avro schema - AsyncAPI specification - Google protocol buffers - GraphQL schema - JSON Schema - Kafka Connect schema - OpenAPI specification - Web Services Description Language - XML Schema Definition **Important**: The Apicurio Registry REST API is available from `https://MY-REGISTRY-URL/apis/registry/v2` by default. Therefore you must prefix all API operation paths with `../apis/registry/v2` in this case. For example: `../apis/registry/v2/ids/globalIds/{globalId}`. # noqa: E501
The version of the OpenAPI document: 2.2.0.Final
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import copy
import logging
import multiprocessing
import sys
import urllib3
from http import client as http_client
from apicurioregistryclient.exceptions import ApiValueError
JSON_SCHEMA_VALIDATION_KEYWORDS = {
'multipleOf', 'maximum', 'exclusiveMaximum',
'minimum', 'exclusiveMinimum', 'maxLength',
'minLength', 'pattern', 'maxItems', 'minItems'
}
class Configuration(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
:param host: Base url
:param api_key: Dict to store API key(s).
Each entry in the dict specifies an API key.
The dict key is the name of the security scheme in the OAS specification.
The dict value is the API key secret.
:param api_key_prefix: Dict to store API prefix (e.g. Bearer)
The dict key is the name of the security scheme in the OAS specification.
The dict value is an API key prefix when generating the auth data.
:param username: Username for HTTP basic authentication
:param password: Password for HTTP basic authentication
:param discard_unknown_keys: Boolean value indicating whether to discard
unknown properties. A server may send a response that includes additional
properties that are not known by the client in the following scenarios:
1. The OpenAPI document is incomplete, i.e. it does not match the server
implementation.
2. The client was generated using an older version of the OpenAPI document
and the server has been upgraded since then.
If a schema in the OpenAPI document defines the additionalProperties attribute,
then all undeclared properties received by the server are injected into the
additional properties map. In that case, there are undeclared properties, and
nothing to discard.
:param disabled_client_side_validations (string): Comma-separated list of
JSON schema validation keywords to disable JSON schema structural validation
rules. The following keywords may be specified: multipleOf, maximum,
exclusiveMaximum, minimum, exclusiveMinimum, maxLength, minLength, pattern,
maxItems, minItems.
By default, the validation is performed for data generated locally by the client
and data received from the server, independent of any validation performed by
the server side. If the input data does not satisfy the JSON schema validation
rules specified in the OpenAPI document, an exception is raised.
If disabled_client_side_validations is set, structural validation is
disabled. This can be useful to troubleshoot data validation problem, such as
when the OpenAPI document validation rules do not match the actual API data
received by the server.
:param server_index: Index to servers configuration.
:param server_variables: Mapping with string values to replace variables in
templated server configuration. The validation of enums is performed for
variables with defined enum values before.
:param server_operation_index: Mapping from operation ID to an index to server
configuration.
:param server_operation_variables: Mapping from operation ID to a mapping with
string values to replace variables in templated server configuration.
The validation of enums is performed for variables with defined enum values before.
:param ssl_ca_cert: str - the path to a file of concatenated CA certificates
in PEM format
"""
_default = None
def __init__(self, host=None,
api_key=None, api_key_prefix=None,
access_token=None,
username=None, password=None,
discard_unknown_keys=False,
disabled_client_side_validations="",
server_index=None, server_variables=None,
server_operation_index=None, server_operation_variables=None,
ssl_ca_cert=None,
):
"""Constructor
"""
self._base_path = "http://localhost" if host is None else host
"""Default Base url
"""
self.server_index = 0 if server_index is None and host is None else server_index
self.server_operation_index = server_operation_index or {}
"""Default server index
"""
self.server_variables = server_variables or {}
self.server_operation_variables = server_operation_variables or {}
"""Default server variables
"""
self.temp_folder_path = None
"""Temp file folder for downloading files
"""
# Authentication Settings
self.access_token = access_token
self.api_key = {}
if api_key:
self.api_key = api_key
"""dict to store API key(s)
"""
self.api_key_prefix = {}
if api_key_prefix:
self.api_key_prefix = api_key_prefix
"""dict to store API prefix (e.g. Bearer)
"""
self.refresh_api_key_hook = None
"""function hook to refresh API key if expired
"""
self.username = username
"""Username for HTTP basic authentication
"""
self.password = password
"""Password for HTTP basic authentication
"""
self.discard_unknown_keys = discard_unknown_keys
self.disabled_client_side_validations = disabled_client_side_validations
self.logger = {}
"""Logging Settings
"""
self.logger["package_logger"] = logging.getLogger("apicurioregistryclient")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
"""Log format
"""
self.logger_stream_handler = None
"""Log stream handler
"""
self.logger_file_handler = None
"""Log file handler
"""
self.logger_file = None
"""Debug file location
"""
self.debug = False
"""Debug switch
"""
self.verify_ssl = True
"""SSL/TLS verification
Set this to false to skip verifying SSL certificate when calling API
from https server.
"""
self.ssl_ca_cert = ssl_ca_cert
"""Set this to customize the certificate file to verify the peer.
"""
self.cert_file = None
"""client certificate file
"""
self.key_file = None
"""client key file
"""
self.assert_hostname = None
"""Set this to True/False to enable/disable SSL hostname verification.
"""
self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
"""urllib3 connection pool's maximum number of connections saved
per pool. urllib3 uses 1 connection as default value, but this is
not the best value when you are making a lot of possibly parallel
requests to the same host, which is often the case here.
cpu_count * 5 is used as default value to increase performance.
"""
self.proxy = None
"""Proxy URL
"""
self.no_proxy = None
"""bypass proxy for host in the no_proxy list.
"""
self.proxy_headers = None
"""Proxy headers
"""
self.safe_chars_for_path_param = ''
"""Safe chars for path_param
"""
self.retries = None
"""Adding retries to override urllib3 default value 3
"""
# Enable client side validation
self.client_side_validation = True
# Options to pass down to the underlying urllib3 socket
self.socket_options = None
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k not in ('logger', 'logger_file_handler'):
setattr(result, k, copy.deepcopy(v, memo))
# shallow copy of loggers
result.logger = copy.copy(self.logger)
# use setters to configure loggers
result.logger_file = self.logger_file
result.debug = self.debug
return result
def __setattr__(self, name, value):
object.__setattr__(self, name, value)
if name == 'disabled_client_side_validations':
s = set(filter(None, value.split(',')))
for v in s:
if v not in JSON_SCHEMA_VALIDATION_KEYWORDS:
raise ApiValueError(
"Invalid keyword: '{0}''".format(v))
self._disabled_client_side_validations = s
@classmethod
def set_default(cls, default):
"""Set default instance of configuration.
It stores default configuration, which can be
returned by get_default_copy method.
:param default: object of Configuration
"""
cls._default = copy.deepcopy(default)
@classmethod
def get_default_copy(cls):
"""Return new instance of configuration.
This method returns newly created, based on default constructor,
object of Configuration class or returns a copy of default
configuration passed by the set_default method.
:return: The configuration object.
"""
if cls._default is not None:
return copy.deepcopy(cls._default)
return Configuration()
@property
def logger_file(self):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in self.logger.items():
logger.addHandler(self.logger_file_handler)
@property
def debug(self):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
return self.__debug
@debug.setter
def debug(self, value):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in self.logger.items():
logger.setLevel(logging.DEBUG)
# turn on http_client debug
http_client.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in self.logger.items():
logger.setLevel(logging.WARNING)
# turn off http_client debug
http_client.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier, alias=None):
"""Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:param alias: The alternative identifier of apiKey.
:return: The token for api key authentication.
"""
if self.refresh_api_key_hook is not None:
self.refresh_api_key_hook(self)
key = self.api_key.get(identifier, self.api_key.get(alias) if alias is not None else None)
if key:
prefix = self.api_key_prefix.get(identifier)
if prefix:
return "%s %s" % (prefix, key)
else:
return key
def get_basic_auth_token(self):
"""Gets HTTP basic authentication header (string).
:return: The token for basic HTTP authentication.
"""
username = ""
if self.username is not None:
username = self.username
password = ""
if self.password is not None:
password = self.password
return urllib3.util.make_headers(
basic_auth=username + ':' + password
).get('authorization')
def auth_settings(self):
"""Gets Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
auth = {}
return auth
def to_debug_report(self):
"""Gets the essential information for debugging.
:return: The report for debugging.
"""
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: 2.2.0.Final\n"\
"SDK Package Version: 1.0.0".\
format(env=sys.platform, pyversion=sys.version)
def get_host_settings(self):
"""Gets an array of host settings
:return: An array of host settings
"""
return [
{
'url': "",
'description': "No description provided",
}
]
def get_host_from_settings(self, index, variables=None, servers=None):
"""Gets host URL based on the index and variables
:param index: array index of the host settings
:param variables: hash of variable and the corresponding value
:param servers: an array of host settings or None
:return: URL based on host settings
"""
if index is None:
return self._base_path
variables = {} if variables is None else variables
servers = self.get_host_settings() if servers is None else servers
try:
server = servers[index]
except IndexError:
raise ValueError(
"Invalid index {0} when selecting the host settings. "
"Must be less than {1}".format(index, len(servers)))
url = server['url']
# go through variables and replace placeholders
for variable_name, variable in server.get('variables', {}).items():
used_value = variables.get(
variable_name, variable['default_value'])
if 'enum_values' in variable \
and used_value not in variable['enum_values']:
raise ValueError(
"The variable `{0}` in the host URL has invalid value "
"{1}. Must be {2}.".format(
variable_name, variables[variable_name],
variable['enum_values']))
url = url.replace("{" + variable_name + "}", used_value)
return url
@property
def host(self):
"""Return generated host."""
return self.get_host_from_settings(self.server_index, variables=self.server_variables)
@host.setter
def host(self, value):
"""Fix base path."""
self._base_path = value
self.server_index = None
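# Minimal usage sketch (the host URL and the ApiClient import path are
# assumptions, not taken from this file):
#
#   from apicurioregistryclient import ApiClient
#   config = Configuration(host="http://localhost:8080/apis/registry/v2")
#   config.debug = True
#   with ApiClient(configuration=config) as api_client:
#       ...  # build the generated *Api classes around api_client here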
|
py | 1a31c08ad8f0247c55b05b8cdfb1a02708b11743 | n = 5 # size
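# With n = 5 the loops below print (odd rows as letters, even rows as digits):
#   A
#   12
#   ABC
#   1234
#   ABCDE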
for x in range(1, n + 1):
for y in range(1, x + 1):
if (x % 2 != 0):
print(chr(y + 64), end="") # for char
else:
print(y, end="")
print() |
py | 1a31c229d3317c906eaa2726d4e1dec64221ddb9 | address_resolver_abi = [
{
"inputs": [
{
"internalType": "address",
"name": "_owner",
"type": "address"
}
],
"payable": False,
"stateMutability": "nonpayable",
"type": "constructor",
"signature": "constructor"
},
{
"anonymous": False,
"inputs": [
{
"indexed": False,
"internalType": "address",
"name": "oldOwner",
"type": "address"
},
{
"indexed": False,
"internalType": "address",
"name": "newOwner",
"type": "address"
}
],
"name": "OwnerChanged",
"type": "event",
"signature": "0xb532073b38c83145e3e5135377a08bf9aab55bc0fd7c1179cd4fb995d2a5159c"
},
{
"anonymous": False,
"inputs": [
{
"indexed": False,
"internalType": "address",
"name": "newOwner",
"type": "address"
}
],
"name": "OwnerNominated",
"type": "event",
"signature": "0x906a1c6bd7e3091ea86693dd029a831c19049ce77f1dce2ce0bab1cacbabce22"
},
{
"constant": False,
"inputs": [],
"name": "acceptOwnership",
"outputs": [],
"payable": False,
"stateMutability": "nonpayable",
"type": "function",
"signature": "0x79ba5097"
},
{
"constant": True,
"inputs": [
{
"internalType": "bytes32",
"name": "name",
"type": "bytes32"
}
],
"name": "getAddress",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"payable": False,
"stateMutability": "view",
"type": "function",
"signature": "0x21f8a721"
},
{
"constant": True,
"inputs": [
{
"internalType": "bytes32",
"name": "key",
"type": "bytes32"
}
],
"name": "getSynth",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"payable": False,
"stateMutability": "view",
"type": "function",
"signature": "0x51456061"
},
{
"constant": False,
"inputs": [
{
"internalType": "bytes32[]",
"name": "names",
"type": "bytes32[]"
},
{
"internalType": "address[]",
"name": "destinations",
"type": "address[]"
}
],
"name": "importAddresses",
"outputs": [],
"payable": False,
"stateMutability": "nonpayable",
"type": "function",
"signature": "0xab0b8f77"
},
{
"constant": False,
"inputs": [
{
"internalType": "address",
"name": "_owner",
"type": "address"
}
],
"name": "nominateNewOwner",
"outputs": [],
"payable": False,
"stateMutability": "nonpayable",
"type": "function",
"signature": "0x1627540c"
},
{
"constant": True,
"inputs": [],
"name": "nominatedOwner",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"payable": False,
"stateMutability": "view",
"type": "function",
"signature": "0x53a47bb7"
},
{
"constant": True,
"inputs": [],
"name": "owner",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"payable": False,
"stateMutability": "view",
"type": "function",
"signature": "0x8da5cb5b"
},
{
"constant": True,
"inputs": [
{
"internalType": "bytes32",
"name": "",
"type": "bytes32"
}
],
"name": "repository",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"payable": False,
"stateMutability": "view",
"type": "function",
"signature": "0x187f7935"
},
{
"constant": True,
"inputs": [
{
"internalType": "bytes32",
"name": "name",
"type": "bytes32"
},
{
"internalType": "string",
"name": "reason",
"type": "string"
}
],
"name": "requireAndGetAddress",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"payable": False,
"stateMutability": "view",
"type": "function",
"signature": "0xdacb2d01"
}
]
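# Illustrative web3.py usage of this ABI (the resolver address and provider URL
# are placeholders, not part of this file):
#
#   from web3 import Web3
#   w3 = Web3(Web3.HTTPProvider("https://example-node.invalid"))
#   resolver = w3.eth.contract(address=RESOLVER_ADDRESS, abi=address_resolver_abi)
#   current_owner = resolver.functions.owner().call()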
|
py | 1a31c28e496c6080a3bb16d64a464a0d513d6b00 | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.conf import settings
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
'''creates and saves a new user'''
if not email:
raise ValueError('Users must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
'''Create and saves a new super user'''
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
'''custom user model that supports using email instead of username'''
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
class Tag(models.Model):
'''Tag to be used for a recipe'''
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
def __str__(self):
return self.name
|
py | 1a31c2aa3d7097c3344ff3ee6a88381b316e28dc | """Time series estimator that predicts using the naive forecasting approach."""
import numpy as np
from rayml.model_family import ModelFamily
from rayml.pipelines.components.estimators import Estimator
from rayml.pipelines.components.transformers import TimeSeriesFeaturizer
from rayml.problem_types import ProblemTypes
from rayml.utils import infer_feature_types
class TimeSeriesBaselineEstimator(Estimator):
"""Time series estimator that predicts using the naive forecasting approach.
This is useful as a simple baseline estimator for time series problems.
Args:
gap (int): Gap between prediction date and target date and must be a positive integer. If gap is 0, target date will be shifted ahead by 1 time period. Defaults to 1.
forecast_horizon (int): Number of time steps the model is expected to predict.
random_seed (int): Seed for the random number generator. Defaults to 0.
"""
name = "Time Series Baseline Estimator"
hyperparameter_ranges = {}
"""{}"""
model_family = ModelFamily.BASELINE
"""ModelFamily.BASELINE"""
supported_problem_types = [
ProblemTypes.TIME_SERIES_REGRESSION,
ProblemTypes.TIME_SERIES_BINARY,
ProblemTypes.TIME_SERIES_MULTICLASS,
]
"""[
ProblemTypes.TIME_SERIES_REGRESSION,
ProblemTypes.TIME_SERIES_BINARY,
ProblemTypes.TIME_SERIES_MULTICLASS,
]"""
def __init__(self, gap=1, forecast_horizon=1, random_seed=0, **kwargs):
self._prediction_value = None
self.start_delay = forecast_horizon + gap
self._classes = None
self._num_features = None
self._delay_index = None
if gap < 0:
raise ValueError(
f"gap value must be a positive integer. {gap} was provided."
)
parameters = {"gap": gap, "forecast_horizon": forecast_horizon}
parameters.update(kwargs)
super().__init__(
parameters=parameters, component_obj=None, random_seed=random_seed
)
def fit(self, X, y=None):
"""Fits time series baseline estimator to data.
Args:
X (pd.DataFrame): The input training data of shape [n_samples, n_features].
y (pd.Series): The target training data of length [n_samples].
Returns:
self
Raises:
ValueError: If input y is None.
"""
X = infer_feature_types(X)
if y is None:
raise ValueError("Cannot fit Time Series Baseline Classifier if y is None")
vals, _ = np.unique(y, return_counts=True)
self._classes = list(vals)
return self
def predict(self, X):
"""Make predictions using fitted time series baseline estimator.
Args:
X (pd.DataFrame): Data of shape [n_samples, n_features].
Returns:
pd.Series: Predicted values.
Raises:
ValueError: If input y is None.
"""
X = infer_feature_types(X)
feature_name = TimeSeriesFeaturizer.target_colname_prefix.format(
self.start_delay
)
if feature_name not in X.columns:
raise ValueError(
"Time Series Baseline Estimator is meant to be used in a pipeline with "
"a Time Series Featurizer"
)
self._num_features = X.shape[1]
self._delay_index = X.columns.tolist().index(feature_name)
return X.ww[feature_name]
def predict_proba(self, X):
"""Make prediction probabilities using fitted time series baseline estimator.
Args:
X (pd.DataFrame): Data of shape [n_samples, n_features].
Returns:
pd.DataFrame: Predicted probability values.
Raises:
ValueError: If input y is None.
"""
preds = self.predict(X).astype("int")
proba_arr = np.zeros((len(preds), len(self._classes)))
proba_arr[np.arange(len(preds)), preds] = 1
return infer_feature_types(proba_arr)
@property
def feature_importance(self):
"""Returns importance associated with each feature.
Since baseline estimators do not use input features to calculate predictions, returns an array of zeroes.
Returns:
np.ndarray (float): An array of zeroes.
"""
importance = np.array([0] * self._num_features)
importance[self._delay_index] = 1
return importance
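# Illustrative sketch (assumes X has already been run through TimeSeriesFeaturizer
# so the delayed-target column exists; the variable names are hypothetical):
#
#   est = TimeSeriesBaselineEstimator(gap=1, forecast_horizon=2)
#   est.fit(X_featurized, y_train)
#   preds = est.predict(X_featurized)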
|
py | 1a31c2eafe6119e256d21e9ccbdf0218fed77602 | # Copyright (c) 2016 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import oslo_serialization
from cinder.i18n import _
from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common
from cinder.volume.drivers.coprhd.helpers import consistencygroup
from cinder.volume.drivers.coprhd.helpers import volume
class Snapshot(common.CoprHDResource):
# Commonly used URIs for the 'Snapshot' module
URI_SNAPSHOTS = '/{0}/snapshots/{1}'
URI_BLOCK_SNAPSHOTS = '/block/snapshots/{0}'
URI_SEARCH_SNAPSHOT_BY_TAG = '/block/snapshots/search?tag={0}'
URI_SNAPSHOT_LIST = '/{0}/{1}/{2}/protection/snapshots'
URI_SNAPSHOT_TASKS_BY_OPID = '/vdc/tasks/{0}'
URI_RESOURCE_DEACTIVATE = '{0}/deactivate'
URI_CONSISTENCY_GROUP = "/block/consistency-groups"
URI_CONSISTENCY_GROUPS_SNAPSHOT_INSTANCE = (
URI_CONSISTENCY_GROUP + "/{0}/protection/snapshots/{1}")
URI_CONSISTENCY_GROUPS_SNAPSHOT_DEACTIVATE = (
URI_CONSISTENCY_GROUPS_SNAPSHOT_INSTANCE + "/deactivate")
URI_BLOCK_SNAPSHOTS_TAG = URI_BLOCK_SNAPSHOTS + '/tags'
VOLUMES = 'volumes'
CG = 'consistency-groups'
BLOCK = 'block'
is_timeout = False
timeout = 300
def snapshot_list_uri(self, otype, otypename, ouri):
"""Makes REST API call to list snapshots under a volume.
:param otype : block
:param otypename : either volume or consistency-group should be
provided
:param ouri : uri of volume or consistency-group
:returns: list of snapshots
"""
(s, h) = common.service_json_request(
self.ipaddr, self.port,
"GET",
Snapshot.URI_SNAPSHOT_LIST.format(otype, otypename, ouri), None)
o = common.json_decode(s)
return o['snapshot']
def snapshot_show_uri(self, otype, resource_uri, suri):
"""Retrieves snapshot details based on snapshot Name or Label.
:param otype : block
:param suri : uri of the Snapshot.
:param resource_uri: uri of the source resource
:returns: Snapshot details in JSON response payload
"""
if(resource_uri is not None and
resource_uri.find('BlockConsistencyGroup') > 0):
(s, h) = common.service_json_request(
self.ipaddr, self.port,
"GET",
Snapshot.URI_CONSISTENCY_GROUPS_SNAPSHOT_INSTANCE.format(
resource_uri,
suri),
None)
else:
(s, h) = common.service_json_request(
self.ipaddr, self.port,
"GET",
Snapshot.URI_SNAPSHOTS.format(otype, suri), None)
return common.json_decode(s)
def snapshot_query(self, storageres_type,
storageres_typename, resuri, snapshot_name):
if resuri is not None:
uris = self.snapshot_list_uri(
storageres_type,
storageres_typename,
resuri)
for uri in uris:
snapshot = self.snapshot_show_uri(
storageres_type,
resuri,
uri['id'])
if (False == common.get_node_value(snapshot, 'inactive') and
snapshot['name'] == snapshot_name):
return snapshot['id']
raise common.CoprHdError(
common.CoprHdError.SOS_FAILURE_ERR,
(_("snapshot with the name: "
"%s Not Found") % snapshot_name))
def snapshot_show_task_opid(self, otype, snap, taskid):
(s, h) = common.service_json_request(
self.ipaddr, self.port,
"GET",
Snapshot.URI_SNAPSHOT_TASKS_BY_OPID.format(taskid),
None)
if (not s):
return None
o = common.json_decode(s)
return o
# Blocks the operation until the task is complete/error out/timeout
def block_until_complete(self, storageres_type, resuri,
task_id, synctimeout=0):
if synctimeout:
t = threading.Timer(synctimeout, common.timeout_handler)
else:
synctimeout = self.timeout
t = threading.Timer(synctimeout, common.timeout_handler)
t.start()
while True:
out = self.snapshot_show_task_opid(
storageres_type, resuri, task_id)
if out:
if out["state"] == "ready":
# cancel the timer and return
t.cancel()
break
# if the status of the task is 'error' then cancel the timer
# and raise exception
if out["state"] == "error":
# cancel the timer
t.cancel()
error_message = "Please see logs for more details"
if("service_error" in out and
"details" in out["service_error"]):
error_message = out["service_error"]["details"]
raise common.CoprHdError(
common.CoprHdError.VALUE_ERR,
(_("Task: %(task_id)s is failed with error: "
"%(error_message)s") %
{'task_id': task_id,
'error_message': error_message}))
if self.is_timeout:
self.is_timeout = False
raise common.CoprHdError(common.CoprHdError.TIME_OUT,
(_("Task did not complete in %d secs."
" Operation timed out. Task in"
" CoprHD will continue") %
synctimeout))
return
def storage_resource_query(self,
storageres_type,
volume_name,
cg_name,
project,
tenant):
resourcepath = "/" + project
if tenant is not None:
resourcepath = tenant + resourcepath
resUri = None
resourceObj = None
if Snapshot.BLOCK == storageres_type and volume_name is not None:
resourceObj = volume.Volume(self.ipaddr, self.port)
resUri = resourceObj.volume_query(resourcepath, volume_name)
elif Snapshot.BLOCK == storageres_type and cg_name is not None:
resourceObj = consistencygroup.ConsistencyGroup(
self.ipaddr,
self.port)
resUri = resourceObj.consistencygroup_query(
cg_name,
project,
tenant)
else:
resourceObj = None
return resUri
def snapshot_create(self, otype, typename, ouri,
snaplabel, inactive, sync,
readonly=False, synctimeout=0):
"""New snapshot is created, for a given volume.
:param otype : block type should be provided
:param typename : either volume or consistency-groups should
be provided
:param ouri : uri of volume
:param snaplabel : name of the snapshot
:param inactive : if true, the snapshot will not activate the
synchronization between source and target volumes
:param sync : synchronous request
:param synctimeout : Query for task status for "synctimeout" secs.
If the task doesn't complete in synctimeout
secs, an exception is thrown
"""
# check snapshot is already exist
is_snapshot_exist = True
try:
self.snapshot_query(otype, typename, ouri, snaplabel)
except common.CoprHdError as e:
if e.err_code == common.CoprHdError.NOT_FOUND_ERR:
is_snapshot_exist = False
else:
raise
if is_snapshot_exist:
raise common.CoprHdError(
common.CoprHdError.ENTRY_ALREADY_EXISTS_ERR,
(_("Snapshot with name %(snaplabel)s"
" already exists under %(typename)s") %
{'snaplabel': snaplabel,
'typename': typename
}))
parms = {
'name': snaplabel,
# if true, the snapshot will not activate the synchronization
# between source and target volumes
'create_inactive': inactive
}
if readonly is True:
parms['read_only'] = readonly
body = oslo_serialization.jsonutils.dumps(parms)
# REST api call
(s, h) = common.service_json_request(
self.ipaddr, self.port,
"POST",
Snapshot.URI_SNAPSHOT_LIST.format(otype, typename, ouri), body)
o = common.json_decode(s)
task = o["task"][0]
if sync:
return (
self.block_until_complete(
otype,
task['resource']['id'],
task["id"], synctimeout)
)
else:
return o
def snapshot_delete_uri(self, otype, resource_uri,
suri, sync, synctimeout=0):
"""Delete a snapshot by uri.
:param otype : block
:param resource_uri: uri of the source resource
:param suri : Uri of the Snapshot
:param sync : To perform operation synchronously
:param synctimeout : Query for task status for "synctimeout" secs. If
the task doesn't complete in synctimeout secs, an
exception is thrown
"""
s = None
if resource_uri.find("Volume") > 0:
(s, h) = common.service_json_request(
self.ipaddr, self.port,
"POST",
Snapshot.URI_RESOURCE_DEACTIVATE.format(
Snapshot.URI_BLOCK_SNAPSHOTS.format(suri)),
None)
elif resource_uri.find("BlockConsistencyGroup") > 0:
(s, h) = common.service_json_request(
self.ipaddr, self.port,
"POST",
Snapshot.URI_CONSISTENCY_GROUPS_SNAPSHOT_DEACTIVATE.format(
resource_uri,
suri),
None)
o = common.json_decode(s)
task = o["task"][0]
if sync:
return (
self.block_until_complete(
otype,
task['resource']['id'],
task["id"], synctimeout)
)
else:
return o
def snapshot_delete(self, storageres_type,
storageres_typename, resource_uri,
name, sync, synctimeout=0):
snapshotUri = self.snapshot_query(
storageres_type,
storageres_typename,
resource_uri,
name)
self.snapshot_delete_uri(
storageres_type,
resource_uri,
snapshotUri,
sync, synctimeout)
|
py | 1a31c35d127c7436b80836822499cc12e6d33fec | import random
import string
import cherrypy
@cherrypy.expose
class StringGeneratorWebService(object):
@cherrypy.tools.accept(media='text/plain')
def GET(self):
return cherrypy.session['mystring']
def POST(self, length=8):
some_string = ''.join(random.sample(string.hexdigits, int(length)))
cherrypy.session['mystring'] = some_string
return some_string
def PUT(self, another_string):
cherrypy.session['mystring'] = another_string
def DELETE(self):
cherrypy.session.pop('mystring', None)
if __name__ == '__main__':
conf = {
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.sessions.on': True,
'tools.response_headers.on': True,
'tools.response_headers.headers': [('Content-Type', 'text/plain')],
}
}
cherrypy.quickstart(StringGeneratorWebService(), '/', conf)
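    # Illustrative client calls once the server is running (CherryPy binds to
    # 127.0.0.1:8080 by default; the cookie jar carries the session):
    #   curl -c cookies.txt -d length=12 -X POST http://127.0.0.1:8080/
    #   curl -b cookies.txt http://127.0.0.1:8080/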
|
py | 1a31c432f32fcf130a588a9644d23ad798c430c7 | """
Question 74 :
Write a program to generate a list of 5 random numbers between
100 and 200 inclusive.
Hints : Use random.sample() to draw the values from that range.
"""
# Solution :
import random
print(random.sample(range(100, 201), 5))  # range(100, 201) covers 100..200 inclusive
# Note : Every time you run the program, a different random output is generated.
"""
sample() :
The sample() method returns a list with a random selection of a
specified number of items from a sequence.
Note : This method does not change the original sequence.
"""
"""
Output :
[173, 105, 148, 196, 122] (one possible run; each value lies between 100 and 200)
""" |
py | 1a31c451a554f2872d3008f66c51e2abbdc6c1f7 | from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1MqrUiQg3GyZ3wTEGJ3LqFn5Xz4jy4bLZU(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/zeronet/KT1MqrUiQg3GyZ3wTEGJ3LqFn5Xz4jy4bLZU.json')
def test_storage_encoding_KT1MqrUiQg3GyZ3wTEGJ3LqFn5Xz4jy4bLZU(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT1MqrUiQg3GyZ3wTEGJ3LqFn5Xz4jy4bLZU(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT1MqrUiQg3GyZ3wTEGJ3LqFn5Xz4jy4bLZU(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
|
py | 1a31c4f4453e79728bc0151a44da8e6eeb86a966 | # Time: O(n)
# Space: O(1)
# inplace solution
class Solution(object):
def addSpaces(self, s, spaces):
"""
:type s: str
:type spaces: List[int]
:rtype: str
"""
prev = len(s)
s = list(s)
s.extend([None]*len(spaces))
for i in reversed(xrange(len(spaces))):
for j in reversed(xrange(spaces[i], prev)):
s[j+1+i] = s[j]
s[spaces[i]+i] = ' '
prev = spaces[i]
return "".join(s)
|
py | 1a31c5de49e7d666354ffed29546b434c15731f3 | #!/usr/bin/env python3
# Copyright (c) 2013-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
import re
import sys
import dns.resolver
import collections
NSEEDS=512
MAX_SEEDS_PER_ASN=4
MIN_BLOCKS = 1530000
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
with open("suspicious_hosts.txt", mode="r", encoding="utf-8") as f:
SUSPICIOUS_HOSTS = {s.strip() for s in f if s.strip()}
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(
r"^/DeviantCore:("
r"4.0.(0|1|2|99|99.1|99.2)|"
r"4.1.(0|99)"
r")")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
if sline[1] == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
if len(sline) > 11:
agent = sline[11][1:] + sline[12][:-1]
else:
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def dedup(ips):
'''deduplicate by address,port'''
d = {}
for ip in ips:
d[ip['ip'],ip['port']] = ip
return list(d.values())
def filtermultiport(ips):
'''Filter out hosts with more nodes per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
def lookup_asn(net, ip):
'''
Look up the asn for an IP (4 or 6) address by querying cymru.com, or None
if it could not be found.
'''
try:
if net == 'ipv4':
ipaddr = ip
prefix = '.origin'
else: # http://www.team-cymru.com/IP-ASN-mapping.html
res = str() # 2001:4860:b002:23::68
for nb in ip.split(':')[:4]: # pick the first 4 nibbles
for c in nb.zfill(4): # right padded with '0'
res += c + '.' # 2001 4860 b002 0023
ipaddr = res.rstrip('.') # 2.0.0.1.4.8.6.0.b.0.0.2.0.0.2.3
prefix = '.origin6'
asn = int([x.to_text() for x in dns.resolver.query('.'.join(
reversed(ipaddr.split('.'))) + prefix + '.asn.cymru.com',
'TXT').response.answer][0].split('\"')[1].split(' ')[0])
return asn
except Exception:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip + '"\n')
return None
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_per_net):
# Sift out ips by type
ips_ipv46 = [ip for ip in ips if ip['net'] in ['ipv4', 'ipv6']]
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv46 by ASN, and limit to max_per_net per network
result = []
net_count = collections.defaultdict(int)
asn_count = collections.defaultdict(int)
for ip in ips_ipv46:
if net_count[ip['net']] == max_per_net:
continue
asn = lookup_asn(ip['net'], ip['ip'])
if asn is None or asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
net_count[ip['net']] += 1
result.append(ip)
# Add back Onions (up to max_per_net)
result.extend(ips_onion[0:max_per_net])
return result
def ip_stats(ips):
hist = collections.defaultdict(int)
for ip in ips:
if ip is not None:
hist[ip['net']] += 1
return '%6d %6d %6d' % (hist['ipv4'], hist['ipv6'], hist['onion'])
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
print('\x1b[7m IPv4 IPv6 Onion Pass \x1b[0m', file=sys.stderr)
print('%s Initial' % (ip_stats(ips)), file=sys.stderr)
# Skip entries with invalid address.
ips = [ip for ip in ips if ip is not None]
print('%s Skip entries with invalid address' % (ip_stats(ips)), file=sys.stderr)
# Skip duplicates (in case multiple seeds files were concatenated)
ips = dedup(ips)
print('%s After removing duplicates' % (ip_stats(ips)), file=sys.stderr)
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
print('%s Skip entries from suspicious hosts' % (ip_stats(ips)), file=sys.stderr)
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
print('%s Enforce minimal number of blocks' % (ip_stats(ips)), file=sys.stderr)
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
print('%s Require service bit 1' % (ip_stats(ips)), file=sys.stderr)
# Require at least 50% 30-day uptime for clearnet, 10% for onion.
req_uptime = {
'ipv4': 50,
'ipv6': 50,
'onion': 10,
}
ips = [ip for ip in ips if ip['uptime'] > req_uptime[ip['net']]]
print('%s Require minimum uptime' % (ip_stats(ips)), file=sys.stderr)
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
print('%s Require a known and recent user agent' % (ip_stats(ips)), file=sys.stderr)
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple deviant ports, these are likely abusive
ips = filtermultiport(ips)
print('%s Filter out hosts with multiple deviant ports' % (ip_stats(ips)), file=sys.stderr)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
print('%s Look up ASNs and limit results per ASN and per net' % (ip_stats(ips)), file=sys.stderr)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
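# Typical invocation (illustrative): pipe the seeder's dump through this script
# and capture the filtered list, e.g.
#   python3 makeseeds.py < seeds_main.txt > nodes_main.txt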
|
py | 1a31c5f31533312a5fb5f6a52af855c0bcd1a27e | # -*- coding: utf-8 -*-
# File: parallel.py
import atexit
import pickle
import errno
import traceback
import itertools
import multiprocessing as mp
import os
import sys
import uuid
import weakref
from contextlib import contextmanager
import zmq
from six.moves import queue, range
from ..utils import logger
from ..utils.concurrency import (
StoppableThread, enable_death_signal, ensure_proc_terminate, start_proc_mask_signal)
from ..utils.serialize import dumps_once as dumps, loads_once as loads
from .base import DataFlow, DataFlowReentrantGuard, DataFlowTerminated, ProxyDataFlow
__all__ = ['PrefetchData', 'MultiProcessPrefetchData',
'MultiProcessRunner', 'MultiProcessRunnerZMQ', 'MultiThreadRunner',
'PrefetchDataZMQ', 'MultiThreadPrefetchData']
# from https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/__init__.py
class _ExceptionWrapper:
MAGIC = b"EXC_MAGIC"
"""Wraps an exception plus traceback to communicate across threads"""
def __init__(self, exc_info):
# It is important that we don't store exc_info, see
# NOTE [ Python Traceback Reference Cycle Problem ]
self.exc_type = exc_info[0]
self.exc_msg = "".join(traceback.format_exception(*exc_info))
def pack(self):
return self.MAGIC + pickle.dumps(self)
@staticmethod
def unpack(dp):
if isinstance(dp, bytes) and dp.startswith(_ExceptionWrapper.MAGIC):
return pickle.loads(dp[len(_ExceptionWrapper.MAGIC):])
def _repeat_iter(get_itr):
while True:
yield from get_itr()
def _bind_guard(sock, name):
try:
sock.bind(name)
except zmq.ZMQError:
logger.error(
"ZMQError in socket.bind('{}'). Perhaps you're \
using pipes on a non-local file system. See documentation of MultiProcessRunnerZMQ \
for more information.".format(name))
raise
def _get_pipe_name(name):
if sys.platform.startswith('linux'):
# linux supports abstract sockets: http://api.zeromq.org/4-1:zmq-ipc
pipename = "ipc://@{}-pipe-{}".format(name, str(uuid.uuid1())[:8])
pipedir = os.environ.get('TENSORPACK_PIPEDIR', None)
if pipedir is not None:
logger.warn("TENSORPACK_PIPEDIR is not used on Linux any more! Abstract sockets will be used.")
else:
pipedir = os.environ.get('TENSORPACK_PIPEDIR', None)
if pipedir is not None:
logger.info("ZMQ uses TENSORPACK_PIPEDIR={}".format(pipedir))
else:
pipedir = '.'
assert os.path.isdir(pipedir), pipedir
filename = '{}/{}-pipe-{}'.format(pipedir.rstrip('/'), name, str(uuid.uuid1())[:6])
assert not os.path.exists(filename), "Pipe {} exists! You may be unlucky.".format(filename)
pipename = "ipc://{}".format(filename)
return pipename
def del_weakref(x):
o = x()
if o is not None:
o.__del__()
@contextmanager
def _zmq_catch_error(name):
try:
yield
except zmq.ContextTerminated:
logger.info("[{}] Context terminated.".format(name))
raise DataFlowTerminated()
except zmq.ZMQError as e:
if e.errno == errno.ENOTSOCK: # socket closed
logger.info("[{}] Socket closed.".format(name))
raise DataFlowTerminated()
else:
raise
except Exception:
raise
class _MultiProcessZMQDataFlow(DataFlow):
def __init__(self):
assert os.name != 'nt', "ZMQ IPC doesn't support windows!"
self._reset_done = False
self._procs = []
def reset_state(self):
"""
All forked dataflows should only be reset **once and only once** in spawned processes.
Subclasses should call this method with super.
"""
assert not self._reset_done, "reset_state() was called twice! This violates the API of DataFlow!"
self._reset_done = True
# __del__ not guaranteed to get called at exit
atexit.register(del_weakref, weakref.ref(self))
def _start_processes(self):
start_proc_mask_signal(self._procs)
def __del__(self):
try:
if not self._reset_done:
return
if not self.context.closed:
self.socket.close(0)
self.context.destroy(0)
for x in self._procs:
x.terminate()
x.join(5)
print("{} successfully cleaned-up.".format(type(self).__name__))
except Exception:
pass
class MultiProcessRunner(ProxyDataFlow):
"""
Running a DataFlow in >=1 processes using Python multiprocessing utilities.
    It will fork the process that calls :meth:`__init__` and collect datapoints from `ds` in each
    process via a Python :class:`multiprocessing.Queue`.
Note:
1. (Data integrity) An iterator cannot run faster automatically -- what's happening is
that the process will be forked ``num_proc`` times.
There will be ``num_proc`` dataflow running in parallel and **independently**.
As a result, we have the following guarantee on the dataflow correctness:
a. When ``num_proc=1``, this dataflow produces the same data as the
given dataflow in the same order.
b. When ``num_proc>1``, if each sample from the given dataflow is i.i.d.,
then this dataflow produces the **same distribution** of data as the given dataflow.
This implies that there will be duplication, reordering, etc.
You probably only want to use it for training.
For example, if your original dataflow contains no randomness and produces the same first datapoint,
then after parallel prefetching, the datapoint will be produced ``num_proc`` times
at the beginning.
Even when your original dataflow is fully shuffled, you still need to be aware of the
`Birthday Paradox <https://en.wikipedia.org/wiki/Birthday_problem>`_
and know that you'll likely see duplicates.
To utilize parallelism with more strict data integrity, you can use
the parallel versions of :class:`MapData`: :class:`MultiThreadMapData`, :class:`MultiProcessMapData`.
2. This has more serialization overhead than :class:`MultiProcessRunnerZMQ` when data is large.
3. You can nest like this: ``MultiProcessRunnerZMQ(MultiProcessRunner(df, num_proc=a), num_proc=b)``.
A total of ``a`` instances of ``df`` worker processes will be created.
    4. Worker processes are created in `__init__`, but the fork (process start) happens in
       `reset_state()`, which must be called exactly once before iterating.
       The DataFlow in each worker process is reset at the time of fork.
    5. This DataFlow does support Windows. However, Windows requires more strict picklability of processes,
       which means that some code that's forkable on Linux may not be forkable on Windows. If that happens, you'll
       need to reorganize the parts of the code that are not forkable.
"""
class _Worker(mp.Process):
def __init__(self, ds, queue, idx):
super(MultiProcessRunner._Worker, self).__init__()
self.ds = ds
self.queue = queue
self.idx = idx
def run(self):
enable_death_signal(_warn=self.idx == 0)
# reset all ds so each process will produce different data
self.ds.reset_state()
while True:
for dp in self.ds:
self.queue.put(dp)
def __init__(self, ds, num_prefetch, num_proc):
"""
Args:
ds (DataFlow): input DataFlow.
num_prefetch (int): size of the queue to hold prefetched datapoints.
Required.
num_proc (int): number of processes to use. Required.
"""
# https://docs.python.org/3.6/library/multiprocessing.html?highlight=process#the-spawn-and-forkserver-start-methods
if os.name == 'nt':
logger.warn("MultiProcessRunner does support Windows. \
However, Windows requires more strict picklability on processes, which may \
lead to failure of some of the code.")
super(MultiProcessRunner, self).__init__(ds)
try:
self._size = len(ds)
except NotImplementedError:
self._size = -1
assert num_proc > 0, num_proc
assert num_prefetch > 0, num_prefetch
self.num_proc = num_proc
self.num_prefetch = num_prefetch
if num_proc > 1:
logger.info("[MultiProcessRunner] Will fork a dataflow more than one times. "
"This assumes the datapoints are i.i.d.")
self.queue = mp.Queue(self.num_prefetch)
self.procs = [MultiProcessRunner._Worker(self.ds, self.queue, idx)
for idx in range(self.num_proc)]
ensure_proc_terminate(self.procs)
self._reset_done = False
def __iter__(self):
for k in itertools.count():
if self._size > 0 and k >= self._size:
break
dp = self.queue.get()
yield dp
def reset_state(self):
assert not self._reset_done, "reset_state() was called twice! This violates the API of DataFlow!"
self._reset_done = True
start_proc_mask_signal(self.procs)
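# A minimal usage sketch (not from the original file; `ds` stands for any user-defined
# DataFlow instance): wrap the dataflow, call reset_state() exactly once to start the
# worker processes, then iterate. Wrapped in a function so importing has no side effects.
def _example_multiprocess_runner(ds, num_proc=4):
    df = MultiProcessRunner(ds, num_prefetch=128, num_proc=num_proc)
    df.reset_state()   # starts the worker processes; may only be called once
    for dp in df:
        yield dp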
class MultiProcessRunnerZMQ(_MultiProcessZMQDataFlow):
"""
Run a DataFlow in >=1 processes, with ZeroMQ for communication.
It will fork the calling process of :meth:`reset_state()`,
and collect datapoints from the given dataflow in each process by ZeroMQ IPC pipe.
This is typically faster than :class:`MultiProcessRunner`.
Note:
1. (Data integrity) An iterator cannot run faster automatically -- what's happening is
that the process will be forked ``num_proc`` times.
There will be ``num_proc`` dataflow running in parallel and **independently**.
As a result, we have the following guarantee on the dataflow correctness:
a. When ``num_proc=1``, this dataflow produces the same data as the
given dataflow in the same order.
b. When ``num_proc>1``, if each sample from the given dataflow is i.i.d.,
then this dataflow produces the **same distribution** of data as the given dataflow.
This implies that there will be duplication, reordering, etc.
You probably only want to use it for training.
For example, if your original dataflow contains no randomness and produces the same first datapoint,
then after parallel prefetching, the datapoint will be produced ``num_proc`` times
at the beginning.
Even when your original dataflow is fully shuffled, you still need to be aware of the
`Birthday Paradox <https://en.wikipedia.org/wiki/Birthday_problem>`_
and know that you'll likely see duplicates.
To utilize parallelism with more strict data integrity, you can use
the parallel versions of :class:`MapData`: :class:`MultiThreadMapData`, :class:`MultiProcessMapData`.
2. `reset_state()` of the given dataflow will be called **once and only once** in the worker processes.
    3. The fork of processes happens in this dataflow's `reset_state()` method.
Please note that forking a TensorFlow GPU session may be unsafe.
If you're managing this dataflow on your own,
it's better to fork before creating the session.
4. (Fork-safety) After the fork has happened, this dataflow becomes not fork-safe.
i.e., if you fork an already reset instance of this dataflow,
it won't be usable in the forked process. Therefore, do not nest two `MultiProcessRunnerZMQ`.
5. (Thread-safety) ZMQ is not thread safe. Therefore, do not call :meth:`get_data` of the same dataflow in
       more than 1 thread.
    6. This dataflow does not support Windows. Use `MultiProcessRunner`, which works on Windows.
7. (For Mac only) A UNIX named pipe will be created in the current directory.
       However, certain non-local filesystems such as NFS/GlusterFS/AFS don't always support pipes.
You can change the directory by ``export TENSORPACK_PIPEDIR=/other/dir``.
In particular, you can use somewhere under '/tmp' which is usually local.
Note that some non-local FS may appear to support pipes and code
may appear to run but crash with bizarre error.
Also note that ZMQ limits the maximum length of pipe path.
If you hit the limit, you can set the directory to a softlink
which points to a local directory.
"""
class _Worker(mp.Process):
def __init__(self, ds, conn_name, hwm, idx):
super(MultiProcessRunnerZMQ._Worker, self).__init__()
self.ds = ds
self.conn_name = conn_name
self.hwm = hwm
self.idx = idx
def run(self):
enable_death_signal(_warn=self.idx == 0)
self.ds.reset_state()
itr = _repeat_iter(lambda: self.ds)
context = zmq.Context()
socket = context.socket(zmq.PUSH)
socket.set_hwm(self.hwm)
socket.connect(self.conn_name)
try:
while True:
try:
dp = next(itr)
socket.send(dumps(dp), copy=False)
except Exception:
dp = _ExceptionWrapper(sys.exc_info()).pack()
socket.send(dumps(dp), copy=False)
raise
# sigint could still propagate here, e.g. when nested
except KeyboardInterrupt:
pass
finally:
socket.close(0)
context.destroy(0)
def __init__(self, ds, num_proc=1, hwm=50):
"""
Args:
ds (DataFlow): input DataFlow.
num_proc (int): number of processes to use.
hwm (int): the zmq "high-water mark" (queue size) for both sender and receiver.
"""
super(MultiProcessRunnerZMQ, self).__init__()
self.ds = ds
self.num_proc = num_proc
self._hwm = hwm
if num_proc > 1:
logger.info("[MultiProcessRunnerZMQ] Will fork a dataflow more than one times. "
"This assumes the datapoints are i.i.d.")
try:
self._size = ds.__len__()
except NotImplementedError:
self._size = -1
def _recv(self):
ret = loads(self.socket.recv(copy=False))
exc = _ExceptionWrapper.unpack(ret)
if exc is not None:
logger.error("Exception '{}' in worker:".format(str(exc.exc_type)))
raise exc.exc_type(exc.exc_msg)
return ret
def __len__(self):
return self.ds.__len__()
def __iter__(self):
with self._guard, _zmq_catch_error('MultiProcessRunnerZMQ'):
for k in itertools.count():
if self._size > 0 and k >= self._size:
break
yield self._recv()
def reset_state(self):
super(MultiProcessRunnerZMQ, self).reset_state()
self._guard = DataFlowReentrantGuard()
self.context = zmq.Context()
self.socket = self.context.socket(zmq.PULL)
self.socket.set_hwm(self._hwm)
pipename = _get_pipe_name('dataflow')
_bind_guard(self.socket, pipename)
self._procs = [MultiProcessRunnerZMQ._Worker(self.ds, pipename, self._hwm, idx)
for idx in range(self.num_proc)]
self._start_processes()
class MultiThreadRunner(DataFlow):
"""
Create multiple dataflow instances and run them each in one thread.
Collect outputs from them with a queue.
Note:
1. (Data integrity) An iterator cannot run faster automatically -- what's happening is
that each thread will create a dataflow iterator.
There will be ``num_thread`` dataflow running in parallel and **independently**.
As a result, we have the following guarantee on the dataflow correctness:
a. When ``num_thread=1``, this dataflow produces the same data as the
given dataflow in the same order.
b. When ``num_thread>1``, if each sample from the given dataflow is i.i.d.,
then this dataflow produces the **same distribution** of data as the given dataflow.
This implies that there will be duplication, reordering, etc.
You probably only want to use it for training.
For example, if your original dataflow contains no randomness and produces the same first datapoint,
then after parallel prefetching, the datapoint will be produced ``num_thread`` times
at the beginning.
Even when your original dataflow is fully shuffled, you still need to be aware of the
`Birthday Paradox <https://en.wikipedia.org/wiki/Birthday_problem>`_
and know that you'll likely see duplicates.
To utilize parallelism with more strict data integrity, you can use
the parallel versions of :class:`MapData`: :class:`MultiThreadMapData`, :class:`MultiProcessMapData`.
"""
class _Worker(StoppableThread):
def __init__(self, get_df, queue):
super(MultiThreadRunner._Worker, self).__init__()
self.df = get_df()
assert isinstance(self.df, DataFlow), self.df
self.queue = queue
self.daemon = True
def run(self):
self.df.reset_state()
try:
while True:
for dp in self.df:
if self.stopped():
return
self.queue_put_stoppable(self.queue, dp)
except Exception:
if self.stopped():
pass # skip duplicated error messages
else:
raise
finally:
self.stop()
def __init__(self, get_df, num_prefetch, num_thread):
"""
Args:
get_df ( -> DataFlow): a callable which returns a DataFlow.
Each thread will call this function to get the DataFlow to use.
Therefore do not return the same DataFlow object for each call,
unless your dataflow is stateless.
num_prefetch (int): size of the queue
num_thread (int): number of threads
"""
assert num_thread > 0, num_thread
assert num_prefetch > 0, num_prefetch
self.num_thread = num_thread
self.queue = queue.Queue(maxsize=num_prefetch)
self.threads = [
MultiThreadRunner._Worker(get_df, self.queue)
for _ in range(num_thread)]
try:
self._size = self.__len__()
except NotImplementedError:
self._size = -1
def reset_state(self):
for th in self.threads:
th.df.reset_state()
th.start()
def __len__(self):
return self.threads[0].df.__len__()
def __iter__(self):
for k in itertools.count():
if self._size > 0 and k >= self._size:
break
yield self.queue.get()
def __del__(self):
for p in self.threads:
if p.is_alive():
p.stop()
p.join()
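# A minimal usage sketch (not from the original file): MultiThreadRunner takes a
# *callable* that returns a fresh DataFlow, not a DataFlow instance, so that each
# thread gets its own iterator. `make_df` is a placeholder for any such factory.
def _example_multithread_runner(make_df, num_thread=4):
    df = MultiThreadRunner(make_df, num_prefetch=64, num_thread=num_thread)
    df.reset_state()   # resets each per-thread dataflow and starts the worker threads
    for dp in df:
        yield dp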
class PlasmaPutData(ProxyDataFlow):
"""
Put each data point to plasma shared memory object store, and yield the object id instead.
Experimental.
"""
def __init__(self, ds, socket="/tmp/plasma"):
self._socket = socket
super(PlasmaPutData, self).__init__(ds)
def reset_state(self):
super(PlasmaPutData, self).reset_state()
self.client = plasma.connect(self._socket, "", 0)
def __iter__(self):
for dp in self.ds:
oid = self.client.put(dp)
yield [oid.binary()]
class PlasmaGetData(ProxyDataFlow):
"""
Take plasma object id from a DataFlow, and retrieve it from plasma shared
memory object store.
Experimental.
"""
def __init__(self, ds, socket="/tmp/plasma"):
self._socket = socket
super(PlasmaGetData, self).__init__(ds)
def reset_state(self):
super(PlasmaGetData, self).reset_state()
self.client = plasma.connect(self._socket, "", 0)
def __iter__(self):
for dp in self.ds:
oid = plasma.ObjectID(dp[0])
dp = self.client.get(oid)
yield dp
plasma = None
# These plasma code is only experimental
# try:
# import pyarrow.plasma as plasma
# except ImportError:
# from ..utils.develop import create_dummy_class
# PlasmaPutData = create_dummy_class('PlasmaPutData', 'pyarrow') # noqa
# PlasmaGetData = create_dummy_class('PlasmaGetData', 'pyarrow') # noqa
# The old inappropriate names:
PrefetchData = MultiProcessRunner
MultiProcessPrefetchData = MultiProcessRunner
PrefetchDataZMQ = MultiProcessRunnerZMQ
MultiThreadPrefetchData = MultiThreadRunner
if __name__ == '__main__':
import time
from .raw import DataFromGenerator
from .common import FixedSizeData
x = DataFromGenerator(itertools.count())
x = FixedSizeData(x, 100)
x = MultiProcessRunnerZMQ(x, 2)
x.reset_state()
for idx, dp in enumerate(x):
print(dp)
time.sleep(0.1)
|
py | 1a31c6724d9b574aa4ee0297cb1e5b05d82ac0e9 | from core.himesis import Himesis
import uuid
class HSon2Man(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule Son2Man.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HSon2Man, self).__init__(name='HSon2Man', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """Son2Man"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Son2Man')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
self.vs[2]["attr1"] = """Son2Man"""
# match class Child() node
self.add_node()
self.vs[3]["mm__"] = """Child"""
self.vs[3]["attr1"] = """+"""
# match class Family() node
self.add_node()
self.vs[4]["mm__"] = """Family"""
self.vs[4]["attr1"] = """1"""
# apply class Man() node
self.add_node()
self.vs[5]["mm__"] = """Man"""
self.vs[5]["attr1"] = """1"""
# match association Child--family-->Family node
self.add_node()
self.vs[6]["attr1"] = """family"""
self.vs[6]["mm__"] = """directLink_S"""
# match association Family--sons-->Child node
self.add_node()
self.vs[7]["attr1"] = """sons"""
self.vs[7]["mm__"] = """directLink_S"""
# Add the edges
self.add_edges([
(0,3), # matchmodel -> match_class Child()
(0,4), # matchmodel -> match_class Family()
            (1,5), # applymodel -> apply_class Man()
(3,6), # match_class Child() -> association family
(6,4), # association family -> match_class Family()
(4,7), # match_class Family() -> association sons
(7,3), # association sons -> match_class Child()
(0,2), # matchmodel -> pairedwith
(2,1) # pairedwith -> applyModel
])
# Add the attribute equations
self["equations"] = [((5,'fullName'),('concat',((3,'firstName'),(4,'lastName')))), ]
|
py | 1a31c74c365dd57a475f319d394c97cdc307d096 | """
This module lets you practice one form of the ACCUMULATOR pattern,
namely, the "IN GRAPHICS" form which features:
-- DRAWING OBJECTS via ACCUMULATING positions and/or sizes,
as in: x = x + pixels
Additionally, it emphasizes that you must
** DO A CONCRETE EXAMPLE BY HAND **
before you can implement a solution to the problem in Python.
Authors: David Mutchler, Dave Fisher, Valerie Galluzzi, Amanda Stouder,
their colleagues and Mitch Lugsch.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
# ----------------------------------------------------------------------
# Students: As you work each of these problems, ask yourself:
# 1. Do I need a loop?
# If so, HOW MANY LOOPS?
#
# 2. Where I need a loop, what needs to happen:
# -- BEFORE the loop?
# -- IN the loop?
# -- AFTER the loop?
# ----------------------------------------------------------------------
def main():
""" Calls the TEST functions in this module. """
run_test_draw_squares_from_circle()
run_test_draw_circles_from_rectangle()
run_test_draw_lines_from_rectangles()
def run_test_draw_squares_from_circle():
""" Tests the draw_squares_from_circle function. """
print()
print('--------------------------------------------------')
print('Testing the draw_squares_from_circle function:')
print(' See the graphics windows that pop up.')
print('--------------------------------------------------')
# ------------------------------------------------------------------
# TWO tests on ONE window.
# ------------------------------------------------------------------
title = 'Tests 1 and 2 of DRAW_SQUARES_FROM_CIRCLE: '
title = title + ' 7 little squares from green circle, 4 big squares'
window1 = rg.RoseWindow(650, 350, title)
# Test 1:
circle = rg.Circle(rg.Point(100, 100), 20)
circle.fill_color = 'green'
draw_squares_from_circle(7, circle, window1)
# Test 2:
circle = rg.Circle(rg.Point(350, 70), 50)
draw_squares_from_circle(4, circle, window1)
window1.close_on_mouse_click()
# ------------------------------------------------------------------
# A third test on ANOTHER window.
# ------------------------------------------------------------------
title = 'Test 3 of DRAW_SQUARES_FROM_CIRCLE: '
title += ' 20 teeny squares from blue circle!'
window2 = rg.RoseWindow(525, 300, title)
# Test 3:
circle = rg.Circle(rg.Point(50, 50), 10)
circle.fill_color = 'blue'
draw_squares_from_circle(20, circle, window2)
window2.close_on_mouse_click()
def draw_squares_from_circle(n, circle, window):
"""
What comes in: Three arguments:
-- A positive integer n.
-- An rg.Circle.
-- An rg.RoseWindow.
What goes out: Nothing (i.e., None).
Side effects:
See draw_squares_from_circle.pdf in this project for pictures
that may help you better understand the following specification:
First draws the given rg.Circle on the given rg.RoseWindow.
Then draws n rg.Squares on the given rg.RoseWindow, such that:
-- The first rg.Square circumscribes the given rg.Circle.
-- Each subsequent rg.Square has its upper-left quarter
on top of the lower-right quarter of the previous rg.Square,
so that the squares form an overlapping sequence
that goes down and to the right.
Must ** render ** but ** NOT close ** the window.
Type hints:
:type n: int
:type circle: rg.Circle
:type window: rg.RoseWindow
"""
# ------------------------------------------------------------------
# DONE: 2. Implement and test this function.
# Tests have been written for you (above).
#
# CONSIDER using the ACCUMULATOR IN GRAPHICS pattern,
# as in draw_row_of_circles in m1e,
# instead of directly using the loop variable.
#
####################################################################
# HINT: To figure out the code that computes the necessary
# positions of each square,
# ** FIRST DO A CONCRETE EXAMPLE BY HAND! **
####################################################################
# ------------------------------------------------------------------
point = circle.center
x = point.x
y = point.y
circle.attach_to(window)
for _ in range(n): # Loop that does NOT use its index variable
point = rg.Point(x, y)
square = rg.Square(point, circle.radius * 2)
# Attach the object(s) to the window.
square.attach_to(window)
# Increment x and y
x = x + (circle.radius)
y = y + (circle.radius)
window.render()
def run_test_draw_circles_from_rectangle():
""" Tests the draw_circles_from_rectangle function. """
print()
print('--------------------------------------------------')
print('Testing the draw_circles_from_rectangle function:')
print(' See the graphics windows that pop up.')
print('--------------------------------------------------')
# ------------------------------------------------------------------
# DONE: 3. Implement this TEST function.
# It TESTS the draw_circles_from_rectangle function
# defined below. Include at least ** 3 ** tests, of which
# *** at least TWO tests are on ONE window and
# *** at least ONE test is on a DIFFERENT window.
#
####################################################################
# HINT: Consider using the same test cases as suggested by the
# pictures in draw_circles_from_rectangle.pdf in this project.
# Follow the same form as the example in a previous problem.
####################################################################
# ------------------------------------------------------------------
title = 'Tests 1 and 2 of DRAW_CIRCLES_FROM_RECTANGLE: '
window1 = rg.RoseWindow(720, 500, title)
# Test 1:
rectangle = rg.Rectangle(rg.Point(400, 250), rg.Point(440, 325))
rectangle.fill_color = 'green'
rectangle.outline_color = 'black'
rectangle.outline_thickness = 5
draw_circles_from_rectangle(4, 5, rectangle, window1)
# Test 2:
rectangle = rg.Rectangle(rg.Point(600, 400), rg.Point(500, 450))
rectangle.fill_color = 'blue'
rectangle.outline_color = 'red'
rectangle.outline_thickness = 3
draw_circles_from_rectangle(8, 3, rectangle, window1)
window1.close_on_mouse_click()
title = 'Test 3 of DRAW_CIRCLES_FROM_RECTANGLE: '
window2 = rg.RoseWindow(620, 380, title)
# Test 3:
rectangle = rg.Rectangle(rg.Point(350, 280), rg.Point(375, 330))
rectangle.fill_color = 'yellow'
rectangle.outline_color = 'brown'
rectangle.outline_thickness = 5
draw_circles_from_rectangle(6, 10, rectangle, window2)
window2.close_on_mouse_click()
def draw_circles_from_rectangle(m, n, rectangle, window):
"""
What comes in: Four arguments:
-- Positive integers m and n.
-- An rg.Rectangle.
-- An rg.RoseWindow.
What goes out: Nothing (i.e., None).
Side effects:
See draw_circles_from_rectangle.pdf in this project for pictures
that may help you better understand the following specification:
First draws the given rg.Rectangle on the given rg.RoseWindow.
Then draws m rg.Circles on the given rg.RoseWindow, such that:
-- The diameter of each rg.Circle is the same as the height
of the given rg.Rectangle.
-- The first rg.Circle is immediately to the left of the
given rg.Rectangle
-- Each subsequent rg.Circle is immediately to the left
of the previous rg.Circle, so that the circles form a row
that goes to the left.
-- Each rg. Circle has the same fill_color as the given
rg.Rectangle (and has no outline_color).
Then draws n rg.Circles on the given RoseWindow, such that:
-- The diameter of each rg.Circle is the same as the width
of the given rg.Rectangle.
-- The first rg.Circle is immediately above the
given rg.Rectangle
-- Each subsequent rg.Circle is immediately above the previous
rg.Circle, so that the circles form a column that goes up.
-- Each rg.Circle has the same outline_color as the given
rg.Rectangle (and has no fill_color).
Must ** render ** but ** NOT close ** the window.
Type hints:
:type m: int
:type n: int
:type rectangle: rg.Rectangle
:type window: rg.RoseWindow
"""
# ------------------------------------------------------------------
# DONE: 4. Implement and test this function.
# Tests have been written for you (above).
#
# CONSIDER using the ACCUMULATOR IN GRAPHICS pattern,
# as in draw_row_of_circles in m1e,
# instead of directly using the loop variable.
#
####################################################################
# HINT: To figure out the code that computes the necessary
# positions of each circle,
# ** FIRST DO A CONCRETE EXAMPLE BY HAND! **
####################################################################
# ------------------------------------------------------------------
rectangle_center = rectangle.get_center()
x = rectangle_center.x
y = rectangle_center.y
width = rectangle.get_width()
height = rectangle.get_height()
rectangle.attach_to(window)
for _ in range(m): # Loop that does NOT use its index variable
center1 = rg.Point(x - ((width / 2) + (height / 2)), rectangle_center.y)
circle1 = rg.Circle(center1, height / 2)
circle1.fill_color = rectangle.fill_color
# Attach the object(s) to the window.
circle1.attach_to(window)
# Increment x
x = x - height
for _ in range(n):
center2 = rg.Point(rectangle_center.x, y - ((height / 2) + (width / 2)))
circle2 = rg.Circle(center2, width / 2)
circle2.outline_color = rectangle.outline_color
# Attach the object(s) to the window.
circle2.attach_to(window)
# Increment y
y = y - width
window.render()
def run_test_draw_lines_from_rectangles():
""" Tests the draw_lines_from_rectangles function. """
print()
print('--------------------------------------------------')
print('Testing the draw_lines_from_rectangles function:')
print(' See the graphics windows that pop up.')
print('--------------------------------------------------')
# TWO tests on ONE window.
title = 'Tests 1 & 2 of DRAW_LINES_FROM_RECTANGLES:'
title += ' 5 lines, 8 lines!'
window1 = rg.RoseWindow(900, 400, title)
rectangle1 = rg.Rectangle(rg.Point(100, 25), rg.Point(150, 125))
rectangle2 = rg.Rectangle(rg.Point(300, 150), rg.Point(400, 175))
rectangle1.outline_color = 'red'
rectangle2.outline_color = 'blue'
draw_lines_from_rectangles(rectangle1, rectangle2, 5, window1)
rectangle1 = rg.Rectangle(rg.Point(870, 30), rg.Point(750, 100))
rectangle2 = rg.Rectangle(rg.Point(700, 90), rg.Point(650, 60))
rectangle2.outline_color = 'green'
draw_lines_from_rectangles(rectangle1, rectangle2, 8, window1)
window1.close_on_mouse_click()
# A third test on ANOTHER window.
title = 'Test 3 of DRAW_LINES_FROM_RECTANGLES: 11 lines!'
window2 = rg.RoseWindow(700, 700, title)
rectangle1 = rg.Rectangle(rg.Point(550, 200), rg.Point(650, 100))
rectangle2 = rg.Rectangle(rg.Point(600, 50), rg.Point(650, 75))
rectangle1.outline_color = 'brown'
rectangle2.outline_color = 'cyan'
rectangle2.outline_thickness = 10
draw_lines_from_rectangles(rectangle1, rectangle2, 11, window2)
window2.close_on_mouse_click()
def draw_lines_from_rectangles(rectangle1, rectangle2, n, window):
"""
What comes in: Four arguments:
-- Two rg.Rectangles.
-- A positive integer n.
-- An rg.RoseWindow.
What goes out: Nothing (i.e., None).
Side effects:
See draw_lines_from_rectangles.pdf in this project
for pictures that may help you better understand
the following specification:
First draws the given rg.Rectangles on the given rg.RoseWindow.
Then draws n rg.Lines on the given rg.RoseWindow, such that:
          -- The 1st rg.Line goes from the center of the
1st rg.Rectangle to the center of the 2nd rg.Rectangle.
-- The 2nd rg.Line goes from the lower-left corner of the
1st rg.Rectangle and is parallel to the 1st rg.Line,
with the same length and direction as the 1st rg.Line.
-- Subsequent rg.Lines are shifted from the previous rg.Line in
the same way that the 2nd rg.Line is shifted from the 1st.
-- Each of the rg.Lines has thickness 5.
-- The colors of the rg.Lines alternate, as follows:
- The 1st, 3rd, 5th, ... rg.Line has color R1_color
- The 2nd, 4th, 6th, ... rg.Line has color R2_color
where
- R1_color is the outline color of the 1st rg.Rectangle
- R2_color is the outline color of the 2nd rg.Rectangle
Must ** render ** but ** NOT close ** the window.
Type hints:
:type rectangle1: rg.Rectangle
:type rectangle2: rg.Rectangle
:type n: int
:type window: rg.RoseWindow
"""
# ------------------------------------------------------------------
# DONE: 5. Implement and test this function.
# Tests have been written for you (above).
#
# CONSIDER using the ACCUMULATOR IN GRAPHICS pattern,
# as in draw_row_of_circles in m1e,
# instead of directly using the loop variable.
#
####################################################################
# HINT: To figure out the code that computes the necessary
# endpoints for each line,
# ** FIRST DO A CONCRETE EXAMPLE BY HAND! **
####################################################################
# ------------------------------------------------------------------
center_R1 = rectangle1.get_center()
center_R2 = rectangle2.get_center()
width_R1 = rectangle1.get_width()
height_R1 = rectangle1.get_height()
x1 = center_R1.x
y1 = center_R1.y
x2 = center_R2.x
y2 = center_R2.y
rectangle1.attach_to(window)
rectangle2.attach_to(window)
for k in range(n):
if (k + 1) % 2 == 0:
start = rg.Point(x1, y1)
end = rg.Point(x2, y2)
line = rg.Line(start, end)
line.thickness = 5
line.color = rectangle2.outline_color
# Attach the object(s) to the window.
line.attach_to(window)
# Increment variables
x1 = x1 - (width_R1 / 2)
y1 = y1 + (height_R1 / 2)
x2 = x2 - (width_R1 / 2)
y2 = y2 + (height_R1 / 2)
else:
start = rg.Point(x1, y1)
end = rg.Point(x2, y2)
line = rg.Line(start, end)
line.thickness = 5
line.color = rectangle1.outline_color
# Attach the object(s) to the window.
line.attach_to(window)
# Increment variables
x1 = x1 - (width_R1 / 2)
y1 = y1 + (height_R1 / 2)
x2 = x2 - (width_R1 / 2)
y2 = y2 + (height_R1 / 2)
window.render()
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
|
py | 1a31c8d225cb281739a14a9a643d3ce33103fb97 | # -*- coding: utf-8 -*-
import datetime
import os
from pyvirtualdisplay import Display
from selenium import webdriver
import constants
# Choose and configure the browser of your choice
def get_browser():
# # These work on Mac
# return webdriver.Chrome()
# return webdriver.Firefox()
# On Linux you need to initialize a display
global display
display = Display(visible=0, size=(1024, 768))
display.start()
options = webdriver.ChromeOptions()
options.add_argument("--no-sandbox")
options.add_argument("--disable-dev-shm-usage")
options.add_argument("--ignore-certificate-errors")
    options.add_experimental_option("useAutomationExtension", False)
return webdriver.Chrome(options=options)
# If present and callable, it will be called at the end of the whole test suite
def teardown():
global display
try:
display.stop()
except NameError:
pass
# A failed login by a provider will be retried so many times as set here
MAX_LOGIN_ATTEMPTS = 3
# Multiplies the wait times set in expected values
WAIT_MULTIPLIER = 1
# Minimum wait time
MIN_WAIT = 0
# The host and port where the tested ap should listen.
HOST = '127.0.0.1'
PORT = 443
# The host alias set in the /etc/hosts file.
# The actual tests will navigate selenium browser to this host.
# This is necessary because some providers don't support localhost as the
# callback url.
HOST_ALIAS = 'authomatic.org'
# Only frameworks included here will be tested.
INCLUDE_FRAMEWORKS = [
# 'django',
'flask', # Runs with https
'pyramid', # Runs with https
]
# Only providers included here will be tested.
# Leave commented-out entries (with explanation) to prevent trying to re-add tests for services
# Which aren't testable in an automated environment.
INCLUDE_PROVIDERS = [
# OAuth 1.0a - This mostly deprecated as a service 'in the wild' - we should drop support.
# 'bitbucket',
# 'flickr',
# 'plurk',
'twitter',
# 'tumblr',
# 'ubuntuone', # UbuntuOne service is no longer available
# 'vimeo',
# Xero requires creation of a new trial project every month which makes
# the setup of the automated test too laborious to support it.
# 'xero',
# 'xing',
# 'yahoo',
# OAuth 2.0
# 'amazon', # Asks for a captcha (cannot be automated)
# 'behance', # doesn't support third party authorization anymore.
# 'bitly', # deprecated for test suite refactoring - consider re-enabling
# 'deviantart', # deprecated for test suite refactoring - consider re-enabling
'facebook',
# 'foursquare', # deprecated for test suite refactoring - consider re-enabling
# 'google', # deprecated for test suite refactoring - consider re-enabling
# 'github', # Asks for 2FA/one-time-pass verification in Travis CI environment.
# 'linkedin', # # Asks for verification (captcha) in the login form in Travis CI environment.
# 'paypal', # deprecated for test suite refactoring - consider re-enabling
# 'reddit', # deprecated for test suite refactoring - consider re-enabling
# 'vk', # deprecated for test suite refactoring - consider re-enabling
# 'windowslive', # Asks for verification (captcha) in the login form in Travis CI environment.
# 'yammer', # deprecated for test suite refactoring - consider re-enabling
# 'yandex', # deprecated for test suite refactoring - consider re-enabling
# OpenID
# 'openid_livejournal', # Login and password elements are not visible.
# 'openid_verisignlabs', # deprecated for test suite refactoring - consider re-enabling
# 'openid_wordpress', # deprecated for test suite refactoring - consider re-enabling
# 'openid_yahoo', # deprecated for test suite refactoring - consider re-enabling
]
# Recommended setup for Travis CI environment.
if os.environ.get('TRAVIS'):
MAX_LOGIN_ATTEMPTS = 20
WAIT_MULTIPLIER = 2
MIN_WAIT = 2
# Use these constants if you have the same user info by all tested providers.
EMAIL = '[email protected]'
FIRST_NAME = 'Authomatic'
LAST_NAME = 'Testuser'
NAME = FIRST_NAME + ' ' + LAST_NAME
USERNAME = 'authomaticproject'
USERNAME_REVERSE = 'projectauthomatic'
NICKNAME = 'Mr. AP'
BIRTH_YEAR = 2000
BIRTH_MONTH = 5
BIRTH_DAY = 5
BIRTH_DATE = datetime.datetime(BIRTH_YEAR, BIRTH_MONTH, BIRTH_DAY)
CITY = 'London'
COUNTRY = 'Great Britain'
COUNTRY_ISO2 = 'gb'
POSTAL_CODE = 'EC1A1DH'
PHONE = '??????????'
PHONE_INTERNATIONAL = '0044??????????'
GENDER = constants.GENDER_MALE
LOCALE = 'en_UK'
LOCATION = CITY + ', ' + COUNTRY
# Common values for all providers
COMMON = {
# Could be same if the user sets it so
'user_birth_date': BIRTH_DATE,
'user_birth_day': BIRTH_DAY,
'user_birth_month': BIRTH_MONTH,
'user_birth_year': BIRTH_YEAR,
'user_login': EMAIL,
'user_email': EMAIL,
'user_first_name': FIRST_NAME,
'user_last_name': LAST_NAME,
'user_name': NAME,
'user_username': USERNAME,
'user_username_reverse': USERNAME_REVERSE,
'user_nickname': NICKNAME,
'user_city': CITY,
'user_country': COUNTRY,
'user_gender': GENDER,
'user_phone': PHONE,
'user_postal_code': POSTAL_CODE,
'user_locale': LOCALE,
'user_location': LOCATION,
# It is not a good idea to have the same password for all providers
# 'user_password': '##########',
# Provider and user specific value
# 'user_id': '',
# 'user_locale': None,
# 'user_timezone': None,
# Provider specific format
# 'user_picture': '',
# 'user_link': '',
# Provider specific value
# 'consumer_key': '',
# 'consumer_secret': '',
}
# Values from COMMON will be overridden by values from PROVIDERS[provider_name]
# if set.
# Since this file is public, only put providers in here if they aren't secret.
# Otherwise, secret providers should be added to config_secret.py[.enc]
PROVIDERS = {
# # OAuth 2.0
# 'facebook': {
# 'consumer_key': '##########',
# 'consumer_secret': '##########',
# 'user_password': '##########',
# 'user_id': '??????????',
# },
}
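# A minimal sketch (not part of the original config) of the documented merge rule:
# values from COMMON are overridden by the per-provider dict in PROVIDERS when present.
def _example_expected_values(provider_name):
    expected = dict(COMMON)
    expected.update(PROVIDERS.get(provider_name, {}))
    return expected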
|
py | 1a31c911d1151e0d5b62a79d859e88200ace4f18 | from typing import Any, Dict
LOGGING: Dict[str, Any] = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"colored": {
"style": "{",
"()": "colorlog.ColoredFormatter",
"format": "{asctime:<15} {name:<18}:{lineno:<3} {log_color}{levelname:<7} {message}",
},
"colored_dev": {
"style": "{",
"()": "colorlog.ColoredFormatter",
"format": "{asctime:<15} {relative_path_and_lineno:<35} {log_color}{levelname:<7} {message}",
},
"verbose": {
"style": "{",
"format": "{asctime:<15} {name:<18}:{lineno:<3} {levelname:<7} {message}",
},
},
"filters": {
"package_path": {
"()": "baracoda.utils.PackagePathFilter",
}
},
"handlers": {
"colored_stream": {
"level": "DEBUG",
"class": "colorlog.StreamHandler",
"formatter": "colored",
},
"colored_stream_dev": {
"level": "DEBUG",
"class": "colorlog.StreamHandler",
"formatter": "colored_dev",
"filters": ["package_path"],
},
"console": {
"level": "INFO",
"class": "logging.StreamHandler",
"formatter": "verbose",
},
"slack": {
"level": "ERROR",
"class": "baracoda.utils.SlackHandler",
"formatter": "verbose",
},
},
"loggers": {
"baracoda": {
"handlers": ["console", "slack"],
"level": "INFO",
"propagate": True,
},
},
}
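# A minimal sketch (not part of the original module) of how a dict like this is usually
# consumed: pass it to logging.config.dictConfig() at application start-up. The import
# is kept local so this illustrative helper stays self-contained.
def _example_configure_logging() -> None:
    import logging.config
    logging.config.dictConfig(LOGGING)
    logging.getLogger("baracoda").info("logging configured")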
|
py | 1a31c992ebe1b9a348ec1c6743ff6998ddaad3d0 | from enum import Enum
from typing import TYPE_CHECKING, Callable, Dict, Optional
from prompt_toolkit.clipboard import ClipboardData
if TYPE_CHECKING:
from .key_processor import KeyPressEvent
from .key_bindings.vi import TextObject
__all__ = [
'InputMode',
'CharacterFind',
'ViState',
]
class InputMode(str, Enum):
value: str
INSERT = 'vi-insert'
INSERT_MULTIPLE = 'vi-insert-multiple'
NAVIGATION = 'vi-navigation' # Normal mode.
REPLACE = 'vi-replace'
class CharacterFind:
def __init__(self, character: str, backwards: bool = False) -> None:
self.character = character
self.backwards = backwards
class ViState:
"""
Mutable class to hold the state of the Vi navigation.
"""
def __init__(self) -> None:
#: None or CharacterFind instance. (This is used to repeat the last
#: search in Vi mode, by pressing the 'n' or 'N' in navigation mode.)
self.last_character_find = None
# When an operator is given and we are waiting for text object,
# -- e.g. in the case of 'dw', after the 'd' --, an operator callback
# is set here.
self.operator_func: Optional[Callable[['KeyPressEvent', 'TextObject'], None]] = None
self.operator_arg: Optional[int] = None
#: Named registers. Maps register name (e.g. 'a') to
#: :class:`ClipboardData` instances.
self.named_registers: Dict[str, ClipboardData] = {}
#: The Vi mode we're currently in to.
self.__input_mode = InputMode.INSERT
#: Waiting for digraph.
self.waiting_for_digraph = False
self.digraph_symbol1: Optional[str] = None # (None or a symbol.)
#: When true, make ~ act as an operator.
self.tilde_operator = False
#: Register in which we are recording a macro.
#: `None` when not recording anything.
# Note that the recording is only stored in the register after the
# recording is stopped. So we record in a separate `current_recording`
# variable.
self.recording_register: Optional[str] = None
self.current_recording = ''
# Temporary navigation (normal) mode.
# This happens when control-o has been pressed in insert or replace
# mode. The user can now do one navigation action and we'll return back
# to insert/replace.
self.temporary_navigation_mode = False
@property
def input_mode(self) -> InputMode:
" Get `InputMode`. "
return self.__input_mode
@input_mode.setter
def input_mode(self, value: InputMode) -> None:
" Set `InputMode`. "
if value == InputMode.NAVIGATION:
self.waiting_for_digraph = False
self.operator_func = None
self.operator_arg = None
self.__input_mode = value
def reset(self) -> None:
"""
Reset state, go back to the given mode. INSERT by default.
"""
# Go back to insert mode.
self.input_mode = InputMode.INSERT
self.waiting_for_digraph = False
self.operator_func = None
self.operator_arg = None
# Reset recording state.
self.recording_register = None
self.current_recording = ''
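# A minimal sketch (not part of the original module) of the state machine above:
# switching to NAVIGATION clears any pending operator, and reset() returns to INSERT.
def _example_vi_state() -> ViState:
    state = ViState()
    state.operator_arg = 2
    state.input_mode = InputMode.NAVIGATION   # setter clears operator_func/operator_arg
    assert state.operator_arg is None
    state.reset()
    assert state.input_mode == InputMode.INSERT
    return state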
|
py | 1a31c9c2726bfba526a1c4af2d684674cd3cde50 | # Generated by Django 3.0.5 on 2020-05-16 12:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20200422_1538'),
]
operations = [
migrations.AddField(
model_name='user',
name='facebook',
field=models.CharField(blank=True, max_length=128),
),
migrations.AddField(
model_name='user',
name='instagram',
field=models.CharField(blank=True, max_length=128),
),
migrations.AddField(
model_name='user',
name='linkedin',
field=models.CharField(blank=True, max_length=128),
),
migrations.AddField(
model_name='user',
name='twitter',
field=models.CharField(blank=True, max_length=128),
),
]
|
py | 1a31ca061c3be6f29d480fbd96e04e634604e3ba | # Copyright 2013 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Impala exception classes. Also implements PEP 249."""
from __future__ import absolute_import
class Error(Exception):
pass
class Warning(Exception):
pass
# DB API (PEP 249) exceptions
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class InternalError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class DataError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
# RPC errors
class RPCError(Error):
pass
class HiveServer2Error(RPCError):
pass
class HttpError(RPCError):
"""An error containing an http response code"""
def __init__(self, code, message, body, http_headers):
self.code = code
self.message = message
self.body = body
self.http_headers = http_headers
def __str__(self):
# Don't try to print the body as we don't know what format it is.
return "HTTP code {}: {}".format(self.code, self.message)
class BeeswaxError(RPCError):
pass
class QueryStateError(BeeswaxError):
pass
class DisconnectedError(BeeswaxError):
pass
|
py | 1a31cb076904c0f742e55e64010490b56bdaa00b | # Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import
import os
from molecule import logger
from molecule.api import Driver
from molecule.util import lru_cache
log = logger.get_logger(__name__)
class Podman(Driver):
"""
The class responsible for managing `Podman`_ containers. `Podman`_ is
not default driver used in Molecule.
    Molecule uses the Podman Ansible connector and the podman CLI while mapping
variables from ``molecule.yml`` into ``create.yml`` and ``destroy.yml``.
.. _`podman connection`: https://docs.ansible.com/ansible/latest/plugins/connection/podman.html
.. code-block:: yaml
driver:
name: podman
platforms:
- name: instance
hostname: instance
image: image_name:tag
dockerfile: Dockerfile.j2
pull: True|False
pre_build_image: True|False
registry:
url: registry.example.com
credentials:
username: $USERNAME
password: $PASSWORD
override_command: True|False
command: sleep infinity
tty: True|False
pid_mode: host
privileged: True|False
security_opts:
- seccomp=unconfined
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:ro
tmpfs:
- /tmp
- /run
capabilities:
- SYS_ADMIN
exposed_ports:
- 53/udp
- 53/tcp
published_ports:
- 0.0.0.0:8053:53/udp
- 0.0.0.0:8053:53/tcp
ulimits:
- nofile=1024:1028
dns_servers:
- 8.8.8.8
network: host
etc_hosts: {'host1.example.com': '10.3.1.5'}
cert_path: /foo/bar/cert.pem
tls_verify: true
env:
FOO: bar
restart_policy: on-failure
restart_retries: 1
buildargs:
http_proxy: http://proxy.example.com:8080/
If specifying the `CMD`_ directive in your ``Dockerfile.j2`` or consuming a
built image which declares a ``CMD`` directive, then you must set
``override_command: False``. Otherwise, Molecule takes care to honour the
value of the ``command`` key or uses the default of ``bash -c "while true;
do sleep 10000; done"`` to run the container until it is provisioned.
When attempting to utilize a container image with `systemd`_ as your init
system inside the container to simulate a real machine, make sure to set
the ``privileged``, ``volumes``, ``command``, and ``environment``
values. An example using the ``centos:7`` image is below:
.. note:: Do note that running containers in privileged mode is considerably
less secure.
.. code-block:: yaml
platforms:
- name: instance
image: centos:7
privileged: true
volumes:
- "/sys/fs/cgroup:/sys/fs/cgroup:rw"
command: "/usr/sbin/init"
tty: True
.. code-block:: bash
$ pip install molecule[podman]
    When pulling from a private registry, it is up to the user to decide
    whether to use hard-coded strings or environment variables for passing
    credentials to Molecule.
.. important::
Hard-coded credentials in ``molecule.yml`` should be avoided, instead use
`variable substitution`_.
Provide a list of files Molecule will preserve, relative to the scenario
ephemeral directory, after any ``destroy`` subcommand execution.
.. code-block:: yaml
driver:
name: podman
safe_files:
- foo
.. _`Podman`: https://podman.io/
.. _`systemd`: https://www.freedesktop.org/wiki/Software/systemd/
.. _`CMD`: https://docs.docker.com/engine/reference/builder/#cmd
""" # noqa
def __init__(self, config=None):
super(Podman, self).__init__(config)
self._name = 'podman'
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def login_cmd_template(self):
return (
'podman exec '
'-e COLUMNS={columns} '
'-e LINES={lines} '
'-e TERM=bash '
'-e TERM=xterm '
'-ti {instance} bash'
)
@property
def default_safe_files(self):
return [os.path.join(self._config.scenario.ephemeral_directory, 'Dockerfile')]
@property
def default_ssh_connection_options(self):
return []
def login_options(self, instance_name):
return {'instance': instance_name}
def ansible_connection_options(self, instance_name):
return {'ansible_connection': 'podman'}
@lru_cache()
def sanity_checks(self):
"""Implement Podman driver sanity checks."""
log.info("Sanity checks: '{}'".format(self._name))
|
py | 1a31cb5eeda4736094255147ec14dd4714968cbe |
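# A fixed-capacity, array-backed stack demo: the preallocated list is the storage,
# `size` tracks the top-of-stack index, and push/pop overwrite or re-expose slots
# instead of growing or shrinking the list.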
stack = [9, 2, 3, 1, 4]
size = 0
def push(x):
global size
stack[size] = x
size += 1
def pop():
global size
size -= 1
return stack[size]
push(5)
push(6)
push(1)
print(stack)
print(pop())
print(stack)
print(pop())
print(stack)
|
py | 1a31cb62fad46dca8bdd5cb3fac154991298fa14 | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
#pylint: disable-msg=W0122,R0914,R0912
"""
File : pkg.py
Author : Valentin Kuznetsov <[email protected]>
Description: AbstractPkg class provides basic functionality
to generate a CMSSW class from a given template
"""
from __future__ import print_function
# system modules
import os
import sys
import time
import pprint
# package modules
from FWCore.Skeletons.utils import parse_word, functor, user_info, tree, template_directory
class AbstractPkg(object):
"""
AbstractPkg takes care how to generate code from template/PKG
package area. The PKG can be any directory which may include
any types of files, e.g. C++ (.cc), python (.py), etc.
This class relies on specific logic which we outline here:
    - each template may use tags enclosed in double underscores,
      e.g. __class__, __record__, etc.
    - each template may have example tags; such tags should
      start with @example_. While processing a template, the user may
      choose to strip them off or keep the code behind those tags
    - in addition, the user may specify pure python code which can
      operate with user-defined tags. This code snippet should
      be enclosed with #python_begin and #python_end lines,
      which declare the start and end of the python block
"""
def __init__(self, config=None):
super(AbstractPkg, self).__init__()
if not config:
self.config = {}
else:
self.config = config
self.pname = self.config.get('pname', None)
self.tmpl = self.config.get('tmpl', None)
self.debug = self.config.get('debug', 0)
self.tdir = template_directory()
self.author = user_info(self.config.get('author', None))
self.date = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
self.not_in_dir = self.config.get('not_in_dir', [])
self.working_dir = self.config.get('working_dir')
def tmpl_etags(self):
"Scan template files and return example tags"
keys = []
sdir = '%s/%s' % (self.tdir, self.tmpl)
for name in os.listdir(sdir):
if name[-1] == '~':
continue
if name == 'CVS':
continue
fname = os.path.join(sdir, name)
with open(fname, 'r') as stream:
for line in stream.readlines():
if line.find('@example_') != -1: # possible tag
keys += [k for k in line.split() if \
k.find('@example_') != -1]
return set(keys)
def print_etags(self):
"Print out template example tags"
for key in self.tmpl_etags():
print(key)
def tmpl_tags(self):
"Scan template files and return template tags"
keys = []
sdir = '%s/%s' % (self.tdir, self.tmpl)
for name in os.listdir(sdir):
if name[-1] == '~':
continue
if name == 'CVS':
continue
fname = os.path.join(sdir, name)
with open(fname, 'r') as stream:
for line in stream.readlines():
if line.find('__') != -1: # possible key
keys += [k for k in parse_word(line)]
return set(keys)
def print_tags(self):
"Print out template keys"
for key in self.tmpl_tags():
print(key)
def parse_etags(self, line):
"""
        Determine whether to skip or keep the given line based on class-tag
        meta-strings
"""
tmpl_etags = self.tmpl_etags()
keep_etags = self.config.get('tmpl_etags', [])
for tag in tmpl_etags:
for valid_tag in keep_etags:
if line.find(valid_tag) != -1:
line = line.replace(valid_tag, '')
return line
if line.find(tag) != -1:
line = ''
return line
if len(keep_etags) == 0:
return line.replace('@default', '')
if '@default' in line:
return ''
return line
def write(self, fname, tmpl_name, kwds):
"Create new file from given template name and set of arguments"
code = ""
read_code = False
if os.path.exists(fname):
return
with open(fname, 'w') as stream:
for line in open(tmpl_name, 'r').readlines():
line = self.parse_etags(line)
if not line:
continue
if line.find('#python_begin') != -1:
read_code = True
continue
if line.find('#python_end') != -1:
read_code = False
if read_code:
code += line
if code and not read_code:
res = functor(code, kwds, self.debug)
stream.write(res)
code = ""
continue
if not read_code:
for key, val in kwds.items():
if isinstance(val, str):
line = line.replace(key, val)
stream.write(line)
def get_kwds(self):
"Return keyword arguments to be used in methods"
kwds = {'__pkgname__': self.config.get('pkgname', 'Package'),
'__author__': self.author,
'__date__': self.date,
'__class__': self.pname,
'__class_lowercase__': self.pname.lower(),
'__class_space__': " "*len(self.pname),
'__name__': self.pname,
'__subsys__': self.config.get('subsystem', 'Subsystem')}
args = self.config.get('args', None)
kwds.update(args)
if self.debug:
print("Template tags:")
pprint.pprint(kwds)
return kwds
def generate(self):
"Generate package templates in a given directory"
# keep current location, since generate will switch directories
cdir = os.getcwd()
# read from configutation which template files to create
tmpl_files = self.config.get('tmpl_files', 'all')
# setup keyword arguments which we'll pass to write method
kwds = self.get_kwds()
# create template package dir and cd into it
if tmpl_files == 'all' and self.tmpl not in self.not_in_dir:
if os.path.isdir(self.pname):
msg = "Can't create package '%s'\n" % self.pname
msg += "Directory %s is already exists" % self.pname
print(msg)
sys.exit(1)
os.makedirs(self.pname)
os.chdir(self.pname)
# read directory driver information and create file list to generate
sdir = os.path.join(self.tdir, self.tmpl)
sources = [s for s in os.listdir(sdir) \
if s != 'Driver.dir' and s.find('~') == -1]
driver = os.path.join(sdir, 'Driver.dir')
if os.path.isfile(driver):
sources = [s.replace('\n', '') for s in open(driver, 'r').readlines()]
if 'CVS' in sources:
sources.remove('CVS')
# special case of Skeleton, which requires to generate only given
# file type if self.pname has extension of that type
names = set([s.split('.')[0] for s in sources])
if names == set(['Skeleton']):
if self.pname.find('.') != -1:
_, ext = os.path.splitext(self.pname)
sources = [s for s in sources if s.rfind(ext) != -1]
self.pname = self.pname.replace(ext, '')
kwds = self.get_kwds()
if not sources:
msg = 'Unable to find skeleton for extension "%s"' % ext
print(msg)
sys.exit(1)
bdir = os.environ.get('CMSSW_BASE', '')
dirs = os.getcwd().replace(bdir, '').split('/')
ldir = os.getcwd().split('/')[-1]
idir = ''
subsys = kwds['__subsys__']
pkgname = kwds['__pkgname__']
if sources == ['Skeleton.cc', 'Skeleton.h']:
if ldir == 'interface' and os.getcwd().find(bdir) != -1:
idir = '%s/%s/interface/' % (subsys, pkgname)
        # run within some directory of the Subsystem/Pkg area
# and only for mkskel <file>.cc
elif sources == ['Skeleton.cc'] and \
len(dirs) == 5 and dirs[0] == '' and dirs[1] == 'src':
idir = '%s/%s/interface/' % (subsys, pkgname)
elif sources == ['Skeleton.h'] and ldir == 'interface' and \
len(dirs) == 5 and dirs[0] == '' and dirs[1] == 'src':
idir = '%s/%s/interface/' % (subsys, pkgname)
kwds.update({'__incdir__': idir})
# loop over source files, create dirs as necessary and generate files
# names for writing templates
gen_files = []
for src in sources:
if tmpl_files != 'all':
fname, ext = os.path.splitext(src)
if tmpl_files != ext:
continue
#also reject if this is the wrong directory
if self.working_dir and src.split('/')[-2] != self.working_dir:
continue
src = src.split('/')[-1]
if self.debug:
print("Read", src)
items = src.split('/')
if items[-1] == '/':
items = items[:-1]
tname = items[-1] # template file name
tmpl_name = os.path.join(sdir, items[-1]) # full tmpl file name
if os.path.isfile(tmpl_name):
ftype = 'file'
else:
ftype = 'dir'
name2gen = src # new file we'll create
if items[-1] == 'testBuildFile.xml':
name2gen = '/'.join(src.split('/')[:-1])+'/BuildFile.xml'
if -1 !=tname.split('.')[0].find(self.tmpl): # need to substitute
name2gen = name2gen.replace(self.tmpl, self.pname)
name2gen = os.path.join(os.getcwd(), name2gen)
if self.debug:
print("Create", name2gen)
if ftype == 'dir':
if not os.path.isdir(name2gen):
os.makedirs(name2gen)
continue # we're done with dir
fdir = os.path.dirname(name2gen)
if not os.path.isdir(fdir):
os.makedirs(fdir)
self.write(name2gen, tmpl_name, kwds)
gen_files.append(name2gen.split('/')[-1])
if tmpl_files == 'all' and self.tmpl not in self.not_in_dir:
msg = 'New package "%s" of %s type is successfully generated' \
% (self.pname, self.tmpl)
else:
msg = 'Generated %s file' % ', '.join(gen_files)
if len(gen_files) > 1:
msg += 's'
print(msg)
# return back where we started
os.chdir(cdir)
if msg.find('New package') != -1:
tree(self.pname)
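# A minimal usage sketch (not part of the original module; the 'EDProducer' template
# name and config values are illustrative and assume a standard skeletons template
# area is available on disk):
def _example_generate_package():
    config = {
        'pname': 'MyProducer',      # name of the class/package to generate
        'tmpl': 'EDProducer',       # template directory under the skeletons area
        'args': {},                 # extra __tag__ -> value substitutions
        'tmpl_etags': [],           # example tags to keep; others are stripped
        'tmpl_files': 'all',
        'debug': 0,
    }
    AbstractPkg(config).generate()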
|
py | 1a31cbe01406213bfcaaf90c2882397642fc68d4 | # vim: set fenc=utf8 ts=4 sw=4 et :
import os
import io
import json
import unittest
from shlex import split
from .testcase import TestCase
from pdml2flow.conf import Conf
import pdml2flow
TEST_DIR_PDML2FLOW="test/pdml2flow_tests/"
TEST_DIR_PDML2FRAME="test/pdml2frame_tests/"
class TestSystem(TestCase):
def read_json(self, f):
objs = []
data = ''
for line in f:
data += line
try:
objs.append(json.loads(data))
data = ''
except ValueError:
# Not yet a complete JSON value
pass
return objs
def get_test(run, directory, test):
def system_test(self):
if os.path.isfile('{}/{}/skip'.format(directory, test)):
self.skipTest('Skipfile found')
with open('{}/{}/stdin'.format(directory, test)) as f_stdin, \
io.StringIO() as f_stdout, \
io.StringIO() as f_stderr:
# wire up io
Conf.IN = f_stdin
Conf.OUT = f_stdout
Conf.OUT_DEBUG = f_stderr
Conf.OUT_WARNING = f_stderr
Conf.OUT_ERROR = f_stderr
try:
# try to load arguments
with open('{}/{}/args'.format(directory, test)) as f:
Conf.ARGS = split(f.read())
except FileNotFoundError:
Conf.ARGS = ''
# run
run()
# compare stdout
stdout_raw = f_stdout.getvalue()
stderr_raw = f_stderr.getvalue()
with open('{}/{}/stdout'.format(directory, test)) as f:
expected_raw = f.read()
# Try parsing as json, and compare objects
run_objs = self.read_json(stdout_raw)
expected_objs = self.read_json(expected_raw)
self.assertEqual(
len(run_objs),
len(expected_objs)
)
for e in expected_objs:
self.assertIn(
e,
expected_objs
)
for o in run_objs:
self.assertIn(
o,
expected_objs
)
# if no object loaded: do a raw comparison, line by line
if len(run_objs) == 0 or len(expected_objs) == 0:
self.assertEqual(
sorted(
stdout_raw.splitlines()
),
sorted(
expected_raw.splitlines()
)
)
try:
# try compare stderr
with open('{}/{}/stderr'.format(directory, test)) as f:
expected_raw = f.read()
self.assertEqual(
expected_raw,
stderr_raw
)
except FileNotFoundError:
self.assertEqual(
'',
stderr_raw
)
return system_test
def add_tests(run, directory):
for test in os.listdir(directory):
# append test
setattr(
TestSystem,
'test_{}_{}'.format(run.__name__, test),
get_test(run, directory, test)
)
# Add tests
add_tests(pdml2flow.pdml2flow, TEST_DIR_PDML2FLOW)
add_tests(pdml2flow.pdml2frame, TEST_DIR_PDML2FRAME)
|
py | 1a31ccb0e17c08f0f3f6b0e3910e2d208f66cdaa | from models.image_classification import alexnet, vgg16, resnet
class ModelSelector:
@staticmethod
def get_model(model_name):
model_mux = {
"alexnet": alexnet.AlexNet,
"vgg16": vgg16.VGG16,
"resnet": resnet.ResNet,
}
return model_mux.get(model_name, "Invalid model name")
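# Usage sketch (illustrative, not part of the original module):
#   ModelSelector.get_model("vgg16")    # -> the vgg16.VGG16 class
#   ModelSelector.get_model("unknown")  # -> the string "Invalid model name" fallback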
|
py | 1a31ccb3f1245b30ad2d213301d9f68b761e7368 | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
# auth: clsn
# by: covid-19
# date 20200328
#**********************
import requests
import json
import pymysql
import datetime
import sys
# Work around Python 2 UTF-8 / Chinese encoding errors
reload(sys)
sys.setdefaultencoding('utf8')
now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# Database configuration
db_config = {
'host': '61.149.146.136',
'port': 63306,
'user': 'clsn',
'password': '123456',
'db': 'clsn',
'charset': 'utf8'
}
def to_dingtalk():
# Request URL: the DingTalk webhook address
webhook = "https://oapi.dingtalk.com/robot/send?access_token=9f54eaaa734cdab863149bfff2b2fa1be86ea2ec5eb89cad6bf93e7c6b771066"
# Build the request headers
header = {
"Content-Type": "application/json",
"Charset": "UTF-8"
}
# Build the request payload
message = {
"actionCard": {
"title": title,
"text": text,
"hideAvatar": "0",
"btnOrientation": "0"
},
"msgtype": "actionCard"
}
# Serialize the payload as JSON
message_json = json.dumps(message)
# print message_json
# Send the request
info = requests.post(url=webhook,data=message_json,headers=header)
# Print the response
print(info.text)
def get_now_info():
with pymysql.connect(**db_config) as curr:
sql1 = "select max(monitor_time) from covid_19_overseas"
curr.execute(sql1)
date_time = list(curr.fetchone())
# print date_time[0]
img = "\n"
head = "## <font color=#FFBF00>[风险提示]</font> <font color=#000000>全球新型冠状病毒肺炎疫情</font>\n"
msg_from = '\n >数据更新至 {}'.format(date_time[0])
sql2 = "select sum(confirm) as '累计确诊',sum(heal) as '治愈',sum(confirmCompare) as '新增确诊',sum(dead) as '死亡' from " \
"covid_19_overseas where monitor_time = '{}' ".format(date_time[0])
curr.execute(sql2)
data = list(curr.fetchone())
bf_dead = round(data[3]/data[0]*100,2)
bf_heal = round(data[1]/data[0]*100,2)
info = """\n ## **确诊病例:** <font color=#FF0000>{}</font>
\n ## **死亡病例:** <font color=#404040>{} ({}%)</font>
\n ## **治愈病例:** <font color=#9DEA15> {} ({}%)</font>
\n ## **新增病例:** <font color=#FFBF00> {}</font>\n""" .format(format(data[0],','),
format(data[3],','),bf_dead,
format(data[1],','),bf_heal,
format(data[2],','))
sql3 = "select confirm as '累计确诊', heal as '治愈',confirmCompare as '新增确诊',dead as '死亡',country as '国家' from " \
"covid_19_overseas where monitor_time = '{}' limit 5;".format(date_time[0])
curr.execute(sql3)
top_data = list(curr.fetchall())
country_info = ''
for data in top_data:
# print data
info_ = """ \n -国家:{}
\n ## **确诊病例:** <font color=#FF0000>{}</font>
\n ## **死亡病例:** <font color=#404040>{}</font>
\n ## **治愈病例:** <font color=#9DEA15> {}</font>
\n ## **新增病例:** <font color=#FFBF00> {}</font>\n *** \n """.format(data[4],
format(data[0], ','),
format(data[3], ','),
format(data[1], ','),
format(data[2], ','))
country_info = country_info + info_
talk_all = '\n# *风险等级TOP5*\n'
to_dingtalk_data = img + head + "***" + info + "***" + talk_all + "***" + country_info + msg_from
return to_dingtalk_data
if __name__=="__main__":
title = "新型冠状病毒疫情(国际)实时追踪"
text = get_now_info()
to_dingtalk()
|
py | 1a31cce74f4cb9372a48b42abce3732a14243062 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from builtins import __test_sink, __test_source
class Test1_C:
attribute = ...
def __init__(self):
self.instance = ...
class Test1_C1(Test1_C):
attribute = ...
def __init__(self):
self.instance = ...
class Test1_C2(Test1_C1):
attribute = ...
def __init__(self):
self.instance = ...
class Test1_D:
attribute = ...
def __init__(self):
self.instance = ...
class Test2_C:
def foo(self, attribute):
...
class Test2_C1(Test2_C):
def foo(self, attribute):
...
class Test2_C2(Test2_C1):
def foo(self, attribute):
...
class Test2_D:
def foo(self, attribute):
...
class UnrelatedClass:
attribute = ...
def __init__(self):
self.instance = ...
def foo(self, x):
...
def test1_alarm1(c: Test1_C1):
__test_sink(c.attribute)
def test1_alarm2(c: Test1_C1):
__test_sink(c.instance)
def test1_alarm3(c: Test1_C2):
__test_sink(c.attribute)
def test1_alarm4(c: Test1_C2):
__test_sink(c.instance)
def test1_alarm5(c: Test1_C):
__test_sink(c.attribute)
def test1_alarm6(c: Test1_C):
__test_sink(c.instance)
def test1_noalarm1(c: Test1_D):
__test_sink(c.attribute)
def test1_noalarm2(c: Test1_D):
__test_sink(c.instance)
def test2_alarm1(c: Test2_D):
c.foo(__test_source())
def test2_noalarm1(c: Test2_C1):
c.foo(__test_source())
def test2_noalarm2(c: Test2_C2):
c.foo(__test_source())
def test2_noalarm3(c: Test2_C):
c.foo(__test_source())
def misc_noalarm1(c: UnrelatedClass):
__test_sink(c.attribute)
def misc_noalarm2(c: UnrelatedClass):
__test_sink(c.instance)
def misc_noalarm3(c: UnrelatedClass):
c.foo(__test_source())
|
py | 1a31cf3e21e344c7267d7d1151404ffe294f1b9f | def encrypt(text,key):
output = ""
for i in range(len(text)):
char = text[i]
if (char.isupper()):
output += chr((ord(char) + key - 65) % 26 + 65)
elif (char.islower()):
output += chr((ord(char) + key - 97) % 26 + 97)
else:
output += char
return output
def decrypt(text,key):
output = ""
for i in range(len(text)):
char = text[i]
if (char.isupper()):
output += chr((ord(char) - 65 - key) % 26 + 65)
elif (char.islower()):
output += chr((ord(char) - 97 - key) % 26 + 97)
else:
output += char
return output
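# Illustrative round trip with a shift of 3 (not part of the original script):
#   encrypt('Hello, World!', 3) -> 'Khoor, Zruog!'
#   decrypt('Khoor, Zruog!', 3) -> 'Hello, World!'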
message = input('Enter your message:')
choice = input('What do you want to do?\nType 1 for encrypt:\nType 2 for decrypt:\n')
if (int(choice) == 1):
shift = input('Enter your key to encrypt (numbers to be shift):')
message = encrypt(message, int(shift))
print('Your encrypt message is: ', message)
exit()
elif (int(choice) == 2):
key = input('Enter your key for decryping (number that has been shifted):')
message = decrypt(message, int(key))
print('Your decrypt message is:', message)
exit()
else:
print('Error, Terminated.')
exit()
|
py | 1a31cf6c6396f4488def4ae46b562b4dadf6c9d7 | import json
from base64 import b64decode, b64encode
import numpy as np
from numpy.lib.format import dtype_to_descr, descr_to_dtype
def default(obj):
if isinstance(obj, (np.ndarray, np.generic)):
return {
'__numpy__': b64encode(obj.data if obj.flags.c_contiguous else obj.tobytes()).decode('ascii'),
'dtype': dtype_to_descr(obj.dtype),
'shape': obj.shape
}
raise TypeError(f'Object of type {type(obj)} is not JSON serializable')
def object_hook(dct):
if '__numpy__' in dct:
np_obj = np.frombuffer(b64decode(dct['__numpy__']), descr_to_dtype(dct['dtype']))
shape = dct['shape']
return np_obj.reshape(shape) if shape else np_obj[0] # Scalar test
return dct
_dumps = json.dumps
_loads = json.loads
_dump = json.dump
_load = json.load
def dumps(*args, **kwargs):
kwargs.setdefault('default', default)
return _dumps(*args, **kwargs)
def loads(*args, **kwargs):
kwargs.setdefault('object_hook', object_hook)
return _loads(*args, **kwargs)
def dump(*args, **kwargs):
kwargs.setdefault('default', default)
return _dump(*args, **kwargs)
def load(*args, **kwargs):
kwargs.setdefault('object_hook', object_hook)
return _load(*args, **kwargs)
def patch():
"""Monkey patches the json module in order to support serialization/deserialization of Numpy arrays and scalars."""
json.dumps = dumps
json.loads = loads
json.dump = dump
json.load = load
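# Minimal round-trip sketch (added for illustration; _roundtrip_demo is not part of the
# original module). It shows that an ndarray survives a dumps()/loads() round trip when
# the helpers above are used.
def _roundtrip_demo():
    arr = np.arange(3)
    restored = loads(dumps({'a': arr}))['a']
    assert np.array_equal(restored, arr)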
|
py | 1a31d010dcc4efe0a8a567323ce0788046d32432 | from django.core.management import BaseCommand
from wagtail.images import get_image_model
from cms.tagging import TAG_ENRICHMENT, I18N_TAGS
from core.utils import overwrite_media_domain
class Command(BaseCommand):
def handle(self, *args, **options):
get_image_model().objects.filter(tags__name='delete', collection__name='services').delete()
for k, v in TAG_ENRICHMENT.items():
for v1 in v:
for i in get_image_model().objects.filter(tags__name=k).exclude(tags__name=v1):
i.tags.add(v1)
for tid, text in I18N_TAGS:
for i in get_image_model().objects.filter(tags__id=tid).exclude(tags__name=text):
i.tags.add(text)
for image in get_image_model().objects.all().order_by('id'):
if image.collection.name == 'services':
image.make_semantic_tags()
if image.all_tags_str:
image.title = image.all_tags_str[:254]
image.url_800x800 = overwrite_media_domain(image.get_rendition('max-800x800|format-jpeg').url)
image.url_400x400 = overwrite_media_domain(image.get_rendition('max-400x400|format-jpeg').url)
image.url_200x200 = overwrite_media_domain(image.get_rendition('max-200x200|format-jpeg').url)
# wagtail admin interface
image.get_rendition('max-165x165|format-jpeg')
image.save()
|
py | 1a31d038cdf7e5e3147ffdc08260375a4725a244 | import json
from datacite import schema40
from osf.metadata import utils
from website.settings import DOMAIN
serializer_registry = {}
def register(schema_id):
"""Register classes into serializer_registry"""
def decorator(cls):
serializer_registry[schema_id] = cls
return cls
return decorator
class MetadataRecordSerializer(object):
def serialize_json(self, metadata_record):
raise NotImplementedError
def serialize_xml(self, metadata_record):
raise NotImplementedError
@classmethod
def serialize(cls, metadata_record, format='json'):
if format == 'json':
return cls.serialize_json(metadata_record)
if format == 'xml':
return cls.serialize_xml(metadata_record)
raise ValueError('Format "{}" is not supported.'.format(format))
@register(schema_id='datacite')
class DataciteMetadataRecordSerializer(MetadataRecordSerializer):
osf_schema = 'osf_datacite.json'
@classmethod
def serialize_json(cls, record):
osfstorage_file = record.file
target = osfstorage_file.target
doc = {
'creators': utils.datacite_format_creators(target.visible_contributors),
'titles': [
{
'title': osfstorage_file.name
},
{
'title': target.title,
'titleType': 'AlternativeTitle'
}
],
'publisher': 'Open Science Framework',
'dates': [
{
'date': str(osfstorage_file.created),
'dateType': 'Created'
},
{
'date': str(osfstorage_file.modified),
'dateType': 'Updated'
}
],
}
file_description = record.metadata.get('file_description')
if file_description:
doc['descriptions'] = [
{
'description': file_description,
'descriptionType': 'Abstract'
}
]
subject_list = []
if target.subjects.all().exists():
subject_list = utils.datacite_format_subjects(target)
tags_on_file = osfstorage_file.tags.values_list('name', flat=True)
for tag_name in tags_on_file:
subject_list.append({'subject': tag_name})
if subject_list:
doc['subjects'] = subject_list
resource_type = record.metadata.get('resource_type', '(:unas)')
doc['resourceType'] = {
'resourceType': resource_type,
'resourceTypeGeneral': utils.DATACITE_RESOURCE_TYPE_MAP.get(resource_type)
}
doc['publicationYear'] = str(osfstorage_file.created.year)
related_publication_doi = record.metadata.get('related_publication_doi')
if related_publication_doi:
doc['relatedIdentifiers'] = [
{
'relatedIdentifier': related_publication_doi,
'relatedIdentifierType': 'DOI',
'relationType': 'IsSupplementTo'
}
]
if osfstorage_file.guids.exists():
doc['alternateIdentifiers'] = [
{
'alternateIdentifier': DOMAIN + osfstorage_file.guids.first()._id,
'alternateIdentifierType': 'URL'
}
]
funders = record.metadata.get('funders')
if funders:
doc['fundingReferences'] = []
for funder in funders:
funder_info = {}
if funder.get('funding_agency'):
funder_info['funderName'] = funder['funding_agency']
if funder.get('grant_number'):
funder_info['awardNumber'] = {'awardNumber': funder['grant_number']}
doc['fundingReferences'].append(funder_info)
if getattr(target, 'node_license', None):
doc['rightsList'] = [utils.datacite_format_rights(target.node_license)]
latest_version_identifier = osfstorage_file.versions.all().order_by('-created').values_list('identifier', flat=True)
if latest_version_identifier:
doc['version'] = latest_version_identifier[0]
return json.dumps(doc)
@classmethod
def serialize_xml(cls, record):
data = json.loads(cls.serialize_json(record))
return schema40.tostring(data)
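# Usage sketch (illustrative; assumes an OSF FileMetadataRecord-like object named `record`):
#   DataciteMetadataRecordSerializer.serialize(record, format='json')  # JSON string
#   DataciteMetadataRecordSerializer.serialize(record, format='xml')   # Datacite XML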
|
py | 1a31d07fd4e46201d6a0217fc89fd73d5a24df6e | # -*- coding: utf-8 -*-
"""
Data conversion utility for numpy
=====================================
Convert cytoscape.js style graphs from numpy object.
http://www.numpy.org
"""
import numpy as np
def from_ndarray(data, name=None, labels=None, directed=False, weighted=False):
"""
This method converts an ndarray into a cytoscape.js style JSON object.
:param data: ndarray object.
:param name: This is the network name.
:param labels: This is the list of nodes' names
:param directed: If this parameter is True, the graph will be directed; otherwise it will be undirected.
:param weighted: If this parameter is True, the graph will have weighted edges; otherwise the edges will be unweighted.
:return: The cytoscape.js object.
"""
mat_dim = data.shape
if mat_dim[0] != mat_dim[1]:
raise ValueError('Data should be square matrix.')
data_size = mat_dim[0]
if labels is not None:
label_len = len(labels)
if label_len != data_size:
raise ValueError('Label length is not equal to the size of data.')
network_name = name
if network_name is None:
network_name = 'from ndarray'
g = {
'data': {
'name': network_name
},
'elements': {
'nodes': [],
'edges': []
}
}
g['elements']['nodes'] = __get_nodes(labels, data_size)
if weighted:
g['elements']['edges'] = __get_weighted_edges(matrix=data)
else:
g['elements']['edges'] = __get_unweighted_edges(matrix=data)
return g
def __get_nodes(labels, size):
nodes = []
if labels is None:
node_labels = np.arange(size)
else:
node_labels = labels
for idx, label in enumerate(node_labels):
nodes.append(__get_node(idx, label))
return nodes
def __get_node(node_id, name):
n = {
'data': {
'id': str(node_id),
'name': str(name)
}
}
return n
def __get_edges(matrix, labels):
pass
def __get_edge(source, target, weight=None):
e = {
'data': {
'id': source + '-' + target,
'source': source,
'target': target
}
}
if weight is not None:
e['data']['weight'] = weight
return e
def __get_unweighted_edges(matrix):
size = matrix.shape[0]
edges = []
row_idx = 0
for row in matrix:
idx = row_idx
while idx < size:
if row[idx] == 1:
e = __get_edge(str(row_idx), str(idx))
edges.append(e)
idx += 1
row_idx += 1
return edges
def __get_weighted_edges(matrix):
size = matrix.shape[0]
edges = []
row_idx = 0
for row in matrix:
idx = row_idx
while idx < size:
if not np.isnan(row[idx]):
e = __get_edge(str(row_idx), str(idx), weight=row[idx])
edges.append(e)
idx += 1
row_idx += 1
return edges
pass
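# Minimal usage sketch (added for illustration; _demo_from_ndarray is not part of the
# original module): a 3-node undirected triangle converted to cytoscape.js JSON.
def _demo_from_ndarray():
    adj = np.array([[0, 1, 1],
                    [1, 0, 1],
                    [1, 1, 0]])
    g = from_ndarray(adj, name='triangle', labels=['a', 'b', 'c'])
    assert len(g['elements']['nodes']) == 3
    assert len(g['elements']['edges']) == 3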
|
py | 1a31d0d6f8e846b61189219d3929f3c10631eeac | """
CLI command for "deploy" command
"""
import logging
import os
import click
from samcli.cli.cli_config_file import TomlProvider, configuration_option
from samcli.cli.main import aws_creds_options, common_options, pass_context, print_cmdline_args
from samcli.commands._utils.cdk_support_decorators import unsupported_command_cdk
from samcli.commands._utils.options import (
capabilities_option,
guided_deploy_stack_name,
metadata_option,
notification_arns_option,
parameter_override_option,
no_progressbar_option,
tags_option,
template_click_option,
signing_profiles_option,
stack_name_option,
s3_bucket_option,
image_repository_option,
image_repositories_option,
s3_prefix_option,
kms_key_id_option,
use_json_option,
force_upload_option,
resolve_s3_option,
role_arn_option,
resolve_image_repos_option,
)
from samcli.commands.deploy.utils import sanitize_parameter_overrides
from samcli.lib.telemetry.metric import track_command
from samcli.lib.cli_validation.image_repository_validation import image_repository_validation
from samcli.lib.utils import osutils
from samcli.lib.bootstrap.bootstrap import manage_stack
from samcli.lib.utils.version_checker import check_newer_version
from samcli.lib.bootstrap.companion_stack.companion_stack_manager import sync_ecr_stack
SHORT_HELP = "Deploy an AWS SAM application."
HELP_TEXT = """The sam deploy command creates a Cloudformation Stack and deploys your resources.
\b
Set the SAM_CLI_POLL_DELAY environment variable to a number of seconds in your shell to configure
how often SAM CLI checks the Stack state, which is useful when seeing throttling from CloudFormation.
\b
e.g. sam deploy --template-file packaged.yaml --stack-name sam-app --capabilities CAPABILITY_IAM
\b
"""
CONFIG_SECTION = "parameters"
LOG = logging.getLogger(__name__)
@click.command(
"deploy",
short_help=SHORT_HELP,
context_settings={"ignore_unknown_options": False, "allow_interspersed_args": True, "allow_extra_args": True},
help=HELP_TEXT,
)
@configuration_option(provider=TomlProvider(section=CONFIG_SECTION))
@click.option(
"--guided",
"-g",
required=False,
is_flag=True,
is_eager=True,
help="Specify this flag to allow SAM CLI to guide you through the deployment using guided prompts.",
)
@template_click_option(include_build=True)
@click.option(
"--no-execute-changeset",
required=False,
is_flag=True,
help="Indicates whether to execute the change set. "
"Specify this flag if you want to view your stack changes "
"before executing the change set. The command creates an AWS CloudFormation "
"change set and then exits without executing the change set. if "
"the changeset looks satisfactory, the stack changes can be made by "
"running the same command without specifying `--no-execute-changeset`",
)
@click.option(
"--fail-on-empty-changeset/--no-fail-on-empty-changeset",
default=True,
required=False,
is_flag=True,
help="Specify if the CLI should return a non-zero exit code if there are no "
"changes to be made to the stack. The default behavior is to return a "
"non-zero exit code.",
)
@click.option(
"--confirm-changeset/--no-confirm-changeset",
default=False,
required=False,
is_flag=True,
help="Prompt to confirm if the computed changeset is to be deployed by SAM CLI.",
)
@click.option(
"--disable-rollback/--no-disable-rollback",
default=False,
required=False,
is_flag=True,
help="Preserves the state of previously provisioned resources when an operation fails.",
)
@stack_name_option(callback=guided_deploy_stack_name) # pylint: disable=E1120
@s3_bucket_option(guided=True) # pylint: disable=E1120
@image_repository_option
@image_repositories_option
@force_upload_option
@s3_prefix_option
@kms_key_id_option
@role_arn_option
@use_json_option
@resolve_s3_option(guided=True) # pylint: disable=E1120
@resolve_image_repos_option
@metadata_option
@notification_arns_option
@tags_option
@parameter_override_option
@signing_profiles_option
@no_progressbar_option
@capabilities_option
@aws_creds_options
@common_options
@image_repository_validation
@pass_context
@track_command
@check_newer_version
@print_cmdline_args
@unsupported_command_cdk(alternative_command="cdk deploy")
def cli(
ctx,
template_file,
stack_name,
s3_bucket,
image_repository,
image_repositories,
force_upload,
no_progressbar,
s3_prefix,
kms_key_id,
parameter_overrides,
capabilities,
no_execute_changeset,
role_arn,
notification_arns,
fail_on_empty_changeset,
use_json,
tags,
metadata,
guided,
confirm_changeset,
signing_profiles,
resolve_s3,
resolve_image_repos,
config_file,
config_env,
disable_rollback,
):
"""
`sam deploy` command entry point
"""
# All logic must be implemented in the ``do_cli`` method. This helps with easy unit testing
do_cli(
template_file,
stack_name,
s3_bucket,
image_repository,
image_repositories,
force_upload,
no_progressbar,
s3_prefix,
kms_key_id,
parameter_overrides,
capabilities,
no_execute_changeset,
role_arn,
notification_arns,
fail_on_empty_changeset,
use_json,
tags,
metadata,
guided,
confirm_changeset,
ctx.region,
ctx.profile,
signing_profiles,
resolve_s3,
config_file,
config_env,
resolve_image_repos,
disable_rollback,
) # pragma: no cover
def do_cli(
template_file,
stack_name,
s3_bucket,
image_repository,
image_repositories,
force_upload,
no_progressbar,
s3_prefix,
kms_key_id,
parameter_overrides,
capabilities,
no_execute_changeset,
role_arn,
notification_arns,
fail_on_empty_changeset,
use_json,
tags,
metadata,
guided,
confirm_changeset,
region,
profile,
signing_profiles,
resolve_s3,
config_file,
config_env,
resolve_image_repos,
disable_rollback,
):
"""
Implementation of the ``cli`` method
"""
from samcli.commands.package.package_context import PackageContext
from samcli.commands.deploy.deploy_context import DeployContext
from samcli.commands.deploy.guided_context import GuidedContext
from samcli.commands.deploy.exceptions import DeployResolveS3AndS3SetError
if guided:
# Allow for a guided deploy to prompt and save those details.
guided_context = GuidedContext(
template_file=template_file,
stack_name=stack_name,
s3_bucket=s3_bucket,
image_repository=image_repository,
image_repositories=image_repositories,
s3_prefix=s3_prefix,
region=region,
profile=profile,
confirm_changeset=confirm_changeset,
capabilities=capabilities,
signing_profiles=signing_profiles,
parameter_overrides=parameter_overrides,
config_section=CONFIG_SECTION,
config_env=config_env,
config_file=config_file,
disable_rollback=disable_rollback,
)
guided_context.run()
else:
if resolve_s3:
if bool(s3_bucket):
raise DeployResolveS3AndS3SetError()
s3_bucket = manage_stack(profile=profile, region=region)
click.echo(f"\n\t\tManaged S3 bucket: {s3_bucket}")
click.echo("\t\tA different default S3 bucket can be set in samconfig.toml")
click.echo("\t\tOr by specifying --s3-bucket explicitly.")
# TODO Refactor resolve-s3 and resolve-image-repos into one place
# after we figure out how to enable resolve-images-repos in package
if resolve_image_repos:
image_repositories = sync_ecr_stack(
template_file, stack_name, region, s3_bucket, s3_prefix, image_repositories
)
with osutils.tempfile_platform_independent() as output_template_file:
with PackageContext(
template_file=template_file,
s3_bucket=guided_context.guided_s3_bucket if guided else s3_bucket,
s3_prefix=guided_context.guided_s3_prefix if guided else s3_prefix,
image_repository=guided_context.guided_image_repository if guided else image_repository,
image_repositories=guided_context.guided_image_repositories if guided else image_repositories,
output_template_file=output_template_file.name,
kms_key_id=kms_key_id,
use_json=use_json,
force_upload=force_upload,
no_progressbar=no_progressbar,
metadata=metadata,
on_deploy=True,
region=guided_context.guided_region if guided else region,
profile=profile,
signing_profiles=guided_context.signing_profiles if guided else signing_profiles,
) as package_context:
package_context.run()
# 500ms of sleep time between stack checks and describe stack events.
DEFAULT_POLL_DELAY = 0.5
try:
poll_delay = float(os.getenv("SAM_CLI_POLL_DELAY", str(DEFAULT_POLL_DELAY)))
except ValueError:
poll_delay = DEFAULT_POLL_DELAY
if poll_delay <= 0:
poll_delay = DEFAULT_POLL_DELAY
with DeployContext(
template_file=output_template_file.name,
stack_name=guided_context.guided_stack_name if guided else stack_name,
s3_bucket=guided_context.guided_s3_bucket if guided else s3_bucket,
image_repository=guided_context.guided_image_repository if guided else image_repository,
image_repositories=guided_context.guided_image_repositories if guided else image_repositories,
force_upload=force_upload,
no_progressbar=no_progressbar,
s3_prefix=guided_context.guided_s3_prefix if guided else s3_prefix,
kms_key_id=kms_key_id,
parameter_overrides=sanitize_parameter_overrides(guided_context.guided_parameter_overrides)
if guided
else parameter_overrides,
capabilities=guided_context.guided_capabilities if guided else capabilities,
no_execute_changeset=no_execute_changeset,
role_arn=role_arn,
notification_arns=notification_arns,
fail_on_empty_changeset=fail_on_empty_changeset,
tags=tags,
region=guided_context.guided_region if guided else region,
profile=profile,
confirm_changeset=guided_context.confirm_changeset if guided else confirm_changeset,
signing_profiles=guided_context.signing_profiles if guided else signing_profiles,
use_changeset=True,
disable_rollback=guided_context.disable_rollback if guided else disable_rollback,
poll_delay=poll_delay,
) as deploy_context:
deploy_context.run()
|
py | 1a31d1b1f617458bd575295ecb5ad49c25b6a0f2 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServiceAssociationLinksOperations(object):
"""ServiceAssociationLinksOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ServiceAssociationLinksListResult"
"""Gets a list of service association links for a subnet.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceAssociationLinksListResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.ServiceAssociationLinksListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ServiceAssociationLinksListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceAssociationLinksListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}/ServiceAssociationLinks'} # type: ignore
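# Usage sketch (illustrative; assumes an authenticated NetworkManagementClient named `client`
# for api-version 2020-03-01):
#   links = client.service_association_links.list('my-rg', 'my-vnet', 'my-subnet')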
|
py | 1a31d285e29dd85d201bcaa5408cd210f6e039c7 | import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.activation import LeakyReLU
from utils import initialize_weights_he
# The MNIST datasets are hosted on yann.lecun.com that has moved under CloudFlare protection
# Run this script to enable the datasets download
# Reference: https://github.com/pytorch/vision/issues/1938
from six.moves import urllib
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
from Networks import ResNetBlock
import torch
import numpy as np
from torchvision import datasets
import torchvision.transforms as transforms
import cv2
from zu_resnet import ResNetEncoder
# define the NN architecture
class ConvAutoencoder_NAV2(nn.Module):
def __init__(self, imgChannels=1, zDim=512,featureDim=12*10*10, fix_params=False):
super(ConvAutoencoder_NAV2, self).__init__()
self.featureDim = featureDim
## encoder layers ##
# https://stackoverflow.com/questions/39691902/ordering-of-batch-normalization-and-dropout
self.encode = nn.Sequential(
nn.Conv2d(imgChannels, 32, 5, padding=2) ,
nn.BatchNorm2d(32),
nn.ReLU(),
ResNetBlock(32,64,3),
ResNetBlock(64,128,3),
ResNetBlock(128,256,3),
ResNetBlock(256,128,3), # 64x5x5 = 3200 feature vector
).apply(initialize_weights_he)
## decoder layers ##
## a kernel of 2 and a stride of 2 will increase the spatial dims by 2
self.decode = nn.Sequential(
nn.ConvTranspose2d(128, 256, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(256, 128, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(128, 64, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(64, imgChannels, 2, stride=2),
).apply(initialize_weights_he)
def fix_params(self):
for param in self.encode.parameters():
param.requires_grad = False
for param in self.decode.parameters():
param.requires_grad = False
def encode_(self, x):
return self.encode(x)
def forward(self, x):
x = self.encode(x)
# print(x.shape)
# x = x.reshape(64,5,5)
x = self.decode(x)
x = torch.sigmoid(x)
return x
# define the NN architecture
class ConvAutoencoder_NAV3(nn.Module):
def __init__(self, imgChannels=1, zDim=512,featureDim=12*10*10, fix_params=False):
super(ConvAutoencoder_NAV3, self).__init__()
self.featureDim = featureDim
## encoder layers ##
# https://stackoverflow.com/questions/39691902/ordering-of-batch-normalization-and-dropout
self.encode = ResNetEncoder(12,blocks_sizes=[64,128,256,384],deepths=[2,2,2,2])
## decoder layers ##
## a kernel of 2 and a stride of 2 will increase the spatial dims by 2
self.decode = nn.Sequential(
nn.ConvTranspose2d(384, 512, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(512, 256, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(256, 128, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(128, 64, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(64, imgChannels, 2, stride=2)
).apply(initialize_weights_he)
def fix_params(self):
for param in self.encode.parameters():
param.requires_grad = False
for param in self.decode.parameters():
param.requires_grad = False
def encode_(self, x):
return self.encode(x)
def forward(self, x):
x = self.encode(x)
# print(x.shape)
# x = x.reshape(64,5,5)
x = self.decode(x)
x = torch.sigmoid(x)
return x
# define the NN architecture
class ConvAutoencoder_NAV4(nn.Module):
def __init__(self, imgChannels=1, zDim=512,featureDim=12*10*10, fix_params=False):
super(ConvAutoencoder_NAV4, self).__init__()
self.featureDim = featureDim
## encoder layers ##
# https://stackoverflow.com/questions/39691902/ordering-of-batch-normalization-and-dropout
self.encode = nn.Sequential(
ResNetBlock(imgChannels,64,3),
ResNetBlock(64,128,3),
ResNetBlock(128,256,3),
ResNetBlock(256,128,3), # 64x5x5 = 3200 feature vector
).apply(initialize_weights_he)
## decoder layers ##
## a kernel of 2 and a stride of 2 will increase the spatial dims by 2
self.decode = nn.Sequential(
nn.ConvTranspose2d(128, 256, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(256, 128, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(128, 64, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(64, imgChannels, 2, stride=2),
).apply(initialize_weights_he)
def fix_params(self):
for param in self.encode.parameters():
param.requires_grad = False
for param in self.decode.parameters():
param.requires_grad = False
def encode_(self, x):
return self.encode(x)
def forward(self, x):
x = self.encode(x)
# print(x.shape)
# x = x.reshape(64,5,5)
x = self.decode(x)
x = torch.sigmoid(x)
return x
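# Illustrative shape check (added; not part of the original training script). It assumes each
# ResNetBlock halves the spatial dims, consistent with the "64x5x5 = 3200 feature vector"
# comments above, so a 12-channel 80x80 stack should be reconstructed at its input size.
def _sanity_check_nav4():
    model = ConvAutoencoder_NAV4(imgChannels=12)
    dummy = torch.zeros(1, 12, 80, 80)
    with torch.no_grad():
        out = model(dummy)
    assert out.shape == dummy.shape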
# define the NN architecture
class ConvAutoencoder_NAV6(nn.Module):
def __init__(self, imgChannels=1, zDim=1024,featureDim=64*5*5, fix_params=False):
super(ConvAutoencoder_NAV6, self).__init__()
self.featureDim = featureDim
## encoder layers ##
# https://stackoverflow.com/questions/39691902/ordering-of-batch-normalization-and-dropout
self.encode = nn.Sequential(
ResNetBlock(imgChannels,64,3),
ResNetBlock(64,128,3),
ResNetBlock(128,256,3),
ResNetBlock(256,64,3), # 64x5x5 = 3200 feature vector,
nn.Flatten(),
nn.Linear(featureDim,zDim)
).apply(initialize_weights_he)
self.FC_1 = nn.Linear(zDim, featureDim)
## decoder layers ##
## a kernel of 2 and a stride of 2 will increase the spatial dims by 2
self.decode = nn.Sequential(
nn.ConvTranspose2d(64, 128, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(128, 256, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(256, 128, 2, stride=2),
nn.ReLU(),
nn.ConvTranspose2d(128, 64, 2, stride=2),
).apply(initialize_weights_he)
def fix_params(self):
for param in self.encode.parameters():
param.requires_grad = False
for param in self.decode.parameters():
param.requires_grad = False
def encode_(self, x):
return self.encode(x)
def forward(self, x):
x = self.encode(x)
x = F.relu(self.FC_1(x))  # map the latent vector back to the flattened feature size
x = x.view(-1, 64, 5, 5)  # assumes the default featureDim of 64*5*5
x = self.decode(x)
x = torch.sigmoid(x)
return x
if __name__ == '__main__':
GPU = True
device_idx = 0
if GPU:
device = torch.device("cuda:" + str(device_idx) if torch.cuda.is_available() else "cpu")
else:
device = torch.device("cpu")
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
channels = 3
n_s_f = 4
inputshape = (80,80,channels)
cv2_resz = (80,80)
imshape = (channels,*cv2_resz)
show_shape = (*cv2_resz,channels)
model = ConvAutoencoder_NAV4(imgChannels=channels*n_s_f)
# model.load_state_dict(torch.load("/home/developer/Training_results/Qricculum_Learning/big_and_small/final/Models/1/VAE_20"))
model.load_state_dict(torch.load("/home/developer/Training_results/Qricculum_Learning/big_and_small/hoffentlich/VAE_80803_615"))
model.eval()
model.to(device)
train_images = []
test_images = []
moving_database = np.load("/home/developer/Training_results/Qricculum_Learning/big_and_small/hoffentlich/VAE_dtb_12_8080_final_hoffentlich.npy")
# moving_database = np.load("/home/developer/VAE_dtb_12_128128_final.npy")
# moving_database = np.load("/home/developer/Training_results/Qricculum_Learning/big_and_small/3/VAE_dtb_3_8080.npy")
print(moving_database.shape)
print(moving_database[0])
stacked_images = []
train_data = (moving_database[0:45000]/ 2**8).astype(np.float32)
test_data = (moving_database[45000:] / 2**8).astype(np.float32)
print(train_data.shape)
print(test_data.shape)
# Create training and test dataloaders
num_workers = 10
# how many samples per batch to load
batch_size = 32
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers,shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers,shuffle=True)
import matplotlib.pyplot as plt
infostring = "net: \n" + str(model) + " \n \n \n"
print(infostring)
filename = "/home/developer/Training_results/VA/"+"Infofile.txt"
text_file = open(filename, "w")
n = text_file.write(infostring)
text_file.close()
learning_rate = 0.01
# specify loss function
criterion = nn.MSELoss()
# specify loss function
# torch.optim.Adam
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# from torch.optim.lr_scheduler import ExponentialLR
from torch.optim.lr_scheduler import MultiStepLR
# scheduler1 = ExponentialLR(optimizer, gamma=0.90)
scheduler2 = MultiStepLR(optimizer, milestones=[30,50,70,90], gamma=0.25)
# number of epochs to train the model
n_epochs = 100
# for epoch in range(1, n_epochs+1):
# # monitor training loss
# train_loss = 0.0
# test_loss = 0.0
# ##################
# # train the model #
# ##################
# for data in train_loader:
# # _ stands in for labels, here
# # no need to flatten images
# images = data
# images = images.to(device)
# # clear the gradients of all optimized variables
# optimizer.zero_grad()
# # forward pass: compute predicted outputs by passing inputs to the model
# outputs = model(images).to(device)
# # output_decoder = decoder(images)
# # print(output_decoder)
# # print(output_decoder.shape)
# # calculate the loss
# loss = criterion(outputs, images)
# # backward pass: compute gradient of the loss with respect to model parameters
# loss.backward()
# # perform a single optimization step (parameter update)
# optimizer.step()
# # update running training loss
# train_loss += loss.item()*images.size(0)
# # print avg training statistics
# train_loss = train_loss/len(train_loader)
# print('Epoch: {} \tTraining Loss: {:.6f}'.format(
# epoch,
# train_loss
# ))
# for test_i_data in test_loader:
# # _ stands in for labels, here
# # no need to flatten images
# test_images = test_i_data
# test_images = test_images.to(device)
# # clear the gradients of all optimized variables
# with torch.no_grad():
# # forward pass: compute predicted outputs by passing inputs to the model
# outputs = model(test_images).to(device)
# loss = criterion(outputs, test_images)
# test_loss += loss.item()*test_images.size(0)
# print('Epoch: {} \tTesting Loss: {:.6f}'.format(
# epoch,
# test_loss
# ))
# torch.save(model.state_dict(), "/home/developer/Training_results/VA/VAE_RESNET18"+str(epoch))
# # scheduler1.step()
# scheduler2.step()
# obtain one batch of test images
dataiter = iter(test_loader)
while True:
show_images = dataiter.next()
show_images = show_images.to(device)
# get sample outputs
output = model(show_images)
# prep images for display
show_images = show_images.detach().cpu().numpy()
# output is resized into a batch of images
output = output.view(batch_size,n_s_f*channels,*cv2_resz)
# use detach when it's an output that requires_grad
output = output.detach().cpu().numpy()
print(output.shape)
print(show_images.shape)
# torch.save(model.state_dict(), "/home/developer/Training_results/VAE")
# plot the first ten input images and then reconstructed images
fig, axes = plt.subplots(nrows=2, ncols=4, sharex=True, sharey=True, figsize=(20,20))
axes[0][0].imshow(show_images[0][0:3].reshape(show_shape))
axes[0][0].get_xaxis().set_visible(False)
axes[0][0].get_yaxis().set_visible(False)
axes[0][1].imshow(show_images[0][3:6].reshape(show_shape))
axes[0][1].get_xaxis().set_visible(False)
axes[0][1].get_yaxis().set_visible(False)
axes[0][2].imshow(show_images[0][6:9].reshape(show_shape))
axes[0][2].get_xaxis().set_visible(False)
axes[0][2].get_yaxis().set_visible(False)
axes[0][3].imshow(show_images[0][9:12].reshape(show_shape))
axes[0][3].get_xaxis().set_visible(False)
axes[0][3].get_yaxis().set_visible(False)
axes[1][0].imshow(output[0][0:3].reshape(show_shape))
axes[1][0].get_xaxis().set_visible(False)
axes[1][0].get_yaxis().set_visible(False)
axes[1][1].imshow(output[0][3:6].reshape(show_shape))
axes[1][1].get_xaxis().set_visible(False)
axes[1][1].get_yaxis().set_visible(False)
axes[1][2].imshow(output[0][6:9].reshape(show_shape))
axes[1][2].get_xaxis().set_visible(False)
axes[1][2].get_yaxis().set_visible(False)
axes[1][3].imshow(output[0][9:12].reshape(show_shape))
axes[1][3].get_xaxis().set_visible(False)
axes[1][3].get_yaxis().set_visible(False)
# input images on top row, reconstructions on bottom
# for show_images, row in zip([show_images, output], axes):
# for img, ax in zip(show_images, row):
# ax.imshow(img[0:3].reshape(show_shape))
# ax.get_xaxis().set_visible(False)
# ax.get_yaxis().set_visible(False)
plt.show() |
py | 1a31d353dcf6f7bcbd7ba2f1cf6e4169e4e26155 | """
Parsed Config File Produces Expected Behaviors - fixed parameters
"""
import inspect
import os
import deeplenstronomy.deeplenstronomy as dl
doc = """
\tRunning tests from test_expected_behaviors_fixed.py
\tThe tests included in this module demonstrate that the values of fixed parameters
\tin the main configuration file are accurately utilized in the simulation and
\tappear as expected in the simulation metadata. The functions are:
\t\t- test_dataset_section
\t\t\tTesting that NAME, OUTDIR, and SEED properties from the DATASET section of
\t\t\tthe main config file were properly interpreted and utilized as properties
\t\t\tof the generated dataset
\t\t- test_cosmology_section
\t\t\tTesting that the cosmological parameters from the COSMOLOGY section appear
\t\t\tas expected in the simulation metadata
\t\t- test_image_size
\t\t\tTesting that the IMAGE.numPix keyword produced simulated images with the
\t\t\texpected size.
\t\t- test_bands
\t\t\tTesting that the BANDS argument was interpreted properly and produced an
\t\t\tarray of simulated images with the expected number of bands
"""
print(doc)
# Below are all of the possible operation modes
kwargs_sets = {0: {}, # default arguments
1: {'save_to_disk': True},
2: {'save_to_disk': True, 'image_file_format': 'h5'},
3: {'save_to_disk': True, 'skip_image_generation': True},
4: {'store_in_memory': False},
5: {'store_sample': True},
6: {'skip_image_generation': True, 'survey': 'des'},
7: {'solve_lens_equation': True},
8: {'return_planes': True}
}
f = open('status.txt', 'r')
current_test = int(f.read().strip())
f.close()
# Generate the dataset
kwargs_set = kwargs_sets[current_test]
config_filename = 'config.yaml'
dataset = dl.make_dataset(config_filename, **kwargs_set)
has_images = [hasattr(dataset, x + '_images') for x in dataset.configurations]
has_metadata = [hasattr(dataset, x + '_metadata')
for x in dataset.configurations]
has_planes = [hasattr(dataset, x + '_planes') for x in dataset.configurations]
images_exist = [os.path.exists(dataset.outdir +'/' + x + '_images.' +
dataset.arguments['image_file_format'])
for x in dataset.configurations]
metadata_exist = [os.path.exists(dataset.outdir +'/' + x + '_metadata.csv')
for x in dataset.configurations]
planes_exist = [os.path.exists(dataset.outdir +'/' + x + '_planes.' +
dataset.arguments['image_file_format'])
for x in dataset.configurations]
# Begin test functions
def test_dataset_section():
section = dataset.config_dict['DATASET']['PARAMETERS']
assert dataset.size == section['SIZE']
assert dataset.outdir == section['OUTDIR']
if 'SEED' in section.keys():
assert dataset.seed == section['SEED']
def test_cosmology_section():
if all(has_metadata):
section = dataset.config_dict['COSMOLOGY']['PARAMETERS']
for conf in dataset.configurations:
for band in dataset.bands:
for param, value in section.items():
md = eval(f'dataset.{conf}_metadata["{param}-{band}"]')
assert all(md.values == value)
def test_image_size():
if all(has_images):
for conf in dataset.configurations:
x = eval(f'dataset.{conf}_images').shape[-2]
y = eval(f'dataset.{conf}_images').shape[-1]
assert dataset.config_dict['IMAGE']['PARAMETERS']['numPix'] == x
assert dataset.config_dict['IMAGE']['PARAMETERS']['numPix'] == y
def test_bands():
config_bands = dataset.config_dict['SURVEY']['PARAMETERS']['BANDS'].split(',')
assert config_bands == dataset.bands
if all(has_images):
for conf in dataset.configurations:
b = eval(f'dataset.{conf}_images').shape[-3]
assert len(config_bands) == b
if all(has_metadata):
get_band = lambda col: col.split('-')[-1]
for conf in dataset.configurations:
md = eval(f'dataset.{conf}_metadata').columns
assert all([band in config_bands for band in [get_band(c) for c in md]])
|
py | 1a31d4da1d3c402dc70b70953617423690745724 | from argparse import ArgumentParser
from mmdet.apis import inference_detector, init_detector, show_result_pyplot
def main():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
args = parser.parse_args()
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
result = inference_detector(model, args.img)
# show the results
show_result_pyplot(model, args.img, result, score_thr=args.score_thr)
if __name__ == '__main__':
main()
|
py | 1a31d509e9f6211b533947cb53a6557f8f5fb0c2 | from django.urls import path
from . import views
app_name = 'matcher'
urlpatterns = [
path('', views.roulette_list_active, name='list_active'),
path('archive', views.roulette_list_archive, name='list_archive'),
path('all', views.roulette_list_all, name='list_all'),
path('roulette/<int:roulette_id>/', views.roulette, name='roulette'),
path('roulette/<int:roulette_id>/run/', views.run_roulette, name='run'),
path('roulette/<int:roulette_id>/submit/',
views.submit_roulette, name='submit'),
]
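# Usage sketch (illustrative): with this urlconf included under some project prefix,
#   reverse('matcher:roulette', args=[42]) resolves to '<prefix>/roulette/42/'.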
|
py | 1a31d565b82e1840f039185bdf2cf317d4532815 | import datetime
import json
from itertools import chain
from io import BytesIO
from django.template.loader import get_template
from xlsxwriter.workbook import Workbook
from xhtml2pdf import pisa
import xlrd
import logging
from django.db import transaction
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.db.models import Max
from django.http import HttpResponse, HttpResponseRedirect
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, render, redirect
from django.template.loader import render_to_string
from django.core.exceptions import ObjectDoesNotExist
from django.utils import timezone
from notification.views import AssistantshipClaim_notify,AssistantshipClaim_acad_notify,AssistantshipClaim_account_notify,AssistantshipClaim_faculty_notify
from applications.academic_information.models import (Calendar, Course, Student,Curriculum_Instructor, Curriculum,
Student_attendance)
from applications.central_mess.models import(Monthly_bill, Payments)
from applications.programme_curriculum.models import (CourseSlot, Course as Courses, Batch, Semester)
from applications.globals.models import (DepartmentInfo, Designation,
ExtraInfo, Faculty, HoldsDesignation)
from .models import (BranchChange, CoursesMtech, InitialRegistration, StudentRegistrationChecks,
MinimumCredits, Register, Thesis, FinalRegistration, ThesisTopicProcess,
Constants, FeePayments, TeachingCreditRegistration, SemesterMarks,
MarkSubmissionCheck, Dues,AssistantshipClaim, MTechGraduateSeminarReport,
PhDProgressExamination,CourseRequested, course_registration, MessDue, Assistantship_status)
from notification.views import academics_module_notif
from .forms import BranchChangeForm
demo_date = timezone.now()
# demo_date = demo_date - datetime.timedelta(days = 180)
# demo_date = demo_date + datetime.timedelta(days = 180)
# demo_date = demo_date + datetime.timedelta(days = 3)
# demo_date = demo_date - datetime.timedelta(days = 5)
student_status = None
hod_status = None
account_status = None
available_cse_seats = 100
available_ece_seats = 100
available_me_seats = 100
# assistantship_status = Assistantship_status.objects.all()
# for obj in assistantship_status:
# student_status = obj.student_status
# hod_status = obj.hod_status
# account_status = obj.account_status
@login_required(login_url='/accounts/login')
def academic_procedures_redirect(request):
return HttpResponseRedirect('/academic-procedures/main/')
@login_required(login_url='/accounts/login')
def main(request):
return HttpResponseRedirect('/academic-procedures/main/')
@login_required(login_url='/accounts/login')
def academic_procedures(request):
current_user = get_object_or_404(User, username=request.user.username)
#extra info details , user id used as main id
user_details = ExtraInfo.objects.select_related('user','department').get(user = request.user)
des = HoldsDesignation.objects.all().select_related().filter(user = request.user).first()
if str(des.designation) == "student":
obj = Student.objects.select_related('id','id__user','id__department').get(id = user_details.id)
return HttpResponseRedirect('/academic-procedures/stu/')
# return HttpResponseRedirect('/logout/')
elif str(des.designation) == "Associate Professor" or str(des.designation) == "Professor" or str(des.designation) == "Assistant Professor" :
return HttpResponseRedirect('/academic-procedures/fac/')
# return HttpResponseRedirect('/logout/')
elif str(request.user) == "acadadmin" :
return HttpResponseRedirect('/aims/')
elif str(request.user) == "rizwan":
return HttpResponseRedirect('/academic-procedures/account/')
elif str(request.user) == "talib":
Messdue = MessDue.objects.all()
dues = Dues.objects.all()
return render(request,
'../templates/academic_procedures/messdueassistant.html' ,
{
'Mess_due' : Messdue,
'dues' : dues,
})
else:
return HttpResponse('person not found')
#
#
#
#
#
#
@login_required(login_url='/accounts/login')
def academic_procedures_faculty(request):
current_user = get_object_or_404(User, username=request.user.username)
#extra info details , user id used as main id
user_details = ExtraInfo.objects.select_related('user','department').get(user = request.user)
des = HoldsDesignation.objects.all().select_related().filter(user = request.user).first()
fac_id = user_details
fac_name = user_details.user.first_name + " " + user_details.user.last_name
if str(des.designation) == "student":
return HttpResponseRedirect('/academic-procedures/main/')
elif str(request.user) == "acadadmin":
return HttpResponseRedirect('/academic-procedures/main/')
elif str(des.designation) == "Associate Professor" or str(des.designation) == "Professor" or str(des.designation) == "Assistant Professor":
object_faculty = Faculty.objects.select_related('id','id__user','id__department').get(id = user_details)
month = int(demo_date.month)
sem = []
if month>=7 and month<=12:
sem = [1,3,5,7]
else:
sem = [2,4,6,8]
student_flag = False
fac_flag = True
Faculty_department =user_details.department
# temp = Curriculum.objects.all().filter(course_code = "CS315L").first()
# Curriculum_Instructor.objects.create(curriculum_id = temp, instructor_id = user_details)
#thesis_supervision_request_list = ThesisTopicProcess.objects.all()
thesis_supervision_request_list = ThesisTopicProcess.objects.all().select_related().filter(supervisor_id = object_faculty)
approved_thesis_request_list = thesis_supervision_request_list.filter(approval_supervisor = True)
pending_thesis_request_list = thesis_supervision_request_list.filter(pending_supervisor = True)
faculty_list = get_faculty_list()
assistantship_request_list = AssistantshipClaim.objects.all()
hod_assistantship_request_list = assistantship_request_list.filter(ta_supervisor_remark = True).filter(thesis_supervisor_remark = True).filter(hod_approval = False)
hod_approved_assistantship = assistantship_request_list.filter(ta_supervisor_remark = True).filter(thesis_supervisor_remark = True).filter(acad_approval = False)
ta_approved_assistantship_request_list = AssistantshipClaim.objects.all().filter(ta_supervisor_remark=True)
thesis_approved_assistantship_request_list = AssistantshipClaim.objects.all().filter(thesis_supervisor_remark=True)
approved_assistantship_request_list = ta_approved_assistantship_request_list | thesis_approved_assistantship_request_list
mtechseminar_request_list = MTechGraduateSeminarReport.objects.all().filter(Overall_grade = '')
phdprogress_request_list = PhDProgressExamination.objects.all().filter(Overall_grade = '')
courses_list = Curriculum_Instructor.objects.select_related('curriculum_id','instructor_id','curriculum_id__course_id','instructor_id__department','instructor_id__user').filter(instructor_id=user_details).filter(curriculum_id__sem__in = sem)
r = range(4)
return render(
request,
'../templates/academic_procedures/academicfac.html' ,
{
'student_flag' : student_flag,
'fac_flag' : fac_flag,
'hod_flag' : hod_status,
'thesis_supervision_request_list' : thesis_supervision_request_list,
'pending_thesis_request_list' : pending_thesis_request_list,
'approved_thesis_request_list' : approved_thesis_request_list,
'faculty_list' : faculty_list,
'courses_list' : courses_list,
'fac_id': fac_id,
'fac_name' : fac_name,
'department' : Faculty_department,
'assistantship_request_list' : assistantship_request_list,
'approved_assistantship_request_list' : approved_assistantship_request_list,
'hod_assistantship_request_list' : hod_assistantship_request_list,
'hod_approved_assistantship' : hod_approved_assistantship,
'mtechseminar_request_list' : mtechseminar_request_list,
'phdprogress_request_list' : phdprogress_request_list,
'r' : r,
})
else:
HttpResponse("user not found")
@login_required(login_url='/accounts/login')
def account(request):
assistant_account_list = AssistantshipClaim.objects.filter(ta_supervisor_remark = True).filter(thesis_supervisor_remark = True)
assistant_pen_list = AssistantshipClaim.objects.filter(ta_supervisor_remark = True).filter(thesis_supervisor_remark = True).filter(acad_approval = True).filter(account_approval = False)
assistant_account_length = len(assistant_account_list.filter(acad_approval = True).filter(account_approval = False))
return render(request,
'../templates/ais/account.html' ,
{
'assistant_account_length' : assistant_account_length,
'assistant_account_list' : assistant_account_list ,
'assistant_pen_list' : assistant_pen_list,
'account_flag' : account_status,
})
@login_required(login_url='/accounts/login')
def academic_procedures_student(request):
current_user = get_object_or_404(User, username=request.user.username)
user_details = ExtraInfo.objects.select_related('user','department').get(id = request.user)
des = HoldsDesignation.objects.all().select_related().filter(user = request.user).first()
if str(des.designation) == "student":
obj = Student.objects.select_related('id','id__user','id__department').get(id = user_details.id)
if obj.programme.upper() == "PHD" :
student_flag = True
ug_flag = False
masters_flag = False
phd_flag = True
fac_flag = False
des_flag = False
elif obj.programme.upper() == "M.DES" :
student_flag = True
ug_flag = False
masters_flag = True
phd_flag = False
fac_flag = False
des_flag = True
elif obj.programme.upper() == "B.DES" :
student_flag = True
ug_flag = True
masters_flag = False
phd_flag = False
fac_flag = False
des_flag = True
elif obj.programme.upper() == "M.TECH" :
student_flag = True
ug_flag = False
masters_flag = True
phd_flag = False
fac_flag = False
des_flag = False
elif obj.programme.upper() == "B.TECH" :
student_flag = True
ug_flag = True
masters_flag = False
phd_flag = False
fac_flag = False
des_flag = False
else :
return HttpResponse("Student has no record")
# masters_flag=True
current_date = demo_date.date()
year = demo_date.year
registers = get_student_register(user_details.id)
user_sem = get_user_semester(request.user, ug_flag, masters_flag, phd_flag)
user_branch = get_user_branch(user_details)
batch = obj.batch_id
curr_id = batch.curriculum
curr_sem_id = Semester.objects.get(curriculum = curr_id, semester_no = obj.curr_semester_no)
try:
next_sem_id = Semester.objects.get(curriculum = curr_id, semester_no = obj.curr_semester_no+1)
except Exception as e:
next_sem_id = curr_sem_id
student_registration_check_pre = get_student_registrtion_check(obj,next_sem_id)
student_registration_check_final = get_student_registrtion_check(obj,next_sem_id)
cpi = get_cpi(user_details.id)
# branch change flag
branchchange_flag=True # True for testing, to be initialised as False
if user_sem==2:
branchchange_flag=True
pre_registration_date_flag = get_pre_registration_eligibility(current_date)
final_registration_date_flag = get_final_registration_eligibility(current_date)
add_or_drop_course_date_flag = get_add_or_drop_course_date_eligibility(current_date)
pre_registration_flag = False
final_registration_flag = False
if(student_registration_check_pre):
pre_registration_flag = student_registration_check_pre.pre_registration_flag
if(student_registration_check_final):
final_registration_flag = student_registration_check_final.final_registration_flag
acad_year = get_acad_year(user_sem, year)
currently_registered_courses = get_currently_registered_courses(user_details.id, user_sem)
next_sem_branch_course = get_sem_courses(next_sem_id, batch)
current_sem_branch_course = get_sem_courses(curr_sem_id, batch)
next_sem_registration_courses = get_sem_courses(next_sem_id, batch)
final_registration_choice, unavailable_courses_nextsem = get_final_registration_choices(next_sem_registration_courses,batch.year)
currently_registered_course = get_currently_registered_course(obj,obj.curr_semester_no)
current_credits = get_current_credits(currently_registered_course)
cur_cpi=0.0
details = {
'current_user': current_user,
'year': acad_year,
'user_sem': user_sem,
'user_branch' : str(user_branch),
'cpi' : cpi,
}
cur_cpi=details['cpi']
try:
pre_registered_course = InitialRegistration.objects.all().filter(student_id = user_details.id,semester_id = next_sem_id)
pre_registered_course_show = pre_registered_course
except Exception as e:
pre_registered_course = None
pre_registered_course_show = None
try:
final_registered_course = FinalRegistration.objects.all().filter(student_id = user_details.id,semester_id = next_sem_id)
add_courses_options = get_add_course_options(current_sem_branch_course, currently_registered_course, batch.year)
drop_courses_options = get_drop_course_options(currently_registered_course)
except Exception as e:
final_registered_course = None
drop_courses_options = None
add_courses_options = None
fee_payment_mode_list = dict(Constants.PaymentMode)
performance_list = []
result_announced = False
for i in currently_registered_courses:
try:
performance_obj = SemesterMarks.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(student_id = obj, curr_id = i).first()
except Exception as e:
performance_obj = None
performance_list.append(performance_obj)
for i in currently_registered_courses:
try:
result_announced_obj = MarkSubmissionCheck.objects.select_related().get(curr_id = i)
if result_announced_obj:
if result_announced_obj.announced == True:
result_announced = result_announced_obj.announced
else:
continue
except Exception as e:
continue
faculty_list = None
thesis_request_list = None
assistantship_list = None
pre_existing_thesis_flag = False
teaching_credit_registration_course = None
if masters_flag:
faculty_list = get_faculty_list()
thesis_request_list = ThesisTopicProcess.objects.all().filter(student_id = obj)
assistantship_list = AssistantshipClaim.objects.all().filter(student = obj)
pre_existing_thesis_flag = get_thesis_flag(obj)
if phd_flag:
pre_existing_thesis_flag = get_thesis_flag(obj)
teaching_credit_registration_course = Curriculum.objects.all().select_related().filter(batch = 2016, sem =6)
# Dues Check
        # Initializing all dues with -1, since generating a no-dues certificate requires total due = 0
lib_d, pc_d, hos_d, mess_d, acad_d = -1, -1, -1, -1, -1
if student_flag:
try:
obj = Dues.objects.select_related().get(student_id=Student.objects.select_related('id','id__user','id__department').get(id=request.user.username))
lib_d = obj.library_due
pc_d = obj.placement_cell_due
hos_d = obj.hostel_due
mess_d = obj.mess_due
acad_d = obj.academic_due
except ObjectDoesNotExist:
logging.warning("entry in DB not found for student")
tot_d = lib_d + acad_d + pc_d + hos_d + mess_d
obj = Student.objects.select_related('id','id__user','id__department').get(id=request.user.username)
course_list = []
for i in registers:
course_list.append(i.curr_id)
attendence = []
for i in course_list:
instructors = Curriculum_Instructor.objects.select_related('curriculum_id','instructor_id','curriculum_id__course_id','instructor_id__department','instructor_id__user').filter(curriculum_id=i)
pr,ab=0,0
for j in list(instructors):
presents = Student_attendance.objects.select_related('student_id','student_id__id','student_id__id__user','student_id__id__department','instructor_id','instructor_id__curriculum_id','instructor_id__curriculum_id__course_id','instructor_id__instructor_id','instructor_id__instructor_id__user','instructor_id__instructor_id__department').filter(student_id=obj,instructor_id=j, present=True)
absents = Student_attendance.objects.select_related('student_id','student_id__id','student_id__id__user','student_id__id__department','instructor_id','instructor_id__curriculum_id','instructor_id__curriculum_id__course_id','instructor_id__instructor_id','instructor_id__instructor_id__user','instructor_id__instructor_id__department').filter(student_id=obj,instructor_id=j, present=False)
pr += len(presents)
ab += len(absents)
attendence.append((i,pr,pr+ab))
cur_spi='Sem results not available' # To be fetched from db if result uploaded
Mess_bill = Monthly_bill.objects.filter(student_id = obj)
Mess_pay = Payments.objects.filter(student_id = obj)
# Branch Change Form save
if request.method=='POST':
if True:
# Processing Branch Change form
objb = BranchChange()
objb.branches=request.POST['branches']
objb.save()
return render(
request, '../templates/academic_procedures/academic.html',
{'details': details,
# 'calendar': calendar,
'currently_registered': currently_registered_course,
'pre_registered_course' : pre_registered_course,
'pre_registered_course_show' : pre_registered_course_show,
'final_registered_course' : final_registered_course,
'current_credits' : current_credits,
'courses_list': next_sem_branch_course,
'fee_payment_mode_list' : fee_payment_mode_list,
'next_sem_registration_courses': next_sem_registration_courses,
'final_registration_choice' : final_registration_choice,
'unavailable_courses_nextsem' : unavailable_courses_nextsem,
'performance_list' : performance_list,
'faculty_list' : faculty_list,
'thesis_request_list' : thesis_request_list,
'assistantship_list' : assistantship_list,
'next_sem': next_sem_id,
'curr_sem': curr_sem_id,
# 'final_register': final_register,
'student_flag' : student_flag,
'ug_flag' : ug_flag,
'masters_flag' : masters_flag,
'phd_flag' : phd_flag,
'fac_flag' : fac_flag,
'des_flag' : des_flag,
'result_announced' : result_announced,
'thesis_flag' : pre_existing_thesis_flag,
# 'change_branch': change_branch,
# 'add_course': add_course,
'add_courses_options': add_courses_options,
'drop_courses_options' : drop_courses_options,
# 'pre_register': pre_register,
'prd': pre_registration_date_flag,
'frd': final_registration_date_flag,
'adc_date_flag': add_or_drop_course_date_flag,
'pre_registration_flag' : pre_registration_flag,
'final_registration_flag': final_registration_flag,
# 'final_r': final_register_1,
'teaching_credit_registration_course' : teaching_credit_registration_course,
'cur_cpi': cur_cpi,
'cur_spi': cur_spi,
# 'mincr': minimum_credit,
'Mess_bill' : Mess_bill,
'Mess_pay' : Mess_pay,
'lib_d':lib_d,
'acad_d':acad_d,
'mess_d':mess_d,
'pc_d':pc_d,
'hos_d':hos_d,
'tot_d':tot_d,
'attendence':attendence,
'BranchChangeForm': BranchChangeForm(),
'BranchFlag':branchchange_flag,
'assistantship_flag' : student_status,
}
)
elif str(des.designation) == "Associate Professor" :
return HttpResponseRedirect('/academic-procedures/main/')
elif str(request.user) == "acadadmin" :
return HttpResponseRedirect('/academic-procedures/main/')
else:
return HttpResponse('user not found')
def dues_pdf(request):
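    # Renders the logged-in student's dues summary as a downloadable PDF.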
template = get_template('academic_procedures/dues_pdf.html')
current_user = get_object_or_404(User, username=request.user.username)
user_details = ExtraInfo.objects.get(id = request.user)
des = HoldsDesignation.objects.all().filter(user = request.user).first()
name = ExtraInfo.objects.all().filter(id=request.user.username)[0].user
if str(des.designation) == "student":
obj = Student.objects.get(id = user_details.id)
context = {
'student_id' : request.user.username,
'degree' : obj.programme.upper(),
'name' : name.first_name +" "+ name.last_name,
'branch' : get_user_branch(user_details),
}
pdf = render_to_pdf('academic_procedures/dues_pdf.html',context)
if pdf:
response = HttpResponse(pdf, content_type='application/pdf')
            response['Content-Disposition'] = 'attachment; filename=Dues.pdf'
return response
return HttpResponse("PDF could not be generated")
def facultyData(request):
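    # AJAX helper: returns a JSON list of faculty names, filtered case-insensitively by the
    # typed value when one is provided.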
current_value = request.POST['current_value']
try:
# students =ExtraInfo.objects.all().filter(user_type = "student")
faculty = ExtraInfo.objects.all().filter(user_type = "faculty")
facultyNames = []
for i in faculty:
name = i.user.first_name + " " + i.user.last_name
if current_value != "":
Lowname = name.lower()
Lowcurrent_value = current_value.lower()
if Lowcurrent_value in Lowname:
facultyNames.append(name)
else:
facultyNames.append(name)
faculty = json.dumps(facultyNames)
return HttpResponse(faculty)
except Exception as e:
return HttpResponse("error")
def get_course_to_show_pg(initial_courses, final_register):
'''
    This function returns the initially chosen PG courses that were not finally registered, collected into list x.
@param:
initial_courses - The courses that the registered PG student has already selected.
final_register - Finally registered courses of the user.
@variables:
x - The courses that are not being finally registered.
'''
x = []
for i in initial_courses:
flag = 0
for j in final_register:
if(str(i.course_name) == str(j.course_id)):
flag = 1
if(flag == 0):
x.append(i)
return x
def get_pg_course(usersem, specialization):
'''
    This function fetches the PG specialization courses from the database and stores them into list result.
@param:
usersem - Current semester of the user.
specialization - has the specialization of the logged in PG student.
@variables:
result - The selected Specialization courses.
'''
usersem = 2
obj = CoursesMtech.objects.select_related().filter(specialization=specialization)
obj3 = CoursesMtech.objects.select_related().filter(specialization="all")
obj2 = Course.objects.filter(sem=usersem)
result = []
for i in obj:
p = i.c_id
for j in obj2:
if(str(j.course_name) == str(p)):
result.append(j)
for i in obj3:
p = i.c_id
for j in obj2:
if(str(j.course_name) == str(p)):
result.append(j)
return result
def get_add_course(branch, final):
'''
This function shows the courses that were added after pre-registration.
@param:
branch - Branch of the Logged in student.
final - all the added courses after pre-registration.
@variables:
x - all the added courses after pre-registration.
        total_course - all the remaining courses that were not added.
'''
x = []
for i in final:
x.append(i.course_id)
total_course = []
for i in branch:
if i not in x:
total_course.append(i)
return total_course
@login_required(login_url='/accounts/login')
def apply_branch_change(request):
'''
This function is used to verify the details to apply for the branch change. It checks the requirement and tells the user if he/she can change the branch or not.
@param:
request - trivial
@variables:
branches - selected branches by the user.
student - details of the logged in user.
extraInfo_user - gets the user details from the extrainfo model.
cpi_data - cpi of the logged in user.
semester - user's semester.
label_for_change - boolean variable to check the eligibility.
'''
# Get all the departments
# branch_list = DepartmentInfo.objects.all()
branches = ['CSE', 'ME', 'ECE']
# Get the current logged in user
student = User.objects.all().filter(username=request.user).first()
# Get the current logged in user's cpi
extraInfo_user = ExtraInfo.objects.all().select_related('user','department').filter(user=student).first()
cpi_data = Student.objects.all().select_related('id','id__user','id__department').filter(id=extraInfo_user.id).first()
# for i in range(len(branch_list)):
# branch_cut = branch_list[i].name
# branches.append(branch_cut)
label_for_change = False
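    # Note: ug_flag / masters_flag / phd_flag are not set in this view; they are assumed to
    # be available from the surrounding module scope.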
semester = get_user_semester(extraInfo_user.id, ug_flag, masters_flag, phd_flag)
# semester = 2
if cpi_data.cpi >= 8 and semester >= 1 and semester <= 2:
label_for_change = True
context = {
'branches': branches,
'student': student,
'cpi_data': cpi_data,
'label_for_change': label_for_change,
}
return context
def branch_change_request(request):
'''
This function is used to apply the branch change request.
@param:
request - trivial
@variables:
current_user - details of the current user.
student - details of the logged in student.
extraInfo_user - gets the user details from the extrainfo model.
        department - user's applied branch.
'''
if request.method == 'POST':
current_user = get_object_or_404(User, username=request.user.username)
extraInfo_user = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first()
student = Student.objects.all().select_related('id','id__user','id__department').filter(id=extraInfo_user.id).first()
department = DepartmentInfo.objects.all().filter(id=int(request.POST['branches'])).first()
change_save = BranchChange(
branches=department,
user=student
)
change_save.save()
return HttpResponseRedirect('/academic-procedures/main')
else:
return HttpResponseRedirect('/academic-procedures/main')
@login_required(login_url='/accounts/login')
def approve_branch_change(request):
'''
    This function is used to approve the branch change requests from the acad admin's side.
@param:
request - trivial
@variables:
choices - list of students who applied for the branch change.
        branches - selected branches by the student.
get_student - updating the student's branch after approval.
branch - branch of the current user.
'''
if request.method == 'POST':
values_length = len(request.POST.getlist('choice'))
choices = []
branches = []
for i in range(values_length):
for key, values in request.POST.lists():
if key == 'branch':
branches.append(values[i])
if key == 'choice':
choices.append(values[i])
else:
continue
changed_branch = []
for i in range(len(branches)):
get_student = ExtraInfo.objects.all().select_related('user','department').filter(id=choices[i][:7])
get_student = get_student[0]
branch = DepartmentInfo.objects.all().filter(name=branches[i])
get_student.department = branch[0]
changed_branch.append(get_student)
student = Student.objects.all().select_related('id','id__user','id__department').filter(id=choices[i][:7]).first()
change = BranchChange.objects.select_related('branches','user','user__id','user__id__user','user__id__department').all().filter(user=student)
change = change[0]
change.delete()
try:
ExtraInfo.objects.bulk_update(changed_branch,['department'])
            messages.info(request, 'Branch change approved successfully')
except:
messages.info(request, 'Unable to proceed, we will get back to you very soon')
return HttpResponseRedirect('/academic-procedures/main')
else:
messages.info(request, 'Unable to proceed')
return HttpResponseRedirect('/academic-procedures/main')
# Function returning batch query details, which are required in several views
def get_batch_query_detail(month, year):
'''
    This function is used to get the batch query details; it simply returns the batch options which are required often.
@param:
month - current month
year - current year.
@variables:
stream1 - string BTech.
stream2 - string MTech.
        query_option1 - batch-year options to be shown on the acad admin's student course show page
'''
stream1 = "B.Tech "
stream2 = "M.Tech "
query_option1 = {}
if(month >= 7):
query_option1 = {
stream1+str(year): stream1+str(year),
stream1+str(year-1): stream1+str(year-1),
stream1+str(year-2): stream1+str(year-2),
stream1+str(year-3): stream1+str(year-3),
stream1+str(year-4): stream1+str(year-4),
stream2+str(year): stream2+str(year),
            stream2+str(year-1): stream2+str(year-1)}
else:
query_option1 = {
stream1+str(year-1): stream1+str(year-1),
stream1+str(year-2): stream1+str(year-2),
stream1+str(year-3): stream1+str(year-3),
stream1+str(year-4): stream1+str(year-4),
stream1+str(year-5): stream1+str(year-5),
stream2+str(year-1): stream2+str(year-1),
stream2+str(year-2): stream2+str(year-2), }
return query_option1
# view when Admin drops a user course
@login_required(login_url='/accounts/login')
def dropcourseadmin(request):
'''
This function is used to get the view when Acad Admin drops any course of any student.
@param:
request - trivial
@variables:
data - user's id.
rid - Registration ID of Registers table
response_data - data to be responded.
'''
data = request.GET.get('id')
data = data.split(" - ")
course_code = data[1]
# need to add batch and programme
curriculum_object = Curriculum.objects.all().filter(course_code = course_code)
try:
Register.objects.filter(curr_id = curriculum_object.first(),student_id=int(data[0])).delete()
    except:
        logging.warning("Register entry not found while dropping the course")
response_data = {}
return HttpResponse(json.dumps(response_data), content_type="application/json")
@login_required(login_url='/accounts/login')
def gen_course_list(request):
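    # AJAX helper for the acad admin: returns the students of the selected batch year who
    # are registered for the chosen course, rendered as HTML and wrapped in JSON.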
if(request.POST):
try:
batch = request.POST['batch']
course_id = request.POST['course']
course = Courses.objects.get(id = course_id)
obj = course_registration.objects.all().filter(course_id = course)
except Exception as e:
batch=""
course=""
obj=""
students = []
for i in obj:
if i.student_id.batch_id.year == int(batch):
students.append(i.student_id)
html = render_to_string('academic_procedures/gen_course_list.html',
{'students': students, 'batch':batch, 'course':course_id}, request)
maindict = {'html': html}
obj = json.dumps(maindict)
return HttpResponse(obj, content_type='application/json')
# view where Admin verifies the registered courses of every student
@login_required(login_url='/accounts/login')
def verify_course(request):
'''
This function is used to get the view when Acad Admin verifies the registered courses of every student.
@param:
request - trivial
@variables:
current_user - details of current user.
desig_id - Finds the Acad admin whose designation is "Upper Division Clerk".
acadadmin - details of the acad person(logged in).
roll_no - roll number of all the students.
firstname - firstname of the students.
year - current year.
month - current month.
date - current date.
'''
if(request.POST):
current_user = get_object_or_404(User, username=request.user.username)
user_details = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first()
desig_id = Designation.objects.all().filter(name='adminstrator').first()
temp = HoldsDesignation.objects.all().select_related().filter(designation = desig_id).first()
acadadmin = temp.working
k = str(user_details).split()
final_user = k[2]
if (str(acadadmin) != str(final_user)):
return HttpResponseRedirect('/academic-procedures/')
roll_no = request.POST["rollNo"]
obj = ExtraInfo.objects.all().select_related('user','department').filter(id=roll_no).first()
firstname = obj.user.first_name
lastname = obj.user.last_name
dict2 = {'roll_no': roll_no, 'firstname': firstname, 'lastname': lastname}
obj2 = Student.objects.all().select_related('id','id__user','id__department').filter(id=roll_no).first()
obj = Register.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(student_id = obj2)
curr_sem_id = obj2.curr_semester_no
details = []
current_sem_courses = get_currently_registered_course(roll_no,curr_sem_id)
idd = obj2
for z in current_sem_courses:
z=z[1]
course_code,course_name= str(z).split(" - ")
k = {}
            # reg_id has the course registration id appended with the roll number
# so that when we have removed the registration we can be redirected to this view
k['reg_id'] = roll_no+" - "+course_code
k['rid'] = roll_no+" - "+course_code
            # Name/ID confusion here, be careful
courseobj2 = Courses.objects.all().filter(code = course_code)
# if(str(z.student_id) == str(idd)):
for p in courseobj2:
k['course_id'] = course_code
k['course_name'] = course_name
k['sem'] = curr_sem_id
k['credits'] = p.credit
details.append(k)
year = demo_date.year
month = demo_date.month
yearr = str(year) + "-" + str(year+1)
semflag = 0
if(month >= 7):
semflag = 1
else:
semflag = 2
# TO DO Bdes
date = {'year': yearr, 'semflag': semflag}
html = render_to_string('academic_procedures/studentCourses.html',
{'details': details,
'dict2': dict2,
'date': date}, request)
maindict = {'html': html}
obj = json.dumps(maindict)
return HttpResponse(obj, content_type='application/json')
# view to generate the list of all students
def acad_branch_change(request):
'''
This function is used to approve the branch changes requested by the students.
@param:
request - trivial
@variables:
current_user - logged in user
desig_id - Finds the Acad admin whose designation is "Upper Division Clerk".
acadadmin - details of the logged in acad admin.
user_details - details of the logged in user.
change_queries - gets all the details of branch changes from the database.
year - current year.
month - current month
date - current date.
        total_cse_seats - total available CSE seats.
        total_ece_seats - total available ECE seats.
        total_me_seats - total available ME seats.
        available_cse_seats - available CSE seats.
        available_ece_seats - available ECE seats.
        available_me_seats - available ME seats.
'''
current_user = get_object_or_404(User, username=request.user.username)
user_details = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first()
desig_id = Designation.objects.all().filter(name='Upper Division Clerk')
temp = HoldsDesignation.objects.all().select_related().filter(designation = desig_id).first()
acadadmin = temp.working
k = str(user_details).split()
final_user = k[2]
if (str(acadadmin) != str(final_user)):
return HttpResponseRedirect('/academic-procedures/')
# year = datetime.datetime.now().year
# month = datetime.datetime.now().month
year = demo_date.year
month = demo_date.month
yearr = str(year) + "-" + str(year+1)
semflag = 0
queryflag = 0
    query_option1 = get_batch_query_detail(month, year)
    result_year = get_batch_all()  # required by the render context at the end of this view
query_option2 = {"CSE": "CSE", "ECE": "ECE", "ME": "ME"}
if(month >= 7):
semflag = 1
else:
semflag = 2
# TO DO Bdes
date = {'year': yearr, 'month': month, 'semflag': semflag, 'queryflag': queryflag}
change_queries = BranchChange.objects.select_related('branches','user','user__id','user__id__user','user__id__department').all()
# Total seats taken as some random value
total_cse_seats = 100
total_ece_seats = 100
total_me_seats = 100
total_cse_filled_seats = 98
total_ece_filled_seats = 98
total_me_filled_seats = 98
available_cse_seats = total_cse_seats - total_cse_filled_seats
available_ece_seats = total_ece_seats - total_ece_filled_seats
available_me_seats = total_me_seats - total_me_filled_seats
initial_branch = []
change_branch = []
available_seats = []
applied_by = []
cpi = []
for i in change_queries:
applied_by.append(i.user.id)
change_branch.append(i.branches.name)
students = Student.objects.all().select_related('id','id__user','id__department').filter(id=i.user.id).first()
user_branch = ExtraInfo.objects.all().select_related('user','department').filter(id=students.id.id).first()
initial_branch.append(user_branch.department.name)
cpi.append(students.cpi)
if i.branches.name == 'CSE':
available_seats.append(available_cse_seats)
elif i.branches.name == 'ECE':
available_seats.append(available_ece_seats)
elif i.branches.name == 'ME':
available_seats.append(available_me_seats)
else:
available_seats.append(0)
lists = zip(applied_by, change_branch, initial_branch, available_seats, cpi)
tag = False
if len(initial_branch) > 0:
tag = True
context = {
'list': lists,
'total': len(initial_branch),
'tag': tag
}
return render(
request,
'../templates/academic_procedures/academicadminforbranch.html',
{
'context': context,
'lists': lists,
'date': date,
'query_option1': query_option1,
'query_option2': query_option2,
'result_year' : result_year
}
)
@login_required(login_url='/accounts/login')
def phd_details(request):
'''
This function is used to extract the details of the PHD details.
@param:
request - trivial
@variables:
current_user - logged in user
student - details of the logged in student.
thesis - gets the thesis details of the PhD student.
faculty - gets the chosen faculty's details.
user_details - details of the logged in user.
total_thesis - total number of applied thesis.
'''
current_user = get_object_or_404(User, username=request.user.username)
user_details = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first()
student = Student.objects.all().select_related('id','id__user','id__department').filter(id=user_details.id).first()
thesis = Thesis.objects.all().filter(student_id=student).first()
#Professor = Designation.objects.all().filter(name='Professor')
#faculty = ExtraInfo.objects.all().filter(department=user_details.department,
# designation='Professor')
f1 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Assistant Professor"))
f2 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Professor"))
f3 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Associate Professor"))
faculty = list(chain(f1,f2,f3))
faculties_list = []
for i in faculty:
faculties_list.append(str(i.user.first_name)+" "+str(i.user.last_name))
total_thesis = True
if(thesis is None):
total_thesis = False
context = {
'total_thesis': total_thesis,
'thesis': thesis,
}
return render(
request,
'../templates/academic_procedures/phdregistration.html',
{'context': context, 'faculty': faculties_list, 'student': student}
)
# ---------------------------------------------------------------------------
# Helper functions and registration/result handlers
# ---------------------------------------------------------------------------
def get_student_register(id):
return Register.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(student_id = id)
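# The next four helpers check whether the given date falls inside the corresponding window
# ("Pre Registration", "Physical Reporting at the Institute", add/drop and course
# verification) defined in the Calendar table; each returns False when the entry is missing
# or the date is outside the window.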
def get_pre_registration_eligibility(current_date):
try:
pre_registration_date = Calendar.objects.all().filter(description="Pre Registration").first()
prd_start_date = pre_registration_date.from_date
prd_end_date = pre_registration_date.to_date
if current_date>=prd_start_date and current_date<=prd_end_date:
return True
else :
return False
except Exception as e:
return False
def get_final_registration_eligibility(current_date):
try:
frd = Calendar.objects.all().filter(description="Physical Reporting at the Institute").first()
frd_start_date = frd.from_date
frd_end_date = frd.to_date
if current_date>=frd_start_date and current_date<=frd_end_date:
return True
else :
return False
except Exception as e:
return False
def get_add_or_drop_course_date_eligibility(current_date):
try:
add_drop_course_date = Calendar.objects.all().filter(description="Last Date for Adding/Dropping of course").first()
adc_start_date = add_drop_course_date.from_date
adc_end_date = add_drop_course_date.to_date
if current_date>=adc_start_date and current_date<=adc_end_date:
return True
else :
return False
except Exception as e:
return False
def get_course_verification_date_eligibilty(current_date):
try:
course_verification_date = Calendar.objects.all().filter(description="course verification date").first()
verif_start_date = course_verification_date.from_date
verif_end_date = course_verification_date.to_date
if current_date>=verif_start_date and current_date<=verif_end_date:
return True
else :
return False
except Exception as e:
return False
def get_user_branch(user_details):
return user_details.department.name
def get_acad_year(user_sem, year):
if user_sem%2 == 1:
acad_year = str(year) + "-" + str(year+1)
elif user_sem%2 == 0:
acad_year = str(year-1) + "-" + str(year)
return acad_year
def pre_registration(request):
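    # Pre-registration handler: creates InitialRegistration rows for every selected
    # course/slot pair and records a StudentRegistrationChecks entry with
    # pre_registration_flag set to True.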
if request.method == 'POST':
try:
current_user = get_object_or_404(User, username=request.POST.get('user'))
current_user = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first()
current_user = Student.objects.all().filter(id=current_user.id).first()
sem_id = Semester.objects.get(id = request.POST.get('semester'))
count = request.POST.get('ct')
count = int(count)
reg_curr=[]
for i in range(1, count+1):
i = str(i)
choice = "choice["+i+"]"
slot = "slot["+i+"]"
if request.POST.get(choice)!='0':
course_id = Courses.objects.get(id = request.POST.get(choice))
courseslot_id = CourseSlot.objects.get(id = request.POST.get(slot))
p = InitialRegistration(
course_id = course_id,
semester_id = sem_id,
student_id = current_user,
course_slot_id = courseslot_id
)
else:
continue
reg_curr.append(p)
InitialRegistration.objects.bulk_create(reg_curr)
try:
check = StudentRegistrationChecks(
student_id = current_user,
pre_registration_flag = True,
final_registration_flag = False,
semester_id = sem_id
)
check.save()
messages.info(request, 'Pre-Registration Successful')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
return HttpResponseRedirect('/academic-procedures/main')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
else:
return HttpResponseRedirect('/academic-procedures/main')
def get_student_registrtion_check(obj, sem):
return StudentRegistrationChecks.objects.all().filter(student_id = obj, semester_id = sem).first()
def final_registration(request):
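    # Final registration handler. "register" creates FinalRegistration rows (respecting each
    # course slot's registration limit) plus a FeePayments record and sets the student's
    # final_registration_flag; "change_register" first deletes the existing rows for the
    # semester and then re-registers in the same way.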
if request.method == 'POST':
if request.POST.get('type_reg') == "register" :
try:
current_user = get_object_or_404(User, username=request.POST.get('user'))
current_user = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first()
current_user = Student.objects.all().filter(id=current_user.id).first()
sem_id = Semester.objects.get(id = request.POST.get('semester'))
choice = request.POST.getlist('choice')
slot = request.POST.getlist('slot')
values_length = 0
values_length = len(choice)
mode = str(request.POST.get('mode'))
transaction_id = str(request.POST.get('transaction_id'))
f_reg = []
for x in range(values_length):
if choice[x] != '0':
course_id = Courses.objects.get(id = choice[x])
courseslot_id = CourseSlot.objects.get(id = slot[x])
                        if FinalRegistration.objects.filter(student_id__batch_id__year = current_user.batch_id.year, course_id = course_id).count() < courseslot_id.max_registration_limit:
p = FinalRegistration(
course_id = course_id,
semester_id=sem_id,
student_id= current_user,
course_slot_id = courseslot_id,
verified = False
)
f_reg.append(p)
else:
                            messages.info(request, 'Final-Registration Failed\n'+course_id.code+'-'+course_id.name+' registration limit reached.')
return HttpResponseRedirect('/academic-procedures/main')
FinalRegistration.objects.bulk_create(f_reg)
obj = FeePayments(
student_id = current_user,
semester_id = sem_id,
mode = mode,
transaction_id = transaction_id
)
obj.save()
try:
StudentRegistrationChecks.objects.filter(student_id = current_user, semester_id = sem_id).update(final_registration_flag = True)
messages.info(request, 'Final-Registration Successful')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
return HttpResponseRedirect('/academic-procedures/main')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
elif request.POST.get('type_reg') == "change_register" :
try:
current_user = get_object_or_404(User, username=request.POST.get('user'))
current_user = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first()
current_user = Student.objects.all().filter(id=current_user.id).first()
sem_id = Semester.objects.get(id = request.POST.get('semester'))
FinalRegistration.objects.filter(student_id = current_user, semester_id = sem_id).delete()
count = request.POST.get('ct')
count = int(count)
mode = str(request.POST.get('mode'))
transaction_id = str(request.POST.get('transaction_id'))
f_reg=[]
for i in range(1, count+1):
i = str(i)
choice = "choice["+i+"]"
slot = "slot["+i+"]"
if request.POST.get(choice) != '0':
try:
course_id = Courses.objects.get(id = request.POST.get(choice))
courseslot_id = CourseSlot.objects.get(id = request.POST.get(slot))
                            if FinalRegistration.objects.filter(student_id__batch_id__year = current_user.batch_id.year, course_id = course_id).count() < courseslot_id.max_registration_limit:
p = FinalRegistration(
course_id = course_id,
semester_id=sem_id,
student_id= current_user,
course_slot_id = courseslot_id,
verified = False
)
f_reg.append(p)
else:
                                messages.info(request, 'Final-Registration Failed\n'+course_id.code+'-'+course_id.name+' registration limit reached.')
return HttpResponseRedirect('/academic-procedures/main')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
FinalRegistration.objects.bulk_create(f_reg)
obj = FeePayments(
student_id = current_user,
semester_id = sem_id,
mode = mode,
transaction_id = transaction_id
)
obj.save()
try:
StudentRegistrationChecks.objects.filter(student_id = current_user, semester_id = sem_id).update(final_registration_flag = True)
                    messages.info(request, 'Registered course change successful')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
return HttpResponseRedirect('/academic-procedures/main')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
else:
return HttpResponseRedirect('/academic-procedures/main')
def get_cpi(id):
obj = Student.objects.select_related('id','id__user','id__department').get(id = id)
return obj.cpi
def register(request):
if request.method == 'POST':
try:
current_user = get_object_or_404(User, username=request.POST.get('user'))
current_user = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first()
current_user = Student.objects.all().select_related('id','id__user','id__department').filter(id=current_user.id).first()
values_length = 0
values_length = len(request.POST.getlist('choice'))
sem = request.POST.get('semester')
for x in range(values_length):
reg_curr=[]
for key, values in request.POST.lists():
if (key == 'choice'):
try:
last_id = Register.objects.all().aggregate(Max('r_id'))
last_id = last_id['r_id__max']+1
except Exception as e:
last_id = 1
curr_id = get_object_or_404(Curriculum, curriculum_id=values[x])
p = Register(
r_id=last_id,
curr_id=curr_id,
year=current_user.batch,
student_id=current_user,
semester=sem
)
reg_curr.append(p)
else:
continue
Register.objects.bulk_create(reg_curr)
messages.info(request, 'Pre-Registration Successful')
return HttpResponseRedirect('/academic-procedures/main')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
else:
return HttpResponseRedirect('/academic-procedures/main')
def add_courses(request):
"""
    This function is used to add courses for the current semester
@param:
request - contains metadata about the requested page
@variables:
current_user - contains current logged in user
sem_id - contains current semester id
count - no of courses to be added
course_id - contains course id for a particular course
course_slot_id - contains course slot id for a particular course
reg_curr - list of registered courses object
choice - contains choice of a particular course
slot - contains slot of a particular course
# gg and cs
"""
if request.method == 'POST':
try:
current_user = get_object_or_404(User, username=request.POST.get('user'))
current_user = ExtraInfo.objects.all().filter(user=current_user).first()
current_user = Student.objects.all().filter(id=current_user.id).first()
sem_id = Semester.objects.get(id = request.POST.get('semester'))
count = request.POST.get('ct')
count = int(count)
reg_curr=[]
for i in range(1, count+1):
choice = "choice["+str(i)+"]"
slot = "slot["+str(i)+"]"
try:
course_id = Courses.objects.get(id = request.POST.get(choice))
courseslot_id = CourseSlot.objects.get(id = request.POST.get(slot))
# Check if maximum course registration limit has not reached and student has not already registered for that course
if course_registration.objects.filter(student_id__batch_id__year = current_user.batch_id.year, course_id = course_id).count() < courseslot_id.max_registration_limit and (course_registration.objects.filter(course_id=course_id, student_id=current_user).count() == 0):
p = course_registration(
course_id = course_id,
student_id=current_user,
course_slot_id = courseslot_id,
semester_id=sem_id
)
if p not in reg_curr:
reg_curr.append(p)
except Exception as e:
continue
course_registration.objects.bulk_create(reg_curr)
return HttpResponseRedirect('/academic-procedures/main')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
else:
return HttpResponseRedirect('/academic-procedures/main')
def drop_course(request):
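    # Drops the selected courses for the logged-in student by deleting the matching
    # course_registration rows.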
if request.method == 'POST':
try:
current_user = get_object_or_404(User, username=request.POST.get('user'))
current_user = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first()
current_user = Student.objects.all().get(id=current_user.id)
values_length = 0
values_length = len(request.POST.getlist('choice'))
sem_id = request.POST.get('semester')
sem = Semester.objects.get(id = sem_id)
for x in range(values_length):
for key, values in request.POST.lists():
if (key == 'choice'):
course_id = get_object_or_404(Courses, id=values[x])
course_registration.objects.filter(course_id = course_id, student_id = current_user).delete()
else:
continue
messages.info(request, 'Course Successfully Dropped')
return HttpResponseRedirect('/academic-procedures/main')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
else:
return HttpResponseRedirect('/academic-procedures/main')
def add_thesis(request):
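    # Thesis-topic workflow. A student submission (by == "st") creates a ThesisTopicProcess
    # entry pending supervisor approval; a faculty submission (by == "fac") records the
    # committee members and either forwards the topic to the HoD or rejects it.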
if request.method == 'POST':
try:
if(str(request.POST.get('by'))=="st"):
thesis_topic = request.POST.get('thesis_topic')
research_area = request.POST.get('research_area')
supervisor_faculty = get_object_or_404(User, username = request.POST.get('supervisor'))
supervisor_faculty = ExtraInfo.objects.select_related('user','department').get(user = supervisor_faculty)
supervisor_faculty = Faculty.objects.select_related('id','id__user','id__department').get(id = supervisor_faculty)
try:
co_supervisor_faculty = get_object_or_404(User, username = request.POST.get('co_supervisor'))
co_supervisor_faculty = ExtraInfo.objects.select_related('user','department').get(user = co_supervisor_faculty)
co_supervisor_faculty = Faculty.objects.select_related('id','id__user','id__department').get(id = co_supervisor_faculty)
except Exception as e:
co_supervisor_faculty = None
current_user = get_object_or_404(User, username=request.POST.get('user'))
current_user = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first()
current_user = Student.objects.all().select_related('id','id__user','id__department').filter(id=current_user.id).first()
try:
curr_id = request.POST.get('curr_id')
curr_id = Curriculum.objects.select_related().get(curriculum_id = curr_id)
except Exception as e:
curr_id = None
p = ThesisTopicProcess(
student_id = current_user,
research_area = research_area,
thesis_topic = thesis_topic,
curr_id = curr_id,
supervisor_id = supervisor_faculty,
co_supervisor_id = co_supervisor_faculty,
submission_by_student = True,
pending_supervisor = True,
)
p.save()
messages.info(request, 'Thesis Successfully Added')
return HttpResponseRedirect('/academic-procedures/main/')
elif(str(request.POST.get('by'))=="fac"):
obj = request.POST.get('obj_id')
obj = ThesisTopicProcess.objects.get(id = obj)
member1 = get_object_or_404(User, username = request.POST.get('member1'))
member1 = ExtraInfo.objects.select_related('user','department').get(user = member1)
member1 = Faculty.objects.select_related('id','id__user','id__department').get(id = member1)
member2 = get_object_or_404(User, username = request.POST.get('member2'))
member2 = ExtraInfo.objects.select_related('user','department').get(user = member2)
member2 = Faculty.objects.select_related('id','id__user','id__department').get(id = member2)
try:
member3 = get_object_or_404(User, username = request.POST.get('member3'))
member3 = ExtraInfo.objects.select_related('user','department').get(user = member3)
member3 = Faculty.objects.select_related('id','id__user','id__department').get(id = member3)
except Exception as e:
member3 = None
if(str(request.POST.get('approval'))=="yes"):
obj.pending_supervisor = False
obj.member1 = member1
obj.member2 = member2
obj.member3 = member3
obj.approval_supervisor = True
obj.forwarded_to_hod = True
obj.pending_hod = True
obj.save()
elif(request.POST.get('approval')=="no"):
obj.pending_supervisor = False
obj.member1 = member1
obj.member2 = member2
obj.member3 = member3
obj.approval_supervisor = False
obj.forwarded_to_hod = False
obj.pending_hod = False
obj.save()
else:
logging.warning("Not approved till now")
return HttpResponseRedirect('/academic-procedures/main')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
else:
return HttpResponseRedirect('/academic-procedures/main/')
return HttpResponseRedirect('/academic-procedures/main/')
def get_final_registration_choices(branch_courses,batch):
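    # For each course slot, splits its courses into those still below the slot's
    # registration limit (offered as choices) and those that are already full
    # (returned separately as unavailable).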
course_option = []
unavailable_courses = []
for courseslot in branch_courses:
max_limit = courseslot.max_registration_limit
lis = []
for course in courseslot.courses.all():
            if FinalRegistration.objects.filter(student_id__batch_id__year = batch, course_id = course).count() < max_limit:
lis.append(course)
else:
unavailable_courses.append(course)
course_option.append((courseslot, lis))
return course_option, unavailable_courses
def get_add_course_options(branch_courses, current_register, batch):
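    # Returns, for every course slot the student has not already registered in, the courses
    # that are still below that slot's registration limit.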
course_option = []
courses = current_register
slots = []
for c in current_register:
slots.append(c[0])
for courseslot in branch_courses:
max_limit = courseslot.max_registration_limit
if courseslot not in slots:
lis = []
for course in courseslot.courses.all():
if course_registration.objects.filter(student_id__batch_id__year = batch, course_id = course).count() < max_limit:
lis.append(course)
course_option.append((courseslot, lis))
return course_option
def get_drop_course_options(current_register):
courses = []
for item in current_register:
if item[0].type != "Professional Core":
courses.append(item[1])
return courses
def get_user_semester(roll_no, ug_flag, masters_flag, phd_flag):
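    # Estimates the user's current semester from the admission year encoded in the roll
    # number and the current (demo) date: July-December counts as the odd semester,
    # January-June as the even one.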
roll = str(roll_no)
now = demo_date
year, month = now.year, int(now.month)
y = str(year)
if(ug_flag):
if(roll[2].isdigit()):
roll = int(roll[:4])
else:
roll = int("20"+roll[:2])
user_year = year - roll
elif(masters_flag or phd_flag):
roll = int(roll[:2])
user_year = int(y[-2:]) - roll
sem = 'odd'
if month >= 7 and month<=12:
sem = 'odd'
else:
sem = 'even'
if sem == 'odd':
return user_year * 2 + 1
else:
return user_year * 2
def get_branch_courses(roll_no, user_sem, branch):
roll = str(roll_no)
year = int(roll[:4])
courses = Curriculum.objects.all().select_related().filter(batch=(year))
courses = courses.filter(sem = user_sem)
courses = courses.filter(floated = True)
course_list = []
for course in courses:
if branch.lower() == course.branch.lower() :
course_list.append(course)
elif course.branch.lower() == 'common':
course_list.append(course)
return course_list
def get_sem_courses(sem_id, batch):
courses = []
course_slots = CourseSlot.objects.all().filter(semester_id = sem_id)
for slot in course_slots:
courses.append(slot)
return courses
def get_currently_registered_courses(id, user_sem):
obj = Register.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(student_id=id, semester=user_sem)
ans = []
for i in obj:
course = Curriculum.objects.select_related().get(curriculum_id=i.curr_id.curriculum_id)
ans.append(course)
return ans
def get_currently_registered_course(id, sem_id):
obj = course_registration.objects.all().filter(student_id = id, semester_id=sem_id)
courses = []
for i in obj:
courses.append((i.course_slot_id,i.course_id))
return courses
def get_current_credits(obj):
credits = 0
for i in obj:
credits = credits + i[1].credit
return credits
def get_faculty_list():
f1 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Assistant Professor"))
f2 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Professor"))
f3 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Associate Professor"))
faculty = list(chain(f1,f2,f3))
faculty_list = []
for i in faculty:
faculty_list.append(i)
return faculty_list
def get_thesis_flag(student):
obj = ThesisTopicProcess.objects.all().select_related().filter(student_id = student)
if(obj):
return True
else:
return False
@login_required(login_url='/accounts/login')
def acad_person(request):
current_user = get_object_or_404(User, username=request.user.username)
user_details = ExtraInfo.objects.select_related('user','department').get(user = request.user)
des = HoldsDesignation.objects.all().select_related().filter(user = request.user).first()
if str(des.designation) == "student":
return HttpResponseRedirect('/academic-procedures/main/')
elif str(des.designation) == "Associate Professor" :
return HttpResponseRedirect('/academic-procedures/main/')
elif str(request.user) == "acadadmin" :
# year = datetime.datetime.now().year
# month = datetime.datetime.now().month
year = demo_date.year
month = demo_date.month
yearr = str(year) + "-" + str(year+1)
semflag = 0
queryflag = 0
query_option1 = get_batch_query_detail(month, year)
query_option2 = {"CSE": "CSE", "ECE": "ECE", "ME": "ME"}
if(month >= 7):
semflag = 1
else:
semflag = 2
date = {'year': yearr, 'month': month, 'semflag': semflag, 'queryflag': queryflag}
result_year = []
result_year = get_batch_all()
# result_year = [1,2]
change_queries = BranchChange.objects.select_related('branches','user','user__id','user__id__user','user__id__department').all()
course_verification_date = get_course_verification_date_eligibilty(demo_date.date())
initial_branch = []
change_branch = []
available_seats = []
applied_by = []
cpi = []
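        # available_cse_seats / available_ece_seats / available_me_seats are assumed to be
        # provided elsewhere (they are computed locally in acad_branch_change); they are not
        # defined in this view.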
for i in change_queries:
applied_by.append(i.user.id)
change_branch.append(i.branches.name)
students = Student.objects.all().select_related('id','id__user','id__department').filter(id=i.user.id).first()
user_branch = ExtraInfo.objects.all().select_related('user','department').filter(id=students.id.id).first()
initial_branch.append(user_branch.department.name)
cpi.append(students.cpi)
if i.branches.name == 'CSE':
available_seats.append(available_cse_seats)
elif i.branches.name == 'ECE':
available_seats.append(available_ece_seats)
elif i.branches.name == 'ME':
available_seats.append(available_me_seats)
lists = zip(applied_by, change_branch, initial_branch, available_seats, cpi)
tag = False
if len(initial_branch) > 0:
tag = True
context = {
'list': lists,
'total': len(initial_branch),
'tag': tag
}
submitted_course_list = []
obj_list = MarkSubmissionCheck.objects.all().select_related().filter(verified= False,submitted = True)
for i in obj_list:
if int(i.curr_id.batch)+int(i.curr_id.sem)/2 == int(demo_date.year):
submitted_course_list.append(i.curr_id)
else:
continue
# submitted_course_list = SemesterMarks.objects.all().filter(curr_id__in = submitted_course_list)
batch_grade_data = get_batch_grade_verification_data(result_year)
return HttpResponseRedirect('/aims/')
else:
return HttpResponse('user not found')
def acad_proced_global_context():
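    # Builds the shared context used by the acad-admin pages: date/semester info, batch query
    # options, pending branch-change requests, submitted-but-unverified course marks and
    # per-batch grade/branch data.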
year = demo_date.year
month = demo_date.month
yearr = str(year) + "-" + str(year+1)
semflag = 0
queryflag = 0
query_option1 = get_batch_query_detail(month, year)
query_option2 = {"CSE": "CSE", "ECE": "ECE", "ME": "ME"}
if(month >= 7):
semflag = 1
else:
semflag = 2
date = {'year': yearr, 'month': month, 'semflag': semflag, 'queryflag': queryflag}
result_year = []
result_year = get_batch_all()
# result_year = [1,2]
change_queries = BranchChange.objects.select_related('branches','user','user__id','user__id__user','user__id__department').all()
course_verification_date = get_course_verification_date_eligibilty(demo_date.date())
initial_branch = []
change_branch = []
available_seats = []
applied_by = []
cpi = []
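    # available_*_seats are assumed to be provided elsewhere (see acad_branch_change for the
    # local computation); they are not defined in this function.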
for i in change_queries:
applied_by.append(i.user.id)
change_branch.append(i.branches.name)
students = Student.objects.all().select_related('id','id__user','id__department').filter(id=i.user.id).first()
user_branch = ExtraInfo.objects.all().select_related('user','department').filter(id=students.id.id).first()
initial_branch.append(user_branch.department.name)
cpi.append(students.cpi)
if i.branches.name == 'CSE':
available_seats.append(available_cse_seats)
elif i.branches.name == 'ECE':
available_seats.append(available_ece_seats)
elif i.branches.name == 'ME':
available_seats.append(available_me_seats)
lists = zip(applied_by, change_branch, initial_branch, available_seats, cpi)
tag = False
if len(initial_branch) > 0:
tag = True
context = {
'list': lists,
'total': len(initial_branch),
'tag': tag
}
submitted_course_list = []
obj_list = MarkSubmissionCheck.objects.all().select_related().filter(verified= False,submitted = True)
for i in obj_list:
if int(i.curr_id.batch)+int(i.curr_id.sem)/2 == int(demo_date.year):
submitted_course_list.append(i.curr_id)
else:
submitted_course_list.append(i.curr_id)
#continue
# submitted_course_list = SemesterMarks.objects.all().filter(curr_id__in = submitted_course_list)
batch_grade_data = get_batch_grade_verification_data(result_year)
batch_branch_data = get_batch_branch_data(result_year)
return {
'context': context,
'lists': lists,
'date': date,
'query_option1': query_option1,
'query_option2': query_option2,
'course_verification_date' : course_verification_date,
'submitted_course_list' : submitted_course_list,
'result_year' : result_year,
'batch_grade_data' : batch_grade_data,
'batch_branch_data': batch_branch_data
}
def get_batch_all():
result_year = []
if demo_date.month >=7:
result_year = [demo_date.year, demo_date.year-1, demo_date.year-2, demo_date.year-3]
# result_year = [1,2]
else :
result_year = [demo_date.year-1,demo_date.year-2, demo_date.year-3, demo_date.year-4]
return result_year
def announce_results(request):
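    # Marks every MarkSubmissionCheck of the selected batch year's curriculum as announced
    # (bulk update) and returns a JSON success response.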
i = int(request.POST.get('id'))
year = get_batch_all()
acad = get_object_or_404(User, username="acadadmin")
student_list = Student.objects.all().select_related('id','id__user','id__department').filter(batch = year[i-1])
# for obj in student_list:
# academics_module_notif(acad, obj.id.user, 'result_announced')
courses_list = Curriculum.objects.all().select_related().filter(batch = year[i-1])
rsl = []
for obj in courses_list:
try :
o = MarkSubmissionCheck.objects.select_related().get(curr_id = obj)
o.announced = True
rsl.append(o)
except Exception as e:
continue
MarkSubmissionCheck.objects.bulk_update(rsl,['announced'])
return JsonResponse({'status': 'success', 'message': 'Successfully Accepted'})
def get_batch_grade_verification_data(result_years):
    # For each of the given batch years, collect the floated Curriculum entries per branch
    # and every MarkSubmissionCheck that exists for those entries.
    semester_marks = []
    batch_grade_data = []
    for batch_year in result_years:
        c = Curriculum.objects.all().select_related().filter(batch = batch_year).filter(floated = True)
        batch_grade_data.append({
            'batch_list_year' : batch_year,
            'batch_list_ME' : list(c.filter(branch = 'ME')),
            'batch_list_ECE' : list(c.filter(branch = 'ECE')),
            'batch_list_CSE' : list(c.filter(branch = 'CSE'))
        })
        for i in c:
            try:
                obj_sem = MarkSubmissionCheck.objects.select_related().get(curr_id = i)
                if obj_sem:
                    semester_marks.append(obj_sem)
                else:
                    continue
            except Exception as e:
                continue
    batch_grade_data_set = {'batch_grade_data' : batch_grade_data,
                            'batch_sub_check' : semester_marks}
    return batch_grade_data_set
def get_batch_branch_data(result_year):
batches = []
for batch in Batch.objects.all():
if batch.year in result_year:
batches.append(batch)
return batches
@login_required(login_url='/accounts/login')
def student_list(request):
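    # AJAX view for the acad admin: lists students of the selected batch whose final
    # registration is awaiting verification, together with their fee-payment details,
    # and returns the rendered table as JSON.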
if(request.POST):
batch = request.POST["batch"]
year = demo_date.year
month = demo_date.month
yearr = str(year) + "-" + str(year+1)
semflag = 0
queryflag = 1
if(month >= 7):
semflag = 1
else:
semflag = 2
batch_year_option = get_batch_query_detail(month, year)
branch_option = {"CSE": "CSE", "ECE": "ECE", "ME": "ME"}
date = {'year': yearr, 'month': month, 'semflag': semflag, 'queryflag': queryflag}
batch_id = Batch.objects.get(id = batch)
student_obj = Student.objects.all().filter(batch_id = batch_id)
student = []
for obj in student_obj:
curr_id = batch_id.curriculum
sem_id = Semester.objects.get(curriculum = curr_id, semester_no = obj.curr_semester_no + 1)
try:
reg = StudentRegistrationChecks.objects.all().filter(student_id = obj, semester_id = sem_id).first()
pay = FeePayments.objects.all().filter(student_id = obj, semester_id = sem_id).first()
final = FinalRegistration.objects.all().filter(student_id = obj, semester_id = sem_id,verified = False)
except Exception as e:
reg = None
pay = None
final = None
if reg:
if reg.final_registration_flag == True and final:
student.append((obj,pay,final))
else:
continue
else:
continue
html = render_to_string('academic_procedures/student_table.html',
{'student': student}, request)
maindict = {'date': date,
'query_option1': batch_year_option,
'query_option2': branch_option,
'html': html,
'queryflag': queryflag}
obj = json.dumps(maindict)
return HttpResponse(obj, content_type='application/json')
def process_verification_request(request):
if request.is_ajax():
return verify_registration(request)
return JsonResponse({'status': 'Failed'}, status=400)
@transaction.atomic
def verify_registration(request):
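    # Acad-admin verification of a student's final registration. On "accept": copies the
    # unverified FinalRegistration rows into course_registration, marks them verified,
    # advances curr_semester_no and notifies the student. On "reject": deletes the pending
    # FinalRegistration and FeePayments rows, clears the final-registration flag and
    # notifies the student with the given reason.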
if request.POST.get('status_req') == "accept" :
student_id = request.POST.get('student_id')
student = Student.objects.get(id = student_id)
batch = student.batch_id
curr_id = batch.curriculum
sem_id = Semester.objects.get(curriculum = curr_id, semester_no = student.curr_semester_no+1)
final_register_list = FinalRegistration.objects.all().filter(student_id = student, verified = False, semester_id = sem_id)
sem_no = student.curr_semester_no + 1
with transaction.atomic():
ver_reg = []
for obj in final_register_list:
p = course_registration(
course_id=obj.course_id,
student_id=student,
semester_id=obj.semester_id,
course_slot_id = obj.course_slot_id
)
ver_reg.append(p)
o = FinalRegistration.objects.filter(id= obj.id).update(verified = True)
course_registration.objects.bulk_create(ver_reg)
academics_module_notif(request.user, student.id.user, 'registration_approved')
Student.objects.filter(id = student_id).update(curr_semester_no = sem_no)
return JsonResponse({'status': 'success', 'message': 'Successfully Accepted'})
elif request.POST.get('status_req') == "reject" :
reject_reason = request.POST.get('reason')
student_id = request.POST.get('student_id')
student_id = Student.objects.get(id = student_id)
batch = student_id.batch_id
curr_id = batch.curriculum
sem_id = Semester.objects.get(curriculum = curr_id, semester_no = student_id.curr_semester_no + 1)
with transaction.atomic():
academicadmin = get_object_or_404(User, username = "acadadmin")
FinalRegistration.objects.filter(student_id = student_id, verified = False, semester_id = sem_id).delete()
StudentRegistrationChecks.objects.filter(student_id = student_id, semester_id = sem_id).update(final_registration_flag = False)
FeePayments.objects.filter(student_id = student_id, semester_id = sem_id).delete()
academics_module_notif(academicadmin, student_id.id.user, 'Registration Declined - '+reject_reason)
return JsonResponse({'status': 'success', 'message': 'Successfully Rejected'})
def get_registration_courses(courses):
x = [[]]
for temp in courses:
flag = False
i = str(temp.course_code)
i = i[:5]
for j in x:
if j:
name = j[0]
name = str(name.course_code)
name = name[:5]
if i.upper() == name.upper():
j.append(temp)
flag = True
else :
continue
if not flag:
x.append([temp])
return x
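# Behaviour sketch for get_registration_courses (hypothetical course codes used
# purely for illustration): courses are grouped by the first five characters of
# course_code, and because the accumulator starts as [[]] the result keeps a
# leading empty group.
#   input course_code values : "CS101L", "CS101T", "ME202A"
#   returned grouping        : [[], [<CS101L>, <CS101T>], [<ME202A>]]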
def teaching_credit_register(request) :
if request.method == 'POST':
try:
roll = request.POST.get('roll')
course1 = request.POST.get('course1')
roll = str(roll)
student_id = get_object_or_404(User, username=request.POST.get('roll'))
student_id = ExtraInfo.objects.all().select_related('user','department').filter(user=student_id).first()
student_id = Student.objects.all().select_related('id','id__user','id__department').filter(id=student_id.id).first()
course1 = Curriculum.objects.select_related().get(curriculum_id = request.POST.get('course1'))
course2 = Curriculum.objects.select_related().get(curriculum_id = request.POST.get('course2'))
course3 = Curriculum.objects.select_related().get(curriculum_id = request.POST.get('course3'))
course4 = Curriculum.objects.select_related().get(curriculum_id = request.POST.get('course4'))
p = TeachingCreditRegistration(
student_id = student_id,
curr_1 = course1,
curr_2 = course2,
curr_3 = course3,
curr_4 = course4
)
p.save()
messages.info(request, ' Successful')
return HttpResponseRedirect('/academic-procedures/main')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
else:
return HttpResponseRedirect('/academic-procedures/main')
def course_marks_data(request):
try:
curriculum_id = request.POST.get('curriculum_id')
course = Curriculum.objects.select_related().get(curriculum_id = curriculum_id)
student_list = Register.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(curr_id = course)
mrks = []
for obj in student_list:
o = SemesterMarks.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(student_id = obj.student_id).filter(curr_id = course).first()
if o :
continue
else :
p = SemesterMarks(
student_id = obj.student_id,
q1 = 0,
mid_term = 0,
q2 = 0,
end_term = 0,
other = 0,
grade = None,
curr_id = course
)
mrks.append(p)
SemesterMarks.objects.bulk_create(mrks)
enrolled_student_list = SemesterMarks.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(curr_id = course)
grade_submission_date_eligibility = False
try :
d = Calendar.objects.get(description = "grade submission date")
if demo_date.date() >= d.from_date and demo_date.date() <= d.to_date :
grade_submission_date_eligibility = True
except Exception as e:
grade_submission_date_eligibility = False
data = render_to_string('academic_procedures/course_marks_data.html',
{'enrolled_student_list' : enrolled_student_list,
'course' : course,
'grade_submission_date_eligibility' : grade_submission_date_eligibility}, request)
obj = json.dumps({'data' : data})
return HttpResponse(obj, content_type = 'application/json')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
def submit_marks(request):
try:
user = request.POST.getlist('user')
q1 = request.POST.getlist('q1_marks')
mid = request.POST.getlist('mid_marks')
q2 = request.POST.getlist('q2_marks')
end = request.POST.getlist('end_marks')
other = request.POST.getlist('other_marks')
try:
grade = request.POST.getlist('grade')
except Exception as e:
grade = None
messages.info(request, ' Successful')
values_length = len(request.POST.getlist('user'))
curr_id = Curriculum.objects.select_related().get(curriculum_id = request.POST.get('curriculum_id'))
for x in range(values_length):
student_id = get_object_or_404(User, username = user[x])
student_id = ExtraInfo.objects.select_related('user','department').get(id = student_id)
student_id = Student.objects.select_related('id','id__user','id__department').get(id = student_id)
if grade:
g = grade[x]
else :
g = None
st_existing = SemesterMarks.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(student_id = student_id).filter(curr_id = curr_id).first()
if st_existing :
st_existing.q1 = q1[x]
st_existing.mid_term = mid[x]
st_existing.q2 = q2[x]
st_existing.end_term = end[x]
st_existing.other = other[x]
st_existing.grade = g
st_existing.save()
else :
p = SemesterMarks(
student_id = student_id,
q1 = q1[x],
mid_term = mid[x],
q2 = q2[x],
end_term = end[x],
other = other[x],
grade = g,
curr_id = curr_id
)
p.save()
if request.POST.get('final_submit') == "True":
try:
o_sub = MarkSubmissionCheck.objects.select_related().get(curr_id = curr_id)
except Exception as e:
o_sub = None
if o_sub:
o_sub.submitted = True
o_sub.save()
else:
o_sub_create = MarkSubmissionCheck(
curr_id = curr_id,
verified = False,
submitted =True,
announced = False,)
o_sub_create.save()
if request.POST.get('final_submit') == "False":
try:
sub_obj = MarkSubmissionCheck.objects.select_related().get(curr_id = curr_id)
except Exception as e:
sub_obj = None
if sub_obj:
continue
else :
sub_obj_create = MarkSubmissionCheck(
curr_id = curr_id,
verified = False,
submitted =False,
announced = False)
sub_obj_create.save()
return HttpResponseRedirect('/academic-procedures/main')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
def verify_course_marks_data(request):
try:
curriculum_id = request.POST.get('curriculum_id')
course = Curriculum.objects.select_related().get(curriculum_id = curriculum_id)
enrolled_student_list = SemesterMarks.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(curr_id = course)
grade_verification_date_eligibility = False
try :
d = Calendar.objects.get(description = "grade verification date")
if demo_date.date() >= d.from_date and demo_date.date() <= d.to_date :
grade_verification_date_eligibility = True
except Exception as e:
grade_verification_date_eligibility = False
data = render_to_string('academic_procedures/verify_course_marks_data.html',
{'enrolled_student_list' : enrolled_student_list,
'course' : course,
'grade_verification_date_eligibility' : grade_verification_date_eligibility}, request)
obj = json.dumps({'data' : data})
return HttpResponse(obj, content_type = 'application/json')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
########################################
# Module-level state shared between verify_marks() and generate_grade_pdf()
########################################
verified_marks_students = [[]]
verified_marks_students_curr = None
def verify_marks(request):
try:
global verified_marks_students
global verified_marks_students_curr
verified_marks_students = [[]]
verified_marks_students_curr = None
user = request.POST.getlist('user')
curr_id = Curriculum.objects.select_related().get(curriculum_id = request.POST.get('curriculum_id'))
grade = request.POST.getlist('grade')
values_length = len(request.POST.getlist('user'))
ver_gr = []
for x in range(values_length):
student_id = get_object_or_404(User, username = user[x])
student_id = ExtraInfo.objects.select_related('user','department').get(id = student_id)
student_id = Student.objects.select_related('id','id__user','id__department').get(id = student_id)
if grade:
g = grade[x]
else :
g = None
st_existing = SemesterMarks.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(student_id = student_id).filter(curr_id = curr_id).first()
st_existing.grade = g
ver_gr.append(st_existing)
verified_marks_students.append([student_id,g])
SemesterMarks.objects.bulk_update(ver_gr,['grade'])
verified_marks_students_curr = curr_id
obj = MarkSubmissionCheck.objects.select_related().get(curr_id = curr_id)
obj.verified = True
obj.save()
return HttpResponseRedirect('/aims/')
except Exception as e:
return HttpResponseRedirect('/aims/')
def render_to_pdf(template_src, context_dict):
template = get_template(template_src)
html = template.render(context_dict)
result = BytesIO()
pdf = pisa.pisaDocument(BytesIO(html.encode("ISO-8859-1")), result)
if not pdf.err:
return HttpResponse(result.getvalue(), content_type='application/pdf')
return None
def generate_grade_pdf(request):
instructor = Curriculum_Instructor.objects.all().select_related('curriculum_id','instructor_id','curriculum_id__course_id','instructor_id__department','instructor_id__user').filter(curriculum_id = verified_marks_students_curr).first()
context = {'verified_marks_students' : verified_marks_students,
'verified_marks_students_curr' : verified_marks_students_curr,
'instructor' : instructor}
pdf = render_to_pdf('academic_procedures/generate_pdf.html',context)
if pdf:
response = HttpResponse(pdf, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="%s.pdf"' %(verified_marks_students_curr.course_code)
return response
return HttpResponse("PDF could not be generated")
def generate_result_pdf(request):
batch = request.POST.get('batch')
branch = request.POST.get('branch')
programme = request.POST.get('programme')
student_list = []
branch_list = []
result_list = [[]]
curriculum_list = []
if programme == "":
return HttpResponse("please insert programme")
student_obj = Student.objects.all().select_related('id','id__user','id__department').filter(programme = programme)
if batch == "":
return HttpResponse("please insert batch")
else:
student_obj = student_obj.filter(batch = int(batch))
if branch == "" :
return HttpResponse("please insert branch")
else :
dep_objects = DepartmentInfo.objects.get(name = str(branch))
branch_objects = ExtraInfo.objects.all().select_related('user','department').filter(department = dep_objects)
for i in branch_objects:
branch_list.append(i)
for i in student_obj:
if i.id in branch_list:
student_list.append(i)
else:
continue
curriculum_obj = Curriculum.objects.all().select_related().filter(batch = int(batch)).filter(branch = str(branch)).filter(programme = programme)
curriculum_obj_common = Curriculum.objects.all().select_related().filter(batch = int(batch)).filter(branch = 'Common').filter(programme = programme)
for i in curriculum_obj:
curriculum_list.append(i)
for i in curriculum_obj_common:
curriculum_list.append(i)
for i in student_list :
x = []
x.append(i.id.user.username)
x.append(i.id.user.first_name+" "+i.id.user.last_name)
for j in curriculum_list :
grade_obj = SemesterMarks.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(curr_id = j).filter(student_id = i).first()
if grade_obj :
x.append(grade_obj.grade)
else :
x.append("-")
spi = get_spi(curriculum_list ,x)
x.append(spi)
result_list.append(x)
context = {'batch' : batch,
'branch' : branch,
'programme' : programme,
'course_list' : curriculum_list,
'result_list' : result_list}
pdf = render_to_pdf('academic_procedures/generate_result_pdf.html',context)
if pdf:
response = HttpResponse(pdf, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="%s.pdf"' %(programme + batch + branch)
return response
return HttpResponse("PDF could not be generated")
def generate_grade_sheet_pdf(request):
batch = request.POST.get('batch')
branch = request.POST.get('branch')
programme = request.POST.get('programme')
student_list = []
branch_list = []
result_list = [[]]
curriculum_list = []
if programme == "":
return HttpResponse("please insert programme")
student_obj = Student.objects.all().select_related('id','id__user','id__department').filter(programme = programme)
if batch == "":
return HttpResponse("please insert batch")
else:
student_obj = student_obj.filter(batch = int(batch))
if branch == "" :
return HttpResponse("please insert branch")
else :
dep_objects = DepartmentInfo.objects.get(name = str(branch))
branch_objects = ExtraInfo.objects.all().select_related('user','department').filter(department = dep_objects)
for i in branch_objects:
branch_list.append(i)
for i in student_obj:
if i.id in branch_list:
student_list.append(i)
else:
continue
curriculum_obj = Curriculum.objects.all().select_related().filter(batch = int(batch)).filter(branch = str(branch)).filter(programme = programme)
curriculum_obj_common = Curriculum.objects.all().select_related().filter(batch = int(batch)).filter(branch = 'Common').filter(programme = programme)
for i in curriculum_obj:
curriculum_list.append(i)
for i in curriculum_obj_common:
curriculum_list.append(i)
for i in student_list :
x = []
x.append(i.id.user.username)
x.append(i.id.user.first_name+" "+i.id.user.last_name)
for j in curriculum_list :
grade_obj = SemesterMarks.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(curr_id = j).filter(student_id = i).first()
if grade_obj :
x.append(grade_obj.grade)
else :
x.append("-")
spi = get_spi(curriculum_list ,x)
x.append(spi)
result_list.append(x)
context = {'batch' : batch,
'branch' : branch,
'programme' : programme,
'course_list' : curriculum_list,
'result_list' : result_list}
pdf = render_to_pdf('academic_procedures/generate_sheet.html',context)
if pdf:
response = HttpResponse(pdf, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="%s.pdf"' %(programme + batch + branch)
return response
return HttpResponse("PDF could not be generated")
def get_spi(course_list,grade_list):
spi = 0.0
credits = 0
total = 0
earned = 0
y = []
for i in range(2,len(grade_list)) :
x = {
'grade' : grade_list[i],
'credits' : None
}
y.append(x)
for i in range(0,len(course_list)):
y[i]['credits'] = course_list[i].credits
    # Grade points used to weight each course's credits in the SPI average.
    grade_points = {'O': 10, 'A+': 10, 'A': 9, 'B+': 8, 'B': 7,
                    'C+': 6, 'C': 5, 'D+': 4, 'D': 3, 'F': 2}
    for obj in y:
        g = obj['grade']
        if g in grade_points:
            total = total + grade_points[g] * obj['credits']
            credits = credits + obj['credits']
            earned = earned + obj['credits']
        elif g == 'S':
            # Satisfactory: credits are earned but excluded from the average
            earned = earned + obj['credits']
        # 'X', '-' and any unrecognised grade contribute nothing
if credits == 0:
return 0.0
spi = total/credits
return spi
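# Worked example for get_spi (hypothetical numbers, only to illustrate the
# arithmetic): with two courses of 4 and 3 credits graded 'A' (9 points) and
# 'B+' (8 points), total = 9*4 + 8*3 = 60 and credits = 7, so the function
# returns 60 / 7, about 8.57. Courses graded 'S', 'X' or '-' are left out of
# the average.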
def manual_grade_submission(request):
if request.method == 'POST' and request.FILES:
manual_grade_xsl=request.FILES['manual_grade_xsl']
excel = xlrd.open_workbook(file_contents=manual_grade_xsl.read())
sheet=excel.sheet_by_index(0)
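        # Expected spreadsheet layout (inferred from the cell indices used
        # below, not from a published template): column B of rows 1-8 holds the
        # course code, course name, instructor, batch, semester, branch,
        # programme and credits; per-student rows start at row 12 with the roll
        # number in column A, marks in columns C-G and the grade in column I.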
course_code = str(sheet.cell(0,1).value)
course_name = str(sheet.cell(1,1).value)
instructor = str(sheet.cell(2,1).value)
batch = int(sheet.cell(3,1).value)
sem = int(sheet.cell(4,1).value)
branch = str(sheet.cell(5,1).value)
programme = str(sheet.cell(6,1).value)
credits = int(sheet.cell(7,1).value)
curriculum_obj = Curriculum.objects.all().select_related().filter(course_code = course_code).filter(batch = batch).filter(programme = programme).first()
if not curriculum_obj:
course_obj = Course.objects.all().filter(course_name = course_name).first()
if not course_obj :
course_obj_create = Course(
course_name = course_name,
course_details = instructor)
course_obj_create.save()
course_obj = Course.objects.all().filter(course_name = course_name).first()
curriculum_obj_create = Curriculum(
course_code = course_code,
course_id = course_obj,
credits = credits,
course_type = 'Professional Core',
programme = programme,
branch = branch,
batch = batch,
sem = sem,
floated = True)
curriculum_obj_create.save()
curriculum_obj = Curriculum.objects.all().select_related().filter(course_code = course_code).filter(batch = batch).filter(programme = programme).first()
marks_check_obj = MarkSubmissionCheck.objects.select_related().all().filter(curr_id = curriculum_obj).first()
if marks_check_obj :
marks_check_obj.submitted = True
marks_check_obj.verified = True
marks_check_obj.save()
elif not marks_check_obj :
marks_check_obj_create = MarkSubmissionCheck(
curr_id = curriculum_obj,
submitted = True,
verified = False,
announced = False)
marks_check_obj_create.save()
for i in range(11,sheet.nrows):
roll = str(int(sheet.cell(i,0).value))
q1 = float(sheet.cell(i,2).value)
mid = float(sheet.cell(i,3).value)
q2 = float(sheet.cell(i,4).value)
end = float(sheet.cell(i,5).value)
others = float(sheet.cell(i,6).value)
grade = str(sheet.cell(i,8).value).strip()
user = get_object_or_404(User, username = roll)
extrainfo = ExtraInfo.objects.select_related('user','department').get(user = user)
dep_objects = DepartmentInfo.objects.get(name = str(branch))
extrainfo.department = dep_objects
extrainfo.save()
extrainfo = ExtraInfo.objects.select_related('user','department').get(user = user)
student_obj = Student.objects.select_related('id','id__user','id__department').get(id = extrainfo)
student_obj.programme = programme
student_obj.batch = batch
student_obj.category = 'GEN'
student_obj.save()
student_obj = Student.objects.select_related('id','id__user','id__department').get(id = extrainfo)
register_obj = Register.objects.all().filter(curr_id = curriculum_obj, student_id = student_obj).first()
if not register_obj:
register_obj_create = Register(
curr_id = curriculum_obj,
year = batch,
student_id = student_obj,
semester = sem)
register_obj_create.save()
register_obj = Register.objects.all().filter(curr_id = curriculum_obj, student_id = student_obj).first()
st_existing = SemesterMarks.objects.all().select_related('curr_id','student_id','curr_id__course_id','student_id__id','student_id__id__user','student_id__id__department').filter(student_id = student_obj).filter(curr_id = curriculum_obj).first()
if st_existing :
st_existing.grade = str(sheet.cell(i,8).value)
st_existing.save()
else :
p = SemesterMarks(
student_id = student_obj,
q1 = q1,
mid_term = mid,
q2 = q2,
end_term = end,
other = others,
grade = grade,
curr_id = curriculum_obj
)
p.save()
return HttpResponseRedirect('/academic-procedures/')
def test(request):
br_up = []
st_list = Student.objects.select_related('id','id__user','id__department').all()
    # Derive the admission batch year from the roll number: B.Des/B.Tech rolls
    # begin with the 4-digit year, M.Des/M.Tech/Ph.D rolls with a 2-digit year.
    for i in st_list:
roll = i.id.user.username
roll = str(roll)
if i.programme.upper() == "B.DES" or i.programme.upper() == "B.TECH":
batch = int(roll[:4])
i.batch = batch
elif i.programme.upper() == "M.DES" or i.programme.upper() == "M.TECH" or i.programme.upper() == "PH.D":
batch = int('20'+roll[:2])
i.batch = batch
br_up.append(i)
Student.objects.bulk_update(br_up,['batch'])
return render(request,'../templates/academic_procedures/test.html',{})
def test_ret(request):
try:
data = render_to_string('academic_procedures/test_render.html',
{}, request)
obj = json.dumps({'d' : data})
return HttpResponse(obj, content_type = 'application/json')
except Exception as e:
return HttpResponseRedirect('/academic-procedures/main')
def Bonafide_form(request):
template = get_template('academic_procedures/bonafide_pdf.html')
current_user = get_object_or_404(User, username=request.user.username)
user_details = ExtraInfo.objects.select_related('user','department').get(id = request.user)
des = HoldsDesignation.objects.all().select_related().filter(user = request.user).first()
name = ExtraInfo.objects.all().select_related('user','department').filter(id=request.user.username)[0].user
if str(des.designation) == "student":
obj = Student.objects.select_related('id','id__user','id__department').get(id = user_details.id)
context = {
'student_id' : request.user.username,
'degree' : obj.programme.upper(),
'name' : name.first_name +" "+ name.last_name,
'branch' : get_user_branch(user_details),
'purpose' : request.POST['purpose']
}
pdf = render_to_pdf('academic_procedures/bonafide_pdf.html',context)
if pdf:
response = HttpResponse(pdf, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename=Bonafide.pdf'
return response
return HttpResponse("PDF could not be generated")
# def bonafide(request):
# # if this is a POST request we need to process the form data
# if request.method == 'POST':
# # create a form instance and populate it with data from the request:
# form = BonafideForm(request.POST)
# # check whether it's valid:
# if form.is_valid():
# # process the data in form.cleaned_data as required
# # ...
# # redirect to a new URL:
# print("vaild")
# # if a GET (or any other method) we'll create a blank form
# else:
# form = BonafideForm()
# return render(request, 'bonafide.html', {'form': form})
@login_required
def ACF(request):
stu = Student.objects.get(id=request.user.username)
month = request.POST.get('month')
year= request.POST.get('year')
account = request.POST.get('bank_account')
thesis = request.POST.get('thesis_supervisor')
ta = request.POST.get('ta_supervisor')
appli = request.POST.get('applicability')
FACUL1 = None
FACUL2 = None
message = ""
faculties = ExtraInfo.objects.all().filter(user_type = "faculty")
res = "error"
    # Match the entered TA supervisor (j == 0) and thesis supervisor (j == 1)
    # names against the list of faculty members.
    for j in range(2):
for i in faculties:
checkName = i.user.first_name + " " + i.user.last_name
if j==0 and ta == checkName:
res = "success"
FACUL1 = i
elif j==1 and thesis == checkName:
res = "success"
FACUL2 = i
if (res == "error"):
message = message + "The entered faculty incharge does not exist"
content = {
'status' : res,
'message' : message
}
content = json.dumps(content)
return HttpResponse(content)
faculty_inc1 = get_object_or_404(Faculty, id = FACUL1)
faculty_inc2 = get_object_or_404(Faculty, id = FACUL2)
acf = AssistantshipClaim(student=stu,month=month, year=year, bank_account=account, thesis_supervisor=faculty_inc2, ta_supervisor=faculty_inc1, applicability= appli)
acf.save()
    message = message + "Form submitted successfully"
content = {
'status' : res,
'message' : message
}
sender1 = ExtraInfo.objects.get(id = str(FACUL1)[:4]).user
sender2 = ExtraInfo.objects.get(id = str(FACUL2)[:4]).user
content = json.dumps(content)
AssistantshipClaim_faculty_notify(request.user,sender1)
AssistantshipClaim_faculty_notify(request.user,sender2)
return HttpResponse(content)
def update_assistantship(request):
if request.method == 'POST':
r = request.POST.get('remark')
i = request.POST.get('obj_id')
user = ExtraInfo.objects.get(user = request.user)
recipient = User.objects.get(username = "acadadmin")
assistantship_object = AssistantshipClaim.objects.get(id = i)
sender = User.objects.get(username = assistantship_object.student)
if user == assistantship_object.ta_supervisor.id and r == "Satisfactory":
assistantship_object.ta_supervisor_remark=True
elif user == assistantship_object.ta_supervisor.id and r == "Unsatisfactory":
assistantship_object.ta_supervisor_remark=False
if user == assistantship_object.thesis_supervisor.id and r == "Satisfactory":
assistantship_object.thesis_supervisor_remark=True
        elif user == assistantship_object.thesis_supervisor.id and r == "Unsatisfactory":
assistantship_object.thesis_supervisor_remark=False
assistantship_object.save()
if assistantship_object.thesis_supervisor_remark == True and assistantship_object.ta_supervisor_remark == True :
AssistantshipClaim_acad_notify(sender,recipient)
return HttpResponseRedirect('/academic-procedures/main/')
def update_hod_assistantship(request):
if request.method == 'POST':
d = request.POST.get('dict')
dic = json.loads(d)
assisobj = AssistantshipClaim.objects.filter(ta_supervisor_remark = True).filter(thesis_supervisor_remark = True).filter(hod_approval = False)
for obj in assisobj:
if str(obj.student) in dic.keys():
obj.hod_approval =True
obj.save()
return HttpResponse('success')
def update_acad_assis(request):
if request.method == 'POST':
d = request.POST.get('dict')
dic = json.loads(d)
aobj= AssistantshipClaim.objects.all()
for obj in aobj:
if obj.acad_approval == False and str(obj.student) in dic.keys():
obj.stipend = dic[str(obj.student)]
obj.acad_approval=True
obj.save()
return HttpResponse('success')
def update_account_assistantship(request):
if request.method == 'POST':
di = request.POST.get('dict')
dic = json.loads(di)
acobj= AssistantshipClaim.objects.all()
for obj in acobj:
if obj.account_approval == False and str(obj.student) in dic.keys():
obj.account_approval = True
obj.save()
recipient = User.objects.get(username = obj.student)
AssistantshipClaim_notify(request.user,recipient,obj.month,obj.year)
return HttpResponse('success')
def assis_stat(request):
if request.method == 'POST':
flag= request.POST.get('flag')
assis_status = Assistantship_status.objects.all()
for obj in assis_status:
if flag == "studenttrue" :
obj.student_status= True
elif flag == "studentfalse":
obj.student_status = False
elif flag == "hodtrue" :
obj.hod_status= True
elif flag == "hodfalse":
obj.hod_status = False
elif flag == "accounttrue" :
obj.account_status= True
elif flag == "accountfalse":
obj.account_status = False
obj.save()
return HttpResponse('success')
@login_required
def MTSGF(request):
if request.method == 'POST':
stu= Student.objects.get(id=request.user.username)
theme = request.POST.get('theme_of_work')
date = request.POST.get('date')
place = request.POST.get('place')
time = request.POST.get('time')
work = request.POST.get('workdone')
contribution = request.POST.get('specificcontri')
future = request.POST.get('futureplan')
report = request.POST.get('briefreport')
publication_submitted = request.POST.get('publicationsubmitted')
publication_accepted = request.POST.get('publicationaccepted')
paper_presented = request.POST.get('paperpresented')
paper_under_review = request.POST.get('paperunderreview')
form=MTechGraduateSeminarReport(student=stu, theme_of_work=theme, date=date, place=place, time=time, work_done_till_previous_sem=work,
specific_contri_in_cur_sem=contribution, future_plan=future, brief_report=report, publication_submitted=publication_submitted,
publication_accepted=publication_accepted, paper_presented=paper_presented, papers_under_review=paper_under_review)
form.save()
message= "Form submitted succesfully"
res="success"
content = {
'status' : res,
'message' : message
}
content = json.dumps(content)
return HttpResponse(content)
@login_required
def PHDPE(request):
if request.method == 'POST':
stu= Student.objects.get(id=request.user.username)
theme = request.POST.get('theme_of_work')
dateandtime = request.POST.get('date')
place = request.POST.get('place')
work = request.POST.get('workdone')
contribution = request.POST.get('specificcontri')
future = request.POST.get('futureplan')
uploadfile = request.POST.get('Attachments')
paper_submitted = request.POST.get('papersubmitted')
paper_published = request.POST.get('paperaccepted')
paper_presented = request.POST.get('paperpresented')
form=PhDProgressExamination(student=stu, theme=theme, seminar_date_time=dateandtime, place=place, work_done=work,
specific_contri_curr_semester=contribution, future_plan=future,details=uploadfile,
papers_published=paper_published, presented_papers=paper_presented,papers_submitted=paper_submitted)
form.save()
message= "Form submitted succesfully"
res="success"
content = {
'status' : res,
'message' : message
}
content = json.dumps(content)
return HttpResponse(content)
def update_mtechsg(request):
if request.method == 'POST':
i = request.POST.get('obj_id')
ql=request.POST.get('quality')
qn=request.POST.get('quantity')
gr=request.POST.get('grade')
pr=request.POST.get('panel_report')
sg=request.POST.get('suggestion')
mtech_object=MTechGraduateSeminarReport.objects.get(id = i)
mtech_object.quality_of_work=ql
mtech_object.quantity_of_work=qn
mtech_object.Overall_grade=gr
mtech_object.panel_report=pr
mtech_object.suggestion=sg
mtech_object.save()
return HttpResponseRedirect('/academic-procedures/main/')
def update_phdform(request):
if request.method == 'POST':
i = request.POST.get('obj_id')
ql = request.POST.get('quality')
qn = request.POST.get('quantity')
gr = request.POST.get('grade')
continuationa = request.POST.get('continuationa')
enhancementa = request.POST.get('enhancementa')
completionperiod = request.POST.get('completionperiod')
pr = request.POST.get('pr')
annualp = request.POST.get('annualp')
sugg = request.POST.get('sugg')
phd_object = PhDProgressExamination.objects.get(id = i)
phd_object.quality_of_work=ql
phd_object.quantity_of_work=qn
phd_object.Overall_grade=gr
phd_object.continuation_enhancement_assistantship=continuationa
phd_object.enhancement_assistantship=enhancementa
phd_object.completion_period=completionperiod
phd_object.panel_report=pr
phd_object.annual_progress_seminar=annualp
phd_object.commments=sugg
phd_object.save()
content="success"
content = json.dumps(content)
return HttpResponse(content)
def update_dues(request):
if request.method == "POST":
i = request.POST.get('obj_id')
md =int(request.POST.get('md'))
hd = int(request.POST.get('hd'))
ld = int(request.POST.get('ld'))
pd = int(request.POST.get('pd'))
ad = int(request.POST.get('ad'))
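        # md/hd/ld/pd/ad are signed deltas for the mess, hostel, library,
        # placement-cell and academic dues; negative values reduce a due and
        # are rejected below if they would push it past zero.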
dues_object = Dues.objects.get(id = i)
message = ""
if md < 0 and -1*md > dues_object.mess_due :
message = message + "Subtracting more value than existing mess due<br>"
if hd < 0 and -1*hd > dues_object.hostel_due :
message = message + "Subtracting more value than existing hostel due<br>"
if ld < 0 and -1*ld > dues_object.library_due :
message = message + "Subtracting more value than existing library due<br>"
if pd < 0 and -1*pd > dues_object.placement_cell_due :
message = message + "Subtracting more value than existing placement cell due<br>"
if ad < 0 and -1*ad > dues_object.academic_due :
message = message + "Subtracting more value than existing academic due<br>"
if (not message):
message = "success"
if message != "success":
content = json.dumps(message)
return HttpResponse(content)
md += dues_object.mess_due
hd += dues_object.hostel_due
ld += dues_object.library_due
pd += dues_object.placement_cell_due
ad += dues_object.academic_due
dues_object.mess_due = md
dues_object.hostel_due = hd
dues_object.library_due = ld
dues_object.placement_cell_due = pd
dues_object.academic_due = ad
dues_object.save()
content = json.dumps(message)
return HttpResponse(content)
def mdue(request):
if request.method == 'POST':
rollno = request.POST.get('rollno')
year = request.POST.get('year')
month = request.POST.get('month')
amount = int(request.POST.get('amount'))
desc = request.POST.get('desc')
amount1 = amount
if desc == "due":
amount1 = -1*amount
Dues_mess = amount
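        # 'due' entries lower the running remaining_amount while payments raise
        # it; a negative running total becomes the outstanding mess_due below.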
student = Student.objects.get(id = rollno)
messdue_list=MessDue.objects.all().filter(student = student)
duesobj = Dues.objects.get(student_id = student)
if(messdue_list):
new_remaining = messdue_list[len(messdue_list)-1].remaining_amount + amount1
Dues_mess = new_remaining
messdueobj = MessDue(student = student, month = month, year = year,description = desc, amount = amount, remaining_amount = new_remaining)
else:
messdueobj=MessDue(student = student, month = month, year = year,description = desc, amount = amount, remaining_amount = amount1)
messdueobj.save()
if Dues_mess >= 0 :
duesobj.mess_due = 0
else :
duesobj.mess_due = -1*Dues_mess
duesobj.save()
content = json.dumps("success")
return HttpResponse(content)
|
py | 1a31d66708a083e387501e9401a47fe362871938 | from typing import Literal, List, Tuple, Union, Optional, Dict
import numpy as np
import scipy.linalg as la
from scipy import stats
Indices = Union[str, List[str]]
def std_basis_vector(size: int, index: int,
shape: Literal["row", "col", "flat"] = "col"):
"""Create a vector of {size} values where all values are zero except at
position {index} which is one. The shape can be specified as 'row', 'col',
or 'flat' to generate vectors of shape (1, {size}), ({size}, 1), or
({size}, ) respectively. The default shape is 'col'."""
e = np.zeros(size)
e[index] = 1
if shape.lower() == "col":
e = np.reshape(e, (size, 1))
elif shape.lower() == "row":
e = np.reshape(e, (1, size))
elif shape.lower() == "flat":
pass
else:
raise ValueError(f"Cannot understand vector shape: '{shape}', use "
f"'row', 'col', or 'flat'")
return(e)
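# Quick illustration of std_basis_vector (example values, easy to verify by
# running the calls):
#   >>> std_basis_vector(3, 1, "row")
#   array([[0., 1., 0.]])
#   >>> std_basis_vector(3, 1, "flat")
#   array([0., 1., 0.])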
class GenericDiagnosisMethod:
def __init__(self) -> None:
self.sample_size = 0
def contribution(self, sample: np.ndarray, variable_index: int) -> float:
"""Return the error contribution of a variable in a sample"""
raise NotImplementedError
def expectation(self, variable_index: int) -> float:
"""Return the expected error contribution of a variable"""
raise NotImplementedError
def limits(self, variable_index: int,
alpha: float) -> Tuple[float, float]:
"""Return the lower and upper limits of a variable at a given alpha"""
e_contrib = self.expectation(variable_index)
lower = stats.chi2.ppf(alpha, 1) * e_contrib
upper = stats.chi2.ppf(1 - alpha, 1) * e_contrib
return(lower, upper)
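    # Rough numerical check of limits() (illustrative values only): with
    # alpha = 0.05, chi2.ppf(0.05, 1) is about 0.0039 and chi2.ppf(0.95, 1)
    # about 3.84, so a variable with expected contribution 2.0 gets limits of
    # roughly (0.008, 7.7).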
def rel_contribution(self, sample: np.ndarray,
variable_index: int) -> float:
"""Return the relative error contribution of a variable in a sample"""
c = self.contribution(sample, variable_index)
E_c = self.expectation(variable_index)
return(c / E_c)
def all_contributions(self, sample: np.ndarray) -> np.ndarray:
"""Return the error contributions for all variables in a sample"""
contribs = np.zeros(self.sample_size)
for i in range(self.sample_size):
contribs[i] = self.contribution(sample, i)
return(contribs)
def all_rel_contributions(self, sample: np.ndarray) -> np.ndarray:
"""Return the relative error contributions for all variables in a
sample"""
rel_contribs = np.zeros(self.sample_size)
for i in range(self.sample_size):
rel_contribs[i] = self.rel_contribution(sample, i)
return(rel_contribs)
def all_expectations(self) -> np.ndarray:
"""Return the expected error contribution for all variables"""
e_contribs = np.zeros(self.sample_size)
for i in range(self.sample_size):
e_contribs[i] = self.expectation(i)
return(e_contribs)
def all_limits(self, alpha: float) -> np.ndarray:
"""Return the lower and upper limits for all variables at a given
alpha"""
lower_upper_limits = np.zeros((self.sample_size, 2))
for i in range(self.sample_size):
lower_upper_limits[i] = self.limits(i, alpha)
return(lower_upper_limits)
class CDC(GenericDiagnosisMethod):
def __init__(self, M: np.ndarray, S: Optional[np.ndarray]) -> None:
"""Complete Decomposition Contributions Diagnosis Method"""
super().__init__()
self.M = M
self.S = S
self.sqrt_M = np.real(la.fractional_matrix_power(M, 0.5))
self.sample_size = M.shape[0]
def contribution(self, sample: np.ndarray, variable_index: int) -> float:
e_i = std_basis_vector(self.sample_size, variable_index, 'col')
contrib = (e_i.T @ self.sqrt_M @ sample) ** 2
return(contrib)
def expectation(self, variable_index: int) -> float:
if self.S is None:
raise RuntimeError("S matrix must be set to use this function")
e_i = std_basis_vector(self.sample_size, variable_index, 'col')
e_contrib = e_i.T @ self.S @ self.M @ e_i
return(e_contrib)
class PDC(GenericDiagnosisMethod):
def __init__(self, M: np.ndarray, S: Optional[np.ndarray]) -> None:
"""Partial Decomposition Contributions Diagnosis Method"""
super().__init__()
self.M = M
self.S = S
self.sample_size = M.shape[0]
def contribution(self, sample: np.ndarray, variable_index: int) -> float:
e_i = std_basis_vector(sample.size, variable_index, 'col')
contrib = sample.T @ self.M @ e_i @ e_i.T @ sample
return(contrib)
def expectation(self, variable_index: int) -> float:
if self.S is None:
raise RuntimeError("S matrix must be set to use this function")
e_i = std_basis_vector(self.sample_size, variable_index, 'col')
e_contrib = e_i.T @ self.S @ self.M @ e_i
return(e_contrib)
def limits(self, variable_index: int,
alpha: float) -> Tuple[float, float]:
e_contrib = self.expectation(variable_index)
e_i = std_basis_vector(self.sample_size, variable_index, 'col')
stdv_contrib = ((e_contrib) ** 2
+ e_i.T @ self.S @ self.M
@ self.M @ e_i @ e_i.T @ self.S @ e_i) ** 0.5
# Assumes n>=30 to use normal distribution rather than t distribution
lower, upper = stats.norm.interval(alpha, e_contrib, stdv_contrib)
return(lower, upper)
class DC(GenericDiagnosisMethod):
def __init__(self, M: np.ndarray, S: Optional[np.ndarray]) -> None:
"""Diagonal Contributions Diagnosis Method"""
super().__init__()
self.M = M
self.S = S
self.sample_size = M.shape[0]
def contribution(self, sample: np.ndarray, variable_index: int) -> float:
e_i = std_basis_vector(self.sample_size, variable_index, 'col')
contrib = sample.T @ e_i @ e_i.T @ self.M @ e_i @ e_i.T @ sample
return(contrib)
def expectation(self, variable_index: int) -> float:
if self.S is None:
raise RuntimeError("S matrix must be set to use this function")
e_i = std_basis_vector(self.M.shape[1], variable_index, 'col')
e_contrib = e_i.T @ self.S @ e_i @ e_i.T @ self.M @ e_i
return(e_contrib)
class RBC(GenericDiagnosisMethod):
def __init__(self, M: np.ndarray, S: Optional[np.ndarray]) -> None:
"""Reconstruction Based Contributions Diagnosis Method"""
super().__init__()
self.M = M
self.S = S
self.sample_size = M.shape[0]
def contribution(self, sample: np.ndarray, variable_index: int) -> float:
e_i = std_basis_vector(self.sample_size, variable_index, 'col')
contrib = (e_i.T @ self.M @ sample) ** 2 / (e_i.T @ self.M @ e_i)
return(contrib)
def expectation(self, variable_index: int) -> float:
if self.S is None:
raise RuntimeError("S matrix must be set to use this function")
e_i = std_basis_vector(self.sample_size, variable_index, 'col')
e_contrib = (e_i.T @ self.M @ self.S @ self.M @ e_i
/ (e_i.T @ self.M @ e_i))
return(e_contrib)
class GenericFaultDiagnosisModel:
def __init__(self, M: np.ndarray, S: Optional[np.ndarray]) -> None:
"""Generic Fault Diagnosis Model for any test statistic"""
if S is not None:
if not (M.shape[0] == M.shape[1] == S.shape[0] == S.shape[1]):
raise ValueError("M and S need to be [n x n] matrices")
else:
if not (M.shape[0] == M.shape[1]):
raise ValueError("M needs to be an [n x n] matrix")
self.diagnosis_methods = {
"CDC": CDC(M, S),
"PDC": PDC(M, S),
"DC": DC(M, S),
"RBC": RBC(M, S)
}
self.sample_size = M.shape[0]
indices = list(self.diagnosis_methods.keys())
rel_indices = [f"r{i}" for i in indices]
self.valid_indices = indices + rel_indices
def validate_indices(self, indices: Indices) -> List[str]:
"""Validate list of requested indices"""
if type(indices) == str:
indices = [indices]
for ind in indices:
if ind not in self.valid_indices:
raise ValueError(f"No contribution index {ind} exists")
return(indices)
def validate_sample(self, sample: np.ndarray) -> np.ndarray:
"""Validate passed sample"""
if not isinstance(sample, np.ndarray):
raise TypeError("Expected numpy array inputs for sample")
if not (self.sample_size == sample.size):
raise ValueError("M needs to be an [n x n] matrix and x needs to "
"be an [n x 1] vector")
sample = np.reshape(sample, (-1, 1)) # Makes sure it's a column vector
return(sample)
def get_contributions(self, sample: np.ndarray,
indices: Indices = ['CDC']) -> Dict[str, np.ndarray]:
"""Get the fault contributions for the sample for each index passed"""
indices = self.validate_indices(indices)
sample = self.validate_sample(sample)
index_values = dict()
for ind in indices:
if ind[0] == 'r':
fd_method = self.diagnosis_methods[ind[1:]]
index_values[ind] = fd_method.all_rel_contributions(sample)
else:
fd_method = self.diagnosis_methods[ind]
index_values[ind] = fd_method.all_contributions(sample)
return(index_values)
def get_limits(self, alpha: float = 0.05,
indices: Indices = ['CDC']) -> Dict[str, np.ndarray]:
"""Get the lower and upper control limits for any non-relative
contribution indices"""
indices = self.validate_indices(indices)
limits = dict()
for ind in indices:
if ind[0] == 'r':
raise ValueError("Control limits are not defined for relative "
"contribution indices")
else:
fd_method = self.diagnosis_methods[ind]
limits[ind] = fd_method.all_limits(alpha)
return(limits)
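# Minimal usage sketch for GenericFaultDiagnosisModel (the identity matrices
# below are stand-ins; in practice M and S come from a fitted monitoring model
# such as the PCA example in the __main__ block):
#   model = GenericFaultDiagnosisModel(np.eye(3), np.eye(3))
#   sample = np.array([0.1, 2.5, -0.3])
#   contribs = model.get_contributions(sample, ['CDC', 'rCDC', 'RBC'])
#   limits = model.get_limits(alpha=0.05, indices=['CDC', 'RBC'])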
if __name__ == "__main__":
import random
print("Module ran as script: Running example fault diagnosis with PCA")
def example_process_model(num_samples):
A = [
[-0.3441, 0.4815, 0.6637],
[-0.2313, -0.5936, 0.3545],
[-0.5060, 0.2495, 0.0739],
[-0.5552, -0.2405, -0.1123],
[-0.3371, 0.3822, -0.6115],
[-0.3877, -0.3868, -0.2045]
]
A = np.asarray(A)
num_vars = 6
# Generate inputs t
t1 = 2.0 * stats.uniform.rvs(size=num_samples)
t2 = 1.6 * stats.uniform.rvs(size=num_samples)
t3 = 1.2 * stats.uniform.rvs(size=num_samples)
t = np.asarray([t1, t2, t3])
# Generate noise
noise = [None] * num_vars
for i in range(num_vars):
noise[i] = stats.norm.rvs(size=num_samples, scale=0.2)
noise = np.asarray(noise)
# Create samples
X = A @ t + noise
return(X)
num_samples = 3000
num_faults = 2000
num_vars = 6
X = example_process_model(num_samples)
""" PCA Model """
# Shift to 0 mean
xmean = np.mean(X, 1).reshape((-1, 1))
X = X - xmean
# Scale to unit variance
xstd = np.std(X, 1).reshape((-1, 1))
X = X / xstd
assert np.allclose(np.mean(X, 1), 0)
assert np.allclose(np.std(X, 1), 1)
S = np.cov(X)
Lam, P = la.eig(S)
Lam = np.real_if_close(Lam)
order = np.argsort(-1 * Lam)
Lam = Lam[order]
P = P[:, order]
# Plot cumulative variance of eigenvectors
# cum_eig = np.cumsum(Lam) / np.sum(Lam)
# plt.plot(cum_eig)
# plt.show()
principal_vectors = 3
alpha = 0.01 # Confidence = (1 - alpha) x 100%
P_resid = P[:, principal_vectors:]
Lam_resid = Lam[principal_vectors:]
P = P[:, :principal_vectors]
Lam = Lam[:principal_vectors]
D = P @ np.diag(Lam ** -1) @ P.T
# Generate faults
faults = np.zeros((num_vars, num_faults))
for fault_sample in range(num_faults):
fault_var = random.sample(range(num_vars), 1)[0]
faults[fault_var, fault_sample] = 5.0 * stats.uniform.rvs()
X_faulty = example_process_model(num_faults) + faults
X_faulty = (X_faulty - xmean) / xstd
T_sqr = [0] * num_faults
for i in range(num_faults):
T_sqr[i] = X_faulty[:, i].T @ D @ X_faulty[:, i]
T_sqr_limit = [stats.chi2.ppf(1 - alpha, principal_vectors)] * num_faults
detected_faults = []
for i in range(num_faults):
if T_sqr[i] > T_sqr_limit[i]:
detected_faults.append(i)
fault_detect_rate = len(detected_faults) / num_faults * 100
print(f"T^2 Detected Faults: {fault_detect_rate:.2f} %")
# plt.plot(T_sqr, label="\$T^2\$")
# plt.plot(T_sqr_limit, label="Limit")
# plt.legend()
# plt.show()
all_indices = ['CDC', 'rCDC', 'PDC', 'rPDC', 'DC', 'rDC', 'RBC', 'rRBC']
FDModel = GenericFaultDiagnosisModel(D, S)
cont_rates = dict()
for ind in all_indices:
# Tracks number of correct diagnoses and false diagnoses
cont_rates[ind] = [0, 0, 0]
for i in detected_faults:
# Get index and limit for each fault sample
cont = FDModel.get_contributions(X_faulty[:, i], all_indices)
for ind in all_indices:
highest_contrib = np.argmax(cont[ind])
if highest_contrib == np.argmax(faults[:, i]):
cont_rates[ind][0] += 1
else:
cont_rates[ind][1] += 1
for ind in all_indices:
diag_rate = cont_rates[ind][0] / len(detected_faults) * 100
false_diag_rate = cont_rates[ind][1] / len(detected_faults) * 100
# missed_rate = cont_rates[ind][2] / len(detected_faults) * 100
print("--------------------------------")
print(f"{ind} correct diagnosis: {diag_rate:.2f} %")
print(f"{ind} false diagnosis: {false_diag_rate:.2f} %")
# print(f"{ind} missed diagnosis: {missed_rate:.2f} %")
|
py | 1a31d714eb662ad45dfd4571298909b5979cd098 | import sys
if (sys.version_info[0] == 2 and sys.version_info[:2] >= (2,7)) or \
(sys.version_info[0] == 3 and sys.version_info[:2] >= (3,2)):
import unittest
else:
import unittest2 as unittest
import subprocess
import shutil
import time
import os
import signal
from distutils.sysconfig import get_config_var
import py2app
import platform
DIR_NAME=os.path.dirname(os.path.abspath(__file__))
class TestBasicPlugin (unittest.TestCase):
plugin_dir = os.path.join(DIR_NAME, 'plugin_with_scripts')
py2app_args = []
# Basic setup code
#
# The code in this block needs to be moved to
# a base-class.
@classmethod
def setUpClass(cls):
try:
if os.path.exists(os.path.join(cls.plugin_dir, 'build')):
shutil.rmtree(os.path.join(cls.plugin_dir, 'build'))
if os.path.exists(os.path.join(cls.plugin_dir, 'dist')):
shutil.rmtree(os.path.join(cls.plugin_dir, 'dist'))
cmd = [ sys.executable, 'setup.py', 'py2app'] + cls.py2app_args
env=os.environ.copy()
pp = os.path.dirname(os.path.dirname(py2app.__file__))
if 'PYTHONPATH' in env:
env['PYTHONPATH'] = pp + ':' + env['PYTHONPATH']
else:
env['PYTHONPATH'] = pp
if 'LANG' not in env:
env['LANG'] = 'en_US.UTF-8'
p = subprocess.Popen(
cmd,
cwd = cls.plugin_dir,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=True,
env=env)
lines = p.communicate()[0]
if p.wait() != 0:
print (lines)
raise AssertionError("Creating basic_plugin bundle failed")
p = subprocess.Popen([
'xcode-select', '-print-path'
], stdout = subprocess.PIPE)
lines = p.communicate()[0]
xit = p.wait()
            if xit != 0:
raise AssertionError("Fetching Xcode root failed")
root = lines.strip()
if sys.version_info[0] != 2:
root = root.decode('utf-8')
if platform.mac_ver()[0] < '10.7.':
cc = [get_config_var('CC')]
env = dict(os.environ)
env['MACOSX_DEPLOYMENT_TARGET'] = get_config_var('MACOSX_DEPLOYMENT_TARGET')
else:
cc = ['xcrun', 'clang']
env = dict(os.environ)
p = subprocess.Popen(cc
+ get_config_var('LDFLAGS').split() + get_config_var('CFLAGS').split() + [
'-o', 'bundle_loader', os.path.join(DIR_NAME, 'bundle_loader.m'),
'-framework', 'Foundation'],
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=True)
lines = p.communicate()[0]
if p.wait() != 0:
print (lines)
raise AssertionError("Creating bundle_loader failed")
except:
cls.tearDownClass()
raise
@classmethod
def tearDownClass(cls):
if os.path.exists('bundle_loader'):
os.unlink('bundle_loader')
if os.path.exists(os.path.join(cls.plugin_dir, 'build')):
shutil.rmtree(os.path.join(cls.plugin_dir, 'build'))
if os.path.exists(os.path.join(cls.plugin_dir, 'dist')):
shutil.rmtree(os.path.join(cls.plugin_dir, 'dist'))
def start_app(self):
# Start the test app, return a subprocess object where
# stdin and stdout are connected to pipes.
cmd = ['./bundle_loader',
os.path.join(self.plugin_dir,
'dist/BasicPlugin.bundle'),
]
p = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
close_fds=True,
)
#stderr=subprocess.STDOUT)
return p
def wait_with_timeout(self, proc, timeout=10):
for i in range(timeout):
x = proc.poll()
if x is None:
time.sleep(1)
else:
return x
os.kill(proc.pid, signal.SIGKILL)
return proc.wait()
def run_script(self, name):
path = os.path.join(
self.plugin_dir,
'dist/BasicPlugin.bundle/Contents/MacOS/%s'%(name,))
p = subprocess.Popen([path],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
close_fds=True,
)
#stderr=subprocess.STDOUT)
return p
#
# End of setup code
#
def test_helper1(self):
p = self.run_script('helper1')
lines = p.communicate()[0]
p.wait()
self.assertEqual(lines, b'Helper 1\n')
def test_helper2(self):
p = self.run_script('helper2')
lines = p.communicate()[0]
p.wait()
self.assertEqual(lines, b'Helper 2\n')
def test_basic_start(self):
p = self.start_app()
v = p.stdout.readline()
self.assertFalse(v.startswith(b'** Cannot load bundle'))
p.stdin.write('BasicPlugin.bundle:test startup\n'.encode('latin1'))
p.stdin.flush()
v = p.stdout.readline()
self.assertEqual(v.strip(), b'+ test startup')
p.stdin.close()
p.stdout.close()
exit = self.wait_with_timeout(p)
self.assertEqual(exit, 0)
class TestBasicAliasPlugin (TestBasicPlugin):
py2app_args = [ '--alias' ]
class TestBasicSemiStandalonePlugin (TestBasicPlugin):
py2app_args = [ '--semi-standalone' ]
class TestBasicPluginUnicodePath (TestBasicPlugin):
if sys.version_info[0] == 2:
plugin_dir = os.path.join(DIR_NAME, 'basic_plugin ' + unichr(2744).encode('utf-8'))
else:
plugin_dir = os.path.join(DIR_NAME, 'basic_plugin ' + chr(2744))
@classmethod
def setUpClass(cls):
try:
if os.path.exists(cls.plugin_dir):
shutil.rmtree(cls.plugin_dir)
assert not os.path.exists(cls.plugin_dir)
shutil.copytree(TestBasicPlugin.plugin_dir, cls.plugin_dir)
super(TestBasicPluginUnicodePath, cls).setUpClass()
except:
if os.path.exists(cls.plugin_dir):
shutil.rmtree(cls.plugin_dir)
raise
@classmethod
def tearDownClass(cls):
if os.path.exists(cls.plugin_dir):
shutil.rmtree(cls.plugin_dir)
super(TestBasicPluginUnicodePath, cls).tearDownClass()
class TestBasicAliasPluginUnicodePath (TestBasicPluginUnicodePath):
py2app_args = [ '--alias' ]
class TestBasicSemiStandalonePluginUnicodePath (TestBasicPluginUnicodePath):
py2app_args = [ '--semi-standalone' ]
if __name__ == "__main__":
unittest.main()
|
py | 1a31d745e98a28c68a0006b34c3c5dd7b9d07814 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 5 15:59:51 2019
@author: 939035
Classifiers
"""
# %% 1)Importing packages
import seaborn as sns
import pandas as pd
import numpy as np
# Handling SSL error when trying to connect from the office!
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
# Handing sns not showing plot error
import matplotlib.pyplot as plt
# ML models
# kernal SVM
from sklearn.svm import SVC
# RandomForrestModel
from sklearn.ensemble import RandomForestClassifier
# MLPClassifier (neural_network)
from sklearn.neural_network import MLPClassifier
# Gradient Boosting Tree
from sklearn.ensemble import GradientBoostingClassifier
# Training the model (speed)
# Decision Tree Classifier
from sklearn.tree import DecisionTreeClassifier
# Logisitc Regression
from sklearn.linear_model import LogisticRegression
# Data Is too Large
##Import Gaussian Naive Bayes model
from sklearn.naive_bayes import GaussianNB
# other random ones
# KNN
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
class machine_learning_classifier():
''' A class that contains a classifier loop '''
def __init__(self):
# Variables to alter
self.df = sns.load_dataset('iris')
# Give the string of the y variable
self.y_var = 'species'
# Do not alter
self.df_feat = pd.DataFrame()
self.dummies = pd.DataFrame
def inital_variable_removal(self, inital_vars_to_drop):
# Dropping duplicate variable e.g qualitative variable Class and quantitative equivalent pclass
self.df = self.df.drop(inital_vars_to_drop, axis=1)
return self.df
def remove_na(self):
# Dropping nan or na rows
self.df = self.df.dropna().reset_index().drop('index', axis=1)
return self.df
def exploring_data(self, y_var_category, var1, var2):
# ## Basic Pairplot
pp = sns.pairplot(self.df, hue=self.y_var)
plt.show()
        # creating kde plot of sepal_length vs sepal_width for the setosa species of flower
kde = self.df[self.df[self.y_var] == y_var_category]
        kdeplot = sns.kdeplot(kde[var1], kde[var2], cmap='plasma', shade=True,
                              shade_lowest=False)
plt.show()
return pp, kdeplot
def creating_dummies(self):
        # 4) Creating dummies from qualitative variables (optional).
        # Note: self.qualitative_vars (a list of column names) must be set
        # before calling this method; it is not defined in __init__.
        self.dummies = pd.get_dummies(self.df[self.qualitative_vars])
### dropping qualitative variables before standardising
self.df = self.df.drop(self.qualitative_vars, axis=1)
return self.df
def standardising(self):
# Splitting the DataFrame into the dummies and then the standard varibales
from sklearn.preprocessing import StandardScaler
# standardising the data to the same scale
# why - larger scale data will have a greater effect on the results
scaler = StandardScaler()
# fitting the data minus the dependent variable
scaler.fit(self.df.drop(self.y_var, axis=1))
        # creating the variable scaled_features (returns an array)
scaled_features = scaler.transform(self.df.drop(self.y_var, axis=1))
# Creating a df of the array'd scaled features
self.df_feat = pd.DataFrame(scaled_features, columns=self.df.drop(self.y_var, axis=1).columns)
return self.df_feat
def readding_dummies(self):
# %% 6) Re adding dummies after standardising
        ## adding dummies back on after standardisation of the rest of the data
self.df_feat = pd.concat([self.df_feat, self.dummies], axis=1)
return self.df_feat
def correlations(self):
# %% 7) Find correlation among variables.
# after standardising
correlation_matrix = self.df_feat.corr()
return correlation_matrix
def dropping_highly_correlated_variables(self, list_of_vars_to_drop):
self.df_feat = self.df_feat.drop(list_of_vars_to_drop, axis=1)
return self.df_feat
def setting_y(self):
# Setting X and y
self.y = self.df[self.y_var]
return self.y
def feature_selection(self):
        # Placeholder - not implemented yet; see
        # https://scikit-learn.org/stable/modules/feature_selection.html
        import sklearn.feature_selection
def model_loop(self):
# model selection by cross validation.
from sklearn.model_selection import cross_val_score
models = [SVC(),
RandomForestClassifier(),
MLPClassifier(),
GradientBoostingClassifier(),
DecisionTreeClassifier(),
LogisticRegression(),
GaussianNB(),
KNeighborsClassifier()]
classification_results = pd.DataFrame(columns=['model',
                                                       'cross_val_scores',
'cvs_mean'
])
for m in models:
model = m
cvs = cross_val_score(model, self.df_feat, self.y, cv=10, scoring='accuracy')
cvsm = cvs.mean()
classification_results = classification_results.append({'model': m,
                                                                 'cross_val_scores': cvs,
'cvs_mean': cvsm,
}
, ignore_index=True)
return classification_results
def model_tuning(self):
param_grid = {'C': [0.1, 1, 10, 100],
'gamma': [1, 0.1, 0.01, 0.001]}
grid = GridSearchCV(SVC(), param_grid, verbose=2)
grid.fit(self.df_feat, self.y)
grid_predictions = grid.predict(self.df_feat)
cm = (confusion_matrix(self.y, grid_predictions))
cr = (classification_report(self.y, grid_predictions))
return cm, cr
def main():
mlc = machine_learning_classifier()
# mlc.inital_variable_removal(inital_vars_to_drop = [''])
mlc.remove_na()
mlc.exploring_data(y_var_category = 'setosa', var1 = 'sepal_width', var2 = 'sepal_length')
mlc.standardising()
correlation_matrix = mlc.correlations()
# mlc.dropping_highly_correlated_variables(list_of_vars_to_drop=['who_man'])
mlc.setting_y()
classification_results = mlc.model_loop()
    cm, cr = mlc.model_tuning() |
py | 1a31d78fb8c41e5be5f43b4dfb0834668daface0 | from .denoising import DenoisingTask
from .multilingual_denoising import MultilingualDenoisingTask
from .summarization_from_pretrained_bart import SummarizationFromPretrainedBARTTask
|
py | 1a31d7fb65367c3b68c5967c0de4f8ccc6447d13 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import deque
from typing import Dict, Iterable, Optional, Set, TypeVar
import numpy as np
from compiler_gym.datasets.benchmark import Benchmark
from compiler_gym.datasets.dataset import Dataset
from compiler_gym.datasets.uri import BENCHMARK_URI_RE, resolve_uri_protocol
T = TypeVar("T")
def round_robin_iterables(iters: Iterable[Iterable[T]]) -> Iterable[T]:
"""Yield from the given iterators in round robin order."""
# Use a queue of iterators to iterate over. Repeatedly pop an iterator from
# the queue, yield the next value from it, then put it at the back of the
# queue. The iterator is discarded once exhausted.
iters = deque(iters)
while len(iters) > 1:
it = iters.popleft()
try:
yield next(it)
iters.append(it)
except StopIteration:
pass
    # Once only a single iterator is left, yield from it directly rather than
    # continuing with the round robin.
if len(iters) == 1:
yield from iters.popleft()
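# Example of the resulting order (assumed inputs, easy to verify):
#   >>> list(round_robin_iterables([iter("ab"), iter("cde"), iter("f")]))
#   ['a', 'c', 'f', 'b', 'd', 'e']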
class Datasets:
"""A collection of datasets.
This class provides a dictionary-like interface for indexing and iterating
over multiple :class:`Dataset <compiler_gym.datasets.Dataset>` objects.
Select a dataset by URI using:
>>> env.datasets["benchmark://cbench-v1"]
Check whether a dataset exists using:
>>> "benchmark://cbench-v1" in env.datasets
True
Or iterate over the datasets using:
>>> for dataset in env.datasets:
... print(dataset.name)
benchmark://cbench-v1
benchmark://github-v0
benchmark://npb-v0
To select a benchmark from the datasets, use :meth:`benchmark()`:
>>> env.datasets.benchmark("benchmark://a-v0/a")
Use the :meth:`benchmarks()` method to iterate over every benchmark in the
datasets in a stable round robin order:
>>> for benchmark in env.datasets.benchmarks():
... print(benchmark)
benchmark://cbench-v1/1
benchmark://github-v0/1
benchmark://npb-v0/1
benchmark://cbench-v1/2
...
If you want to exclude a dataset, delete it:
>>> del env.datasets["benchmark://b-v0"]
"""
def __init__(
self, datasets: Iterable[Dataset],
):
self._datasets: Dict[str, Dataset] = {d.name: d for d in datasets}
self._visible_datasets: Set[str] = set(
name for name, dataset in self._datasets.items() if not dataset.deprecated
)
def datasets(self, with_deprecated: bool = False) -> Iterable[Dataset]:
"""Enumerate the datasets.
Dataset order is consistent across runs.
:param with_deprecated: If :code:`True`, include datasets that have been
marked as deprecated.
:return: An iterable sequence of :meth:`Dataset
<compiler_gym.datasets.Dataset>` instances.
"""
datasets = self._datasets.values()
if not with_deprecated:
datasets = (d for d in datasets if not d.deprecated)
yield from sorted(datasets, key=lambda d: (d.sort_order, d.name))
def __iter__(self) -> Iterable[Dataset]:
"""Iterate over the datasets.
Dataset order is consistent across runs.
Equivalent to :meth:`datasets.datasets()
<compiler_gym.datasets.Dataset.datasets>`, but without the ability to
iterate over the deprecated datasets.
If the number of benchmarks in any of the datasets is infinite
(:code:`len(dataset) == math.inf`), the iterable returned by this method
will continue indefinitely.
:return: An iterable sequence of :meth:`Dataset
<compiler_gym.datasets.Dataset>` instances.
"""
return self.datasets()
def dataset(self, dataset: str) -> Dataset:
"""Get a dataset.
Return the corresponding :meth:`Dataset
<compiler_gym.datasets.Dataset>`. Name lookup will succeed whether or
not the dataset is deprecated.
:param dataset: A dataset name.
:return: A :meth:`Dataset <compiler_gym.datasets.Dataset>` instance.
:raises LookupError: If :code:`dataset` is not found.
"""
dataset_name = resolve_uri_protocol(dataset)
if dataset_name not in self._datasets:
raise LookupError(f"Dataset not found: {dataset_name}")
return self._datasets[dataset_name]
def __getitem__(self, dataset: str) -> Dataset:
"""Lookup a dataset.
:param dataset: A dataset name.
:return: A :meth:`Dataset <compiler_gym.datasets.Dataset>` instance.
:raises LookupError: If :code:`dataset` is not found.
"""
return self.dataset(dataset)
def __setitem__(self, key: str, dataset: Dataset):
"""Add a dataset to the collection.
:param key: The name of the dataset.
:param dataset: The dataset to add.
"""
dataset_name = resolve_uri_protocol(key)
self._datasets[dataset_name] = dataset
if not dataset.deprecated:
self._visible_datasets.add(dataset_name)
def __delitem__(self, dataset: str):
"""Remove a dataset from the collection.
This does not affect any underlying storage used by dataset. See
:meth:`uninstall() <compiler_gym.datasets.Datasets.uninstall>` to clean
up.
:param dataset: The name of a dataset.
        :raises KeyError: If :code:`dataset` is not in the collection.
"""
dataset_name = resolve_uri_protocol(dataset)
if dataset_name in self._visible_datasets:
self._visible_datasets.remove(dataset_name)
del self._datasets[dataset_name]
def __contains__(self, dataset: str) -> bool:
"""Returns whether the dataset is contained."""
try:
self.dataset(dataset)
return True
except LookupError:
return False
def benchmarks(self, with_deprecated: bool = False) -> Iterable[Benchmark]:
"""Enumerate the (possibly infinite) benchmarks lazily.
Benchmarks order is consistent across runs. One benchmark from each
dataset is returned in round robin order until all datasets have been
fully enumerated. The order of :meth:`benchmarks()
<compiler_gym.datasets.Datasets.benchmarks>` and :meth:`benchmark_uris()
<compiler_gym.datasets.Datasets.benchmark_uris>` is the same.
If the number of benchmarks in any of the datasets is infinite
(:code:`len(dataset) == math.inf`), the iterable returned by this method
will continue indefinitely.
:param with_deprecated: If :code:`True`, include benchmarks from
datasets that have been marked deprecated.
:return: An iterable sequence of :class:`Benchmark
<compiler_gym.datasets.Benchmark>` instances.
"""
return round_robin_iterables(
(d.benchmarks() for d in self.datasets(with_deprecated=with_deprecated))
)
def benchmark_uris(self, with_deprecated: bool = False) -> Iterable[str]:
"""Enumerate the (possibly infinite) benchmark URIs.
Benchmark URI order is consistent across runs. URIs from datasets are
returned in round robin order. The order of :meth:`benchmarks()
<compiler_gym.datasets.Datasets.benchmarks>` and :meth:`benchmark_uris()
<compiler_gym.datasets.Datasets.benchmark_uris>` is the same.
If the number of benchmarks in any of the datasets is infinite
(:code:`len(dataset) == math.inf`), the iterable returned by this method
will continue indefinitely.
:param with_deprecated: If :code:`True`, include benchmarks from
datasets that have been marked deprecated.
:return: An iterable sequence of benchmark URI strings.
"""
return round_robin_iterables(
(d.benchmark_uris() for d in self.datasets(with_deprecated=with_deprecated))
)
def benchmark(self, uri: str) -> Benchmark:
"""Select a benchmark.
Returns the corresponding :class:`Benchmark
<compiler_gym.datasets.Benchmark>`, regardless of whether the containing
dataset is installed or deprecated.
:param uri: The URI of the benchmark to return.
:return: A :class:`Benchmark <compiler_gym.datasets.Benchmark>`
instance.
"""
uri = resolve_uri_protocol(uri)
match = BENCHMARK_URI_RE.match(uri)
if not match:
raise ValueError(f"Invalid benchmark URI: '{uri}'")
dataset_name = match.group("dataset")
dataset = self._datasets[dataset_name]
return dataset.benchmark(uri)
def random_benchmark(
self, random_state: Optional[np.random.Generator] = None
) -> Benchmark:
"""Select a benchmark randomly.
First, a dataset is selected uniformly randomly using
:code:`random_state.choice(list(datasets))`. The
:meth:`random_benchmark()
<compiler_gym.datasets.Dataset.random_benchmark>` method of that dataset
is then called to select a benchmark.
Note that the distribution of benchmarks selected by this method is not
biased by the size of each dataset, since datasets are selected
uniformly. This means that datasets with a small number of benchmarks
will be overrepresented compared to datasets with many benchmarks. To
correct for this bias, use the number of benchmarks in each dataset as
a weight for the random selection:
>>> rng = np.random.default_rng()
>>> finite_datasets = [d for d in env.datasets if len(d) != math.inf]
        >>> weights = np.array([len(d) for d in finite_datasets], dtype=float)
        >>> dataset = rng.choice(finite_datasets, p=weights / weights.sum())
>>> dataset.random_benchmark(random_state=rng)
:param random_state: A random number generator. If not provided, a
default :code:`np.random.default_rng()` is used.
:return: A :class:`Benchmark <compiler_gym.datasets.Benchmark>`
instance.
"""
random_state = random_state or np.random.default_rng()
dataset = random_state.choice(list(self._visible_datasets))
return self[dataset].random_benchmark(random_state=random_state)
@property
def size(self) -> int:
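        """The number of datasets in the collection."""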
return len(self._visible_datasets)
def __len__(self) -> int:
"""The number of datasets in the collection."""
return self.size
|
py | 1a31d81fbb0a60ca7f07803d50c5c234f8172fe8 | from dataclasses import dataclass
from final_class import final
__all__ = ['Part']
@final
@dataclass(frozen=True)
class Part:
type_: int
λ: float
|
py | 1a31d838226141e926a70d5d4bed70235151cfe8 | #!/usr/bin/env python
# coding: utf-8
import argparse
import os
import ray
from dotenv import load_dotenv
from tqdm import tqdm
from birdfsd_yolov5.label_studio_helpers.utils import get_all_projects_tasks
from birdfsd_yolov5.model_utils.handlers import catch_keyboard_interrupt
from birdfsd_yolov5.model_utils.utils import api_request
@ray.remote
def patch_anno(task, _from, to):
for _entry in task['annotations']:
entry_id = _entry['id']
for entry in _entry['result']:
value = entry['value']
            if _from != value['rectanglelabels'][0]:
print(f'Could not find the label `{_from}` in task '
f'`{task["id"]}`! Skipping...')
return
entry['value']['rectanglelabels'] = [to]
url = f'{os.environ["LS_HOST"]}/api/annotations/{entry_id}/'
api_request(url, method='patch', data=_entry)
return
@ray.remote
def patch_pred(pred, _from, to):
for result in pred['result']:
label = result['value']['rectanglelabels']
        if _from != label[0]:
print(f'Could not find the label `{_from}` in pred '
f'`{pred["id"]}`! Skipping...')
return
result['value']['rectanglelabels'] = [to]
url = f'{os.environ["LS_HOST"]}/api/predictions/{pred["id"]}/'
api_request(url, method='patch', data=pred)
return
def check_if_label_exists_in_task_annotations(task, label):
labels = []
if not task.get('annotations'):
return
results = sum([x['result'] for x in task['annotations']], [])
for result in results:
labels.append(result['value']['rectanglelabels'])
labels = sum(labels, [])
if label in labels:
return task
return
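# Illustrative sketch of the Label Studio task payload shape assumed by the
# helpers above, inferred from how they index into it; the field values here
# are made up.
# task = {
#     'id': 123,
#     'annotations': [
#         {'id': 456,
#          'result': [
#              {'value': {'rectanglelabels': ['Old Label']}},
#          ]},
#     ],
# }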
def opts() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument('-f',
'--from-label',
help='Label to find and change (i.e., old label)',
type=str,
required=True)
parser.add_argument(
'-t',
'--to-label',
help='Label to use instead of the old label (i.e., new label)',
type=str,
required=True)
return parser.parse_args()
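# Example invocation (the script filename and label names are placeholders):
#   python patch_label.py --from-label 'Old Label' --to-label 'New Label'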
def patch(from_label, to_label):
catch_keyboard_interrupt()
# --------------------------------------------------------------
tasks = get_all_projects_tasks()
tasks_with_label = []
for task in tqdm(tasks, desc='Scan tasks'):
task = check_if_label_exists_in_task_annotations(task,
label=from_label)
if task:
tasks_with_label.append(task)
futures = []
for task in tasks_with_label:
futures.append(patch_anno.remote(task, from_label, to_label))
for future in tqdm(futures, desc='Futures'):
ray.get(future)
# --------------------------------------------------------------
preds = get_all_projects_tasks(get_predictions_instead=True)
preds_with_label = []
for pred in tqdm(preds, desc='Scan preds'):
for result in pred['result']:
label = result['value']['rectanglelabels']
if from_label in label:
preds_with_label.append(pred)
futures = []
for pred in preds_with_label:
futures.append(patch_pred.remote(pred, from_label, to_label))
for future in tqdm(futures, desc='Futures'):
ray.get(future)
# --------------------------------------------------------------
ray.shutdown()
if __name__ == '__main__':
load_dotenv()
args = opts()
patch(from_label=args.from_label, to_label=args.to_label)
|