id (stringlengths 1–265) | text (stringlengths 6–5.19M) | dataset_id (stringclasses, 7 values)
---|---|---|
3344040 | """Preference management for cloud."""
from ipaddress import ip_address
from typing import List, Optional
from homeassistant.auth.const import GROUP_ID_ADMIN
from homeassistant.auth.models import User
from homeassistant.core import callback
from homeassistant.util.logging import async_create_catching_coro
from .const import (
DEFAULT_ALEXA_REPORT_STATE,
DEFAULT_EXPOSED_DOMAINS,
DEFAULT_GOOGLE_REPORT_STATE,
DOMAIN,
PREF_ALEXA_DEFAULT_EXPOSE,
PREF_ALEXA_ENTITY_CONFIGS,
PREF_ALEXA_REPORT_STATE,
PREF_ALIASES,
PREF_CLOUD_USER,
PREF_CLOUDHOOKS,
PREF_DISABLE_2FA,
PREF_ENABLE_ALEXA,
PREF_ENABLE_GOOGLE,
PREF_ENABLE_REMOTE,
PREF_GOOGLE_DEFAULT_EXPOSE,
PREF_GOOGLE_ENTITY_CONFIGS,
PREF_GOOGLE_LOCAL_WEBHOOK_ID,
PREF_GOOGLE_REPORT_STATE,
PREF_GOOGLE_SECURE_DEVICES_PIN,
PREF_OVERRIDE_NAME,
PREF_SHOULD_EXPOSE,
PREF_USERNAME,
InvalidTrustedNetworks,
InvalidTrustedProxies,
)
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
_UNDEF = object()
class CloudPreferences:
"""Handle cloud preferences."""
def __init__(self, hass):
"""Initialize cloud prefs."""
self._hass = hass
self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
self._prefs = None
self._listeners = []
async def async_initialize(self):
"""Finish initializing the preferences."""
prefs = await self._store.async_load()
if prefs is None:
prefs = self._empty_config("")
self._prefs = prefs
if PREF_GOOGLE_LOCAL_WEBHOOK_ID not in self._prefs:
await self._save_prefs(
{
**self._prefs,
PREF_GOOGLE_LOCAL_WEBHOOK_ID: self._hass.components.webhook.async_generate_id(),
}
)
@callback
def async_listen_updates(self, listener):
"""Listen for updates to the preferences."""
self._listeners.append(listener)
async def async_update(
self,
*,
google_enabled=_UNDEF,
alexa_enabled=_UNDEF,
remote_enabled=_UNDEF,
google_secure_devices_pin=_UNDEF,
cloudhooks=_UNDEF,
cloud_user=_UNDEF,
google_entity_configs=_UNDEF,
alexa_entity_configs=_UNDEF,
alexa_report_state=_UNDEF,
google_report_state=_UNDEF,
alexa_default_expose=_UNDEF,
google_default_expose=_UNDEF,
):
"""Update user preferences."""
prefs = {**self._prefs}
for key, value in (
(PREF_ENABLE_GOOGLE, google_enabled),
(PREF_ENABLE_ALEXA, alexa_enabled),
(PREF_ENABLE_REMOTE, remote_enabled),
(PREF_GOOGLE_SECURE_DEVICES_PIN, google_secure_devices_pin),
(PREF_CLOUDHOOKS, cloudhooks),
(PREF_CLOUD_USER, cloud_user),
(PREF_GOOGLE_ENTITY_CONFIGS, google_entity_configs),
(PREF_ALEXA_ENTITY_CONFIGS, alexa_entity_configs),
(PREF_ALEXA_REPORT_STATE, alexa_report_state),
(PREF_GOOGLE_REPORT_STATE, google_report_state),
(PREF_ALEXA_DEFAULT_EXPOSE, alexa_default_expose),
(PREF_GOOGLE_DEFAULT_EXPOSE, google_default_expose),
):
if value is not _UNDEF:
prefs[key] = value
if remote_enabled is True and self._has_local_trusted_network:
prefs[PREF_ENABLE_REMOTE] = False
raise InvalidTrustedNetworks
if remote_enabled is True and self._has_local_trusted_proxies:
prefs[PREF_ENABLE_REMOTE] = False
raise InvalidTrustedProxies
await self._save_prefs(prefs)
async def async_update_google_entity_config(
self,
*,
entity_id,
override_name=_UNDEF,
disable_2fa=_UNDEF,
aliases=_UNDEF,
should_expose=_UNDEF,
):
"""Update config for a Google entity."""
entities = self.google_entity_configs
entity = entities.get(entity_id, {})
changes = {}
for key, value in (
(PREF_OVERRIDE_NAME, override_name),
(PREF_DISABLE_2FA, disable_2fa),
(PREF_ALIASES, aliases),
(PREF_SHOULD_EXPOSE, should_expose),
):
if value is not _UNDEF:
changes[key] = value
if not changes:
return
updated_entity = {**entity, **changes}
updated_entities = {**entities, entity_id: updated_entity}
await self.async_update(google_entity_configs=updated_entities)
async def async_update_alexa_entity_config(
self, *, entity_id, should_expose=_UNDEF
):
"""Update config for an Alexa entity."""
entities = self.alexa_entity_configs
entity = entities.get(entity_id, {})
changes = {}
for key, value in ((PREF_SHOULD_EXPOSE, should_expose),):
if value is not _UNDEF:
changes[key] = value
if not changes:
return
updated_entity = {**entity, **changes}
updated_entities = {**entities, entity_id: updated_entity}
await self.async_update(alexa_entity_configs=updated_entities)
async def async_set_username(self, username):
"""Set the username that is logged in."""
# Logging out.
if username is None:
user = await self._load_cloud_user()
if user is not None:
await self._hass.auth.async_remove_user(user)
await self._save_prefs({**self._prefs, PREF_CLOUD_USER: None})
return
cur_username = self._prefs.get(PREF_USERNAME)
if cur_username == username:
return
if cur_username is None:
await self._save_prefs({**self._prefs, PREF_USERNAME: username})
else:
await self._save_prefs(self._empty_config(username))
def as_dict(self):
"""Return dictionary version."""
return {
PREF_ALEXA_DEFAULT_EXPOSE: self.alexa_default_expose,
PREF_ALEXA_ENTITY_CONFIGS: self.alexa_entity_configs,
PREF_ALEXA_REPORT_STATE: self.alexa_report_state,
PREF_CLOUDHOOKS: self.cloudhooks,
PREF_ENABLE_ALEXA: self.alexa_enabled,
PREF_ENABLE_GOOGLE: self.google_enabled,
PREF_ENABLE_REMOTE: self.remote_enabled,
PREF_GOOGLE_DEFAULT_EXPOSE: self.google_default_expose,
PREF_GOOGLE_ENTITY_CONFIGS: self.google_entity_configs,
PREF_GOOGLE_REPORT_STATE: self.google_report_state,
PREF_GOOGLE_SECURE_DEVICES_PIN: self.google_secure_devices_pin,
}
@property
def remote_enabled(self):
"""Return if remote is enabled on start."""
enabled = self._prefs.get(PREF_ENABLE_REMOTE, False)
if not enabled:
return False
if self._has_local_trusted_network or self._has_local_trusted_proxies:
return False
return True
@property
def alexa_enabled(self):
"""Return if Alexa is enabled."""
return self._prefs[PREF_ENABLE_ALEXA]
@property
def alexa_report_state(self):
"""Return if Alexa report state is enabled."""
return self._prefs.get(PREF_ALEXA_REPORT_STATE, DEFAULT_ALEXA_REPORT_STATE)
@property
def alexa_default_expose(self) -> Optional[List[str]]:
"""Return array of entity domains that are exposed by default to Alexa.
Can return None, in which case, for backwards compatibility, it should be interpreted as allowing all domains.
"""
return self._prefs.get(PREF_ALEXA_DEFAULT_EXPOSE)
@property
def alexa_entity_configs(self):
"""Return Alexa Entity configurations."""
return self._prefs.get(PREF_ALEXA_ENTITY_CONFIGS, {})
@property
def google_enabled(self):
"""Return if Google is enabled."""
return self._prefs[PREF_ENABLE_GOOGLE]
@property
def google_report_state(self):
"""Return if Google report state is enabled."""
return self._prefs.get(PREF_GOOGLE_REPORT_STATE, DEFAULT_GOOGLE_REPORT_STATE)
@property
def google_secure_devices_pin(self):
"""Return if Google is allowed to unlock locks."""
return self._prefs.get(PREF_GOOGLE_SECURE_DEVICES_PIN)
@property
def google_entity_configs(self):
"""Return Google Entity configurations."""
return self._prefs.get(PREF_GOOGLE_ENTITY_CONFIGS, {})
@property
def google_local_webhook_id(self):
"""Return Google webhook ID to receive local messages."""
return self._prefs[PREF_GOOGLE_LOCAL_WEBHOOK_ID]
@property
def google_default_expose(self) -> Optional[List[str]]:
"""Return array of entity domains that are exposed by default to Google.
Can return None, in which case, for backwards compatibility, it should be interpreted as allowing all domains.
"""
return self._prefs.get(PREF_GOOGLE_DEFAULT_EXPOSE)
@property
def cloudhooks(self):
"""Return the published cloud webhooks."""
return self._prefs.get(PREF_CLOUDHOOKS, {})
async def get_cloud_user(self) -> str:
"""Return ID from Home Assistant Cloud system user."""
user = await self._load_cloud_user()
if user:
return user.id
user = await self._hass.auth.async_create_system_user(
"Home Assistant Cloud", [GROUP_ID_ADMIN]
)
await self.async_update(cloud_user=user.id)
return user.id
async def _load_cloud_user(self) -> Optional[User]:
"""Load cloud user if available."""
user_id = self._prefs.get(PREF_CLOUD_USER)
if user_id is None:
return None
# Fetch the user. It can happen that the user no longer exists if
# an image was restored without restoring the cloud prefs.
return await self._hass.auth.async_get_user(user_id)
@property
def _has_local_trusted_network(self) -> bool:
"""Return if we allow localhost to bypass auth."""
local4 = ip_address("127.0.0.1")
local6 = ip_address("::1")
for prv in self._hass.auth.auth_providers:
if prv.type != "trusted_networks":
continue
for network in prv.trusted_networks:
if local4 in network or local6 in network:
return True
return False
@property
def _has_local_trusted_proxies(self) -> bool:
"""Return if we allow localhost to be a proxy and use its data."""
if not hasattr(self._hass, "http"):
return False
local4 = ip_address("127.0.0.1")
local6 = ip_address("::1")
if any(
local4 in nwk or local6 in nwk for nwk in self._hass.http.trusted_proxies
):
return True
return False
async def _save_prefs(self, prefs):
"""Save preferences to disk."""
self._prefs = prefs
await self._store.async_save(self._prefs)
for listener in self._listeners:
self._hass.async_create_task(async_create_catching_coro(listener(self)))
@callback
def _empty_config(self, username):
"""Return an empty config."""
return {
PREF_ALEXA_DEFAULT_EXPOSE: DEFAULT_EXPOSED_DOMAINS,
PREF_ALEXA_ENTITY_CONFIGS: {},
PREF_CLOUD_USER: None,
PREF_CLOUDHOOKS: {},
PREF_ENABLE_ALEXA: True,
PREF_ENABLE_GOOGLE: True,
PREF_ENABLE_REMOTE: False,
PREF_GOOGLE_DEFAULT_EXPOSE: DEFAULT_EXPOSED_DOMAINS,
PREF_GOOGLE_ENTITY_CONFIGS: {},
PREF_GOOGLE_LOCAL_WEBHOOK_ID: self._hass.components.webhook.async_generate_id(),
PREF_GOOGLE_SECURE_DEVICES_PIN: None,
PREF_USERNAME: username,
}
| StarcoderdataPython |
97989 | class MinStack:
# Update Min every pop (Accepted), O(1) push, pop, top, min
def __init__(self):
self.stack = []
self.min = None
def push(self, val: int) -> None:
if self.stack:
self.min = min(self.min, val)
self.stack.append((val, self.min))
else:
self.min = val
self.stack.append((val, self.min))
def pop(self) -> None:
val, min_ = self.stack.pop()
if self.stack:
self.min = self.stack[-1][1]
else:
self.min = None
def top(self) -> int:
if self.stack:
return self.stack[-1][0]
def getMin(self) -> int:
return self.min
class MinStack:
# Find min in last elem (Top Voted), O(1) push, pop, top, min
def __init__(self):
self.q = []
def push(self, x: int) -> None:
curMin = self.getMin()
if curMin == None or x < curMin:
curMin = x
self.q.append((x, curMin))
def pop(self) -> None:
self.q.pop()
def top(self) -> int:
if len(self.q) == 0:
return None
else:
return self.q[len(self.q) - 1][0]
def getMin(self) -> int:
if len(self.q) == 0:
return None
else:
return self.q[len(self.q) - 1][1]
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(val)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
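# A concrete run-through (illustrative values from the classic Min Stack example;
# both implementations above produce the same results):
# obj = MinStack()
# obj.push(-2); obj.push(0); obj.push(-3)
# obj.getMin()   # -> -3
# obj.pop()
# obj.top()      # -> 0
# obj.getMin()   # -> -2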
| StarcoderdataPython |
27023 | <reponame>oruebel/ndx-icephys-meta
"""
Module with ObjectMapper classes for the icephys-meta Container classes/neurodata_types
"""
from pynwb import register_map
from pynwb.io.file import NWBFileMap
from hdmf.common.io.table import DynamicTableMap
from ndx_icephys_meta.icephys import ICEphysFile, AlignedDynamicTable
@register_map(ICEphysFile)
class ICEphysFileMap(NWBFileMap):
"""
Customize object mapping for ICEphysFile to define the mapping
for our custom icephys tables, i.e., InteracellularRecordings, SimultaneousRecordingsTable,
SequentialRecordingsTable, RepetitionsTable, and ExperimentalConditionsTable
"""
def __init__(self, spec):
super().__init__(spec)
general_spec = self.spec.get_group('general')
icephys_spec = general_spec.get_group('intracellular_ephys')
self.map_spec('intracellular_recordings', icephys_spec.get_neurodata_type('IntracellularRecordingsTable'))
self.map_spec('icephys_simultaneous_recordings', icephys_spec.get_neurodata_type('SimultaneousRecordingsTable'))
self.map_spec('icephys_sequential_recordings', icephys_spec.get_neurodata_type('SequentialRecordingsTable'))
self.map_spec('icephys_repetitions', icephys_spec.get_neurodata_type('RepetitionsTable'))
self.map_spec('icephys_experimental_conditions', icephys_spec.get_neurodata_type('ExperimentalConditionsTable'))
self.map_spec('ic_filtering', icephys_spec.get_dataset('filtering'))
@register_map(AlignedDynamicTable)
class AlignedDynamicTableMap(DynamicTableMap):
"""
Customize the mapping for AlignedDynamicTable
"""
def __init__(self, spec):
super().__init__(spec)
# By default the DynamicTables contained as sub-categories in the AlignedDynamicTable are mapped to
# the 'dynamic_tables' class attribute. This renames the attribute to 'category_tables'
self.map_spec('category_tables', spec.get_neurodata_type('DynamicTable'))
@DynamicTableMap.object_attr('electrodes')
def electrodes(self, container, manager):
return container.category_tables.get('electrodes', None)
@DynamicTableMap.object_attr('stimuli')
def stimuli(self, container, manager):
return container.category_tables.get('stimuli', None)
@DynamicTableMap.object_attr('responses')
def responses(self, container, manager):
return container.category_tables.get('responses', None)
| StarcoderdataPython |
9610 | <gh_stars>1-10
"""
Test to verify performance of PVC creation and deletion
for RBD, CephFS and RBD-Thick interfaces
"""
import time
import logging
import datetime
import pytest
import ocs_ci.ocs.exceptions as ex
import threading
import statistics
from concurrent.futures import ThreadPoolExecutor
from uuid import uuid4
from ocs_ci.framework.testlib import performance
from ocs_ci.ocs.perftests import PASTest
from ocs_ci.helpers import helpers, performance_lib
from ocs_ci.ocs import constants
from ocs_ci.helpers.helpers import get_full_test_logs_path
from ocs_ci.ocs.perfresult import PerfResult
from ocs_ci.framework import config
log = logging.getLogger(__name__)
class ResultsAnalyse(PerfResult):
"""
This class generates results for all tests as one unit
and saves them to an elastic search server on the cluster
"""
def __init__(self, uuid, crd, full_log_path):
"""
Initialize the object by reading some of the data from the CRD file and
by connecting to the ES server and reading all results from it.
Args:
uuid (str): the unique uid of the test
crd (dict): dictionary with test parameters - the test yaml file
that we modify in the test itself.
full_log_path (str): the path of the results files to be found
"""
super(ResultsAnalyse, self).__init__(uuid, crd)
self.new_index = "pvc_create_delete_fullres"
self.full_log_path = full_log_path
# make sure we have connection to the elastic search server
self.es_connect()
@performance
class TestPVCCreationDeletionPerformance(PASTest):
"""
Test to verify performance of PVC creation and deletion
"""
def setup(self):
"""
Setting up test parameters
"""
log.info("Starting the test setup")
super(TestPVCCreationDeletionPerformance, self).setup()
self.benchmark_name = "PVC_Creation-Deletion"
self.uuid = uuid4().hex
self.crd_data = {
"spec": {
"test_user": "Homer simpson",
"clustername": "test_cluster",
"elasticsearch": {
"server": config.PERF.get("production_es_server"),
"port": config.PERF.get("production_es_port"),
"url": f"http://{config.PERF.get('production_es_server')}:{config.PERF.get('production_es_port')}",
},
}
}
if self.dev_mode:
self.crd_data["spec"]["elasticsearch"] = {
"server": config.PERF.get("dev_es_server"),
"port": config.PERF.get("dev_es_port"),
"url": f"http://{config.PERF.get('dev_es_server')}:{config.PERF.get('dev_es_port')}",
}
@pytest.fixture()
def base_setup(self, interface_type, storageclass_factory, pod_factory):
"""
A setup phase for the test
Args:
interface_type: A fixture to iterate over ceph interfaces
storageclass_factory: A fixture to create everything needed for a
storageclass
pod_factory: A fixture to create new pod
"""
self.interface = interface_type
if self.interface == constants.CEPHBLOCKPOOL_THICK:
self.sc_obj = storageclass_factory(
interface=constants.CEPHBLOCKPOOL,
new_rbd_pool=True,
rbd_thick_provision=True,
)
else:
self.sc_obj = storageclass_factory(self.interface)
self.pod_factory = pod_factory
@pytest.fixture()
def namespace(self, project_factory):
"""
Create a new project
"""
proj_obj = project_factory()
self.namespace = proj_obj.namespace
def init_full_results(self, full_results):
"""
Initialize the full results object which will send to the ES server
Args:
full_results (obj): an empty ResultsAnalyse object
Returns:
ResultsAnalyse (obj): the input object filled with data
"""
for key in self.environment:
full_results.add_key(key, self.environment[key])
full_results.add_key("storageclass", self.sc)
full_results.add_key("index", full_results.new_index)
return full_results
@pytest.mark.parametrize(
argnames=["interface_type", "pvc_size"],
argvalues=[
pytest.param(
*[constants.CEPHBLOCKPOOL, "5Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHBLOCKPOOL, "15Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHBLOCKPOOL, "25Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHFILESYSTEM, "5Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHFILESYSTEM, "15Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHFILESYSTEM, "25Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHBLOCKPOOL_THICK, "5Gi"],
marks=[pytest.mark.performance_extended],
),
pytest.param(
*[constants.CEPHBLOCKPOOL_THICK, "15Gi"],
marks=[pytest.mark.performance_extended],
),
pytest.param(
*[constants.CEPHBLOCKPOOL_THICK, "25Gi"],
marks=[pytest.mark.performance_extended],
),
],
)
@pytest.mark.usefixtures(base_setup.__name__)
def test_pvc_creation_deletion_measurement_performance(
self, teardown_factory, pvc_size
):
"""
Measuring PVC creation and deletion times for pvc samples
Verifying that those times are within the required limits
"""
# Getting the full path for the test logs
self.full_log_path = get_full_test_logs_path(cname=self)
if self.interface == constants.CEPHBLOCKPOOL:
self.sc = "RBD"
elif self.interface == constants.CEPHFILESYSTEM:
self.sc = "CephFS"
elif self.interface == constants.CEPHBLOCKPOOL_THICK:
self.sc = "RBD-Thick"
self.full_log_path += f"-{self.sc}-{pvc_size}"
log.info(f"Logs file path name is : {self.full_log_path}")
self.start_time = time.strftime("%Y-%m-%dT%H:%M:%SGMT", time.gmtime())
self.get_env_info()
# Initialize the results doc file.
self.full_results = self.init_full_results(
ResultsAnalyse(self.uuid, self.crd_data, self.full_log_path)
)
self.full_results.add_key("pvc_size", pvc_size)
num_of_samples = 5
accepted_creation_time = (
600 if self.interface == constants.CEPHBLOCKPOOL_THICK else 1
)
# accepted deletion time for RBD is 1 sec, for CephFS is 2 secs and for RBD Thick is 5 secs
if self.interface == constants.CEPHFILESYSTEM:
accepted_deletion_time = 2
elif self.interface == constants.CEPHBLOCKPOOL:
accepted_deletion_time = 1
else:
accepted_deletion_time = 5
self.full_results.add_key("samples", num_of_samples)
accepted_creation_deviation_percent = 50
accepted_deletion_deviation_percent = 50
creation_time_measures = []
deletion_time_measures = []
msg_prefix = f"Interface: {self.interface}, PVC size: {pvc_size}."
for i in range(num_of_samples):
logging.info(f"{msg_prefix} Start creating PVC number {i + 1}.")
start_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
pvc_obj = helpers.create_pvc(sc_name=self.sc_obj.name, size=pvc_size)
timeout = 600 if self.interface == constants.CEPHBLOCKPOOL_THICK else 60
helpers.wait_for_resource_state(
pvc_obj, constants.STATUS_BOUND, timeout=timeout
)
pvc_obj.reload()
creation_time = performance_lib.measure_pvc_creation_time(
self.interface, pvc_obj.name, start_time
)
logging.info(
f"{msg_prefix} PVC number {i + 1} was created in {creation_time} seconds."
)
if creation_time > accepted_creation_time:
raise ex.PerformanceException(
f"{msg_prefix} PVC creation time is {creation_time} and is greater than "
f"{accepted_creation_time} seconds."
)
creation_time_measures.append(creation_time)
pv_name = pvc_obj.backed_pv
pvc_reclaim_policy = pvc_obj.reclaim_policy
pod_obj = self.write_file_on_pvc(pvc_obj)
pod_obj.delete(wait=True)
teardown_factory(pvc_obj)
logging.info(f"{msg_prefix} Start deleting PVC number {i + 1}")
if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
pvc_obj.delete()
pvc_obj.ocp.wait_for_delete(pvc_obj.name)
helpers.validate_pv_delete(pvc_obj.backed_pv)
deletion_time = helpers.measure_pvc_deletion_time(
self.interface, pv_name
)
logging.info(
f"{msg_prefix} PVC number {i + 1} was deleted in {deletion_time} seconds."
)
if deletion_time > accepted_deletion_time:
raise ex.PerformanceException(
f"{msg_prefix} PVC deletion time is {deletion_time} and is greater than "
f"{accepted_deletion_time} seconds."
)
deletion_time_measures.append(deletion_time)
else:
logging.info(
f"Reclaim policy of the PVC {pvc_obj.name} is not Delete;"
f" therefore not measuring deletion time for this PVC."
)
creation_average = self.process_time_measurements(
"creation",
creation_time_measures,
accepted_creation_deviation_percent,
msg_prefix,
)
self.full_results.add_key("creation-time", creation_average)
deletion_average = self.process_time_measurements(
"deletion",
deletion_time_measures,
accepted_deletion_deviation_percent,
msg_prefix,
)
self.full_results.add_key("deletion-time", deletion_average)
self.full_results.all_results["creation"] = creation_time_measures
self.full_results.all_results["deletion"] = deletion_time_measures
self.end_time = time.strftime("%Y-%m-%dT%H:%M:%SGMT", time.gmtime())
self.full_results.add_key(
"test_time", {"start": self.start_time, "end": self.end_time}
)
self.full_results.es_write()
log.info(f"The Result can be found at : {self.full_results.results_link()}")
def process_time_measurements(
self, action_name, time_measures, accepted_deviation_percent, msg_prefix
):
"""
Analyses the given time measured. If the standard deviation of these times is bigger than the
provided accepted deviation percent, fails the test
Args:
action_name (str): Name of the action for which these measurements were collected; used for the logging
time_measures (list of floats): A list of time measurements
accepted_deviation_percent (int): Accepted deviation percent to which computed standard deviation may be
compared
msg_prefix (str) : A string for comprehensive logging
Returns:
(float) The average value of the provided time measurements
"""
average = statistics.mean(time_measures)
log.info(
f"{msg_prefix} The average {action_name} time for the sampled {len(time_measures)} "
f"PVCs is {average} seconds."
)
if self.interface == constants.CEPHBLOCKPOOL_THICK:
st_deviation = statistics.stdev(time_measures)
st_deviation_percent = st_deviation / average * 100.0
if st_deviation_percent > accepted_deviation_percent:
log.error(
f"{msg_prefix} The standard deviation percent for {action_name} of {len(time_measures)} sampled "
f"PVCs is {st_deviation_percent}% which is bigger than accepted {accepted_deviation_percent}."
)
else:
log.info(
f"{msg_prefix} The standard deviation percent for {action_name} of {len(time_measures)} sampled "
f"PVCs is {st_deviation_percent}% and is within the accepted range."
)
self.full_results.add_key(
f"{action_name}_deviation_pct", st_deviation_percent
)
return average
def write_file_on_pvc(self, pvc_obj, filesize=1):
"""
Writes a file on given PVC
Args:
pvc_obj: PVC object to write a file on
filesize: size of file to write (in GB - default is 1GB)
Returns:
Pod on this pvc on which the file was written
"""
pod_obj = self.pod_factory(
interface=self.interface, pvc=pvc_obj, status=constants.STATUS_RUNNING
)
# filesize to be written is always 1 GB
file_size = f"{int(filesize * 1024)}M"
log.info(f"Starting IO on the POD {pod_obj.name}")
# Going to run only write IO
pod_obj.fillup_fs(size=file_size, fio_filename=f"{pod_obj.name}_file")
# Wait for the fio to finish
fio_result = pod_obj.get_fio_results()
err_count = fio_result.get("jobs")[0].get("error")
assert (
err_count == 0
), f"IO error on pod {pod_obj.name}. FIO result: {fio_result}"
log.info("IO on the PVC has finished")
return pod_obj
@pytest.mark.parametrize(
argnames=["interface_type"],
argvalues=[
pytest.param(
*[constants.CEPHBLOCKPOOL],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHFILESYSTEM],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHBLOCKPOOL_THICK],
marks=[pytest.mark.performance_extended],
),
],
)
@pytest.mark.usefixtures(base_setup.__name__)
@pytest.mark.usefixtures(namespace.__name__)
@pytest.mark.polarion_id("OCS-2618")
def test_multiple_pvc_deletion_measurement_performance(self, teardown_factory):
"""
Measuring PVC deletion time of 120 PVCs in 180 seconds
Args:
teardown_factory: A fixture used when we want a new resource that was created during the tests
to be removed in the teardown phase.
Returns:
"""
number_of_pvcs = 120
pvc_size = "1Gi"
msg_prefix = f"Interface: {self.interface}, PVC size: {pvc_size}."
log.info(f"{msg_prefix} Start creating new 120 PVCs")
pvc_objs, _ = helpers.create_multiple_pvcs(
sc_name=self.sc_obj.name,
namespace=self.namespace,
number_of_pvc=number_of_pvcs,
size=pvc_size,
burst=True,
)
for pvc_obj in pvc_objs:
pvc_obj.reload()
teardown_factory(pvc_obj)
timeout = 600 if self.interface == constants.CEPHBLOCKPOOL_THICK else 60
with ThreadPoolExecutor(max_workers=5) as executor:
for pvc_obj in pvc_objs:
executor.submit(
helpers.wait_for_resource_state,
pvc_obj,
constants.STATUS_BOUND,
timeout=timeout,
)
executor.submit(pvc_obj.reload)
pod_objs = []
for pvc_obj in pvc_objs:
pod_obj = self.write_file_on_pvc(pvc_obj, 0.3)
pod_objs.append(pod_obj)
# Get pvc_name, require pvc_name to fetch deletion time data from log
threads = list()
for pvc_obj in pvc_objs:
process = threading.Thread(target=pvc_obj.reload)
process.start()
threads.append(process)
for process in threads:
process.join()
pvc_name_list, pv_name_list = ([] for i in range(2))
threads = list()
for pvc_obj in pvc_objs:
process1 = threading.Thread(target=pvc_name_list.append(pvc_obj.name))
process2 = threading.Thread(target=pv_name_list.append(pvc_obj.backed_pv))
process1.start()
process2.start()
threads.append(process1)
threads.append(process2)
for process in threads:
process.join()
log.info(f"{msg_prefix} Preparing to delete 120 PVC")
# Delete PVC
for pvc_obj, pod_obj in zip(pvc_objs, pod_objs):
pod_obj.delete(wait=True)
pvc_obj.delete()
pvc_obj.ocp.wait_for_delete(pvc_obj.name)
# Get PVC deletion time
pvc_deletion_time = helpers.measure_pv_deletion_time_bulk(
interface=self.interface, pv_name_list=pv_name_list
)
log.info(
f"{msg_prefix} {number_of_pvcs} bulk deletion time is {pvc_deletion_time}"
)
# accepted deletion time is 2 secs for each PVC
accepted_pvc_deletion_time = number_of_pvcs * 2
for del_time in pvc_deletion_time.values():
if del_time > accepted_pvc_deletion_time:
raise ex.PerformanceException(
f"{msg_prefix} {number_of_pvcs} PVCs deletion time is {pvc_deletion_time.values()} and is "
f"greater than {accepted_pvc_deletion_time} seconds"
)
logging.info(f"{msg_prefix} {number_of_pvcs} PVCs deletion times are:")
for name, a_time in pvc_deletion_time.items():
logging.info(f"{name} deletion time is: {a_time} seconds")
| StarcoderdataPython |
3384695 | #ExrMerge v1.0 from nukepedia
#Developed by <NAME> (Zabander) 21-11-2014.
#hacked by Rafal 1)removed the write node 2)added stringSplit to tidy the name to remove version, show, shot; 3) changed into class
#todo , do noBeauty, make gui, new group name, bring back option to create write, fix error traps (i.e. if read not selected); backStringSplit, clean ui
import os
import nuke
class channelMergeFromRead():
def __init__(self):
import os
import nuke
self.lista = []
self.add = 1
#self.node = 0
self.node2 = 0
self.currentlayerName = ""
self.newdir = ""
self.dic = {}
self.mainBeautyLayer = 0
self.sGroup = []
self.isGroup = True
self.nIncrement = 1
self.splitString =''
self.removeFront=''
self.removeBack=''
self.readNodes=[]
self.folderCount='2'
#Allows selecting the beauty pass
def mainBeauty(self):
#try:
for nodes in self.lista:
self.dic[nodes] = os.path.split(nodes.knob('file').value())[1]
readNames = self.dic.values()
readNames.sort()
readNames.sort(key=len)
p = nuke.Panel('channel merge read nodes')
#p.addSingleLineInput( 'Path', os.path.split(nodes.knob('file').value())[0] )
p.addEnumerationPulldown('BeautyLayer', readNames)
p.addEnumerationPulldown('nameMethod', 'from_folder_name from_file_name')
#p.Divider('from_folderNameMethod')
p.addEnumerationPulldown('folderCount', '2 0 1 2 3 4')
#p.Divider('from_fileNameMethod')
p.addSingleLineInput('splitString', '_')
p.addEnumerationPulldown('removeFront', '8 0 1 2 3 4 5 6 7 8 9 10')
p.addEnumerationPulldown('removeBack', '1 0 1 2 3 4 5 6 7 8 9 10')
ret = p.show()
c = p.value('BeautyLayer')
self.folderCount =p.value('folderCount')
self.nameMethod =p.value('nameMethod')
self.splitString =p.value('splitString')
self.removeFront=p.value('removeFront')
self.removeBack=p.value('removeBack')
result = c.split("'")[1]
for mKey, name in self.dic.items():
if result == name:
self.mainBeautyLayer = mKey
else:
pass
#except:
#pass
#Adds the read nodes to self.lista
def getReadNodes(self):
#try:
if self.readNodes ==0:
sNodes = nuke.selectedNodes()
self.readNodes = sNodes
else:
sNodes = self.readNodes
len(sNodes)
if len(sNodes) >= 2:
for node in sNodes:
node.autoplace()
if node.Class() == "Read":
nodeName = node
self.lista.append(nodeName)
else:
pass#nuke.message("One of your nodes is not a read node: %s, it will be excluded." % node.self.name())
self.mainBeauty()
if self.mainBeautyLayer=='0':
nuke.ask('err broken.. sorry')
else:
beautyIndex = self.lista.index(self.mainBeautyLayer)
self.lista[beautyIndex], self.lista[0] = self.lista[0], self.lista[beautyIndex]
self.node = self.lista[0]
self.node.knob("selected").setValue(False)
else:
nuke.message ("Please select more than 1 node__")
#except:
# self.isGroup = False
#Defines the name of the node to be used
def name(self,node):
path = node["file"].value()
filename = os.path.basename(path)
filenamesplit = filename.split(".")
folderCount = int(self.folderCount)
if self.nameMethod=='from_folder_name':
filePathSplit = path.split("/")
self.currentlayerName= ''.join(filePathSplit[len(filePathSplit)-(folderCount+1):-folderCount])
if self.nameMethod=='from_file_name':
del filenamesplit[-1]
filenamesplit = '_'.join(filenamesplit)
filenamesplit = filenamesplit.split(self.splitString)
self.currentlayerName = str(self.splitString.join(filenamesplit[int(self.removeFront):len(filenamesplit)-int(self.removeBack)]))
nuke.tprint(filenamesplit)
nuke.tprint(self.currentlayerName)
#Generate the Shuffle and transfer attributes
def exrCompile (self,x, y):
s1 = nuke.nodes.ShuffleCopy()
self.sGroup.append(s1)
s1.autoplace()
if s1.canSetInput(0, self.node) and s1.canSetInput(1,self.node2):
s1.setInput( 0, self.node)
s1.setInput( 1, self.node2)
chan = s1["in2"].value()
s1["red"].setValue('red')
s1["green"].setValue('green')
s1["blue"].setValue('blue')
s1["alpha"].setValue('alpha')
self.name(self.node2)
nameTemp=''
listTemp=[]
#listTemp=str.split(self.currentlayerName,'_')
listTemp=self.currentlayerName
#for x in range(int(float(self.removeFront)),len(listTemp)-int(float(self.removeBack)),1):
#nameTemp= nameTemp+'_'+listTemp[x]
nameTemp=listTemp
nuke.tprint(nameTemp)
currentlayerNameRed = str(nameTemp) + ".red"
currentlayerNameGreen = str(nameTemp) + ".green"
currentlayerNameBlue = str(nameTemp) + ".blue"
currentlayerNameAlpha = str(nameTemp) + ".alpha"
nuke.Layer(nameTemp,[currentlayerNameRed, currentlayerNameGreen,currentlayerNameBlue, currentlayerNameAlpha])
s1["out"].setValue(nameTemp)
self.node = s1
self.node.knob("selected").setValue(False)
self.node2.knob("selected").setValue(False)
else:
pass
#Evaluates each node in self.lista and runs exrCompile
def selector(self):
for item in self.lista[1:]:
self.node2 = self.lista[self.add]
self.exrCompile(self.node, self.node2)
if self.add < len(self.lista):
self.add =+ self.add + 1
else:
self.add =+ self.add + 0
pass
if self.mainBeautyLayer=='0':
pass
item.knob("selected").setValue(False)
def makeGroup(self):
if len(self.lista) >= 2:
nuke.selectAll()
nuke.invertSelection()
for shuffleknob in self.sGroup:
shuffleknob['selected'].setValue(True)
#for shuffleknob in self.readNodes:
#shuffleknob['selected'].setValue(True)
node = nuke.collapseToGroup(show=False)
node['xpos'].setValue(self.mainBeautyLayer.xpos())
node['ypos'].setValue(self.mainBeautyLayer.ypos()+100)
#node.autoplace()
#gName = node.name()
#nuke.tprint((self.mainBeautyLayer))
#nuke.toNode(gName)["name"].setValue("Exr Merge %s" %'hello')
#self.nIncrement += 1
#node.lock_connections(True)
else:
pass
def run(self,readNodes=0):
self.readNodes = readNodes
self.readNodes =0
#nuke.message(str(self.readNodes))
self.getReadNodes()
if len(self.lista) >= 2 and self.isGroup == True:
self.selector()
self.makeGroup()
else:
print "Process Stopped"
pass
| StarcoderdataPython |
61209 | # contains neither Process object nor execute() function
| StarcoderdataPython |
6479 | <gh_stars>1-10
from picamera import PiCamera
from time import sleep
import boto3
import os.path
import subprocess
s3 = boto3.client('s3')
bucket = 'cambucket21'
camera = PiCamera()
#camera.resolution = (1920, 1080)
x = 0
camerafile = x
while True:
if (x == 6):
x = 1
else:
x = x + 1
camera.start_preview()
camera.start_recording('/home/pi/' + str(x) + '.h264')
sleep(2)
camera.stop_recording()
camera.stop_preview()
subprocess.Popen("MP4Box -add " + str(x) + ".h264 " + str(x) +".mp4", shell=True)
sleep(1)
s3.upload_file('/home/pi/' + str(x) + '.mp4',bucket,'/home/pi/' + str(x) + '.mp4')
| StarcoderdataPython |
3360638 | <gh_stars>0
from one_indiv_immed_fri import friend_besties
from second_degree_fri import friend_second_besties
from collections import defaultdict
#adapted from University of Melbourne's sample solution
def predict_attribute(friends, feat_dict, feature):
"""predict the target 'feature' from the set 'friends' based on the
attributes in 'feat_dict' """
# dictionary for counting attribute frequency among 'friends'
freq = defaultdict(int)
#add vote for feature if feature exists as friend's
for friend in friends:
if friend in feat_dict and feature in feat_dict[friend]:
freq[feat_dict[friend][feature]] += 1
#in case there is at least 1 count of target feature among 'friends',
#find which attribute has majority count to predict
if freq:
max_count = 0
for attribute, count in freq.items():
if count > max_count:
att_list = [attribute]
max_count = count
elif count == max_count:
att_list.append(attribute)
return sorted(att_list)
#if no attributes of feature are found, return empty list
else:
return []
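# Worked example for predict_attribute (hypothetical data, purely illustrative):
# friends = {'kim', 'sam', 'alex'}
# feat_dict = {'kim': {'sport': 'netball'}, 'sam': {'sport': 'netball'}, 'alex': {'sport': 'footy'}}
# predict_attribute(friends, feat_dict, 'sport') -> ['netball']  (majority vote, 2 of 3)
# predict_attribute(friends, feat_dict, 'music') -> []           (no friend has that feature)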
def friendly_prediction(unknown_user, features, friend_dict, feat_dict):
'''friendly_prediction takes name of 'unkwown_user', target 'features' to predict,
information of friends in 'friend_dict' and predicts features of that person according
to their friends' attributes in 'feat dict'
'''
# to record predictions about unknown_user's features
feat_user={}
# check if the user is in the social network or not
if unknown_user in friend_dict:
# look for features of 1st degree friend to predict unkown_user's
for feature in features:
predict_closeFri_attribute = predict_attribute(friend_besties(unknown_user,
friend_dict), feat_dict, feature)
#use degree one friends' attribute for feature if it exists
if (predict_closeFri_attribute):
feat_user[feature] = predict_closeFri_attribute
#in case no degree one friends have attribute for target feature, check degree two's
else:
predict_second_deg_attribute = predict_attribute(friend_second_besties(unknown_user,
friend_dict), feat_dict, feature)
feat_user[feature] = predict_second_deg_attribute
return feat_user
| StarcoderdataPython |
3386234 | # Written by <NAME>
# https://github.com/bo-yang/misc/blob/master/run_command_timeout.py
import subprocess
import threading
""" Run system commands with timeout
"""
class Command(object):
def __init__(self, cmd):
self.cmd = cmd
self.process = None
self.out = "TIMEOUT"
def run_command(self, capture = False):
if not capture:
self.process = subprocess.Popen(self.cmd,shell=True)
self.process.communicate()
return
# capturing the outputs of shell commands
self.process = subprocess.Popen(self.cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,stdin=subprocess.PIPE)
out,err = self.process.communicate()
if len(out) > 0:
self.out = out.splitlines()
else:
self.out = None
# set default timeout to 2 minutes
def run(self, capture = False, timeout = 120):
thread = threading.Thread(target=self.run_command, args=(capture,))
thread.start()
thread.join(timeout)
if thread.is_alive():
print 'Command timeout, kill it: ' + self.cmd
self.process.terminate()
thread.join()
return False
return True
if __name__=='__main__':
for i in range(3): #three tries
s = 3-i
r = Command('echo "sleep ' + str(s) + ' seconds"; sleep ' + str(s) + '; echo "done"').run(timeout=2)
print r
if r:
print 'success attempt ' + str(i+1)
break
else:
print 'failed. trying again...'
| StarcoderdataPython |
3295830 | ''' Bluefruit_Onboard_Neopixel
Illuminates the CPB's built-in NeoPixels (internally connected to pin 8).
Developed for The Art of Making: An Introduction to Hands-On System Design and Engineering
University of Pittsburgh Swanson School of Engineering
v1.2 <NAME> 02/11/2022
Wheel() colorwheel function based on Adafruit's Flora demo code: https://learn.adafruit.com/pages/5682/elements/1711074/download
'''
import board
import neopixel
import time
'''In python, functions must be defined before they are called. We'll use this later!
Wheel(n) -- input a value n ranging from 0 to 255 to get a color value from a color wheel
The colours are a transition red - green - blue - back to red.'''
def Wheel(wheelpos):
wheelpos=255-wheelpos
if wheelpos<85:
return (255-wheelpos*3,0,wheelpos*3)
elif wheelpos<170:
wheelpos-=85
return (0,wheelpos*3,255-wheelpos*3)
else:
wheelpos-=170
return (wheelpos*3,255-wheelpos*3,0)
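# Quick sanity check of the mapping (values computed from the function above):
# Wheel(0) -> (255, 0, 0) pure red, Wheel(85) -> (0, 255, 0) pure green,
# Wheel(170) -> (0, 0, 255) pure blue, then back towards red as the input nears 255.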
'''Let's have the Bluefruit's onboard NeoPixels illuminate red, green, blue and white in sequence for 1 second each.
neopixel.NeoPixel(pin, n, brightness) is the object used to drive the strip and send RGB color values to the NeoPixels.
The first argument is the pin the NeoPixels are connected to, in this case pin D8.
The second argument is the number of pixels in the strip. In this case, there are ten pixels on the "strip" -- so n=10.
red, green and blue range from 0 (off) to 255 (maximum brightness).
Note that onboard.fill DOES immediately set the color;
if you want to turn the pixels off, use (0,0,0)'''
onboard=neopixel.NeoPixel(board.D8,10,brightness=.5)
while True:
onboard.fill((255,0,0)) #set onboard neopixels to red (255,0,0) and display
time.sleep(1) #Wait for 1 second. sleep() takes numbers in seconds
onboard.fill((0,255,0)) #set pixel color to green
time.sleep(1)
onboard.fill((0,0,255)) #set pixel color to blue
time.sleep(1)
onboard.fill((255,255,255)) #set pixel color to white
time.sleep(1)
'''Now let's transition smoothly through rainbow colors
We'll use the function "Wheel" defined above'''
for i in range(1,5):
for j in range(0,255):
onboard.fill(Wheel(j))
time.sleep(.03)
| StarcoderdataPython |
3292939 | <reponame>neriat/envcon
from .configuration import environment_configuration, configuration
from .frozen import FrozenError
__all__ = ["environment_configuration", "configuration", "FrozenError"]
| StarcoderdataPython |
95607 | <gh_stars>0
import alsaaudio
from math import pi, sin, pow
import getch
SAMPLE_RATE = 44100
FORMAT = alsaaudio.PCM_FORMAT_U8
PERIOD_SIZE = 512
N_SAMPLES = 1024
notes = "abcdefg"
frequencies = {}
for i, note in enumerate(notes):
frequencies[note] = 440 * pow(pow(2, 1/2), i)
# Generate the sine wave, centered at y=128 with 1024 samples
sine_wave = [int(sin(x * 2*pi/N_SAMPLES) * 127) for x in range(0, N_SAMPLES)]
square_wave = []
sawtooth_wave = []
triangle_wave = []
for i in range(0, N_SAMPLES):
phase = (i * 2*pi / N_SAMPLES) % (2*pi)  # wrap the phase into [0, 2*pi)
if phase < pi:
square_wave.append(127)
else:
square_wave.append(-128)
sawtooth_wave.append(int(127 - (127 // pi * phase)))
if phase < pi:
triangle_wave.append(int(-127 + (2 * 127 * phase // pi)))
else:
triangle_wave.append(int(3 * 127 - (2 * 127 * phase // pi)))
def main():
buf = bytearray(PERIOD_SIZE)
# alsaaudio setup
dev = alsaaudio.PCM(type=alsaaudio.PCM_PLAYBACK)
dev.setchannels(1)
dev.setrate(SAMPLE_RATE)
dev.setformat(FORMAT)
dev.setperiodsize(PERIOD_SIZE)
#load_buf(buf, 440)
f = 440
w_half = [x//2 + 128 for x in make_wave(sine_wave, f)]
#w_o1 = [x//4 for x in make_wave(f*2)]
#w_o2 = [x//6 for x in make_wave(f*3)]
#w_o3 = [x//8 for x in make_wave(f*4)]
#w_o4 = [x//10 for x in make_wave(f*5)]
#w_o4 = [x//12 for x in make_wave(f*6)]
#w_o5 = [x//14 for x in make_wave(f*7)]
#w_o6 = [x//16 for x in make_wave(f*8)]
#for i, samp in enumerate(w_o1):
# w[i] += samp + w_o2[i] + w_o3[i] + w_o4[i] + w_o5[i] + w_o6[i] + 128
# print(w[i])
#buf = bytearray(w)
#for i, samp in enumerate(w):
# if samp > 0:
# samp = 127
# else:
# samp = -128
w = [x + 128 for x in make_wave(square_wave, 440)]
buf = bytearray(w)
char = getch.getch()
last = 'q'
while char != 'q':
if char != last:
if char == '1':
w = [x//2 + 128 for x in make_wave(sine_wave, 440)]
buf = bytearray(w)
elif char == '2':
w = [x//2 + 128 for x in make_wave(square_wave, 440)]
buf = bytearray(w)
elif char == '3':
w = [x//2 + 128 for x in make_wave(sawtooth_wave, 440)]
buf = bytearray(w)
elif char == '4':
w = [x//2 + 128 for x in make_wave(triangle_wave, 440)]
buf = bytearray(w)
elif char == '5':
buf = bytearray(w_half)
dev.write(buf)
dev.write(buf)
dev.write(buf)
last = char
char = getch.getch()
return 0
#def load_buf(buf, frequency):
# step = N_SAMPLES * frequency // SAMPLE_RATE
# for i in range(0, PERIOD_SIZE):
# buf[i] = wave[(step * i * N_SAMPLES // PERIOD_SIZE) % N_SAMPLES]
# return buf
def make_wave(wave, frequency):
step = N_SAMPLES * frequency // SAMPLE_RATE
w = []
for i in range(0, PERIOD_SIZE):
w.append(wave[(step * i * N_SAMPLES // PERIOD_SIZE) % N_SAMPLES])
return w
if __name__ == '__main__':
main()
| StarcoderdataPython |
71683 | <reponame>DanPopa46/neo3-boa
from __future__ import annotations
import base64
from typing import Any, Dict, List, Optional
from boa3.neo import from_hex_str, to_hex_str
from boa3.neo3.core.types import UInt256
from boa3_test.tests.test_classes import transactionattribute as tx_attribute
from boa3_test.tests.test_classes.signer import Signer
from boa3_test.tests.test_classes.witness import Witness
class Transaction:
def __init__(self, script: bytes, signers: List[Signer] = None, witnesses: List[Witness] = None):
self._signers: List[Signer] = signers if signers is not None else []
self._witnesses: List[Witness] = witnesses if witnesses is not None else []
self._attributes: List[tx_attribute.TransactionAttribute] = []
self._script: bytes = script
self._hash: Optional[UInt256] = None
def add_attribute(self, tx_attr: tx_attribute.TransactionAttribute):
if tx_attr not in self._attributes:
self._attributes.append(tx_attr)
def to_json(self) -> Dict[str, Any]:
return {
'signers': [signer.to_json() for signer in self._signers],
'witnesses': [witness.to_json() for witness in self._witnesses],
'attributes': [attr.to_json() for attr in self._attributes],
'script': to_hex_str(self._script)
}
@classmethod
def from_json(cls, json: Dict[str, Any]) -> Transaction:
script = base64.b64decode(json['script'])
tx = cls(script)
if 'signers' in json:
signers_json = json['signers']
if not isinstance(signers_json, list):
signers_json = [signers_json]
tx._signers = [Signer.from_json(js) for js in signers_json]
if 'witnesses' in json:
witnesses_json = json['witnesses']
if not isinstance(witnesses_json, list):
witnesses_json = [witnesses_json]
tx._witnesses = [Witness.from_json(js) for js in witnesses_json]
if 'attributes' in json:
attributes_json = json['attributes']
if not isinstance(attributes_json, list):
attributes_json = [attributes_json]
tx._attributes = [tx_attribute.TransactionAttribute.from_json(js) for js in attributes_json]
if 'hash' in json and isinstance(json['hash'], str):
tx._hash = UInt256(from_hex_str(json['hash']))
return tx
def copy(self):
copied = Transaction(self._script, self._signers, self._witnesses)
copied._hash = self._hash
return copied
| StarcoderdataPython |
121464 | import pandas as pd
import itertools
import numpy as np
import pickle
import os
import argparse
basePath=os.getcwd()
def get_file_list(file_folder):
# method one: file_list = os.listdir(file_folder)
for root, dirs, file_list in os.walk(file_folder):
return dirs,file_list
parser = argparse.ArgumentParser()
parser.add_argument("-saveBasePath", help="res file path", type=str, default=basePath+'/res_test_data/ML/')
parser.add_argument("-writeDir", help="write file path",type=str, default=basePath+'/results/mlRes/')
parser.add_argument("-datasetNames", help="Dataset Name",type=str, default=["ecfp6fcfp6MACCS"])
parser.add_argument("-methodNames", help="method Name",type=str, default="SVM")
args = parser.parse_args()
saveBasePath = args.saveBasePath
writeDir = args.writeDir
datasetNames = args.datasetNames
methodNames = args.methodNames
if not os.path.exists(writeDir):
os.makedirs(writeDir)
for method in datasetNames:
filepath = saveBasePath+method+'/'+methodNames+'/'
files_list = get_file_list(filepath)[0]
num = len(files_list)
AUCall0_0=[]
AUCall0_1=[]
AUCall0_2=[]
AUCmean0=[]
targets=[]
for i in range(num):
datapath= filepath + files_list[i]
targetid = files_list[i]
f= open(datapath+'/o0001.test.auc.pckl','rb')
datacontent0_0 = pickle.load(f)
data0_0=datacontent0_0[0][0]
f= open(datapath+'/o0002.test.auc.pckl','rb')
datacontent0_1 = pickle.load(f)
data0_1=datacontent0_1[0][0]
f= open(datapath+'/o0003.test.auc.pckl','rb')
datacontent0_2 = pickle.load(f)
data0_2=datacontent0_2[0][0]
AUCall0_0.append(data0_0)
AUCall0_1.append(data0_1)
AUCall0_2.append(data0_2)
mean0=(data0_0+data0_1+data0_2)/3
AUCmean0.append(mean0)
targets.append(targetid)
res1={'targets':targets,method+'auc_0':AUCall0_0,method+'auc_1':AUCall0_1,method+'auc_2':AUCall0_2}
res1_data = pd.DataFrame(res1)
res2={'targets':targets,method+'AUCmean':AUCmean0}
res2_data = pd.DataFrame(res2)
res1_data.to_csv(writeDir+method+'.roc.csv',index=0)
res2_data.to_csv(writeDir+method+'.rocmean.csv',index=0)
| StarcoderdataPython |
18223 | from .constants import SPECIAL_TOKENS
try:
import re2 as re
except ImportError:
import re
def twitter_sentiment_token_matching(token):
"""Special token matching function for twitter sentiment data."""
if 'URL_TOKEN' in SPECIAL_TOKENS and re.match(r'https?:\/\/[^\s]+', token):
return SPECIAL_TOKENS['URL_TOKEN']
if 'POS_EM_TOKEN' in SPECIAL_TOKENS and re.match(r':-?(\)|D|p)+', token):
return SPECIAL_TOKENS['POS_EM_TOKEN']
if 'NEG_EM_TOKEN' in SPECIAL_TOKENS and re.match(r':-?(\(|\\|/)+', token):
return SPECIAL_TOKENS['NEG_EM_TOKEN']
if 'USER_TOKEN' in SPECIAL_TOKENS and re.match(
r'(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[A-Za-z0-9]+)', token):
return SPECIAL_TOKENS['USER_TOKEN']
if 'HEART_TOKEN' in SPECIAL_TOKENS and re.match(r'<3+', token):
return SPECIAL_TOKENS['HEART_TOKEN']
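# Illustrative behaviour, assuming the corresponding keys are defined in SPECIAL_TOKENS:
# twitter_sentiment_token_matching('https://t.co/abc') -> SPECIAL_TOKENS['URL_TOKEN']
# twitter_sentiment_token_matching(':-)') -> SPECIAL_TOKENS['POS_EM_TOKEN']
# twitter_sentiment_token_matching(':-(') -> SPECIAL_TOKENS['NEG_EM_TOKEN']
# twitter_sentiment_token_matching('<3') -> SPECIAL_TOKENS['HEART_TOKEN']
# A token matching none of the patterns falls through and the function returns None.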
| StarcoderdataPython |
3266545 | #
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: EPL-2.0
#
import os
from subprocess import check_output
from test_utils import project
from mlt.utils import constants, git_helpers
from test_utils.e2e_commands import CommandTester
class TestTemplates(CommandTester):
def _call_template_list(self, template_repo):
return check_output(['mlt', 'templates', 'list',
'--template-repo={}'.format(template_repo)]
).decode("utf-8")
def test_templates(self):
output = self._call_template_list(project.basedir())
desired_template_output = """Template Description
------------------- ---------------------------------------------------------\
-----------------------------------------
experiments Runs hyperparameter experiments for a demo job.
hello-world A TensorFlow python HelloWorld example run through \
Kubernetes Jobs.
horovod A distributed model training using horovod and openmpi.
pytorch Sample distributed application taken from \
http://pytorch.org/tutorials/intermediate/dist_tuto.html
pytorch-distributed A distributed PyTorch MNIST example run using the \
pytorch-operator.
tensorboard-bm A TensorBoard service in Kubernetes Bare Metal cluster.
tensorboard-gke A TensorBoard service in Google Kubernetes cluster.
tf-dist-mnist A distributed TensorFlow MNIST model which designates \
worker 0 as the chief.
tf-distributed A distributed TensorFlow matrix multiplication run \
through the TensorFlow Kubernetes Operator.
"""
assert output == desired_template_output
def test_local_templates(self):
"""
Tests creating a new template in a clone of mlt. Verifies that we can
specify the template-repo for mlt template list and see the new template in
the list. Then uses mlt init to create an app with the new template and
verifies that the app directory exists.
"""
# Create a git clone of mlt to use as a local template diretory
with git_helpers.clone_repo(project.basedir()) as temp_clone:
# Add a new test template to the local mlt template directory
templates_directory = os.path.join(
temp_clone, constants.TEMPLATES_DIR)
new_template_name = "test-local"
new_template_directory = os.path.join(templates_directory,
new_template_name)
os.mkdir(new_template_directory)
new_template_file = os.path.join(
new_template_directory, "README.md")
with open(new_template_file, "w") as f:
f.write("New local template for testing")
# call mlt template list and then check the output
output = self._call_template_list(temp_clone)
# template list should include our new test-local template
desired_template_output = """Template Description
------------------- ---------------------------------------------------------\
-----------------------------------------
experiments Runs hyperparameter experiments for a demo job.
hello-world A TensorFlow python HelloWorld example run through \
Kubernetes Jobs.
horovod A distributed model training using horovod and openmpi.
pytorch Sample distributed application taken from \
http://pytorch.org/tutorials/intermediate/dist_tuto.html
pytorch-distributed A distributed PyTorch MNIST example run using the \
pytorch-operator.
tensorboard-bm A TensorBoard service in Kubernetes Bare Metal cluster.
tensorboard-gke A TensorBoard service in Google Kubernetes cluster.
test-local New local template for testing
tf-dist-mnist A distributed TensorFlow MNIST model which designates \
worker 0 as the chief.
tf-distributed A distributed TensorFlow matrix multiplication run \
through the TensorFlow Kubernetes Operator.
"""
assert output == desired_template_output
# init an app with this new template and verify that the app exists
self.init(template=new_template_name, template_repo=temp_clone)
assert os.path.isdir(self.project_dir)
assert os.path.isfile(os.path.join(
self.project_dir, "README.md"))
| StarcoderdataPython |
3391205 | <filename>externals/director/src/python/ddapp/shallowCopy.py<gh_stars>0
def deepCopy(dataObj):
newData = dataObj.NewInstance()
newData.DeepCopy(dataObj)
return newData
def shallowCopy(dataObj):
newData = dataObj.NewInstance()
newData.ShallowCopy(dataObj)
return newData
| StarcoderdataPython |
1665580 | <filename>python/src/examples/node_ledbutton/node.py
#!/usr/bin/python3
# import aiogrpc
import asyncio
import logging
import grpc
import signal
import sys
# pregenerated from proto file
import wedge_pb2
import wedge_pb2_grpc
from button import Button
from led import Led
CHANNEL_OPTIONS = [('grpc.lb_policy_name', 'pick_first'),
('grpc.enable_retries', 0),
('grpc.keepalive_timeout_ms', 10000)]
node_id = wedge_pb2.NodeIdentity(
id="python_node_client_led_button",
)
"""
Uplink class holds all remote Wedge Methods
"""
class Uplink:
def __init__(self, stub, node_id):
logging.info("Initialize Uplink object.")
self.stub = stub
self.node_id = node_id
signal.signal(signal.SIGINT, self.cancel_request)
def cancel_request(self, signal, frame):
print("")
logging.warning("Interrupt signal. Exiting!")
self._future.cancel()
sys.exit(0)
async def SetModel(self, model):
# metadata is optional, it just for test purpose.
metadata = [('ip', '127.0.0.1')]
request = wedge_pb2.SetModelRequest(model=model)
return await self.stub.SetModel(request=request, metadata=metadata)
async def SetState(self, value_id, state):
logging.info("Update value with id {} to: {}"
.format(value_id, state.data))
req = wedge_pb2.SetStateRequest(
node=self.node_id,
device_id=1, # only one device exists in this model
value_id=value_id,
state=state
)
resp = await self.stub.SetState(req)
logging.info("Replay: {}".format(resp))
async def GetControl(self, request):
self._future = self.stub.GetControl(request)
return await self._future
async def main() -> None:
async with grpc.aio.insecure_channel(target='localhost:50051') as channel:
stub = wedge_pb2_grpc.WedgeStub(channel)
uplink = Uplink(stub, node_id)
button = Button(uplink)
led = Led(uplink)
# Example of a Model, which is identical to the Seluxit data Model.
model = wedge_pb2.Model(
node=node_id,
device=[wedge_pb2.Device(
id=1,
name="LED_Button2",
version="0.1.2",
value=[
button.value,
led.value
]
)]
)
resp = await uplink.SetModel(model)
logging.info("Response: {}".format(resp))
# data = button.pin.read()
# logging.info("Current button status: {}".format(data))
# button.update(data)
await asyncio.gather(led.listen(node_id))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
asyncio.run(main())
| StarcoderdataPython |
4821853 | <reponame>fcoterroba/first_birthday_statistics_python
import matplotlib.pyplot as plt
# Make the arrays with the info
meses = ["Abril", "Mayo", "Junio", "Julio", "Agosto", "Septiembre", "Octubre", "Noviembre", "Diciembre", "Enero", "Febrero", "Marzo"]
visitas = [816, 1034, 1101, 1250, 1604, 1983, 2468, 3021, 2867, 3520, 4010, 5097]
# Make the arrays with trimester data
months = ["1º trimestre: Abril-Junio", "2º trimestre: Julio-Septiembre", "3º trimestre: Octubre-Diciembre", "4º trimestre: Enero-Marzo"]
visits_trimester = [2951, 4837, 8356, 12627]
def grafico_barra_vertical():
# Make the object
fig, ax = plt.subplots()
# Label for Y
ax.set_ylabel('Visitas')
# Label for X
ax.set_title('Visitas en el primer aniversario de la web')
# Set the arrays to the X and Y
plt.bar(meses, visitas)
# Show the chart
plt.show()
def grafico_lineas_horizontal():
# Make a plot with data
plt.plot(meses, visitas)
# Show the chart
plt.show()
def grafico_tarta_trimestral():
# Make a plot with the data and labels
plt.pie(visits_trimester, labels = months)
plt.show()
if __name__ == "__main__":
grafico_barra_vertical()
grafico_lineas_horizontal()
grafico_tarta_trimestral()
| StarcoderdataPython |
1786318 | <reponame>michaelfarinacci/tchack2016<filename>app.py
from flask import Flask, redirect, url_for, render_template, request, flash
import flask
import os
from os.path import join, dirname
from dotenv import load_dotenv
import braintree
import json
app = Flask(__name__)
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
app.secret_key = os.environ.get('APP_SECRET_KEY')
braintree.Configuration.configure(
os.environ.get('BT_ENVIRONMENT'),
os.environ.get('BT_MERCHANT_ID'),
os.environ.get('BT_PUBLIC_KEY'),
os.environ.get('BT_PRIVATE_KEY')
)
TRANSACTION_SUCCESS_STATUSES = [
braintree.Transaction.Status.Authorized,
braintree.Transaction.Status.Authorizing,
braintree.Transaction.Status.Settled,
braintree.Transaction.Status.SettlementConfirmed,
braintree.Transaction.Status.SettlementPending,
braintree.Transaction.Status.Settling,
braintree.Transaction.Status.SubmittedForSettlement
]
orders = {'45321': {'total': 100, 'tax': 5}}
@app.route('/', methods=['GET'])
def index():
return render_template('first.html')
def post():
return redirect("second.html")
@app.route('/checkouts/new', methods=['GET'])
def new_checkout():
client_token = braintree.ClientToken.generate()
return render_template('checkouts/new.html', client_token=client_token)
@app.route('/create_order', methods=['GET'])
def create_order():
total = flask.request.args['total']
items = flask.request.args['items']
tax = flask.request.args['tax']
merchant_id = flask.request.args['merchantId']
order_id = 45321
return str(order_id)
@app.route('/get_order', methods=['GET'])
def get_order():
order_id = flask.request.args['orderId']
order_data = orders[order_id]
json_order_data = json.dumps(order_data)
resp = flask.Response(response=json_order_data, status=200, mimetype="application/json")
return resp
@app.route('/customer', methods=['GET'])
def customer():
return render_template('customer.html')
@app.route('/merchant', methods=['GET'])
def merchant():
return render_template('merchant.html')
@app.route('/checkouts/<transaction_id>', methods=['GET'])
def show_checkout(transaction_id):
transaction = braintree.Transaction.find(transaction_id)
result = {}
if transaction.status in TRANSACTION_SUCCESS_STATUSES:
result = {
'header': 'Sweet Success!',
'icon': 'success',
'message': 'Your test transaction has been successfully processed. See the Braintree API response and try again.'
}
else:
result = {
'header': 'Transaction Failed',
'icon': 'fail',
'message': 'Your test transaction has a status of ' + transaction.status + '. See the Braintree API response and try again.'
}
return render_template('checkouts/show.html', transaction=transaction, result=result)
@app.route('/checkouts', methods=['POST'])
def create_checkout():
result = braintree.Transaction.sale({
'amount': request.form['amount'],
'payment_method_nonce': request.form['payment_method_nonce'],
})
if result.is_success or result.transaction:
return redirect(url_for('show_checkout',transaction_id=result.transaction.id))
else:
for x in result.errors.deep_errors: flash('Error: %s: %s' % (x.code, x.message))
return redirect(url_for('new_checkout'))
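# Note (illustrative, based on the public Braintree Python SDK): Transaction.sale as used above
# only authorizes the payment; to capture funds in one step the payload can also include
#     'options': {'submit_for_settlement': True}
# e.g. braintree.Transaction.sale({'amount': '10.00',
#                                  'payment_method_nonce': nonce,
#                                  'options': {'submit_for_settlement': True}})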
if __name__ == '__main__':
app.run(host='0.0.0.0', port=4567, debug=True)
| StarcoderdataPython |
1643745 | <reponame>LordKBX/EbookCollection
from checkpoint import *
from files import *
from content_table_editor import *
# Explicit imports for names used below (some may also be re-exported by the star imports above)
import os
import sys
import time
import shutil
import traceback
from xml.dom import minidom
import PyQt5.uic
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QListWidgetItem, QTreeWidgetItem
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from common.dialog import *
from common.books import *
from common.files import *
from common.archive import *
from common.vars import *
import common.qt
import common.dialog
class EditorWindow(QtWidgets.QMainWindow):
default_page = ''
ebook_info = None
tmpcss = ''
toc_type = ''
def __init__(self, parent: QtWidgets.QMainWindow, opened_file, lang, bdd):
super(EditorWindow, self).__init__(parent)
PyQt5.uic.loadUi(os.path.dirname(os.path.realpath(__file__)) + os.sep + 'editor.ui'.replace('/', os.sep), self)
self.opened_file = opened_file
self.tmpDir = app_user_directory + os.sep + 'editor' + os.sep + 'tmp'
try:
rmDir(self.tmpDir)
if os.path.isdir(self.tmpDir) is not True:
os.makedirs(self.tmpDir + os.sep + 'original')
os.makedirs(self.tmpDir + os.sep + 'current')
        except Exception:
            pass
self.lang = lang
self.BDD = bdd
# load window size
size_tx = self.BDD.get_param('editor/windowSize')
if size_tx is not None and size_tx != '':
size = eval(size_tx)
self.resize(size[0], size[1])
# load window position
pos_tx = self.BDD.get_param('editor/windowPos')
if pos_tx is not None and pos_tx != '':
pos = eval(pos_tx)
self.move(pos[0], pos[1])
self.pos()
self.app_style = self.BDD.get_param('style')
self.lang.set_lang(self.BDD.get_param('lang'))
self.apply_translation()
self.apply_style()
# ui.tabWidget
ad = app_directory.replace(os.sep, '/')
self.tabWidget.clear()
self.voidLabel.setVisible(True)
self.tabWidget.setVisible(False)
self.tabWidget.set_preview_webview(self.webView, self.default_page)
self.tabWidget.tabCloseRequested.connect(self.on_close_tab)
self.tabWidget.currentChanged.connect(self.on_change_tab)
# Processing File Table
self.treeFileTable.clear()
self.treeFileTable.itemDoubleClicked.connect(self.file_table_item_double_clicked)
self.treeFileTable.setIndentation(10)
self.treeFileTable.setCursor(QtCore.Qt.PointingHandCursor)
self.treeFileTable.setStyleSheet(get_style_var(self.app_style, 'fullTreeView'))
# Processing Content Table
# self.treeContentTable = QListWidget()
self.treeContentTable.setCursor(QtCore.Qt.PointingHandCursor)
self.treeContentTable.currentItemChanged.connect(self.content_table_current_item_changed)
self.treeContentTable.itemDoubleClicked.connect(self.content_table_item_double_clicked)
# Toolbar buttons
self.button_save.clicked.connect(self.save_ebook)
self.button_load_checkpoint.clicked.connect(self.load_check_point)
self.button_create_checkpoint.clicked.connect(self.create_check_point)
self.button_file_manager.clicked.connect(self.load_file_managment)
self.button_edit_content_table.clicked.connect(self.load_content_table_managment)
self.webView.setHtml(self.default_page)
self.webView.page().settings().setUserStyleSheetUrl(QtCore.QUrl.fromLocalFile(self.tmpcss))
self.webView.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
filepath, ext = os.path.splitext(self.opened_file)
mappdir = app_directory.replace(os.sep, '/') + '/data/'
self.setWindowTitle(
self.lang['Editor']['WindowTitle'] + ' - ' + self.opened_file.replace(os.sep, '/')
.replace(mappdir, '').replace('/', ' / ').replace(ext, '')
)
# EditorWindow.show()
if ext in ['.epub', '.epub2', '.epub3']:
self.ebook_info = get_epub_info(self.opened_file)
ret = inflate(self.opened_file, self.tmpDir + os.sep + 'original')
ret = inflate(self.opened_file, self.tmpDir + os.sep + 'current')
elif ext in ['.cbz', '.cbr']:
ret = inflate(self.opened_file, self.tmpDir + os.sep + 'original')
ret = inflate(self.opened_file, self.tmpDir + os.sep + 'current')
else:
WarnDialog(
self.lang['Editor']['DialogInfoBadFileWindowTitle'],
self.lang['Editor']['DialogInfoBadFileWindowText'], self)
exit(0)
self.file_table_load()
self.load_content_table()
def resizeEvent(self, a0: QtGui.QResizeEvent) -> None:
size = self.size()
tx = [size.width(), size.height()].__str__()
self.BDD.set_param('editor/windowSize', tx)
def moveEvent(self, a0: QtGui.QMoveEvent) -> None:
pos = self.pos()
tx = [pos.x(), pos.y()].__str__()
self.BDD.set_param('editor/windowPos', tx)
def apply_translation(self):
self.default_page = "".join(self.lang['Editor']['WebViewDefaultPageContent'])
# Blocks title
self.dockTop.setWindowTitle(self.lang['Editor/BlockToolbar/Header'])
self.dockFiles.setWindowTitle(self.lang['Editor/BlockFileListHeader'])
self.dockContentTable.setWindowTitle(self.lang['Editor/BlockContentTableHeader'])
self.dockPreview.setWindowTitle(self.lang['Editor/BlockPreviewHeader'])
# Toolbar buttons
self.button_save.setText(self.lang['Editor/BlockToolbar/Save'])
self.button_load_checkpoint.setText(self.lang['Editor/BlockToolbar/CheckPointLoad'])
self.button_create_checkpoint.setText(self.lang['Editor/BlockToolbar/CheckPointCreate'])
self.button_file_manager.setText(self.lang['Editor/BlockToolbar/FileManager'])
self.button_edit_content_table.setText(self.lang['Editor/BlockToolbar/EditContentTable'])
self.voidLabel.setText(self.lang['Editor/CentralZoneEmpty'])
def apply_style(self):
self.setStyleSheet(get_style_var(self.app_style, 'QMainWindow'))
self.dockTopContents.setStyleSheet(get_style_var(self.app_style, 'QMainWindow'))
self.voidLabel.setStyleSheet(get_style_var(self.app_style, 'EditorCentralLabel'))
icon_names_list = ['save', 'checkpoint_load', 'checkpoint_create', 'file_manager', 'content_table']
icon_dir = {}
for name in icon_names_list:
icon_dir[name] = QtGui.QIcon()
icon_dir[name].addPixmap(
QtGui.QPixmap(
get_style_var(self.app_style, 'icons/'+name)
.replace('{APP_DIR}', app_directory)
.replace('/', os.sep)
),
QtGui.QIcon.Normal, QtGui.QIcon.Off
)
self.button_save.setIcon(icon_dir['save'])
self.button_load_checkpoint.setIcon(icon_dir['checkpoint_load'])
self.button_create_checkpoint.setIcon(icon_dir['checkpoint_create'])
self.button_file_manager.setIcon(icon_dir['file_manager'])
self.button_edit_content_table.setIcon(icon_dir['content_table'])
self.tmpcss = self.tmpDir + os.sep + "tmp.css"
with open(self.tmpcss, "w", encoding="utf8") as file_page:
print('tmpcss = ' + get_style_var(self.app_style, 'EditorQWebViewPreview'))
file_page.write(get_style_var(self.app_style, 'EditorQWebViewPreview'))
self.tabWidget.setStyleSheet(get_style_var(self.app_style, 'QTabWidgetHorizontal'))
self.tabWidget.setBackgroundRole(QtGui.QPalette.ColorRole(QtGui.QPalette.Light))
def content_table_current_item_changed(self, current: QListWidgetItem):
if current is None:
return
try:
data = current.data(99)
print(data)
except Exception:
traceback.print_exc()
def file_table_item_double_clicked(self, current: QTreeWidgetItem, previous: QTreeWidgetItem):
try:
data = current.data(0, 99)
text = current.data(0, 98)
if data != ':dir:':
icon = self.file_icon(data)
self.tabWidget.create_pane(text, icon, data, self, self.tmpcss)
self.voidLabel.setVisible(False)
self.tabWidget.setVisible(True)
except Exception:
traceback.print_exc()
def content_table_item_double_clicked(self, current: QListWidgetItem):
try:
print('current = ', current)
data = current.data(99)
text = current.data(98)
print(text)
if data != ':dir:':
icon = self.file_icon(data)
self.tabWidget.create_pane(text, icon, data, self, self.tmpcss)
self.voidLabel.setVisible(False)
self.tabWidget.setVisible(True)
except Exception:
traceback.print_exc()
def file_table_load(self):
self.treeFileTable.clear()
liste = list_directory_tree(self.tmpDir + os.sep + 'current', None)
# print(liste)
for index in liste:
item = QtWidgets.QTreeWidgetItem(self.treeFileTable)
item.setText(0, index)
item.setData(0, 98, index)
if isinstance(liste[index], dict):
item.setData(0, 99, ':dir:')
common.qt.setQTreeItemFolderIcon(item)
item = self.recur_file_table_insert(item, liste[index])
else:
self.icon_file_item(item, liste[index])
item.setData(0, 99, liste[index])
self.treeFileTable.insertTopLevelItem(0, item)
def recur_file_table_insert(self, base_item: QtWidgets.QTreeWidgetItem, tree: dict):
for indexr in tree:
itemr = QtWidgets.QTreeWidgetItem(base_item)
itemr.setText(0, indexr)
itemr.setData(0, 98, indexr)
if isinstance(tree[indexr], dict):
itemr.setData(0, 99, ':dir:')
common.qt.setQTreeItemIcon(itemr, get_style_var(self.app_style, 'icons/folder'))
itemr = self.recur_file_table_insert(itemr, tree[indexr])
else:
self.icon_file_item(itemr, tree[indexr])
itemr.setData(0, 99, tree[indexr])
base_item.addChild(itemr)
return base_item
def file_icon(self, file_path: str):
file_type = get_file_type(file_path)
if file_type.startswith('image/'):
return get_style_var(self.app_style, 'icons/image')
elif file_type == 'text/css':
return get_style_var(self.app_style, 'icons/style')
elif file_type == 'application/oebps-package+xml': # .opf
return get_style_var(self.app_style, 'icons/info')
elif file_type == 'application/x-dtbncx+xml': # .ncx
return get_style_var(self.app_style, 'icons/content_table')
elif file_type == 'application/xml':
return get_style_var(self.app_style, 'icons/xml')
elif file_type == 'application/x-font-truetype':
return get_style_var(self.app_style, 'icons/font')
elif file_type == 'application/xhtml+xml':
return get_style_var(self.app_style, 'icons/page')
else:
return get_style_var(self.app_style, 'icons/file')
def icon_file_item(self, item: QTreeWidgetItem, file_path: str):
icon = self.file_icon(file_path)
file_type = get_file_type(file_path)
common.qt.setQTreeItemIcon(item, icon)
def on_close_tab(self, index_tab: int):
if self.tabWidget.count() == 0:
return
print('on_close_tab')
if self.tabWidget.count() > index_tab >= 0:
self.tabWidget.removeTab(index_tab)
if self.tabWidget.count() == 0:
self.voidLabel.setVisible(True)
self.tabWidget.setVisible(False)
def on_change_tab(self, index_tab: int):
self.tabWidget.draw_preview()
def create_check_point(self):
try:
print("create_check_point")
stime = unixtime_to_string(time.time(), template='%Y-%m-%d_%H-%M-%S', is_utc=False)
shutil.copytree(self.tmpDir + os.sep + 'current', self.tmpDir + os.sep + stime)
InfoDialog(
self.lang['Editor']['DialogCreateCheckpointWindowTitle'],
self.lang['Editor']['DialogCreateCheckpointWindowText'].format(stime),
self
)
except Exception:
traceback.print_exc()
def load_check_point(self):
try:
wl = CheckpointWindow(self, self.tmpDir)
ret = wl.openExec()
if ret is not None:
if os.path.isdir(self.tmpDir + os.sep + ret) is True:
common.files.rmDir(self.tmpDir + os.sep + 'current')
common.files.copyDir(self.tmpDir + os.sep + ret, self.tmpDir + os.sep + 'current')
self.tabWidget.reload_contents()
except Exception:
traceback.print_exc()
def save_ebook(self):
try:
ret = common.dialog.InfoDialogConfirm(
self.lang['Editor']['DialogConfirmSaveWindowTitle'],
self.lang['Editor']['DialogConfirmSaveWindowText'],
self.lang['Generic']['DialogBtnYes'],
self.lang['Generic']['DialogBtnNo'], self.parent()
)
if ret is True:
os.remove(self.opened_file)
deflate(self.tmpDir + os.sep + 'current' + os.sep + '*', self.opened_file)
except Exception:
traceback.print_exc()
def load_file_managment(self):
try:
wl = FilesWindow(self, self.tmpDir + os.sep + 'current')
ret = wl.open_exec()
# print(ret)
if ret is not None:
for file in ret['delete']:
if ret['delete'][file]['type'] == 'deleteFile':
os.remove(self.tmpDir + os.sep + 'current' + ret['delete'][file]['innerPath'])
elif ret['delete'][file]['type'] == 'deleteFolder':
rmDir(self.tmpDir + os.sep + 'current' + ret['delete'][file]['innerPath'])
for file in ret['rename']:
if ret['rename'][file]['type'] == 'renameFile':
rename(
self.tmpDir + os.sep + 'current' + ret['rename'][file]['original'],
self.tmpDir + os.sep + 'current' + ret['rename'][file]['newPath']
)
elif ret['rename'][file]['type'] == 'renameFolder':
rename(
self.tmpDir + os.sep + 'current' + ret['rename'][file]['original'],
self.tmpDir + os.sep + 'current' + ret['rename'][file]['newPath']
)
for file in ret['new']:
# print(file)
if ret['new'][file]['type'] == 'new_file':
f = open(self.tmpDir + os.sep + 'current' + ret['new'][file]['innerPath'], 'w', encoding="utf8")
f.write(' ')
f.close()
elif ret['new'][file]['type'] == 'new_folder':
os.makedirs(self.tmpDir + os.sep + 'current' + ret['new'][file]['innerPath'])
elif ret['new'][file]['type'] == 'import':
copyFile(
ret['new'][file]['original'],
self.tmpDir + os.sep + 'current' + ret['new'][file]['innerPath']
)
self.file_table_load()
except Exception:
traceback.print_exc()
def load_content_table_managment(self):
try:
wl = ContentTableWindow(self, self.tmpDir + os.sep + 'current')
ret = wl.open_exec()
if ret is not None:
chapters = []
for obj in ret:
chapters.append(obj['url'][1:])
opf = self.save_metada(chapters)
if self.toc_type == 'NCX':
li = common.files.list_directory(self.tmpDir + os.sep + 'current', "ncx")
if len(li) > 0:
file = open(li[0], "r", encoding="utf8")
content = file.read()
file.close()
mydoc = minidom.parseString(content.replace(" ", "").replace("\t", ""))
map = mydoc.getElementsByTagName('navMap')[0]
points = mydoc.getElementsByTagName('navPoint')
for i in range(0, len(points)):
map.removeChild(points[i])
i = 0
for obj in ret:
i += 1
point = mydoc.createElement('navPoint')
point.setAttribute('id', 'num_{}'.format(i))
point.setAttribute('playOrder', "{}".format(i))
label = mydoc.createElement('navLabel')
tx = mydoc.createElement('text')
text_node = minidom.Text()
text_node.data = obj['name']
tx.appendChild(text_node)
label.appendChild(tx)
point.appendChild(label)
content = mydoc.createElement('content')
content.setAttribute('src', obj['url'])
point.appendChild(content)
map.appendChild(point)
mydoc.toprettyxml()
file = open(li[0], "wt", encoding="utf8")
file.write(mydoc.toprettyxml().replace("\r", "").replace("\n", "").replace(" ", "").replace(">\t", ">\n\t"))
file.close()
self.load_content_table(opf)
except Exception:
traceback.print_exc()
def load_content_table(self, data: str = None):
try:
print('---------------------------------------------------------')
self.treeContentTable.clear()
file_name = ''
file_path = ''
directory = self.tmpDir + os.sep + 'current' + os.sep
if data is None:
li = common.files.list_directory(directory, "opf")
file_name = li[0][li[0].rindex(os.sep)+1:]
file_path = li[0]
with open(li[0]) as myfile:
data = myfile.read()
self.toc_type, chapters = parse_content_table(
data,
file_path.replace(directory, '').replace(file_name, '').replace(os.sep, '/'),
directory
)
for chapter in chapters:
try:
try:
last_slash = chapter['src'].rindex('/')
except ValueError:
last_slash = - 1
item = QtWidgets.QListWidgetItem(self.treeContentTable)
item.setText(chapter['name'])
item.setData(98, chapter['src'][last_slash+1:])
item.setData(99, directory + chapter['src'].replace('/', os.sep))
icon = QtGui.QIcon()
image = QtGui.QPixmap()
image.load(get_style_var(self.app_style, 'icons/page'))
icon.addPixmap(image, QtGui.QIcon.Normal, QtGui.QIcon.Off)
item.setIcon(icon)
self.treeContentTable.addItem(item)
except Exception:
traceback.print_exc()
except IOError:
print("File not open")
except Exception:
traceback.print_exc()
def save_metada(self, chapters: list = None):
try:
li = common.files.list_directory(self.tmpDir + os.sep + 'current', "opf")
if len(li) > 0:
content = ''
with open(li[0], "r", encoding="utf8") as file:
content = file.read()
# print(content)
mydoc = minidom.parseString(content)
manifest = mydoc.getElementsByTagName('manifest')[0]
items = mydoc.getElementsByTagName('item')
for i in range(0, len(items)):
manifest.removeChild(items[i])
spine = mydoc.getElementsByTagName('spine')[0]
toc = None
try:
toc = spine.attributes['toc'].value
                except Exception:
                    pass
                # remove the existing <itemref> children of <spine> before rebuilding them
                refs_list = mydoc.getElementsByTagName('itemref')
for i in range(0, len(refs_list)):
spine.removeChild(refs_list[i])
files = common.files.list_directory(self.tmpDir + os.sep + 'current')
idno = 1
list_refs = {}
for file in files:
path = file.replace(self.tmpDir + os.sep + 'current' + os.sep, '')
tp = path.split('.')
ext = None
if len(tp) > 1 and "META-INF" not in path:
ext = tp[len(tp) - 1].lower()
mtype = 'text/plain'
try:
mtype = mediatypes[ext]
                        except Exception:
                            pass
item = mydoc.createElement('item')
item.setAttribute('id', 'id{}'.format(idno))
item.setAttribute('href', path)
item.setAttribute('media-type', mtype)
manifest.appendChild(item)
if ext in ['ncx']:
spine.attributes['toc'].value = 'id{}'.format(idno)
if ext in ['xhtml', 'html']:
itemref = mydoc.createElement('itemref')
itemref.setAttribute('idref', 'id{}'.format(idno))
if chapters is None:
spine.appendChild(itemref)
else:
list_refs[path] = itemref
idno += 1
if chapters is not None:
# print(list_refs)
for chapter in chapters:
if chapter in list_refs:
spine.appendChild(list_refs[chapter])
mydoc.toprettyxml()
ret = mydoc.toprettyxml().replace("\r", "").replace("\n", "").replace(" ", "").replace(">\t", ">\n\t")
with open(li[0], "wt", encoding="utf8") as file:
file.write(ret)
return ret
except Exception:
traceback.print_exc()
return None
| StarcoderdataPython |
1635514 | <gh_stars>100-1000
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: model_metrics.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='model_metrics.proto',
package='nasbench',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n\x13model_metrics.proto\x12\x08nasbench\"s\n\x0cModelMetrics\x12\x31\n\x0f\x65valuation_data\x18\x01 \x03(\x0b\x32\x18.nasbench.EvaluationData\x12\x1c\n\x14trainable_parameters\x18\x02 \x01(\x05\x12\x12\n\ntotal_time\x18\x03 \x01(\x01\"\xa3\x01\n\x0e\x45valuationData\x12\x15\n\rcurrent_epoch\x18\x01 \x01(\x01\x12\x15\n\rtraining_time\x18\x02 \x01(\x01\x12\x16\n\x0etrain_accuracy\x18\x03 \x01(\x01\x12\x1b\n\x13validation_accuracy\x18\x04 \x01(\x01\x12\x15\n\rtest_accuracy\x18\x05 \x01(\x01\x12\x17\n\x0f\x63heckpoint_path\x18\x06 \x01(\t')
)
_MODELMETRICS = _descriptor.Descriptor(
name='ModelMetrics',
full_name='nasbench.ModelMetrics',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='evaluation_data', full_name='nasbench.ModelMetrics.evaluation_data', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trainable_parameters', full_name='nasbench.ModelMetrics.trainable_parameters', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='total_time', full_name='nasbench.ModelMetrics.total_time', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=33,
serialized_end=148,
)
_EVALUATIONDATA = _descriptor.Descriptor(
name='EvaluationData',
full_name='nasbench.EvaluationData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='current_epoch', full_name='nasbench.EvaluationData.current_epoch', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='training_time', full_name='nasbench.EvaluationData.training_time', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='train_accuracy', full_name='nasbench.EvaluationData.train_accuracy', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='validation_accuracy', full_name='nasbench.EvaluationData.validation_accuracy', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='test_accuracy', full_name='nasbench.EvaluationData.test_accuracy', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='checkpoint_path', full_name='nasbench.EvaluationData.checkpoint_path', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=151,
serialized_end=314,
)
_MODELMETRICS.fields_by_name['evaluation_data'].message_type = _EVALUATIONDATA
DESCRIPTOR.message_types_by_name['ModelMetrics'] = _MODELMETRICS
DESCRIPTOR.message_types_by_name['EvaluationData'] = _EVALUATIONDATA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ModelMetrics = _reflection.GeneratedProtocolMessageType('ModelMetrics', (_message.Message,), dict(
DESCRIPTOR = _MODELMETRICS,
__module__ = 'model_metrics_pb2'
# @@protoc_insertion_point(class_scope:nasbench.ModelMetrics)
))
_sym_db.RegisterMessage(ModelMetrics)
EvaluationData = _reflection.GeneratedProtocolMessageType('EvaluationData', (_message.Message,), dict(
DESCRIPTOR = _EVALUATIONDATA,
__module__ = 'model_metrics_pb2'
# @@protoc_insertion_point(class_scope:nasbench.EvaluationData)
))
_sym_db.RegisterMessage(EvaluationData)
# @@protoc_insertion_point(module_scope)
| StarcoderdataPython |
3341024 | # Generated by Django 2.2.10 on 2020-03-16 04:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('workflow', '0009_auto_20200306_1040'),
('workflow', '0009_projectstatus'),
]
operations = [
]
| StarcoderdataPython |
3323398 | <filename>source/HTTP_Component/Sensors.py
#
# Date: 2/24/21
# File Name: Sensors.py
#
# Engineer: <NAME>
# Contact: <EMAIL>
#
# Description:
# This is a script intended to retrieve, parse and return data from network connected sensors.
#
#
from urllib.request import urlopen
import math
METRIC = "mertric"
IMPERIAL = "imperial"
class Sensor:
def __init__(self, url_plug, units = METRIC, domain = 'localhost', port = '80'):
self.units = units
try:
self.address = 'http://' + domain + ':' + port + '/' + url_plug
except Exception as e:
print(e)
self.address = ''
# print("address(sensor): " + self.address)
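    # Usage sketch (illustrative, not part of the original file): the constructor above just
    # glues its arguments into an HTTP endpoint, e.g.
    #     sensor = Sensor("temperature", units=METRIC, domain="192.168.1.50", port="8080")
    #     # -> polls http://192.168.1.50:8080/temperature
    # The host and port here are hypothetical; requestData() below returns "NaN" whenever the
    # endpoint is unreachable or does not return a parseable number.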
def requestData(self):
try:
self.page = urlopen(self.address)
except Exception as e:
print(e)
return "NaN"
rawBytes = self.page.read()
rawString = rawBytes.decode("utf-8")
try:
float(rawString)
except Exception as e:
print(e)
return "NaN"
return rawString
def getSensorValue(self):
dataString = self.requestData()
sensorValue = float(dataString)
return sensorValue
def getUnits(self):
        return self.units
class TemperatureSensor(Sensor):
def getSensorValue(self):
dataString = self.requestData()
degreesCelsius = round(float(dataString), 2)
if self.units == METRIC:
return degreesCelsius
else:
degreesFahrenheit = round(((degreesCelsius * 1.8) + 32.0), 2)
return degreesFahrenheit
def getDegreesCelcius(self):
dataString = self.requestData()
degreesCelsius = float(dataString)
return degreesCelsius
def getUnits(self):
if self.units == IMPERIAL:
return "Fahrenheit"
else:
return "Celsius"
class HumiditySensor(Sensor):
def getSensorValue(self):
dataString = self.requestData()
relativeHumidity = float(dataString)
return relativeHumidity
def calculateDewPoint(self, relativeHumidity, degreesCelsius):
dewPoint = (243.12 * (math.log(relativeHumidity / 100) + ((17.62 * degreesCelsius) / (243.12 + degreesCelsius)))) / (17.62 - (math.log(relativeHumidity/100) + ((17.62 * degreesCelsius)/(243.12 + degreesCelsius))))
dewPoint = round(dewPoint, 2)
return dewPoint
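    # Sanity check for the Magnus-type expression above (illustrative numbers only):
    # at degreesCelsius = 25.0 and relativeHumidity = 60.0 it evaluates to roughly 16.69,
    # in line with the commonly quoted dew point of about 16.7 C for 25 C / 60 % RH.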
def getDewPoint(self, degreesCelsius):
relativeHumidity = self.getSensorValue()
dewPointCelsius = self.calculateDewPoint(relativeHumidity, degreesCelsius)
if self.units == METRIC:
return dewPointCelsius
else:
dewPointFahrenheit = (dewPointCelsius * 1.8) + 32.0
dewPointFahrenheit = round(dewPointFahrenheit, 2)
return dewPointFahrenheit
def getUnits(self):
if self.units == IMPERIAL:
return "Fahrenheit"
else:
return "Celsius"
def main():
    temperatureSensorOne = TemperatureSensor(
        "temperature",
        IMPERIAL,
        domain="localhost",
        port="8080")
    humiditySensorOne = HumiditySensor(
        "humidity",
        IMPERIAL,
        domain="localhost",
        port="8080")
print("Temperature in degrees {}: {}".format(temperatureSensorOne.getUnits(), temperatureSensorOne.getSensorValue()))
print("Relative Humidity: ", humiditySensorOne.getSensorValue())
print("Dew Point {}: {}".format(humiditySensorOne.getUnits(), humiditySensorOne.getDewPoint(temperatureSensorOne.getDegreesCelcius())))
def testSimulated():
print("testing")
    temp = TemperatureSensor(url_plug="temperature")
    humid = HumiditySensor(url_plug="humidity")
print(temp.getSensorValue())
print(humid.getSensorValue())
if __name__ == "__main__":
main()
#testSimulated()
| StarcoderdataPython |
22315 | """
Defines models
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
def init_weights(m):
if type(m) == nn.Linear or type(m) == nn.Conv2d:
torch.nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
m.bias.data.fill_(0.01)
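# Usage sketch (not part of the original file): Xavier-initialise every Linear/Conv2d
# submodule by letting nn.Module.apply() walk the module tree, e.g.
#     model = Model(opt)          # `opt` is assumed to carry the fields read in Model.__init__
#     model.apply(init_weights)   # calls init_weights(m) on every submodule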
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
class Model(nn.Module):
def __init__(self, opt):
super(Model, self).__init__()
self.acoustic_modality = opt.acoustic_modality
self.visual_modality = opt.visual_modality
self.lexical_modality = opt.lexical_modality
self.acoustic_feature_dim = opt.acoustic_feature_dim
self.visual_feature_dim = opt.visual_feature_dim
self.lexical_feature_dim = opt.lexical_feature_dim
self.conv_width_v = opt.conv_width_v
self.conv_width_a = opt.conv_width_a
self.kernel_size_v = opt.kernel_size_v
self.kernel_size_a = opt.kernel_size_a
self.max_pool_width = opt.max_pool_width
self.rnn_layer_num_v = opt.rnn_layer_num_v
self.rnn_layer_num_a = opt.rnn_layer_num_a
self.rnn_width = opt.rnn_width
self.linear_width_l = opt.linear_width_l
self.linear_width = opt.linear_width
self.dropout_rate = opt.dropout_rate
self.conv1d_v1 = nn.Conv1d( in_channels=opt.visual_feature_dim,
out_channels=self.conv_width_v,
kernel_size=self.kernel_size_v,
padding=self.kernel_size_v-1)
self.conv1d_v2 = nn.Conv1d( in_channels=self.conv_width_v,
out_channels=self.conv_width_v,
kernel_size=self.kernel_size_v,
padding=self.kernel_size_v-1)
self.conv1d_v3 = nn.Conv1d( in_channels=self.conv_width_v,
out_channels=self.conv_width_v,
kernel_size=self.kernel_size_v,
padding=self.kernel_size_v-1)
self.conv1d_a1 = nn.Conv1d( in_channels=opt.acoustic_feature_dim,
out_channels=self.conv_width_a,
kernel_size=self.kernel_size_a,
padding=self.kernel_size_a-1)
self.conv1d_a2 = nn.Conv1d( in_channels=self.conv_width_a,
out_channels=self.conv_width_a,
kernel_size=self.kernel_size_a,
padding=self.kernel_size_a-1)
self.conv1d_a3 = nn.Conv1d( in_channels=self.conv_width_a,
out_channels=self.conv_width_a,
kernel_size=self.kernel_size_a,
padding=self.kernel_size_a-1)
self.maxpool = nn.MaxPool1d(self.max_pool_width)
self.gru_v = nn.GRU(input_size=self.conv_width_v,
num_layers=self.rnn_layer_num_v,
hidden_size=self.rnn_width,
batch_first=True)
self.gru_a = nn.GRU(input_size=self.conv_width_a,
num_layers=self.rnn_layer_num_a,
hidden_size=self.rnn_width,
batch_first=True)
self.linear_l = nn.Linear(self.lexical_feature_dim, self.linear_width_l)
self.batchnorm_v = nn.BatchNorm1d(self.rnn_width)
self.batchnorm_a = nn.BatchNorm1d(self.rnn_width)
self.batchnorm_l = nn.BatchNorm1d(self.linear_width_l)
self.dropout = nn.Dropout(self.dropout_rate)
width = 0
if self.acoustic_modality:
width += self.rnn_width
if self.visual_modality:
width += self.rnn_width
if self.lexical_modality:
width += self.linear_width_l
self.linear_1 = nn.Linear(width, self.linear_width)
self.linear_2 = nn.Linear(self.linear_width, 3)
self.softmax = nn.Softmax(dim=1)
self.relu = nn.ReLU()
def forward_v(self, x_v):
x = x_v
x = torch.transpose(x, 1, 2)
x = self.relu(self.maxpool(self.conv1d_v1(x)))
x = self.relu(self.maxpool(self.conv1d_v2(x)))
x = self.relu(self.maxpool(self.conv1d_v3(x)))
x = torch.transpose(x, 1, 2)
x, _ = self.gru_v(x)
x = torch.transpose(x, 1, 2)
x = F.adaptive_avg_pool1d(x,1)[:, :, -1]
x = self.batchnorm_v(self.dropout(x))
return x
def forward_a(self, x_a):
x = x_a
x = torch.transpose(x, 1, 2)
x = self.relu(self.maxpool(self.conv1d_a1(x)))
x = self.relu(self.maxpool(self.conv1d_a2(x)))
x = self.relu(self.maxpool(self.conv1d_a3(x)))
x = torch.transpose(x, 1, 2)
x, _ = self.gru_a(x)
x = torch.transpose(x, 1, 2)
x = F.adaptive_avg_pool1d(x,1)[:, :, -1]
x = self.batchnorm_a(self.dropout(x))
return x
def forward_l(self, x_l):
x = x_l
x = self.relu(self.linear_l(x))
x = self.batchnorm_l(self.dropout(x))
return x
def encoder(self, x_v, x_a, x_l):
if self.visual_modality:
x_v = self.forward_v(x_v)
if self.acoustic_modality:
x_a = self.forward_a(x_a)
if self.lexical_modality:
x_l = self.forward_l(x_l)
if self.visual_modality:
if self.acoustic_modality:
if self.lexical_modality:
x = torch.cat((x_v, x_a, x_l), 1)
else:
x = torch.cat((x_v, x_a), 1)
else:
if self.lexical_modality:
x = torch.cat((x_v, x_l), 1)
else:
x = x_v
else:
if self.acoustic_modality:
if self.lexical_modality:
x = torch.cat((x_a, x_l), 1)
else:
x = x_a
else:
x = x_l
return x
def recognizer(self, x):
x = self.relu(self.linear_1(x))
x = self.linear_2(x)
return x
def forward(self, x_v, x_a, x_l):
x = self.encoder(x_v, x_a, x_l)
x = self.recognizer(x)
return x
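    # Shape sketch (illustrative, inferred from the layers above): with batch-first inputs
    #     x_v: (B, T_v, visual_feature_dim), x_a: (B, T_a, acoustic_feature_dim),
    #     x_l: (B, lexical_feature_dim)
    # forward() returns (B, 3) unnormalised class scores; apply self.softmax for probabilities.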
| StarcoderdataPython |
1788033 | import json
import os
import shutil
from imskaper.experiment.experiment import experiment, read_Xy_data
from imskaper.utils.classifiers import get_classifiers
from imskaper.utils.features_selectors import get_features_selectors
current_path = "tests" # os.path.dirname(os.path.abspath(__file__))
json_path = os.path.join(current_path, "test_config.json")
with open(json_path) as config_file:
config = json.load(config_file)
config["config"]["features_file"] = os.path.join(
current_path, "test_dataset.csv"
)
config["config"]["output_dir"] = os.path.join(current_path, "temp\\")
def test_read_Xy():
X, y, columns_names = read_Xy_data(config["config"]["features_file"])
assert X.shape == (197, 22)
assert y.shape == (197,)
assert len(columns_names) == 22
assert columns_names[0] == "Elongation"
def test_exp():
p = config["config"]["output_dir"]
folder_to_delete = [os.path.join(p, f) for f in os.listdir(p)]
for f in folder_to_delete:
shutil.rmtree(f)
df = experiment(config, verbose=0)
assert df.shape == (
len(get_classifiers(config)),
len(get_features_selectors(config)),
)
folder_name = [os.path.join(p, f) for f in os.listdir(p)]
assert len([f for f in os.listdir(folder_name[0]) if f.endswith('.csv') or f.endswith('.jpg')]) == 8
df2 = experiment(config, verbose=0)
# Make sure we are able to reproduce the results when using the same seed
assert df.equals(df2)
config["config"]["SEED"] = 999
df3 = experiment(config, verbose=0)
assert not (df.equals(df3))
folder_to_delete = [os.path.join(p, f) for f in os.listdir(p)]
for f in folder_to_delete:
shutil.rmtree(f)
| StarcoderdataPython |
169544 | <filename>AutotestWebD/all_urls/LittleToolUrl.py
from django.conf.urls import url
from apps.littletool.views import tool
urlpatterns = [
#add page
url(r'^littletool/jsoncn$', tool.jsoncn, name="LITTLETOOL_jsoncn"),
] | StarcoderdataPython |
3343601 | <reponame>ChadKillingsworth/sentry
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from mock import patch
from sentry.models import (
AuthProvider, OrganizationMember, OrganizationMemberType
)
from sentry.testutils import APITestCase
class UpdateOrganizationMemberTest(APITestCase):
@patch('sentry.models.OrganizationMember.send_invite_email')
def test_reinvite_pending_member(self, mock_send_invite_email):
self.login_as(user=self.user)
organization = self.create_organization(name='foo', owner=self.user)
member_om = OrganizationMember.objects.create(
organization=organization,
email='<EMAIL>',
type=OrganizationMemberType.MEMBER,
has_global_access=False,
)
path = reverse('sentry-api-0-organization-member-details', args=[organization.slug, member_om.id])
self.login_as(self.user)
resp = self.client.put(path, data={'reinvite': 1})
assert resp.status_code == 204
mock_send_invite_email.assert_called_once_with()
@patch('sentry.models.OrganizationMember.send_sso_link_email')
def test_reinvite_sso_link(self, mock_send_sso_link_email):
self.login_as(user=self.user)
organization = self.create_organization(name='foo', owner=self.user)
member = self.create_user('<EMAIL>')
member_om = OrganizationMember.objects.create(
organization=organization,
user=member,
type=OrganizationMemberType.MEMBER,
has_global_access=False,
)
AuthProvider.objects.create(organization=organization, provider='dummy')
path = reverse('sentry-api-0-organization-member-details', args=[organization.slug, member_om.id])
self.login_as(self.user)
resp = self.client.put(path, data={'reinvite': 1})
assert resp.status_code == 204
mock_send_sso_link_email.assert_called_once_with()
@patch('sentry.models.OrganizationMember.send_sso_link_email')
def test_cannot_reinvite_normal_member(self, mock_send_sso_link_email):
self.login_as(user=self.user)
organization = self.create_organization(name='foo', owner=self.user)
member = self.create_user('<EMAIL>')
member_om = OrganizationMember.objects.create(
organization=organization,
user=member,
type=OrganizationMemberType.MEMBER,
has_global_access=False,
)
path = reverse('sentry-api-0-organization-member-details', args=[organization.slug, member_om.id])
self.login_as(self.user)
resp = self.client.put(path, data={'reinvite': 1})
assert resp.status_code == 400
class DeleteOrganizationMemberTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
organization = self.create_organization(name='foo', owner=self.user)
member = self.create_user('<EMAIL>')
member_om = OrganizationMember.objects.create(
organization=organization,
user=member,
type=OrganizationMemberType.MEMBER,
has_global_access=False,
)
path = reverse('sentry-api-0-organization-member-details', args=[organization.slug, member_om.id])
self.login_as(self.user)
resp = self.client.delete(path)
assert resp.status_code == 204
assert not OrganizationMember.objects.filter(id=member_om.id).exists()
def test_cannot_delete_only_owner(self):
self.login_as(user=self.user)
organization = self.create_organization(name='foo', owner=self.user)
# create a pending member, which shouldn't be counted in the checks
OrganizationMember.objects.create(
organization=organization,
type=OrganizationMemberType.OWNER,
email='<EMAIL>',
)
owner_om = OrganizationMember.objects.get(
organization=organization,
user=self.user,
)
path = reverse('sentry-api-0-organization-member-details', args=[organization.slug, owner_om.id])
self.login_as(self.user)
resp = self.client.delete(path)
assert resp.status_code == 403
assert OrganizationMember.objects.filter(id=owner_om.id).exists()
| StarcoderdataPython |
16511 | <gh_stars>0
import numpy as np
from scipy import constants
measured_species = ["HMF", "DFF", "HMFCA", "FFCA", "FDCA"]
all_species = measured_species.copy()
all_species.extend(["H_" + s for s in measured_species])
all_species.extend(["Hx_" + s for s in measured_species])
def c_to_q(c):
c_e = list()
for i, s in enumerate(all_species):
c_e.append(2*(i%5 + int(i/5))*c[:,i])
c_e = np.sum(c_e, axis = 0) # uM
c_e *= 1e-6 # M
V = 100e-3 # L
q = c_e*V*constants.N_A*constants.e # number of charge in coulombs
return q
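# Usage sketch (illustrative, not from the original file): derivatives() below follows odeint's
# (y, t) argument order, so a typical integration and charge calculation would look like
#     from scipy.integrate import odeint
#     c = odeint(derivatives, y0, t, args=(p,))   # y0: initial concentrations (uM), p: rate constants
#     q = c_to_q(c)                               # charge passed (C) at each time point
# where y0 has one entry per name in all_species and p maps "k11", "k12", ... to floats.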
def derivatives(y, t, p):
"""
    Calculates the concentration derivatives dc/dt from the current state y at time t, using
    the rate constants in p. The (y, t) argument order matches scipy.integrate.odeint
    (scipy.integrate.solve_ivp would instead expect (t, y)).
"""
c = {s:y[i] for i, s in enumerate(all_species)}
dc = dict()
dc["HMF"] = - (p["k11"] + p["k12"] + p["kH1"])*c["HMF"]
dc["DFF"] = p["k11"]*c["HMF"] - (p["k21"] + p["kH21"])*c["DFF"]
dc["HMFCA"] = p["k12"]*c["HMF"] - (p["k22"] + p["kH22"])*c["HMFCA"]
dc["FFCA"] = p["k21"]*c["DFF"] + p["k22"]*c["HMFCA"] - (p["k3"] + p["kH3"])*c["FFCA"]
dc["FDCA"] = p["k3"]*c["FFCA"] - p["kH4"]*c["FDCA"]
dc["H_HMF"] = p["kH1"]*c["HMF"] - p["kHx"]*c["H_HMF"]
dc["H_DFF"] = p["kH21"]*c["DFF"] - p["kHx"]*c["H_DFF"]
dc["H_HMFCA"] = p["kH22"]*c["HMFCA"] - p["kHx"]*c["H_HMFCA"]
dc["H_FFCA"] = p["kH3"]*c["FFCA"] - p["kHx"]*c["H_FFCA"]
dc["H_FDCA"] = p["kH4"]*c["FDCA"] - p["kHx"]*c["H_FDCA"]
dc["Hx_HMF"] = p["kHx"]*c["H_HMF"]
dc["Hx_DFF"] = p["kHx"]*c["H_DFF"]
dc["Hx_HMFCA"] = p["kHx"]*c["H_HMFCA"]
dc["Hx_FFCA"] = p["kHx"]*c["H_FFCA"]
dc["Hx_FDCA"] = p["kHx"]*c["H_FDCA"]
dy = [dc[name] for name in all_species]
return dy | StarcoderdataPython |
1795029 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2018-2019 <NAME>
#
# Distributed under MIT License. See LICENSE file for details.
from __future__ import unicode_literals
import factory
# FAQ: `pyint` is everywhere because it is always [0..9999], so it
# will be good enough for every integer-related field.
def build_bigintegerfield(field_cls):
return factory.Faker("pyint")
def build_decimalfield(field_cls):
characteristic = field_cls.max_digits - field_cls.decimal_places
mantissa = field_cls.decimal_places
return factory.Faker(
"pydecimal",
left_digits=characteristic,
right_digits=mantissa,
)
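# Worked example (illustrative): for a DecimalField(max_digits=6, decimal_places=2) the builder
# above yields factory.Faker("pydecimal", left_digits=4, right_digits=2), i.e. up to four digits
# for the integer part and two for the fractional part.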
def build_floatfield(field_cls):
return factory.Faker("pyfloat")
def build_integerfield(field_cls):
return factory.Faker("pyint")
def build_positiveintegerfield(field_cls):
return factory.Faker("pyint")
def build_positivesmallintegerfield(field_cls):
return factory.Faker("pyint")
def build_smallintegerfield(field_cls):
return factory.Faker("pyint")
| StarcoderdataPython |
111243 | from typing import Optional, List
from ..pattern import Pattern
from ..pattern_recognizer import PatternRecognizer
class IpRecognizer(PatternRecognizer):
"""
Recognize IP address using regex.
:param patterns: List of patterns to be used by this recognizer
:param context: List of context words to increase confidence in detection
:param supported_language: Language this recognizer supports
:param supported_entity: The entity this recognizer can detect
"""
PATTERNS = [
Pattern(
"IPv4",
r"\b(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b", # noqa: E501
0.6,
),
Pattern(
"IPv6",
r"\s*(?!.*::.*::)(?:(?!:)|:(?=:))(?:[0-9a-f]{0,4}(?:(?<=::)|(?<!::):)){6}(?:[0-9a-f]{0,4}(?:(?<=::)|(?<!::):)[0-9a-f]{0,4}(?:(?<=::)|(?<!:)|(?<=:)(?<!::):)|(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)){3})\s*", # noqa: E501
0.6,
),
]
CONTEXT = ["ip", "ipv4", "ipv6"]
def __init__(
self,
patterns: Optional[List[Pattern]] = None,
context: Optional[List[str]] = None,
supported_language: str = "en",
supported_entity: str = "IP_ADDRESS",
):
patterns = patterns if patterns else self.PATTERNS
context = context if context else self.CONTEXT
super().__init__(
supported_entity=supported_entity,
patterns=patterns,
context=context,
supported_language=supported_language,
)
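# Usage sketch (illustrative; assumes the presidio-analyzer style PatternRecognizer imported
# above exposes analyze(text, entities)):
#     recognizer = IpRecognizer()
#     results = recognizer.analyze(text="ssh into 192.168.1.1 please", entities=["IP_ADDRESS"])
# Each regex hit carries the pattern's base score (0.6 here); the context words ("ip", "ipv4",
# "ipv6") can then be used to raise that confidence.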
| StarcoderdataPython |
3286132 | '''
Created on 31 Mar 2017
@author: <NAME> <<EMAIL>>
'''
from rdflib import URIRef, BNode, Literal, Graph, Namespace
from rdflib.namespace import RDF, RDFS, XSD, OWL
from Misc.datacheck import isfloat
#Tuple of the form: (*camera id*:string, *camera lat*:string, *camera long*: string,
# *OSM derived road name*:string, *speed limit*:string)
def toLD( data, debug=False ):
assert isinstance(data, tuple), "Expecting tuple"
assert len(data) == 5, "Expecting 5 values"
assert isinstance(data[0], basestring), "First element of list needs to be a string"
assert isinstance(data[1], basestring), "Second element of list needs to be a string"
assert isinstance(data[2], basestring), "Third element of list needs to be a string"
assert isinstance(data[3], basestring), "Fourth element of list needs to be a string"
assert isinstance(data[4], basestring), "Fifth element of list needs to be a int"
assert isfloat(data[1]), "Second element of list needs to represent a float number"
assert isfloat(data[2]), "Third element of list needs to represent a float number"
cameraid=data[0]
cameralat=data[1]
cameralong=data[2]
addrstreet=data[3]
speedlimit=data[4]
toappend = ""
if (float(cameralat) > 0):
if(cameralat[0] != "+"):
toappend += "+" + cameralat
else:
toappend += cameralat
else:
toappend += cameralat
if (float(cameralong) > 0):
if(cameralong[0] != "+"):
toappend += "+" + cameralong
else:
toappend += cameralong
else:
toappend += cameralong
g = Graph()
traffic = Namespace("https://dais-ita.org/traffic/")
pos = Namespace("http://www.w3.org/2003/01/geo/wgs84_pos#")
osm = Namespace("https://raw.github.com/doroam/planning-do-roam/master/Ontology/tags.owl#")
osmaddr = Namespace("https://raw.github.com/doroam/planning-do-roam/master/Ontology/tags.owl#k_addr:")
cameranode = URIRef(traffic + "camera" + cameraid )
locationnode = URIRef(traffic + "location" + toappend )
addressnode = URIRef(traffic + "address" + toappend )
g.add( ( cameranode, RDF.type, traffic.Camera ) )
g.add( ( cameranode, RDF.type, OWL.NamedIndividual))
g.add( ( cameranode, RDFS.label, Literal(cameraid, datatype=XSD.string) ) )
g.add( ( cameranode, pos.location, locationnode) )
g.add( ( locationnode, RDF.type, pos.Point) )
g.add( ( locationnode, RDF.type, OWL.NamedIndividual) )
g.add( ( locationnode, pos.lat, Literal(cameralat, datatype=XSD.string) ) )
g.add( ( locationnode, pos.long, Literal(cameralong, datatype=XSD.string) ) )
g.add( ( cameranode, osm.has_address, addressnode ) )
g.add( ( addressnode, RDF.type, osm.address))
g.add( ( addressnode, RDF.type, OWL.NamedIndividual))
g.add( ( addressnode, osmaddr.street, Literal(addrstreet, datatype=XSD.string) ) )
g.add( ( addressnode, traffic.speedLimit, Literal(speedlimit, datatype=XSD.string) ) )
if (debug):
print g.serialize(format='turtle')
return g
def toJsonLD( data ):
g = toLD(data)
return g.serialize(format='json-ld', indent=4) | StarcoderdataPython |
1681072 | import os
import warnings
import numpy as np
from scipy.spatial import distance
from pycosmosac.molecule.cavity import Cavity
from pycosmosac.param import data
from pycosmosac.utils import elements
BOND_SCALING = 1.2
def get_connectivity(mol, geometry=None):
#TODO improve accuracy
if geometry is None:
geometry = mol.geometry
if not geometry:
raise RuntimeError("molecule not initialized.")
atoms = geometry["atom"]
xyz = geometry["xyz"]
d = distance.cdist(xyz, xyz)
natom = len(atoms)
connectivity = [[] for _ in range(natom)]
for i, atom_i in enumerate(atoms):
for j, atom_j in enumerate(atoms):
if i==j:
continue
l = BOND_SCALING * elements.covalent_bond(atom_i, atom_j)
if d[i,j] <= l:
connectivity[i].append(j)
if not connectivity[i]:
warnings.warn("atom (%s, %s) has no bonds." % (i+1, atom_i))
return connectivity
def _dfs(connectivity, iatm, color, traversalOrder, res, parent=None):
'''
depth-first search
'''
color[iatm] = 1
traversalOrder.append(iatm)
for jatm in connectivity[iatm]:
if color[jatm] == 0:
if len(connectivity[jatm]) < 2:
color[jatm] = 2
else:
_dfs(connectivity, jatm, color, traversalOrder, res, parent=iatm)
elif color[jatm] == 1:
if parent and parent != jatm:
cycle = []
lastatm_index = traversalOrder.index(iatm)
for index in range(lastatm_index, -1, -1):
katm = traversalOrder[index]
if katm == jatm:
break
else:
cycle.append(katm)
cycle.append(jatm)
res.append(cycle)
color[iatm] = 2
traversalOrder.pop()
def find_rings(mol, connectivity=None):
if connectivity is None: connectivity = mol.connectivity
natom = mol.natom
color = np.zeros((natom), dtype=int)
res = []
for i in range(natom):
if color[i] > 0:
continue
if len(connectivity[i]) < 2:
color[i] = 2
continue
traversalOrder = []
_dfs(connectivity, i, color, traversalOrder, res)
return res
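# Illustrative example (not from the original file): for a cyclopropane-like connectivity
# [[1, 2], [0, 2], [0, 1]] the DFS above records the single three-membered ring as [2, 1, 0],
# so find_rings() returns [[2, 1, 0]] and find_ring_atoms() below reduces it to the atoms
# {0, 1, 2}.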
def find_ring_atoms(mol, connectivity=None):
if connectivity is None: connectivity = mol.connectivity
res = find_rings(mol, connectivity)
return list(set().union(*res))
def classify_hydrogen_bonds(mol, geometry=None, connectivity=None):
if geometry is None: geometry = mol.geometry
if connectivity is None: connectivity = mol.connectivity
if not geometry or not connectivity:
raise RuntimeError("molecule not initialized.")
atoms = geometry["atom"]
hb_class = []
for i, atom_i in enumerate(atoms):
if atom_i in ['N', 'F']:
hb_class.append("OT")
elif atom_i in ['O', 'H']:
bond_type = 'NHB'
for j in connectivity[i]:
atom_j = atoms[j]
atom_ij = atom_i + atom_j
if atom_ij in ['OH', 'HO']:
bond_type = 'OH'
break
if atom_i == 'O':
bond_type = 'OT'
break
if atom_i == 'H' and atom_j in ['N', 'F']:
bond_type = 'OT'
break
hb_class.append(bond_type)
else:
hb_class.append('NHB')
return hb_class
def get_dispersion_type(mol, geometry=None, connectivity=None):
if geometry is None: geometry = mol.geometry
if connectivity is None: connectivity = mol.connectivity
atoms = geometry["atom"]
if len(atoms) == 3 and atoms.count("O") == 1 and atoms.count("H") == 2:
disp_tot = (data.disp["H(H2O)"] * 2 + data.disp["-O-"]) / 3.0
return disp_tot, "H2O"
disp_type = "NHB"
disp_tot = 0.0
natom = 0
nCOOH = 0
for i, atom_i in enumerate(atoms):
n = len(connectivity[i])
if atom_i == "C":
natom += 1
if n == 4:
disp_tot += data.disp["C(sp3)"]
elif n == 3:
disp_tot += data.disp["C(sp2)"]
atom_js = []
js = []
for j in connectivity[i]:
atom_js.append(atoms[j])
js.append(j)
if atom_js.count("O") == 2:
for j, atom_j in zip(js,atom_js):
if atom_j != "O":
continue
if len(connectivity[j]) == 2:
for k in connectivity[j]:
if atoms[k] == "H":
nCOOH += 1
disp_tot += data.disp["H(COOH)"]
disp_type = "COOH"
elif n == 2:
disp_tot += data.disp["C(sp)"]
elif atom_i == "N":
natom += 1
if n == 3:
disp_tot += data.disp["N(sp3)"]
elif n == 2:
disp_tot += data.disp["N(sp2)"]
elif n == 1:
disp_tot += data.disp["N(sp)"]
elif atom_i == "O":
natom += 1
if n == 2:
disp_tot += data.disp["-O-"]
elif n == 1:
disp_tot += data.disp["=O"]
elif atom_i == "F":
natom += 1
disp_tot += data.disp["F"]
elif atom_i == "Cl":
natom += 1
disp_tot += data.disp["Cl"]
elif atom_i == "Br":
natom += 1
disp_tot += data.disp["Br"]
elif atom_i == "I":
natom += 1
disp_tot += data.disp["I"]
elif atom_i == "P":
natom += 1
disp_tot += data.disp["P"]
elif atom_i == "S":
natom += 1
disp_tot += data.disp["S"]
elif atom_i == "H":
j = connectivity[i][0]
atom_j = atoms[j]
if atom_j == "O":
natom += 1
disp_tot += data.disp["H(OH)"]
elif atom_j == "N":
natom += 1
disp_tot += data.disp["H(NH)"]
else:
warnings.warn("dispersion parameter not available for %s" % atom_i)
disp_tot -= nCOOH * data.disp["H(OH)"]
disp_tot /= natom
return disp_tot, disp_type
def fromstring(string, format='xyz'):
format = format.lower()
if format == 'xyz':
dat = string.splitlines()
natm = int(dat[0])
return '\n'.join(dat[2:natm+2])
elif format == 'raw':
return string
else:
raise NotImplementedError
def fromfile(filename, format=None):
if format is None: # Guess format based on filename
format = os.path.splitext(filename)[1][1:].lower()
if format not in ('xyz', 'zmat', 'sdf', 'mol2'):
format = 'raw'
with open(filename, 'r') as f:
return fromstring(f.read(), format)
def read_geometry(xyz):
if os.path.isfile(xyz):
try:
xyz_raw = fromfile(xyz)
return raw_to_geometry(xyz_raw)
except:
raise ValueError('Failed to parse geometry file %s' % xyz)
else:
return raw_to_geometry(xyz)
def raw_to_geometry(xyz):
geometry = {}
geometry["atom"] = []
geometry["xyz"] = []
def str2atm(line):
dat = line.split()
assert(len(dat) == 4)
geometry["atom"].append(dat[0])
geometry["xyz"].append([float(x) for x in dat[1:4]])
if isinstance(xyz, str):
xyz = str(xyz.replace(';','\n').replace(',',' ').replace('\t',' '))
fmt_atoms = []
for dat in xyz.split('\n'):
dat = dat.strip()
if dat and dat[0] != '#':
fmt_atoms.append(dat)
for line in fmt_atoms:
str2atm(line)
geometry["xyz"] = np.asarray(geometry["xyz"])
else:
raise NotImplementedError
return geometry
def geometry_to_xyz(geometry, name="unknown"):
symb = geometry["atom"]
coord = geometry["xyz"]
natom = len(symb)
xyz = str(natom) + "\n"
xyz += name + "\n"
for i in range(natom):
xyz += symb[i] + " "
xyz += str(coord[i, 0]) + " "
xyz += str(coord[i, 1]) + " "
xyz += str(coord[i, 2]) + " "
xyz += "\n"
return xyz.strip()
class Mole():
'''
Class for molecular information
Attributes:
geometry : dict
Geometry information.
"xyz" : ndarray
"atom" : list
cavity : Cavity
Cavity information
connectivity : list
Connectivity information.
hb_class : list
Hydrogen bond classification.
'''
def __init__(self):
#{"atom" : [], "xyz" : ndarray(natom, 3)}
self.geometry = None
self.cavity = None
self.connectivity = None
self.hb_class = None
@property
def natom(self):
if self.geometry is None:
raise RuntimeError("molecule not initialized")
return len(self.geometry["atom"])
def build(self, geometry=None, cavity=None):
if geometry is not None:
if isinstance(geometry, str):
self.geometry = read_geometry(geometry)
elif isinstance(geometry, dict):
self.geometry = geometry
else:
raise ValueError("unsupported geometry input")
if cavity is not None: self.cavity = cavity
self.connectivity = self.get_connectivity()
self.hb_class = self.classify_hydrogen_bonds()
return self
get_connectivity = get_connectivity
classify_hydrogen_bonds = classify_hydrogen_bonds
get_dispersion_type = get_dispersion_type
find_rings = find_rings
find_ring_atoms = find_ring_atoms
if __name__ == "__main__":
from pycosmosac.utils.misc import fingerprint
geometry = {}
geometry["atom"] = ['O', 'H', 'H']
geometry["xyz"] = np.asarray([[ 0., 0., -0.405655705],
[ 0.770106178, 0., 0.202827852],
[-0.770106178, 0., 0.202827852]])
mol = Mole().build(geometry)
print(mol.connectivity == [[1, 2], [0], [0]])
print(mol.hb_class == ['OH','OH','OH'])
print(mol.get_dispersion_type()[0] - 70.75953333333332)
print(mol.get_dispersion_type()[1] == "H2O")
xyz = '''
N -2.86237 0.53549 -0.00680
C -1.59157 1.12789 -0.00460
C -0.65647 0.06499 -0.00640
N -1.36327 -1.15231 -0.01640
C -2.67117 -0.86471 -0.01510
C 0.72143 0.40079 -0.00170
N 1.06103 1.72159 -0.02520
C 0.08733 2.69019 -0.01940
N -1.23787 2.45869 -0.00720
H 0.42943 3.73339 -0.02780
N 1.75063 -0.53271 0.12520
H -3.73057 1.00639 -0.00350
H -3.47277 -1.60891 -0.01910
H 1.51683 -1.44251 -0.19520
H 2.65133 -0.22311 -0.15800
'''
mol = Mole().build(xyz)
print(mol.geometry["atom"] == ['N', 'C', 'C', 'N', 'C', 'C', 'N', 'C', 'N', 'H', 'N', 'H', 'H', 'H', 'H'])
print(fingerprint(mol.geometry["xyz"]) - -7.705571225872962)
print(mol.connectivity == [[1, 4, 11], [0, 2, 8], [1, 3, 5], [2, 4], [0, 3, 12], [2, 6, 10], [5, 7], [6, 8, 9], [1, 7], [7], [5, 13, 14], [0], [4], [10], [10]])
print(mol.find_ring_atoms() == [0, 1, 2, 3, 4, 5, 6, 7, 8])
print(mol.find_rings() == [[4, 3, 2, 1, 0], [8, 7, 6, 5, 2, 1]])
xyz = '''
C 0.78526 0.09180 -0.07290
C 0.46366 -1.44010 0.02370
C -0.58284 -1.14900 1.15480
C -0.26134 0.38280 1.05820
C -0.33764 0.25930 -1.15480
C -1.38414 0.55030 -0.02370
C -1.70564 -0.98160 0.07290
C -0.65914 -1.27260 -1.05820
H 1.78286 0.52170 -0.13130
H 1.20356 -2.23730 0.04260
H -0.68114 -1.71320 2.07970
H -0.10194 1.04580 1.90590
H -0.23944 0.82320 -2.07990
H -2.12424 1.34730 -0.04270
H -2.70324 -1.41140 0.13130
H -0.81854 -1.93550 -1.90580
'''
mol = Mole().build(xyz)
print(mol.find_ring_atoms() == [0, 1, 2, 3, 4, 5, 6, 7])
| StarcoderdataPython |
129127 | <gh_stars>0
from UM.Settings.Models.InstanceContainersModel import InstanceContainersModel
from unittest.mock import MagicMock, patch
import pytest
@pytest.fixture
def instance_containers_model(container_registry):
with patch("UM.Settings.ContainerRegistry.ContainerRegistry.getInstance",
MagicMock(return_value=container_registry)):
result = InstanceContainersModel()
result._fetchInstanceContainers = MagicMock(return_value = ({}, {"bla": {"name": "test", "id": "beep"}}))
return result
def test_simpleUpdate(instance_containers_model):
instance_containers_model._update()
items = instance_containers_model.items
assert len(items) == 1
assert items[0]["name"] == "test"
assert items[0]["id"] == "beep"
test_validate_data_get_set = [
{"attribute": "sectionProperty", "value": "YAY"},
{"attribute": "filter", "value": {"beep": "oh noes"}}
]
@pytest.mark.parametrize("data", test_validate_data_get_set)
def test_getAndSet(data, instance_containers_model):
model = instance_containers_model
# Convert the first letter into a capital
attribute = list(data["attribute"])
attribute[0] = attribute[0].capitalize()
attribute = "".join(attribute)
# mock the correct emit
setattr(model, data["attribute"] + "Changed", MagicMock())
# Attempt to set the value
getattr(model, "set" + attribute)(data["value"])
# Check if signal fired.
signal = getattr(model, data["attribute"] + "Changed")
assert signal.emit.call_count == 1
# Ensure that the value got set
assert getattr(model, data["attribute"]) == data["value"]
# Attempt to set the value again
getattr(model, "set" + attribute)(data["value"])
# The signal should not fire again
assert signal.emit.call_count == 1
def test_updateMetaData(instance_containers_model):
instance_container = MagicMock()
instance_container.getMetaData = MagicMock(return_value = {})
instance_container.getName = MagicMock(return_value = "name")
instance_container.getId = MagicMock(return_value = "the_id")
instance_containers_model.setProperty = MagicMock()
instance_containers_model._updateMetaData(instance_container)
calls = instance_containers_model.setProperty.call_args_list
assert calls[0][0][2] == {}
assert calls[1][0][2] == "name"
assert calls[2][0][2] == "the_id"
def test_fetchInstanceContainers(container_registry):
with patch("UM.Settings.ContainerRegistry.ContainerRegistry.getInstance", MagicMock(return_value=container_registry)):
model = InstanceContainersModel()
model.setFilter({"id": "empty"})
assert model.filterList == [{"id": "empty"}]
containers, metadatas = model._fetchInstanceContainers()
assert "empty" in containers
assert metadatas == dict()
def test_getIOPlugins(instance_containers_model):
registry = MagicMock()
registry.getActivePlugins = MagicMock(return_value=["omg"])
registry.getMetaData = MagicMock(return_value = {"test": "blorp"})
with patch("UM.PluginRegistry.PluginRegistry.getInstance", MagicMock(return_value=registry)):
assert instance_containers_model._getIOPlugins("test") == [("omg", {"test": "blorp"})] | StarcoderdataPython |
153616 | <filename>C++/testing/serverSock.py
import socket
#from threading import *
HOST = '10.1.121.102'
PORT = 65432
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, PORT))
s.listen()
conn, addr = s.accept()
with conn:
print('connected by', addr)
while True:
data = conn.recv(100000024)
print(data)
if not data:
break
conn.sendall(data)
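# Illustrative sketch (not part of the original script): a minimal companion
# client for the echo server above. HOST/PORT are reused from the constants
# defined earlier; run this from a separate process while the server listens.
def example_client(message=b'hello'):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as c:
        c.connect((HOST, PORT))
        c.sendall(message)   # send a payload to the echo server
        return c.recv(1024)  # read back the echoed bytes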
| StarcoderdataPython |
122697 | from .bot import Bot
from .utils.logger import LogLevel
__all__ = ("LogLevel", "Bot")
| StarcoderdataPython |
160071 | #!/usr/bin/python2.4
#
# Copyright 2010-2012 Google Inc. All Rights Reserved.
"""Renderscript Compiler Test.
Runs subdirectories of tests for the Renderscript compiler.
"""
import filecmp
import glob
import os
import re
import shutil
import subprocess
import sys
__author__ = 'Android'
class Options(object):
def __init__(self):
return
verbose = 0
cleanup = 1
updateCTS = 0
def CompareFiles(actual, expect):
"""Compares actual and expect for equality."""
if not os.path.isfile(actual):
if Options.verbose:
print 'Could not find %s' % actual
return False
if not os.path.isfile(expect):
if Options.verbose:
print 'Could not find %s' % expect
return False
return filecmp.cmp(actual, expect, False)
def UpdateFiles(src, dst):
"""Update dst if it is different from src."""
if not CompareFiles(src, dst):
print 'Copying from %s to %s' % (src, dst)
shutil.copyfile(src, dst)
def GetCommandLineArgs(filename):
"""Extracts command line arguments from first comment line in a file."""
f = open(filename, 'r')
line = f.readline()
  if line[0] == '/' and line[1] == '/':
return line[2:].strip()
else:
return ''
def ExecTest(dirname):
"""Executes an llvm-rs-cc test from dirname."""
passed = True
if Options.verbose != 0:
print 'Testing %s' % dirname
os.chdir(dirname)
stdout_file = open('stdout.txt', 'w+')
stderr_file = open('stderr.txt', 'w+')
cmd_string = ('../../../../../../out/host/linux-x86/bin/llvm-rs-cc '
'-o tmp/ -p tmp/ '
'-MD '
'-I ../../../../../../frameworks/rs/scriptc/ '
'-I ../../../../../../mediatek/external/clang/lib/Headers/')
base_args = cmd_string.split()
rs_files = glob.glob('*.rs')
fs_files = glob.glob('*.fs')
  rs_files += fs_files
rs_files.sort()
# Extra command line arguments can be placed as // comments at the start of
# any .rs file. We automatically bundle up all of these extra args and invoke
# llvm-rs-cc with them.
extra_args_str = ''
for rs_file in rs_files:
extra_args_str += GetCommandLineArgs(rs_file)
extra_args = extra_args_str.split()
args = base_args + extra_args + rs_files
if Options.verbose > 1:
print 'Executing:',
for arg in args:
print arg,
print
# Execute the command and check the resulting shell return value.
# All tests that are expected to FAIL have directory names that
# start with 'F_'. Other tests that are expected to PASS have
# directory names that start with 'P_'.
ret = 0
try:
ret = subprocess.call(args, stdout=stdout_file, stderr=stderr_file)
except:
passed = False
stdout_file.flush()
stderr_file.flush()
if Options.verbose > 1:
stdout_file.seek(0)
stderr_file.seek(0)
for line in stdout_file:
print 'STDOUT>', line,
for line in stderr_file:
print 'STDERR>', line,
stdout_file.close()
stderr_file.close()
if dirname[0:2] == 'F_':
if ret == 0:
passed = False
if Options.verbose:
print 'Command passed on invalid input'
elif dirname[0:2] == 'P_':
if ret != 0:
passed = False
if Options.verbose:
print 'Command failed on valid input'
else:
passed = (ret == 0)
if Options.verbose:
print 'Test Directory name should start with an F or a P'
if not CompareFiles('stdout.txt', 'stdout.txt.expect'):
passed = False
if Options.verbose:
print 'stdout is different'
if not CompareFiles('stderr.txt', 'stderr.txt.expect'):
passed = False
if Options.verbose:
print 'stderr is different'
if Options.updateCTS:
# Copy resulting files to appropriate CTS directory (if different).
if passed and glob.glob('IN_CTS'):
cts_path = '../../../../../cts/'
cts_res_raw_path = cts_path + 'tests/res/raw/'
cts_src_path = cts_path + 'tests/tests/renderscript/src/'
for bc_src in glob.glob('tmp/*.bc'):
bc_dst = re.sub('tmp\/', cts_res_raw_path, bc_src, 1)
UpdateFiles(bc_src, bc_dst)
for java_src in glob.glob('tmp/android/renderscript/cts/*.java'):
java_dst = re.sub('tmp\/', cts_src_path, java_src, 1)
UpdateFiles(java_src, java_dst)
if Options.cleanup:
try:
os.remove('stdout.txt')
os.remove('stderr.txt')
shutil.rmtree('tmp/')
except:
pass
os.chdir('..')
return passed
def Usage():
"""Print out usage information."""
print ('Usage: %s [OPTION]... [TESTNAME]...'
'Renderscript Compiler Test Harness\n'
'Runs TESTNAMEs (all tests by default)\n'
'Available Options:\n'
' -h, --help Help message\n'
' -n, --no-cleanup Don\'t clean up after running tests\n'
' -u, --update-cts Update CTS test versions\n'
' -v, --verbose Verbose output\n'
) % (sys.argv[0]),
return
def main():
passed = 0
failed = 0
files = []
failed_tests = []
for arg in sys.argv[1:]:
if arg in ('-h', '--help'):
Usage()
return 0
elif arg in ('-n', '--no-cleanup'):
Options.cleanup = 0
elif arg in ('-u', '--update-cts'):
Options.updateCTS = 1
elif arg in ('-v', '--verbose'):
Options.verbose += 1
else:
# Test list to run
if os.path.isdir(arg):
files.append(arg)
else:
print >> sys.stderr, 'Invalid test or option: %s' % arg
return 1
if not files:
tmp_files = os.listdir('.')
# Only run tests that are known to PASS or FAIL
# Disabled tests can be marked D_ and invoked explicitly
for f in tmp_files:
if os.path.isdir(f) and (f[0:2] == 'F_' or f[0:2] == 'P_'):
files.append(f)
for f in files:
if os.path.isdir(f):
if ExecTest(f):
passed += 1
else:
failed += 1
failed_tests.append(f)
print 'Tests Passed: %d\n' % passed,
print 'Tests Failed: %d\n' % failed,
if failed:
print 'Failures:',
for t in failed_tests:
print t,
return failed != 0
if __name__ == '__main__':
sys.exit(main())
| StarcoderdataPython |
3253603 | <gh_stars>1-10
"""
https://leetcode.com/explore/interview/card/top-interview-questions-hard/116/array-and-strings/827/
"""
from typing import List
# referenced solution to arrive at this result
class Solution:
def product_except_self(self, nums: List[int]) -> List[int]:
"""
no division
O(n) desired runtime
*except nums[i]
brute force would be O(n^2)
could do even better with a running prefix multiplier, but then would have
to divide out nums[i] at each step
n > 1
don't necessarily know that input is sorted, so can't use that
dynamic programming?
with division would be easy...
two-finger approach? nope, would be O(n^2)
...
we know all products are 32bits, so that isn't an issue to consider rn...
binary approach?
# use L and R arrays - was on the right idea there!!!
"""
N = len(nums)
L, R = [0] * N, [0] * N
L[0] = 1
for i in range(1, N):
L[i] = L[i - 1] * nums[i - 1]
R[N - 1] = 1
for j in reversed(range(N - 1)):
R[j] = R[j + 1] * nums[j + 1]
return [L[i] * R[i] for i in range(N)]
class SolutionOptimized:
def product_except_self(self, nums: List[int]) -> List[int]:
N = len(nums)
ans = [0] * N
ans[0] = 1
for i in range(1, N):
ans[i] = ans[i - 1] * nums[i - 1]
# multiply by things on the right, and update that value R as we go
R = 1
for j in reversed(range(N)):
ans[j] *= R
R *= nums[j]
return ans
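# Minimal usage sketch (added for illustration; the sample input and expected
# output below are assumed, not part of the referenced solution).
if __name__ == "__main__":
    nums = [1, 2, 3, 4]
    # product of everything except self: [2*3*4, 1*3*4, 1*2*4, 1*2*3]
    assert Solution().product_except_self(nums) == [24, 12, 8, 6]
    assert SolutionOptimized().product_except_self(nums) == [24, 12, 8, 6]
    print(SolutionOptimized().product_except_self(nums))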
| StarcoderdataPython |
1713068 | <gh_stars>0
import pytest
from dynamodb_doctor import Model, String, ModelCreationException, Many
ENDPOINT_URL = "http://localhost:58000"
@pytest.mark.asyncio
async def test_define_model_without_table():
with pytest.raises(ModelCreationException):
class _(Model):
name = String()
@pytest.mark.asyncio
async def test_list_all(table_fixture):
test_model_name = "test_model"
class TestModel(Model):
name = String()
class Meta:
table = table_fixture
test_model = TestModel(name=test_model_name)
await test_model.save()
test_models = await TestModel.all()
assert(len(test_models) == 1)
assert(test_models[0].id == test_model.id)
@pytest.mark.asyncio
async def test_list_one_to_many(table_fixture):
class TestModelMany(Model):
name = String()
class Meta:
table = table_fixture
class TestModelOne(Model):
title = String()
many = Many(TestModelMany)
class Meta:
table = table_fixture
test_model_one = TestModelOne(
title="test",
many=[{"name":"testone"}, {"name":"testtwo"}]
)
await test_model_one.save()
test_models = await TestModelOne.all()
assert(len(test_models) == 1)
one = test_models[0]
assert(one.id == test_model_one.id)
many = test_models[0].many
assert(len(many) == 2)
assert(many[0].name == "testone")
assert(many[1].name == "testtwo")
| StarcoderdataPython |
4809677 | <filename>dprint/_impl.py
import tokenize
import inspect
import token
import sys
import re
import io
import os
_NAME_MATCHING_REGEX = re.compile(r'\bdprint\b')
def dprint(value):
"""A simple printing debugging helper.
Designed to be used on any expression, to print the value of an expression,
without modifying the result of the same.
In most cases, the only visible effect of this function is a call to
__str__ of the passed value and a call to the builtin print function.
"""
frame_info = inspect.stack()[1]
message = _construct_message(value, frame_info)
# The parts that matter.
print(message)
return value
def _construct_message(value, frame_info):
"""Construct a human readable message for context.
"""
# Parts
filename_str = _format_filename(frame_info.filename)
line_str = _format_lineno(frame_info.lineno)
function_str = _format_function(frame_info.function)
expression_str = _format_expression(frame_info.code_context)
val_str = _format_value(value)
# Put an arrow on it.
context = filename_str + line_str + function_str
if context:
context += "\n"
# Show the expression with the output.
if expression_str:
main_text = " {} -> {}".format(expression_str, val_str)
else:
main_text = " -> {}" + val_str
return context + main_text
def _format_filename(filename):
"""Format the filename in a nicer manner than given.
Try to make the filename shorter when it makes sense to, without losing the
clarity of what it means.
"""
if filename is None:
return "<unknown-file>"
# A tiny helper
def in_dir(dirpath, abspath):
return dirpath == os.path.commonpath([dirpath, abspath])
abspath = os.path.abspath(filename)
cwd = os.getcwd()
# If it's in the current directory, return the path, with current directory
# removed.
if in_dir(cwd, abspath):
return abspath[len(cwd) + 1:]
# If it's importable, we show the path to it.
for location in sys.path:
if in_dir(location, abspath):
fpath = abspath[len(location) + 1:]
if fpath.endswith(".py"):
fpath = fpath[:-3]
return "<installed> " + fpath.replace(os.path.sep, ".")
return abspath
def _format_lineno(lineno):
"""Just convert the line number into a better form.
"""
if lineno is None:
return ""
return ":" + str(lineno)
def _format_function(func_name):
"""Provide better context for a "function" of the caller.
"""
if func_name is None:
return ""
elif func_name == "<module>":
return " (top level stmt)"
else:
return " in " + func_name
def _format_expression(code_context):
"""Provide the expression used to call dprint.
Constraints:
- Function must be called dprint
- A call should span no more than 1 line
- No more than 1 call on 1 line
If these constraints are violated, the current implementation doesn't
manage to extract the expression.
"""
if not code_context:
return ""
line = code_context[0]
# Tokenize the line
token_list = tokenize.tokenize(io.BytesIO(line.encode('utf-8')).readline)
# Determine the start and end of expression
start = None
end = None
level = 0 # because nesting
for tok in token_list:
# Looking for the start of string.
if start is None:
if tok.type == token.NAME and tok.string == "dprint":
start = tok.start[1] # we get the proper value later.
continue
if end is None:
if tok.type != token.OP:
continue
if tok.string == "(":
if level == 0: # opening parens
start = tok.end[1]
level += 1
elif tok.string == ")":
level -= 1
if level == 0: # closing parens
end = tok.start[1]
# This is fine since we don't need more information
break
return line[start:end]
def _format_value(value):
"""Convert to a string or be very visibly wrong.
"""
try:
val_str = repr(value)
except Exception:
val_str = "<could not convert to string>"
return val_str
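# Quick demonstration (illustrative only; assumes this module is executed
# directly rather than imported).
if __name__ == "__main__":
    # dprint returns its argument unchanged, so it can wrap any expression.
    total = dprint(sum(range(5))) + 1
    print("total is", total)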
| StarcoderdataPython |
3222832 | <filename>test/test_minimum_jerk_trajectory.py
import numpy as np
from movement_primitives.minimum_jerk_trajectory import MinimumJerkTrajectory
from numpy.testing import assert_array_almost_equal
def test_step_through_minimum_jerk_trajectory():
mjt = MinimumJerkTrajectory(3, 1.0, 0.01)
mjt.configure(start_y=np.zeros(3), goal_y=np.ones(3))
y = np.zeros(3)
yd = np.zeros(3)
y, yd = mjt.n_steps_open_loop(y, yd, 50)
assert_array_almost_equal(y, 0.49 * np.ones(3), decimal=2)
assert_array_almost_equal(yd, 1.9, decimal=2)
y, yd = mjt.n_steps_open_loop(y, yd, 1)
assert_array_almost_equal(y, 0.51 * np.ones(3), decimal=2)
assert_array_almost_equal(yd, 1.9, decimal=2)
y, yd = mjt.n_steps_open_loop(y, yd, 50)
assert_array_almost_equal(y, np.ones(3))
assert_array_almost_equal(yd, np.zeros(3))
mjt.reset()
y, yd = mjt.n_steps_open_loop(y, yd, 101)
assert_array_almost_equal(y, np.ones(3))
assert_array_almost_equal(yd, np.zeros(3))
| StarcoderdataPython |
3320544 | from pynwb import TimeSeries
import numpy as np
from bisect import bisect, bisect_left
def get_timeseries_tt(node: TimeSeries, istart=0, istop=None) -> np.ndarray:
"""
For any TimeSeries, return timestamps. If the TimeSeries uses starting_time and rate, the timestamps will be
generated.
Parameters
----------
node: pynwb.TimeSeries
istart: int, optional
Optionally sub-select the returned times - lower bound
istop: int, optional
Optionally sub-select the returned times - upper bound
Returns
-------
numpy.ndarray
"""
if node.timestamps is not None:
return node.timestamps[istart:istop]
else:
if not np.isfinite(node.starting_time):
starting_time = 0
else:
starting_time = node.starting_time
if istop is None:
return np.arange(istart, len(node.data)) / node.rate + starting_time
elif istop > 0:
return np.arange(istart, istop) / node.rate + starting_time
else:
return (
np.arange(istart, len(node.data) + istop - 1) / node.rate
+ starting_time
)
def get_timeseries_maxt(node: TimeSeries) -> float:
"""
Returns the maximum time of any TimeSeries
Parameters
----------
node: pynwb.TimeSeries
Returns
-------
float
"""
if node.timestamps is not None:
return node.timestamps[-1]
elif np.isnan(node.starting_time):
return (len(node.data) - 1) / node.rate
else:
return (len(node.data) - 1) / node.rate + node.starting_time
def get_timeseries_mint(node: TimeSeries) -> float:
"""
Returns the minimum time of any TimeSeries
Parameters
----------
node: pynwb.TimeSeries
Returns
-------
float
"""
if node.timestamps is not None:
return node.timestamps[0]
elif np.isnan(node.starting_time):
return 0
else:
return node.starting_time
def get_timeseries_in_units(node: TimeSeries, istart=None, istop=None):
"""
Convert data into the designated units
Parameters
----------
node: pynwb.TimeSeries
istart: int
istop: int
Returns
-------
numpy.ndarray, str
"""
data = node.data[istart:istop]
if node.conversion and np.isfinite(node.conversion):
data = data * node.conversion
unit = node.unit
else:
unit = None
return data, unit
def timeseries_time_to_ind(node: TimeSeries, time, ind_min=None, ind_max=None) -> int:
"""
Get the index of a certain time for any TimeSeries. For TimeSeries that use timestamps, bisect is used. You can
optionally provide ind_min and ind_max to constrain the search.
Parameters
----------
node: pynwb.TimeSeries
time: float
ind_min: int, optional
ind_max: int, optional
    Returns
    -------
    int
    """
if node.timestamps is not None:
kwargs = dict()
if ind_min is not None:
kwargs.update(lo=ind_min)
if ind_max is not None:
kwargs.update(hi=ind_max)
id_found = bisect_left(node.timestamps, time, **kwargs)
return id_found if id_found < len(node.data) else len(node.data) - 1
else:
if np.isnan(node.starting_time):
starting_time = 0
else:
starting_time = node.starting_time
id_found = int(np.ceil((time - starting_time) * node.rate))
return id_found if id_found < len(node.data) else len(node.data) - 1
def bisect_timeseries_by_times(
timeseries: TimeSeries, starts, duration: float, traces=None
):
"""
Parameters
----------
timeseries: TimeSeries
starts: iterable
time at which to bisect
duration: float
duration of window after start
traces: int
index into the second dim of data
Returns
-------
out: list
list with bisected arrays from data
"""
out = []
for start in starts:
if timeseries.rate is not None:
idx_start = int((start - timeseries.starting_time) * timeseries.rate)
idx_stop = int(idx_start + duration * timeseries.rate)
else:
idx_start = bisect(timeseries.timestamps, start)
idx_stop = bisect(timeseries.timestamps, start + duration, lo=idx_start)
if len(timeseries.data.shape) > 1 and traces is not None:
out.append(timeseries.data[idx_start:idx_stop, traces])
else:
out.append(timeseries.data[idx_start:idx_stop])
return out
def align_by_times_with_timestamps(
timeseries: TimeSeries, starts, duration: float, traces=None
):
"""
Parameters
----------
timeseries: TimeSeries
timeseries with variable timestamps
starts: array-like
starts in seconds
duration: float
duration in seconds
Returns
-------
out: list
list: length=(n_trials); list[0]: array, shape=(n_time, ...)
"""
assert timeseries.timestamps is not None, "supply timeseries with timestamps"
return bisect_timeseries_by_times(timeseries, starts, duration, traces)
def align_by_times_with_rate(
timeseries: TimeSeries, starts, duration: float, traces=None
):
"""
Parameters
----------
timeseries: TimeSeries
timeseries with variable timestamps
starts: array-like
starts in seconds
duration: float
duration in seconds
Returns
-------
out: list
list: length=(n_trials); list[0]: array, shape=(n_time, ...)
"""
assert timeseries.rate is not None, "supply timeseries with start_time and rate"
return np.array(bisect_timeseries_by_times(timeseries, starts, duration, traces))
def align_timestamps_by_trials(
timeseries: TimeSeries, starts, before: float, after: float
):
"""
Parameters
----------
timeseries: TimeSeries
timeseries with variable timestamps
starts: array-like
starts in seconds
    before: float
        time before each start in seconds
    after: float
        time after each start in seconds
Returns
-------
out: list
list: length=(n_trials); list[0]: array, shape=(n_time, ...)
"""
assert timeseries.timestamps is not None, "supply timeseries with timestamps"
out = []
for start in starts:
idx_start = bisect(timeseries.timestamps, start)
idx_stop = bisect(timeseries.timestamps, start + before + after, lo=idx_start)
out.append(timeseries.timestamps[idx_start:idx_stop])
return [list(np.array(i) - i[0] - before) for i in out]
def align_by_trials(
timeseries: TimeSeries,
start_label="start_time",
before=0.0,
after=1.0,
):
"""
Args:
timeseries: TimeSeries
start_label: str
default: 'start_time'
        before: float
            time before start_label in secs (positive goes back in time)
        after: float
            time after start_label in secs (positive goes forward in time)
Returns:
np.array(shape=(n_trials, n_time, ...))
"""
trials = timeseries.get_ancestor("NWBFile").trials
return align_by_time_intervals(timeseries, trials, start_label, before, after)
def align_by_time_intervals(
timeseries: TimeSeries,
intervals,
start_label="start_time",
before=0.0,
after=0.0,
traces=None,
):
"""
Args:
timeseries: pynwb.TimeSeries
intervals: pynwb.epoch.TimeIntervals
start_label: str
default: 'start_time'
        before: float
            time before start_label in secs (positive goes back in time)
        after: float
            time after start_label in secs (positive goes forward in time)
        traces: int, optional
            index into the second dimension of the data. Alignment uses
            timestamps when present, otherwise the constant rate and
            starting time of the TimeSeries.
Returns:
np.array(shape=(n_trials, n_time, ...))
"""
starts = np.array(intervals[start_label][:]) - before
if timeseries.rate is not None:
return align_by_times_with_rate(
timeseries, starts, duration=after + before, traces=traces
)
else:
return align_by_times_with_timestamps(
timeseries, starts, duration=after + before, traces=traces
)
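# Small usage sketch (added for illustration; the constructed TimeSeries and
# the values below are assumed, not part of the original module).
if __name__ == "__main__":
    ts = TimeSeries(name="demo", data=np.arange(100.0), unit="m",
                    rate=10.0, starting_time=0.0)
    print(get_timeseries_tt(ts, istart=0, istop=5))  # first five timestamps
    print(get_timeseries_mint(ts), get_timeseries_maxt(ts))
    print(timeseries_time_to_ind(ts, 2.5))  # index corresponding to t = 2.5 s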
| StarcoderdataPython |
1605940 | <filename>Site-Campus/sitecampus/migrations/0007_auto_20191121_1902.py<gh_stars>1-10
# Generated by Django 2.2.7 on 2019-11-21 19:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sitecampus', '0006_post_sutia'),
]
operations = [
migrations.AlterField(
model_name='autor',
name='semestre',
field=models.CharField(max_length=6),
),
migrations.AlterField(
model_name='post',
name='semestrepub',
field=models.CharField(max_length=6),
),
]
| StarcoderdataPython |
1647869 | <filename>python/cugraph/cugraph/tests/conftest.py
# Copyright (c) 2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import pytest
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from dask_cuda.initialize import initialize
from cugraph.comms import comms as Comms
from cugraph.dask.common.mg_utils import get_visible_devices
# module-wide fixtures
@pytest.fixture(scope="module")
def dask_client():
dask_scheduler_file = os.environ.get("SCHEDULER_FILE")
cluster = None
client = None
tempdir_object = None
if dask_scheduler_file:
# Env var UCX_MAX_RNDV_RAILS=1 must be set too.
initialize(enable_tcp_over_ucx=True,
enable_nvlink=True,
enable_infiniband=True,
enable_rdmacm=True,
# net_devices="mlx5_0:1",
)
client = Client(scheduler_file=dask_scheduler_file)
print("\ndask_client fixture: client created using "
f"{dask_scheduler_file}")
else:
# The tempdir created by tempdir_object should be cleaned up once
# tempdir_object goes out-of-scope and is deleted.
tempdir_object = tempfile.TemporaryDirectory()
cluster = LocalCUDACluster(local_directory=tempdir_object.name)
client = Client(cluster)
client.wait_for_workers(len(get_visible_devices()))
print("\ndask_client fixture: client created using LocalCUDACluster")
Comms.initialize(p2p=True)
yield client
Comms.destroy()
# Shut down the connected scheduler and workers
# therefore we will no longer rely on killing the dask cluster ID
# for MNMG runs
client.shutdown()
if cluster:
cluster.close()
print("\ndask_client fixture: client.close() called")
| StarcoderdataPython |
162911 | <reponame>Aditya-aot/Web-Scraping-of-car24-in-Python<gh_stars>0
from bs4 import BeautifulSoup
import pandas as pd
import requests
import re
url = 'https://www.cars24.com/buy-used-honda-cars-delhi-ncr/'
page = requests.get(url)
soup = BeautifulSoup(page.text,'html.parser')
Cars_dict = {}
cars_no = 0
no_page = 1
tables = soup.find_all("div", {"class": "col-sm-12 col-md-6 col-lg-4"})
while cars_no < 42:
    for table in tables:
        # Each listing card already holds the details we need; there is no
        # reason to re-request the page for every card.
        name = table.find('h3', {'class': '_1Nvyc _1Corb'}).text
model_date = table.find('li', {'itemprop': 'modelDate'}).text
model_date = int(model_date)
engine_1 = table.find('li', {'itemprop': 'vehicleEngine'}).text
engine = (re.split('•', engine_1))
engine = ''.join(engine)
price_1 = table.find('div', {'class': 'col-5 col-md-12 col-xl-5'}).text
        price_2 = re.split('₹|,', price_1)
        price = float(''.join(price_2))
        location = table.find('a', {'class': '_1Kids'}).text
        view_1 = table.find('a', {'class': '_3dFtM'}).text
        view_2 = re.split('K Views|Views', view_1)
        views = float(''.join(view_2))
        if 'K' in view_1:
            # assume a trailing 'K' means the view count is in thousands
            views = views * 1000
link = 'https://www.cars24.com/' + table.find('a', {'class': 'qD5mC'}).get('href')
car_1 = requests.get(link)
car = car_1.text
car_soup = BeautifulSoup(car, 'html.parser')
emi_1 = car_soup.find('span', {'class': '_3N4Rp'})
emi_2 = emi_1.text if emi_1 else "N/A"
emi_3 = (re.split('EMI starts @|,|', emi_2))
emi_4 = ''.join(emi_3)
if emi_4 != "N/A":
emi = float(emi_4)
else:
emi = 'null'
overviews = car_soup.find('ul', {"class": "_1wIhE"})
for overview in overviews:
detail = overview.text
if detail[0] == 'C':
car_id = (detail[6:-1] + detail[-1])
elif detail[0] == 'K':
km_driven = (detail[10:-1] + detail[-1])
elif detail[0] == 'F':
fuel_type = (detail[9:-1] + detail[-1])
elif detail[0] == 'O':
owner = (detail[5:-1] + detail[-1])
elif detail[0] == 'T':
transmission = (detail[12:-1] + detail[-1])
elif detail[0] == 'R':
rot = (detail[3:-1] + detail[-1])
cars_no = cars_no + 1
        Cars_dict[cars_no] = [name, model_date, engine, location, views, price, link, emi,
                              car_id, km_driven, fuel_type, owner, transmission, rot]
    # Fetch the next results page so the loop paginates instead of
    # re-parsing the first page over and over.
    no_page = no_page + 1
    url = 'https://www.cars24.com/buy-used-honda-cars-delhi-ncr/' + '?page=' + str(no_page)
    page = requests.get(url)
    soup = BeautifulSoup(page.text, 'html.parser')
    tables = soup.find_all("div", {"class": "col-sm-12 col-md-6 col-lg-4"})

cars_dict_df = pd.DataFrame.from_dict(Cars_dict, orient='index',
                                      columns=['Name_of_car', 'Model_Date', 'Engine', 'Location', 'Views',
                                               'Price_Rs', 'Link', 'Emi_Starts_At', 'Car_ID', 'KmS_Driven',
                                               'Fuel_Type', 'Owner', 'Transmission', 'RTO'])
print(cars_dict_df)
cars_dict_df.to_csv('about_cars24.csv')
| StarcoderdataPython |
3297064 | <filename>notes/migrations/0003_auto_20171008_1407.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-08 08:37
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('notes', '0002_auto_20171008_1406'),
]
operations = [
migrations.CreateModel(
name='Note',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=30)),
('description', models.TextField()),
('created_date', models.DateTimeField(default=datetime.datetime(2017, 10, 8, 8, 37, 29, 863199, tzinfo=utc))),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.RemoveField(
model_name='notes',
name='author',
),
migrations.DeleteModel(
name='notes',
),
]
| StarcoderdataPython |
1662390 | <gh_stars>10-100
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import d1_common
import d1_common.type_conversions
import d1_client.baseclient_1_2
class DataONEBaseClient_2_0(d1_client.baseclient_1_2.DataONEBaseClient_1_2):
"""Extend DataONEBaseClient_1_2 with functionality common between Member and
Coordinating nodes that was added in v2.0 of the DataONE infrastructure.
For details on how to use these methods, see:
https://releases.dataone.org/online/api-documentation-v2.0/apis/MN_APIs.html
https://releases.dataone.org/online/api-documentation-v2.0/apis/CN_APIs.html
"""
def __init__(self, *args, **kwargs):
"""See baseclient.DataONEBaseClient for args."""
super(DataONEBaseClient_2_0, self).__init__(*args, **kwargs)
self._log = logging.getLogger(__name__)
self._api_major = 2
self._api_minor = 0
self._pyxb_binding = d1_common.type_conversions.get_pyxb_binding_by_api_version(
self._api_major, self._api_minor
)
# =============================================================================
# v2.0 APIs shared between CNs and MNs.
# =============================================================================
def updateSystemMetadataResponse(self, pid, sysmeta_pyxb, vendorSpecific=None):
"""MNStorage.updateSystemMetadata(session, pid, sysmeta) → boolean
http://jenkins-1.dataone.org/documentation/unstable/API-Documentation-
development/apis/MN_APIs.html#MNStorage.updateSystemMetadata.
Args:
pid:
sysmeta_pyxb:
vendorSpecific:
Returns:
"""
mmp_dict = {
"pid": pid.encode("utf-8"),
"sysmeta": ("sysmeta.xml", sysmeta_pyxb.toxml("utf-8")),
}
return self.PUT("meta", fields=mmp_dict, headers=vendorSpecific)
def updateSystemMetadata(self, pid, sysmeta_pyxb, vendorSpecific=None):
response = self.updateSystemMetadataResponse(pid, sysmeta_pyxb, vendorSpecific)
return self._read_boolean_response(response)
| StarcoderdataPython |
112886 | from rest_framework import serializers
from ..models import Anime, Movie, Show
class AnimeSerializer(serializers.ModelSerializer):
class Meta:
model = Anime
fields = '__all__'
class MovieSerializer(serializers.ModelSerializer):
class Meta:
model = Movie
fields = '__all__'
class ShowSerializer(serializers.ModelSerializer):
class Meta:
model = Show
fields = '__all__'
| StarcoderdataPython |
1732014 | import shap
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
try:
import plotly.express as px
import plotly.graph_objects as go
except ModuleNotFoundError:
_has_plotly = False
_plotly_exception_message = (
'Plotly is required to run this pydrift functionality.'
)
else:
_has_plotly = True
_plotly_exception_message = None
from typing import List, Union, Dict, Tuple
from sklearn.pipeline import Pipeline
from pathlib import Path
from ..models import ScikitModel
from ..decorators import check_optional_module
class InterpretableDrift:
def __init__(self,
model: ScikitModel,
X_train: pd.DataFrame,
X_test: pd.DataFrame,
y_train: pd.DataFrame,
y_test: pd.DataFrame,
column_names: List[str]):
"""Inits `InterpretableDrift` for a given `model`,
`X_train` and `X_test` datasets and `column_names
"""
if isinstance(model, Pipeline):
X_train_to_shap = model[:-1].transform(X_train)
X_test_to_shap = model[:-1].transform(X_test)
model_to_shap = model.steps[-1][1]
else:
X_train_to_shap = X_train.copy()
X_test_to_shap = X_test.copy()
model_to_shap = model
self.model = model_to_shap
self.X_train_to_shap = pd.DataFrame(X_train_to_shap,
columns=column_names)
self.X_test_to_shap = pd.DataFrame(X_test_to_shap,
columns=column_names)
self.X_train = X_train
self.X_test = X_test
self.y_train = y_train
self.y_test = y_test
self.column_names = column_names
self.shap_values = np.empty(0)
def compute_shap_values(self) -> None:
"""Shap values depending on what model we are using
`shap.TreeExplainer` by default and if not it uses
`KernelExplainer`
Also provides compatibility with sklearn pipelines
`shap_values` are stored in `self.shap_values`
"""
with warnings.catch_warnings():
# Some `shap` warnings are not useful for this implementation
warnings.simplefilter("ignore")
try:
explainer = shap.TreeExplainer(
model=self.model,
feature_perturbation='tree_path_dependent'
)
shap_values_arguments = dict(X=self.X_test_to_shap)
except Exception:
def model_predict(data_array):
data_frame = pd.DataFrame(data_array,
columns=self.column_names)
return self.model.predict_proba(data_frame)[:, 1]
explainer = shap.KernelExplainer(model=model_predict,
data=shap.sample(
self.X_train_to_shap,
100
),
link='logit')
shap_values_arguments = dict(X=self.X_test_to_shap,
l1_reg='aic')
self.shap_values = explainer.shap_values(**shap_values_arguments)
def most_discriminative_features_plot(self,
save_plot_path: Path = None) -> None:
"""Plots most discriminative features with its
shap values
You can save the plot in `save_plot_path` path
"""
if self.shap_values.size == 0:
self.compute_shap_values()
shap.summary_plot(self.shap_values,
self.X_test_to_shap,
plot_type='bar',
title='Most Discriminative Features',
show=True if not save_plot_path else False)
if save_plot_path:
plt.savefig(save_plot_path, bbox_inches='tight')
@check_optional_module(has_module=_has_plotly,
exception_message=_plotly_exception_message)
def both_histogram_plot(self,
column: str,
fillna_value: Union[str, float, int] = None,
nbins: int = None,
save_plot_path: Path = None) -> None:
"""Plots histogram for the column passed
in `column`
You can set `nbins` to any number that makes
your plot better
You can save the plot in `save_plot_path` path
Requires `plotly`
"""
if not _has_plotly:
            raise ModuleNotFoundError(
                _plotly_exception_message
            )
X_train_column = self.X_train.loc[:, [column]]
X_test_column = self.X_test.loc[:, [column]]
if fillna_value:
X_train_column.fillna(fillna_value, inplace=True)
X_test_column.fillna(fillna_value, inplace=True)
X_train_total_nans = X_train_column[column].isna().sum()
X_test_total_nans = X_test_column[column].isna().sum()
if X_train_total_nans or X_test_total_nans:
warnings.warn(
f'Column {column} has '
f'{X_train_total_nans + X_test_total_nans} nan values, '
f'you can use `fillna_value` if you need it'
)
X_train_column['is_left'] = self.y_train.to_numpy()
X_test_column['is_left'] = self.y_test.to_numpy()
X_train_and_test = pd.concat([X_train_column, X_test_column])
fig = px.histogram(X_train_and_test,
title=f'Both Histogram Normalized For {column}',
x=column,
color='is_left',
barmode='group',
nbins=nbins,
histnorm='probability density')
fig.update_layout(bargroupgap=.1)
if save_plot_path:
fig.write_html(save_plot_path)
else:
fig.show()
@check_optional_module(has_module=_has_plotly,
exception_message=_plotly_exception_message)
def feature_importance_vs_drift_map_plot(
self,
dict_each_column_drift_coefficient: Dict[str, float],
top: int = 10,
save_plot_path: Path = None) -> None:
"""Feature importance versus drift coefficient map,
with this plot you can visualize the most critical
features involved in your model drift process
By default shows you the top 10 most important features
but you can customize it with `top` parameter
You can save the plot in `save_plot_path` path
"""
df_feature_importance = pd.DataFrame(
zip(self.column_names,
np.abs(self.shap_values).mean(axis=0)),
columns=['Feature Name', 'Feature Importance']
)
df_feature_importance['Drift Coefficient'] = (
(df_feature_importance['Feature Name']
.map(dict_each_column_drift_coefficient))
)
value_min = df_feature_importance['Feature Importance'].min()
value_max = df_feature_importance['Feature Importance'].max()
df_feature_importance['Feature Importance Scaled'] = (
(df_feature_importance['Feature Importance'] - value_min)
/ (value_max - value_min)
)
df_feature_importance_to_plot = (
df_feature_importance
.sort_values('Feature Importance Scaled', ascending=False)
.nlargest(top, columns='Feature Importance Scaled')
)
fig = px.scatter(df_feature_importance_to_plot,
x='Feature Importance Scaled',
y='Drift Coefficient',
text='Feature Name',
hover_name='Feature Name',
hover_data={'Feature Importance Scaled': ':.2f',
'Drift Coefficient': ':.2f',
'Feature Importance': False,
'Feature Name': False},
title='Feature Importance vs Drift Map')
fig.update_traces(marker=dict(size=10, opacity=.75))
axis_value_min, axis_value_medium, axis_value_max = 0, .5, 1
fig.add_trace(
go.Scatter(
x=[axis_value_min + .15, axis_value_max - .15,
axis_value_max - .15, axis_value_min + .15],
y=[axis_value_max + .05, axis_value_max + .05,
axis_value_min - .05, axis_value_min - .05],
text=['NON-IMPORTANT FEATURES DRIFTED',
'IMPORTANT FEATURES AND DRIFTED',
'IMPORTANT FEATURES NON-DRIFTED',
'NON-IMPORTANT FEATURES NON-DRIFTED'],
mode="text",
showlegend=False
)
)
fig.add_shape(
type="rect",
x0=axis_value_min,
y0=axis_value_min,
x1=axis_value_medium,
y1=axis_value_medium,
fillcolor="khaki",
opacity=.25
)
fig.add_shape(
type="rect",
x0=axis_value_min,
y0=axis_value_medium,
x1=axis_value_medium,
y1=axis_value_max,
fillcolor="coral",
opacity=.25
)
fig.add_shape(
type="rect",
x0=axis_value_medium,
y0=axis_value_min,
x1=axis_value_max,
y1=axis_value_medium,
fillcolor="limegreen",
opacity=.25
)
fig.add_shape(
type="rect",
x0=axis_value_medium,
y0=axis_value_medium,
x1=axis_value_max,
y1=axis_value_max,
fillcolor="crimson",
opacity=.25
)
fig.update_layout(
xaxis=dict(range=[axis_value_min - .05, axis_value_max + .05]),
yaxis=dict(range=[axis_value_min - .1, axis_value_max + .1])
)
if save_plot_path:
fig.write_html(save_plot_path)
else:
fig.show()
@staticmethod
@check_optional_module(has_module=_has_plotly,
exception_message=_plotly_exception_message)
def weights_plot(weights: np.array, save_plot_path: Path = None) -> None:
"""Weights plot, the higher the weight, the more
similar the train data is to the test data
This will be used to retrain the model
You can save the plot in `save_plot_path` path
"""
fig = px.histogram(weights,
title='Weights From The Discriminative Model')
fig.update_layout(showlegend=False)
if save_plot_path:
fig.write_html(save_plot_path)
else:
fig.show()
@staticmethod
def _drop_outliers_between(
df: pd.DataFrame,
feature: str,
percentiles: Tuple[float,
float] = (.05, .95)) -> pd.DataFrame:
"""Drop outliers for column `feature` of
`df` between `percentiles`
"""
lower, upper = percentiles
return df[df[feature].between(df[feature].quantile(lower),
df[feature].quantile(upper))]
@staticmethod
    def _convert_to_mid_interval_with_max_bins(
            serie: pd.Series,
            bins: int = 25) -> pd.Series:
        """Convert `serie` values to the midpoints of a binned
        version of it, using `bins` intervals
        """
return (pd
.cut(serie, bins=bins)
.apply(lambda x: x.mid)
.astype(float))
@check_optional_module(has_module=_has_plotly,
exception_message=_plotly_exception_message)
def partial_dependence_comparison_plot(
self,
feature: str,
percentiles: Tuple[float, float] = (.05, .95),
max_bins: int = 25,
save_plot_path: Path = None) -> None:
"""Partial dependence plot for `feature` in
both datasets predictions
You can save the plot in `save_plot_path` path
"""
X_train_copy = self.X_train.copy()
X_test_copy = self.X_test.copy()
X_train_copy['is_left'] = '1'
X_test_copy['is_left'] = '0'
X_train_copy['Prediction'] = (
self.model.predict_proba(X_train_copy)[:, 1]
)
X_test_copy['Prediction'] = (
self.model.predict_proba(X_test_copy)[:, 1]
)
is_numeric = pd.api.types.is_numeric_dtype(
X_train_copy[feature]
)
if is_numeric:
X_train_copy = (
self._drop_outliers_between(X_train_copy,
feature=feature,
percentiles=percentiles)
)
X_test_copy = (
self._drop_outliers_between(X_test_copy,
feature=feature,
percentiles=percentiles)
)
bins = min(X_train_copy[feature].nunique(),
max_bins)
X_train_copy[feature] = (
self._convert_to_mid_interval_with_max_bins(
X_train_copy[feature],
bins
)
)
X_test_copy[feature] = (
self._convert_to_mid_interval_with_max_bins(
X_test_copy[feature],
bins
)
)
X_both = pd.concat([X_train_copy, X_test_copy])
data_to_plot = (
X_both
.groupby(['is_left', feature])
.Prediction
.mean()
.reset_index()
)
if is_numeric:
fig = px.scatter(data_to_plot,
x=feature,
y='Prediction',
color='is_left',
trendline="ols")
else:
fig = px.bar(data_to_plot,
x=feature,
y='Prediction',
color='is_left',
barmode='group')
fig.update_layout(title=f'Partial Dependence For {feature}',
bargroupgap=.1)
if save_plot_path:
fig.write_html(save_plot_path)
else:
fig.show()
@check_optional_module(has_module=_has_plotly,
exception_message=_plotly_exception_message)
def drift_by_sorted_bins_plot(self,
feature: str,
bins: int = 10,
save_plot_path: Path = None) -> None:
"""Concat all the data in both dataframes and
sort it by `feature`, then it cuts in `bins`
number of bins and computes quantity of registers
in each bin
You can save the plot in `save_plot_path` path
"""
X_train_copy = self.X_train.copy()
X_test_copy = self.X_test.copy()
X_train_copy['is_left'] = '1'
X_test_copy['is_left'] = '0'
X_both = (
pd
.concat([X_train_copy[[feature, 'is_left']],
X_test_copy[[feature, 'is_left']]])
.sample(frac=1)
.reset_index()
)
is_categorical = not pd.api.types.is_numeric_dtype(
X_both[feature]
)
if is_categorical:
X_both[feature] = X_both[feature].astype('category')
X_both['rank'] = (
X_both[feature].cat.codes .rank(method='first') if is_categorical
else X_both[feature].rank(method='first')
)
X_both['Bin Number'] = pd.qcut(X_both['rank'],
q=bins,
labels=range(1, bins + 1))
fig = px.histogram(X_both,
x='Bin Number',
color='is_left',
nbins=bins,
barmode='group')
fig.update_layout(title=f'Drift By Bin For {feature}',
bargroupgap=.1,
xaxis=dict(tickmode='linear'))
if save_plot_path:
fig.write_html(save_plot_path)
else:
fig.show()
| StarcoderdataPython |
3375891 | <filename>jes/jes-v5.020-linux/demos/turtle.py
w = makeWorld(500, 500)
t = makeTurtle(w)
penUp(t)
moveTo(t, int(500 / 3), 250)
penDown(t)
for i in range(0, 360):
turn(t, 1)
forward(t, 3)
| StarcoderdataPython |
1659550 | import numpy as np
import cv2
import matplotlib.pyplot as plt
import pandas as pd
from scipy.optimize import linear_sum_assignment
from scipy import signal
from sklearn.neighbors import KernelDensity
import copy
import os
import utm
import rasterio
from CountLine import CountLine
import sys
sys.path.append('/home/golden/general-detection/functions')
import koger_tracking as ktf
def mark_bats_on_image(image_raw, centers, radii=None,
scale_circle_size=5, contours=None,
draw_contours=False):
'''
Draw a bunch of circles on given image
image: 2D or 3D image
centers: shape(n,2) array of circle centers
radii: list of circle radii
'''
if len(image_raw.shape) < 2:
print('image has too few dimensions')
return None
if len(image_raw.shape) == 2:
color = 200
else:
if image_raw.shape[2] == 3:
color = (0, 255, 255)
else:
print('image is the wrong shape')
return None
image = np.copy(image_raw)
if radii is None:
radii = np.ones(len(centers))
for circle_ind, radius in enumerate(radii):
cv2.circle(image,
(centers[circle_ind, 0].astype(int),
centers[circle_ind, 1].astype(int)),
int(radius * scale_circle_size), color , 1)
if draw_contours and contours:
for contour in contours:
if len(contour.shape) > 1:
rect = cv2.minAreaRect(contour)
box = cv2.boxPoints(rect)
box_d = np.int0(box)
cv2.drawContours(image, [box_d], 0, (0,255,100), 1)
return image
def get_tracks_in_frame(frame_ind, track_list):
""" Return list of all tracks present in frame ind. """
tracks_in_frame = []
for track in track_list:
if (track['last_frame'] >= frame_ind
and track['first_frame'] <= frame_ind):
tracks_in_frame.append(track)
return tracks_in_frame
def draw_tracks_on_frame(frame, frame_ind, track_list,
positions=None, figure_scale=60,
track_width=2, position_alpha=.5,
draw_whole_track=False, shift=0):
""" Draw all active tracks and all detected bat locations on given frame.
frame: loaded image - np array
frame_ind: frame number
track_list: list of all tracks in observation
positions: all detected bat positions in observation
figure_scale: how big to display output image
track_width: width of plotted tracks
position_alpha: alpha of position dots
draw_whole_track: Boolean draw track in the future of frame_ind
shift: compensate for lack of padding in network when drawing tracks
on input frames
"""
plt.figure(
figsize = (int(frame.shape[1] / figure_scale),
int(frame.shape[0] / figure_scale)))
plt.imshow(frame)
num_tracks = 0
for track in track_list:
if (track['last_frame'] >= frame_ind
and track['first_frame'] <= frame_ind):
rel_frame = frame_ind - track['first_frame']
if draw_whole_track:
plt.plot(track['track'][:, 0] + shift,
track['track'][:, 1] + shift,
linewidth=track_width)
else:
plt.plot(track['track'][:rel_frame, 0] + shift,
track['track'][:rel_frame, 1] + shift,
linewidth=track_width)
num_tracks += 1
if positions:
plt.scatter(positions[frame_ind][:,0] + shift,
positions[frame_ind][:,1] + shift,
c='red', alpha=position_alpha)
plt.title('Tracks: {}, Bats: {}'.format(num_tracks,
len(positions[frame_ind])))
def subtract_background(images, image_ind, background_sum):
'''
    Subtract an averaged background from the image. The average is taken over
    all frames currently held in `images` (past and future of the focal frame).
    images: 4d numpy array (num images, height, width, channels)
    image_ind: index in circular image array
    background_sum: sum of blue channel pixels across 0 dimension of images
'''
background = np.floor_divide(background_sum, images.shape[0])
# The order of subtraction means dark bats are now light in image_dif
image_dif = background - images[image_ind, :, :, 2]
return image_dif, background
def preprocess_to_binary(image, binary_thresh, background):
'''
    Converts 2D image to binary by thresholding against the background
    image: 2D np array
    binary_thresh: scale factor applied to the background image; pixels at or
        above the resulting threshold are set to 255, below are set to 0
    background: background image (2D, probably blue channel)
'''
# # Rescale image pixels within range
# image_rescale = exposure.rescale_intensity(
# image, in_range=(low_pix_value, high_pix_value), out_range=(0, 255))
image_rescale = image
# Binarize image based on threshold
min_difference = 5
threshold = binary_thresh * background
threshold = np.where(threshold < min_difference, min_difference, threshold)
binary_image = np.where(image < threshold, 0, 255)
return binary_image
def get_blob_info(binary_image, background=None, size_threshold=0):
'''
    Get contours from binary image. Then find the center and minimum bounding
    rectangle of each contour
    binary_image: 2D image
    background: 2D array used to see locally how dark the background is
    size_threshold: bounding-box area above which a blob is considered real
'''
contours, hierarchy = cv2.findContours(binary_image.astype(np.uint8).copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
centers = []
# Size of bounding rectangles
sizes = []
areas = []
# angle of bounding rectangle
angles = []
rects = []
good_contours = []
contours = [np.squeeze(contour) for contour in contours]
for contour_ind, contour in enumerate(contours):
if len(contour.shape) > 1:
rect = cv2.minAreaRect(contour)
if background is not None:
darkness = background[int(rect[0][1]), int(rect[0][0])]
if darkness < 30:
dark_size_threshold = size_threshold + 22
elif darkness < 50:
dark_size_threshold = size_threshold + 15
elif darkness < 80:
dark_size_threshold = size_threshold + 10
elif darkness < 100:
dark_size_threshold = size_threshold + 5
# elif darkness < 130:
# dark_size_threshold = size_threshold + 3
else:
dark_size_threshold = size_threshold
else:
dark_size_threshold = 0 # just used in if statement
area = rect[1][0] * rect[1][1]
if (area >= dark_size_threshold) or background is None:
centers.append(rect[0])
sizes.append(rect[1])
angles.append(rect[2])
good_contours.append(contour)
areas.append(area)
rects.append(rect)
if centers:
centers = np.stack(centers, 0)
sizes = np.stack(sizes, 0)
else:
centers = np.zeros((0,2))
return (centers, np.array(areas), good_contours, angles, sizes, rects)
def draw_circles_on_image(image, centers, sizes, rects=None):
'''
Draw a bunch of circles on given image
image: 2D or 3D image
centers: shape(n,2) array of circle centers
rects: list of minimum bounding rectangles
'''
if len(image.shape) < 2:
print('image has too few dimensions')
return None
if len(image.shape) == 2:
color = 200
rect_color = 100
else:
if image.shape[2] == 3:
color = (0, 255, 255)
rect_color = (0,255,100)
else:
print('image is the wrong shape')
return None
for circle_ind, size in enumerate(sizes):
cv2.circle(image, (centers[circle_ind, 0].astype(int), centers[circle_ind, 1].astype(int)),
int(np.max(size)), color , 1)
if rects:
for rect in rects:
box = cv2.boxPoints(rect)
box_d = np.int0(box)
cv2.drawContours(image, [box_d], 0, rect_color, 1)
return image
def update_circular_image_array(images, image_ind, image_files, frame_num, background_sum):
""" Add new image if nessesary and increment image_ind.
Also update sum of pixels across array for background subtraction.
If frame_num is less than half size of array than don't need to
replace image since intitally all images in average are in the future.
images: image array size (num images averaging, height, width, channel)
image_ind: index of focal frame in images
image_files: list of all image files in observation
frame_num: current frame number in observation
background_sum: sum of current frames blue dimension across frames
"""
if (frame_num > int(images.shape[0] / 2)
and frame_num < (len(image_files) - int(images.shape[0] / 2))):
replace_ind = image_ind + int(images.shape[0] / 2)
replace_ind %= images.shape[0]
# Subtract the pixel values that are about to be removed from background
background_sum -= images[replace_ind, :, :, 2]
image_file = image_files[frame_num + int(images.shape[0] / 2)]
image = cv2.imread(image_file)
images[replace_ind] = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Add new pixel values to the background sum
background_sum += images[replace_ind, :, :, 2]
image_ind += 1
# image_ind should always be in between 0 and images.shape - 1
image_ind = image_ind % images.shape[0]
return images, image_ind, background_sum
def initialize_image_array(image_files, focal_frame_ind, num_images):
""" Create array of num_images x h x w x 3.
Args:
image_files (list): sorted paths to all image files in observation
focal_frame_ind (int): number of the frame being process
num_images (int): number of frames used for background subtraction
return array, index in array where focal frame is located
"""
images = []
first_frame_ind = focal_frame_ind - (num_images // 2)
if num_images % 2 == 0:
# even
last_frame_ind = focal_frame_ind + (num_images // 2) - 1
else:
# odd
last_frame_ind = focal_frame_ind + (num_images // 2)
for file in image_files[first_frame_ind:last_frame_ind+1]:
image = cv2.imread(file)
images.append(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
images = np.stack(images)
focal_ind = num_images // 2
return(images, focal_ind)
def process_frame(images, focal_frame_ind, bat_thresh, background_sum, bat_area_thresh, debug=False):
"""Process bat frame.
images: n x h x w x c array where the n images are averaged together for background subtraction
focal_frame_ind: which index in images array should be processed
bat_thresh: float value to use for thresholding bat from background
background_sum: sum of all blue channel pixels across the n dimension of images
debug: if true return binary image
"""
size_threshold = bat_area_thresh
max_bats = 600
mean = np.mean(images[focal_frame_ind, :, :, 2])
if mean < 35:
max_bats = 200
if mean < 28:
max_bats = 100
if mean < 5:
print('Too dark...')
if debug:
return None, None, None, None, None, None, None, None
else:
return None, None, None, None, None, None, None
image_dif, background = subtract_background(images, focal_frame_ind, background_sum)
while True:
binary_image = preprocess_to_binary(image_dif, bat_thresh, background)
bat_centers, bat_areas, contours, rect_angles, bat_sizes, bat_rects = get_blob_info(
binary_image, background, size_threshold=size_threshold)
if len(bat_centers) < max_bats:
break
bat_thresh += 0.05
if debug:
return bat_centers, bat_areas, contours, rect_angles, bat_sizes, bat_rects, bat_thresh, binary_image
else:
return bat_centers, bat_areas, contours, rect_angles, bat_sizes, bat_rects, bat_thresh
def add_all_points_as_new_tracks(raw_track_list, positions, contours,
sizes, current_frame_ind, noise):
""" When there are no active tracks, add all new points to new tracks.
Args:
raw_track_list (list): list of tracks
positions (numpy array): p x 2
contours (list): p contours
current_frame_ind (int): current frame index
noise: how much noise to add to tracks initially
"""
for ind, (position, contour, size) in enumerate(zip(positions, contours, sizes)):
raw_track_list.append(
ktf.create_new_track(first_frame=current_frame_ind,
first_position=position, pos_index=ind,
noise=noise, contour=contour, size=size
)
)
return raw_track_list
def find_tracks(first_frame_ind, positions,
contours_files=None, contours_list=None,
sizes_list=None, max_frame=None, verbose=True,
tracks_file=None):
""" Take in positions of all individuals in frames and find tracks.
Args:
first_frame_ind (int): index of first frame of these tracks
positions (list): n x 2 for each frame
contours_files (list): list of files for contour info from each frame
contours_list: already loaded list of contours, only used if contours_file
is None
sizes_list (list): sizes info from each frame
return list of all tracks found
"""
raw_track_list = []
max_distance_threshold = 30
max_distance_threshold_noise = 30
min_distance_threshold = 0
max_unseen_time = 2
min_new_track_distance = 3
min_distance_big = 30
# #Create initial tracks based on the objects in the first frame
# raw_track_list = add_all_points_as_new_tracks(
# raw_track_list, positions[0], contours_list[0], sizes_list0, noise=0
# )
#try to connect points to the next frame
if max_frame is None:
max_frame = len(positions)
contours_file_ind = 0
previous_contours_seen = 0
if contours_files:
contours_list = np.load(contours_files[contours_file_ind], allow_pickle=True)
while first_frame_ind >= previous_contours_seen + len(contours_list):
contours_file_ind += 1
previous_contours_seen += len(contours_list)
contours_list = np.load(contours_files[contours_file_ind], allow_pickle=True)
print(f'using {contours_files[contours_file_ind]}')
elif not contours_list:
print("Needs contour_files or contour_list")
return
contours_ind = first_frame_ind - previous_contours_seen - 1
for frame_ind in range(first_frame_ind, max_frame):
contours_ind += 1
if contours_files:
if contours_ind >= len(contours_list):
# load next file
try:
contours_file_ind += 1
contours_list = np.load(contours_files[contours_file_ind], allow_pickle=True)
contours_ind = 0
except:
if tracks_file:
tracks_file_error = os.path.splitext(tracks_file)[0] + f'-error-{frame_ind}.npy'
print(tracks_file_error)
np.save(tracks_file_error, np.array(raw_track_list, dtype=object))
#get tracks that are still active (have been seen within the specified time)
active_list = ktf.calculate_active_list(raw_track_list, max_unseen_time, frame_ind)
if verbose:
if frame_ind % 10000 == 0:
print('frame {} processed.'.format(frame_ind))
if tracks_file:
np.save(tracks_file, np.array(raw_track_list, dtype=object))
if len(active_list) == 0:
#No existing tracks to connect to
#Every point in next frame must start a new track
raw_track_list = add_all_points_as_new_tracks(
raw_track_list, positions[frame_ind], contours_list[contours_ind],
sizes_list[frame_ind], frame_ind, noise=1
)
continue
# Make sure there are new points to add
new_positions = None
row_ind = None
col_ind = None
new_sizes = None
new_position_indexes = None
distance = None
contours = None
if len(positions[frame_ind]) != 0:
#positions from the next step
new_positions = positions[frame_ind]
contours = [np.copy(contour) for contour in contours_list[contours_ind]]
new_sizes = sizes_list[frame_ind]
raw_track_list = ktf.calculate_max_distance(
raw_track_list, active_list, max_distance_threshold,
max_distance_threshold_noise, min_distance_threshold,
use_size=True, min_distance_big=min_distance_big
)
distance = ktf.calculate_distances(
new_positions, raw_track_list, active_list
)
max_distance = ktf.create_max_distance_array(
distance, raw_track_list, active_list
)
assert distance.shape[1] == len(new_positions)
assert distance.shape[1] == len(contours)
assert distance.shape[1] == len(new_sizes)
# Some new points could be too far away from every existing track
raw_track_list, distance, new_positions, new_position_indexes, new_sizes, contours = ktf.process_points_without_tracks(
distance, max_distance, raw_track_list, new_positions, contours,
frame_ind, new_sizes
)
if distance.shape[1] > 0:
# There are new points can be assigned to existing tracks
#connect the dots from one frame to the next
row_ind, col_ind = linear_sum_assignment(np.log(distance + 1))
# for active_ind, track_ind in enumerate(active_list):
# if active_ind in row_ind:
# row_count = np.where(row_ind == active_ind)[0]
# raw_track_list[track_ind]['debug'].append(
# '{} dist {}, best {}'.format(
# frame_ind,
# distance[row_ind[row_count],
# col_ind[row_count]],
# np.min(distance[row_ind[row_count],
# :])
# )
# )
# best_col = np.argmin(distance[row_ind[row_count],
# :])
# row_count = np.where(col_ind == best_col)[0]
# raw_track_list[track_ind]['debug'].append(
# '{} row_ind {} col {} dist {} track {}'.format(
# frame_ind, row_ind[row_count],
# col_ind[row_count],
# distance[row_ind[row_count],
# col_ind[row_count]],
# active_list[row_ind[row_count][0]])
# )
                # In cases where there are fewer new points than existing tracks
                # some tracks won't get a new point. Just assign them to
# the closest point
row_ind, col_ind = ktf.filter_tracks_without_new_points(
raw_track_list, distance, row_ind, col_ind, active_list, frame_ind
)
# Check if tracks with big bats got assigned to small points which are
# probably noise
row_ind, col_ind = ktf.fix_tracks_with_small_points(
raw_track_list, distance, row_ind, col_ind, active_list, new_sizes, frame_ind)
# see if points got assigned to tracks that are farther
# than max_threshold_distance
# This happens when the closer track gets assigned
                # to a different point
row_ind, col_ind = ktf.filter_bad_assigns(raw_track_list, active_list, distance, max_distance,
row_ind, col_ind
)
raw_track_list = ktf.update_tracks(raw_track_list, active_list, frame_ind,
row_ind, col_ind, new_positions,
new_position_indexes, new_sizes, contours,
distance, min_new_track_distance)
raw_track_list = ktf.remove_noisy_tracks(raw_track_list)
raw_track_list = ktf.finalize_tracks(raw_track_list)
if tracks_file:
np.save(tracks_file, np.array(raw_track_list, dtype=object))
print('{} final save.'.format(os.path.basename(os.path.dirname(tracks_file))))
return raw_track_list
def get_tracked_bats_in_frame(image_files, focal_frame_ind, bat_thresh, bat_area_thresh):
centers_list = []
contours_list = []
sizes_list = []
clip_length = 5
array_size = 31
images, frame_buffer_ind = initialize_image_array(image_files, focal_frame_ind, array_size)
background_sum = np.sum(images[:,:,:,2], 0, dtype=np.int16)
for video_frame_ind in range(focal_frame_ind, focal_frame_ind+clip_length):
bat_centers, bat_areas, bat_contours, _, _, _, bat_thresh = process_frame(
images, frame_buffer_ind, bat_thresh, background_sum,
bat_area_thresh, debug=False)
centers_list.append(bat_centers)
contours_list.append(bat_contours)
sizes_list.append(bat_areas)
images, frame_buffer_ind, background_sum = update_circular_image_array(
images, frame_buffer_ind, image_files, video_frame_ind, background_sum)
raw_tracks = find_tracks(0, centers_list,
contours_list=contours_list,
sizes_list=sizes_list
)
return raw_tracks, centers_list
# return raw_tracks, centers_list, distance, max_distance, active_list, all_pre_distances, all_row_inds, all_col_inds
# return(connected_distance, connected_size)
def piecewise_linear(x, x0, y0, k1, k2):
return np.piecewise(x, [x < x0],
[lambda x:k1*x + y0-k1*x0, lambda x:k2*x + y0-k2*x0]
)
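# Illustrative sketch (not part of the original pipeline): evaluating piecewise_linear
# on darkness values with made-up parameters. Real parameter values come from fitting
# groundtruth accuracy data elsewhere; numpy is assumed imported as np, as in this module.
def _example_piecewise_linear():
    darkness = np.linspace(0, 255, 5)
    # hypothetical fit: break point x0=100, intercept y0=0.9, slopes k1=0.0, k2=-0.002
    params = (100.0, 0.9, 0.0, -0.002)
    return piecewise_linear(darkness, *params)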
def get_bat_accumulation(crossing_frames, obs=None, parameters=None,
w_multiplier=True, w_darkness=True, w_frac=True):
""" Create and return cummulative sum of bats crossing count line over the course of
list of given positive and negative crossing frames.
crossing_frames: list of frame that each track crosses line. Positive if leaving
negative if going
obs: observation dictionary.
parameters: list of parameters of piecewise linear function
w_multiplier: multiply each bat crossing by apropriate bat multiplier for camera etc.
w_darkness: scale each bat crossing by apropriate accuracy corrrection based on frame darkness
w_frac: scale each bat crossing by fraction of total circle that camera sees
"""
if not np.any(crossing_frames):
return np.zeros(1)
last_crossing_frame = np.max(np.abs(crossing_frames))
crossing_per_frame = np.zeros(last_crossing_frame+1)
if obs and parameters:
accurracies = piecewise_linear(obs['darkness'], *parameters)
for crossing_frame, bm, acc in zip(crossing_frames, obs['multiplier'], accurracies):
scale = 1
if w_multiplier:
scale *= bm
if w_darkness:
scale *= (1/acc)
if crossing_frame < 0:
crossing_per_frame[-crossing_frame] -= scale
elif crossing_frame > 0:
crossing_per_frame[crossing_frame] += scale
if w_frac:
crossing_per_frame *= obs['fraction_total']
else:
for crossing_frame in crossing_frames:
if crossing_frame < 0:
crossing_per_frame[-crossing_frame] -= 1
elif crossing_frame > 0:
crossing_per_frame[crossing_frame] += 1
return np.cumsum(crossing_per_frame)
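# Minimal usage sketch (illustrative only): signed crossing frames produce a running net
# count. Without obs/parameters each crossing simply counts as +/-1.
def _example_bat_accumulation():
    # hypothetical crossings: tracks cross forward at frames 3 and 5, backward at frame 8
    cumulative = get_bat_accumulation([3, 5, -8])
    # cumulative[-1] == 1 here: two forward crossings minus one backward crossing
    return cumulative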
def threshold_short_tracks(raw_track_list, min_length_threshold=2):
"""Only return tracks that are longer than min_length_threshold."""
track_lengths = []
track_list = []
for track_num, track in enumerate(raw_track_list):
if isinstance(track['track'], list):
track['track'] = np.array(track['track'])
track_length = track['track'].shape[0]
if track_length >= min_length_threshold:
track_lengths.append(track['track'].shape[0])
track_list.append(track)
return track_list
def calculate_height(wingspan_pixels, camera_constant, wingspan_meters):
    ''' Calculate a bat's height above the ground, assuming wingspan_meters is correct.
camera_constant = (frame pixels / 2) / tan(fov / 2)
height = constant * wingspan_meters / wingspan_pixels
'''
return camera_constant * wingspan_meters / wingspan_pixels
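# Worked example with hypothetical numbers: for a 4096-pixel-wide frame and a 60 degree
# horizontal field of view, camera_constant = (4096 / 2) / tan(30 deg) ~ 3547, so a bat
# with a 0.8 m wingspan spanning 20 pixels sits roughly 3547 * 0.8 / 20 ~ 142 m up.
def _example_calculate_height():
    camera_constant = (4096 / 2) / np.tan(np.radians(60) / 2)
    return calculate_height(wingspan_pixels=20, camera_constant=camera_constant,
                            wingspan_meters=0.8)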
def calculate_bat_multiplier_simple(height, horizontal_fov, distance_to_center):
    ''' Calculate how many bats one bat at a given height and camera location represents.
height: height of bat
horizontal_fov: horizontal field of view of camera (degrees)
distance_to_center: distance from camera to center of colony
    ASSUMES CIRCUMFERENCE IS MUCH LARGER THAN WIDTH OF SPACE SEEN
    circumference c = 2 * pi * distance_to_center
width of seen space w = 2 * height * tan(horizontal_fov / 2)
multiplier = c / w
'''
c = 2 * np.pi * distance_to_center
horizontal_fov_rad = horizontal_fov * np.pi / 180
w = 2 * height * np.tan(horizontal_fov_rad / 2)
return c / w
def calculate_bat_multiplier(height, horizontal_fov, distance_to_center):
    ''' Calculate how many bats one bat at a given height and camera
    location represents.
height: height of bat
horizontal_fov: horizontal field of view of camera (degrees)
distance_to_center: distance from camera to center of colony
phi = arctan((height*tan(horizontal_fov/2)) / distance to center)
multiplier = pi / phi
'''
horizontal_fov_rad = horizontal_fov * np.pi / 180
distance_to_center = np.max([distance_to_center, 10e-5])
phi = np.arctan((height * np.tan(horizontal_fov_rad / 2))
/ distance_to_center
)
return np.pi/phi
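# Illustrative comparison (hypothetical numbers): the arctan-based multiplier and the
# simpler circumference/width approximation nearly coincide when height*tan(fov/2) is
# small relative to distance_to_center, and diverge as that viewing angle grows.
def _example_compare_multipliers():
    height, fov, dist = 100.0, 60.0, 1000.0
    return (calculate_bat_multiplier_simple(height, fov, dist),
            calculate_bat_multiplier(height, fov, dist))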
def combined_bat_multiplier(frame_width, wingspan_meters,
wingspan_pixels, camera_distance):
""" Calculates bat multiplier.
Args:
frame_width: frame width in pixels
wingspan_meters: bat wingspan in meters
wingspan_pixels: bat wingspan in pixels
camera_distance: distance from forest point to camera in meters
should be a single value or an array of distances with same
shape as wingspan_pixels
Returns:
bat multiplier: float
"""
denominator = np.arctan(
(frame_width*wingspan_meters)
/ (2*wingspan_pixels*camera_distance)
)
return np.pi / denominator
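# Minimal usage sketch (hypothetical values): a single scalar multiplier. Because the body
# is pure numpy, wingspan_pixels and camera_distance may also be arrays of per-track values.
def _example_combined_multiplier():
    return combined_bat_multiplier(frame_width=4096, wingspan_meters=0.8,
                                   wingspan_pixels=20.0, camera_distance=1000.0)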
def get_rects(track):
""" Fit rotated bounding rectangles to each contour in track.
track: track dict with 'contour' key linked to list of cv2 contours
"""
rects = []
for contour in track['contour']:
if len(contour.shape) > 1:
rect = cv2.minAreaRect(contour)
rects.append(rect[1])
else:
rects.append((np.nan, np.nan))
return np.array(rects)
def get_wingspan(track):
""" Estimate wingspan in pixels from average of peak sizes of longest
rectangle edges.
"""
if not 'rects' in track.keys():
track['rects'] = get_rects(track)
max_edge = np.nanmax(track['rects'], 1)
max_edge = max_edge[~np.isnan(max_edge)]
peaks = signal.find_peaks(max_edge)[0]
if len(peaks) != 0:
mean_wing = np.nanmean(max_edge[peaks])
else:
mean_wing = np.nanmean(max_edge)
return mean_wing
def measure_crossing_bats(track_list, frame_height=None, frame_width=None,
count_across=False, count_out=True, num_frames=None,
with_rects=True):
""" Find and quantify all tracks that cross middle line.
track_list: list of track dicts
frame_height: height of frame in pixels
frame_width: width of frame in pixels
count_across: count horizontal tracks
count_out: count vertical tracks
num_frames: number of frames in observation
with_rects: if True calculate rects if not already
in track and estimate wingspan and body size
"""
if count_across:
assert frame_width, "If vertical must specify frame width."
across_line = CountLine(int(frame_width/2), line_dim=0, total_frames=num_frames)
if count_out:
assert frame_height, "If horizontal must specify frame height."
out_line = CountLine(int(frame_height/2), line_dim=1, total_frames=num_frames)
crossing_track_list = []
for track_ind, track in enumerate(track_list[:]):
out_result = None
across_result = None
if count_out:
out_result, out_frame_num = out_line.is_crossing(track, track_ind)
if count_across:
across_result, across_frame_num = across_line.is_crossing(track, track_ind)
if out_result or across_result:
crossing_track_list.append(track)
            # result is 1 for a forward crossing, -1 for a backward crossing
if count_out:
if out_frame_num:
crossing_track_list[-1]['crossed'] = out_frame_num * out_result
else:
crossing_track_list[-1]['crossed'] = 0
if count_across:
if across_frame_num:
crossing_track_list[-1]['across_crossed'] = across_frame_num * across_result
else:
crossing_track_list[-1]['across_crossed'] = 0
            track['id'] = track_ind
if with_rects:
if not 'rects' in track.keys():
track['rects'] = get_rects(track)
                min_edge = np.nanmin(track['rects'], 1)
                min_edge = min_edge[~np.isnan(min_edge)]
                peaks = signal.find_peaks(min_edge)[0]
                if len(peaks) != 0:
                    mean_body = np.nanmean(min_edge[peaks])
                else:
                    mean_body = np.nanmean(min_edge)
crossing_track_list[-1]['mean_wing'] = get_wingspan(track)
crossing_track_list[-1]['mean_body'] = mean_body
return crossing_track_list
def get_camera_locations(observations, all_camera_locations, exclude=False):
"""Return dict of all camera locations that appear in observations.
observations: dict of observations. Probably all observations from one day.
all_camera_locations: dict containing all camera locations across all days
exclude: if True, exclude observations as marked in obs dict
"""
camera_locations = {}
for camera, obs in observations.items():
if exclude:
if 'exclude' in obs.keys():
if obs['exclude']:
continue
camera_locations[obs['camera']] = all_camera_locations[obs['camera']]
return camera_locations
def get_camera_distance(camera_utm, center_utm):
""" Calculate the distance between utm of camera and possible
forest center in meters.
camera_utm: [x, y] array
center_utm: [x, y] array
"""
diff = camera_utm - center_utm
    # Euclidean distance in the UTM plane
    return np.sqrt(np.sum(diff ** 2))
def get_camera_distances(camera_utms, center_utm):
""" Calculate distance from every given camera to specified center.
camera_utms: dict with camera names and locations
center_utm: np.array 2d, location of forest center
"""
camera_distances = {}
for camera, camera_utm in camera_utms.items():
camera_distances[camera] = get_camera_distance(camera_utm,
center_utm)
return camera_distances
def get_camera_angles(camera_utms, center_utm):
""" Calculate angle from center point to each camera location.
camera_utms: dict pairs of camera names and location info
center_utm: 2d np.array, location of forest center
"""
camera_angles = {}
for camera, camera_utm in camera_utms.items():
dif = camera_utm - center_utm
camera_angles[camera] = np.arctan2(dif[1], dif[0])
return camera_angles
def get_camera_borders(camera_utms, center_utm, jitter=False):
""" Get angles around forest center that evenly bisect camera positions.
camera_utms: dict pairs of camera names and location info
center_utm: 2d np.array, location of forest center
    jitter: if True, don't actually bisect cameras at the midpoint but draw
        the border from a gaussian
"""
camera_border = {}
camera_angles = get_camera_angles(camera_utms, center_utm)
for camera, camera_utm in camera_utms.items():
min_neg = -10000
min_pos = 100000
# for border case where focal is positive angle
# and closest cclock is negative
max_pos = 0
        # for same case as the last comment
all_pos = True
# for border case where focal is positive angle
# and closest cclock is negative
max_neg = 0
        # for same case as the last comment
all_neg = True
max_camera = None
camera_border[camera] = {'cclock': None,
'cclock_angle': None,
'clock': None,
'clock_angle': None
}
for alt_camera, alt_camera_utm in camera_utms.items():
if camera == alt_camera:
continue
dif = camera_angles[camera] - camera_angles[alt_camera]
if dif < 0:
all_pos = False
if dif > min_neg:
min_neg = dif
camera_border[camera]['cclock'] = alt_camera
camera_border[camera]['cclock_angle'] = dif / 2
if dif < max_neg:
max_neg = dif
max_camera = alt_camera
if dif > 0:
all_neg = False
if dif < min_pos:
min_pos = dif
camera_border[camera]['clock'] = alt_camera
camera_border[camera]['clock_angle'] = dif / 2
if dif > max_pos:
max_pos = dif
max_camera = alt_camera
if all_pos:
camera_border[camera]['cclock'] = max_camera
camera_border[camera]['cclock_angle'] = (max_pos - 2*np.pi) / 2
if all_neg:
camera_border[camera]['clock'] = max_camera
camera_border[camera]['clock_angle'] = (max_neg + 2*np.pi) / 2
if jitter:
for camera, border_info in camera_border.items():
camera_angle = camera_angles[camera]
clockwise_camera = border_info['clock']
angle_dif = border_info['clock_angle']
            # Three standard deviations fit between the camera pair
jitter_angle = np.random.normal(scale=angle_dif/3)
jitter_angle = np.maximum(-border_info['clock_angle'],
jitter_angle)
jitter_angle = np.minimum(border_info['clock_angle'],
jitter_angle)
camera_border[camera]['clock_angle'] += jitter_angle
if camera_border[camera]['clock_angle'] < 0:
camera_border[camera]['clock_angle'] += (2 * np.pi)
if camera_border[camera]['clock_angle'] >= (2 * np.pi):
camera_border[camera]['clock_angle'] -= (2 * np.pi)
camera_border[clockwise_camera]['cclock_angle'] += jitter_angle
if camera_border[clockwise_camera]['cclock_angle'] < -2 * np.pi:
camera_border[clockwise_camera]['cclock_angle'] += (2 * np.pi)
if camera_border[clockwise_camera]['cclock_angle'] >= (2 * np.pi):
camera_border[clockwise_camera]['cclock_angle'] -= (2 * np.pi)
return camera_border
def latlong_dict_to_utm(latlong_dict):
""" Convert dict of latlong coordinates to utm."""
utm_dict = {}
for key, latlong in latlong_dict.items():
utm_val = utm.from_latlon(*latlong)
utm_dict[key] = np.array([utm_val[0], utm_val[1]])
return utm_dict
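# Usage sketch (hypothetical coordinates): utm.from_latlon returns
# (easting, northing, zone_number, zone_letter); only easting/northing are kept above.
def _example_latlong_to_utm():
    latlong = {'CameraA': (-12.58, 30.24), 'CameraB': (-12.59, 30.25)}
    return latlong_dict_to_utm(latlong)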
def get_camera_fractions(camera_utms, center_utm, jitter=False):
""" Calculate the fraction of circle around center that each camera is closest to.
camera_utms: dict of camera locations
center_utm: 2d np array with utm coordinates of center
    jitter: if True, instead of evenly dividing the circle between
        cameras, draw the borders between cameras from a gaussian
    Returns a dict with the fraction for each camera
"""
if len(camera_utms) == 1:
return {list(camera_utms.keys())[0]: 1.0}
camera_borders = get_camera_borders(camera_utms,
center_utm,
jitter=jitter)
camera_fractions = {}
for camera, border_info in camera_borders.items():
angle = (-border_info['cclock_angle']
+ border_info['clock_angle']
)
camera_fractions[camera] = angle / (np.pi * 2)
return camera_fractions
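# Illustrative sketch (made-up UTM coordinates): with two cameras on opposite sides of
# the center, each is closest to half of the circle, so both fractions come out as 0.5.
def _example_camera_fractions():
    center = np.array([500000.0, 8600000.0])
    cameras = {'east': center + np.array([100.0, 0.0]),
               'west': center + np.array([-100.0, 0.0])}
    return get_camera_fractions(cameras, center)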
def get_day_total(observations, center_utm, all_camera_utms,
frame_width, wingspan, exclude=False,
correct_darkness=False, parameters=None):
""" Estimate total number of bats based on all observation counts
    and corresponding camera locations.
observations: dict of all observations for a specific day
center_utm: estimated location of forest center
all_camera_utms: dict of the utm locations of each camera
frame_width: width of camera frame in pixels
    wingspan: estimated wingspan of all bats in meters
    exclude: to manually remove certain cameras, ie shut off early etc.
correct_darkness: divide by accuracy estimated for given darkness
parameters: param values of linear piecewise function for darkness
error correction. Required if correct_darkness is True
"""
frac_sum = 0
total = 0
obs_totals = []
camera_utms = get_camera_locations(observations, all_camera_utms, exclude=True)
camera_fractions = get_camera_fractions(camera_utms, center_utm)
for obs in observations.values():
if exclude:
if 'exclude' in obs.keys():
if obs['exclude']:
continue
camera_distances = get_camera_distances(camera_utms, center_utm)
obs['multiplier'] = combined_bat_multiplier(frame_width,
wingspan,
obs['mean_wing'],
camera_distances[obs['camera']]
)
if correct_darkness:
assert parameters is not None, "Must pass parameters if correcting for darkness."
acc = piecewise_linear(obs['darkness'], *parameters)
obs['total_darkness'] = np.sum(obs['multiplier'] * obs['direction'] * (1/acc))
obs['total'] = np.sum(obs['multiplier'] * obs['direction'])
obs['total_unscaled'] = np.sum(obs['direction'])
obs['fraction_total'] = camera_fractions[obs['camera']]
frac_sum += obs['fraction_total']
if correct_darkness:
total += obs['total_darkness'] * obs['fraction_total']
obs_totals.append(obs['total_darkness'])
else:
total += obs['total'] * obs['fraction_total']
obs_totals.append(obs['total'])
if len(obs_totals) > 0:
mean_total = np.mean(obs_totals)
else:
mean_total = 0
return total, mean_total
def get_peak_freq(raw_freqs, raw_powers, min_freq):
""" Calculate max power frequency above min_freq.
raw_freqs: list of frequencies
    raw_powers: list of powers associated with each raw freq value
min_freq: minimum acceptable frequency value
"""
freqs = raw_freqs[raw_freqs>min_freq]
powers = raw_powers[raw_freqs>min_freq]
if np.any(np.isnan(freqs)) or len(freqs)==0:
return np.nan, np.nan
return freqs[np.argmax(powers)], powers[np.argmax(powers)]
def get_track_wingbeat_freqs(track, fps=25, min_freq=.75):
""" Calculate peak wing freqs and assosiated power.
track: track dict
fps: frames per second track temporal resolution
min_freq: minimum frequency for calculating peak_freq.
        Messily segmented tracks often have high power
close to 0 Hz because actual signal is not clear.
"""
assert 'max_edge' in track.keys(), "Track must have max_edge already computed"
if len(track['max_edge']) < 255:
nperseg = len(track['max_edge'])
else:
nperseg = 255
f, p = signal.welch(track['max_edge'], fps, nperseg=nperseg)
peaks = signal.find_peaks(p, threshold=0, height=1)[0]
track['freqs'] = f[peaks]
track['freqs_power'] = p[peaks]
peak_freq, freq_power = get_peak_freq(track['freqs'],
track['freqs_power'],
min_freq
)
track['peak_freq'] = peak_freq
track['peak_freq_power'] = freq_power
def add_wingbeat_info_to_tracks(tracks, fps=25, min_freq=.75,
remove_contours=False):
""" Add main wingbeat freq info for all tracks in tracks after calculating
    all necessary extra info. Can remove contours after getting bounding rects
to save memory.
tracks: list of track dicts
fps: frames per second - temporal resolution of tracks
min_freq: minimum frequency for calculating peak_freq.
        Messily segmented tracks often have high power
close to 0 Hz because actual signal is not clear.
remove_contours: if True remove raw contour info from track dicts.
Useful if need to save memory
"""
for track in tracks:
if 'rects' not in track.keys():
track['rects'] = get_rects(track)
if remove_contours:
try:
del track['contour']
except KeyError:
pass
if 'max_edge' not in track.keys():
track['max_edge'] = np.nanmax(track['rects'], 1)
if 'mean_wing' not in track.keys():
track['mean_wing'] = get_wingspan(track)
get_track_wingbeat_freqs(track, fps=fps, min_freq=min_freq)
def get_random_utm_in_mask(mask, rasterio_map, num_locations=1):
""" Get a random utm location within raster mask.
mask: 2d np array where forest has values > 0 and background < 0
rasterio_map: rasterio.io.DatasetReader for mask
num_locations: number of locations to return in forest
"""
in_hull = np.argwhere(mask>0)
ind = np.random.randint(0, in_hull.shape[0], num_locations)
area_x_origin = rasterio_map.bounds.left
area_y_origin = rasterio_map.bounds.bottom
xutm = in_hull[ind, 1] + area_x_origin
yutm = in_hull[ind, 0] + area_y_origin
utm_vals = np.stack([xutm, yutm], axis=1)
# squeeze for when only returning one value to remove
# extra dimension
return np.squeeze(utm_vals)
def get_wing_correction_distributions(validation_file, num_darkness_bins,
kde_bw_scale=1, should_plot=False):
""" Calculate wing correction distributions from human validation info.
validation_file: .csv file with human groundtruth info
num_darkness_bins: how many groups to split darkness range into
kde_bw_scale: kernel size used in kde calculation: data std. in bin * kde_bw_scale
should_plot: show histograms and resulting distributions
"""
wing_validation = pd.read_csv(validation_file)
max_darkness = wing_validation.loc[wing_validation['has_gt'], 'darkness'].max()
darkness_bins = np.linspace(0, max_darkness, num_darkness_bins+1)
darkness_bins[-1] = 255
wing_correction_kdes = []
for bin_num in range(num_darkness_bins):
rows_in_bin = (wing_validation['has_gt']
& (wing_validation['darkness'] > darkness_bins[bin_num])
& (wing_validation['darkness'] <= darkness_bins[bin_num+1])
& (wing_validation['error_norm'] > -1)
)
errors = wing_validation.loc[rows_in_bin, 'error_norm'].values
error_std = errors.std()
kde = KernelDensity(
kernel='gaussian', bandwidth=error_std*kde_bw_scale).fit(errors[..., np.newaxis])
wing_correction_kdes.append(kde)
if should_plot:
sorted_error = np.sort(errors, axis=0)
samples = np.linspace(-1,1,100)
log_dens = kde.score_samples(samples[..., np.newaxis])
fig, ax1 = plt.subplots()
color = 'tab:red'
ax1.hist(sorted_error, bins=40, density=True)
ax1.plot(samples, np.exp(log_dens), c='cyan')
if should_plot:
plt.figure()
for kde in wing_correction_kdes:
samples = np.linspace(-1,1,100)
log_dens = kde.score_samples(samples[..., np.newaxis])
plt.plot(samples, np.exp(log_dens),)
return wing_correction_kdes, darkness_bins
def get_kde_samples(obs, kde_list, darkness_bins):
""" Draw a sample for each track from appropriate kde distribution for tracks darkness.
obs: observation dictionary
kde_list: a kde distribution for each darkness bin
    darkness_bins: list of darkness thresholds between darkness bins;
        starts at zero, so len = num bins + 1
"""
kde_samples = np.zeros(len(obs['darkness']))
kde_inds = np.zeros(len(obs['darkness']))
for ind, (kde, min_bin_val, max_bin_val) in enumerate(zip(kde_list, darkness_bins[:-1], darkness_bins[1:])):
inds_in_bin = ((obs['darkness'] > min_bin_val)
& (obs['darkness'] <= max_bin_val))
bin_samples = np.squeeze(kde.sample(len(obs['darkness'])))
kde_samples[inds_in_bin] = bin_samples[inds_in_bin]
kde_inds[inds_in_bin] = ind
return kde_samples, kde_inds
def correct_wingspan(estimate, estimate_scale):
""" Correct the estimated wingspan based on groundtruth distribution.
    estimate: wingspan estimated from track
    estimate_scale: (estimate - groundtruth) / estimate
        we obviously don't know groundtruth in application, but
        estimate_scale is usually randomly drawn from the distribution
"""
corrected_est = estimate - estimate * estimate_scale
return corrected_est
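# Worked example (illustrative): with an estimate of 20 pixels and a sampled
# estimate_scale of 0.1 (estimate 10% above groundtruth), the corrected value is 18.
def _example_correct_wingspan():
    return correct_wingspan(estimate=20.0, estimate_scale=0.1)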
def save_fig(save_folder, plot_title, fig=None):
""" Convient default figure saving configuration."""
plot_name = plot_title.replace(' ', '-')
file = os.path.join(save_folder, plot_name+'.png')
if fig:
fig.savefig(file, bbox_inches='tight', dpi=600)
return
plt.savefig(file, bbox_inches='tight', dpi=600)
def smooth_track(track, kernel_size=12):
""" Smooth n x 2 track with averaging filter."""
kernel = np.ones(kernel_size) / kernel_size
x = np.convolve(track[:, 0], kernel, mode='valid')
y = np.convolve(track[:, 1], kernel, mode='valid')
return np.stack([x, y], 1)
def calculate_straightness(track):
""" Caclute straightness of n x 2 numpy track."""
track = smooth_track(track, kernel_size=12)
step_vectors = track[1:] - track[:-1]
step_sizes = np.linalg.norm(step_vectors, axis=1)
combined_steps = np.sum(step_sizes)
net_distance = np.linalg.norm(track[-1] - track[0])
return net_distance / combined_steps
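# Illustrative check on synthetic tracks: a straight trajectory gives a straightness near 1,
# a meandering one gives a value well below 1. Tracks must be longer than the 12-point
# smoothing kernel used in smooth_track.
def _example_straightness():
    t = np.linspace(0, 4 * np.pi, 200)
    straight = np.stack([t, 2 * t], 1)
    wiggly = np.stack([t, np.sin(t)], 1)
    return calculate_straightness(straight), calculate_straightness(wiggly)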
def get_middle_percentiles(values, lower_percentile, upper_percentile):
""" Return all values in values between lower and upper percentile."""
values = np.array(values)
values = values[~np.isnan(values)]
sorted_values = sorted(values)
lower_ind = int(lower_percentile * len(values))
upper_ind = int(upper_percentile * len(values) + 1)
return sorted_values[lower_ind:upper_ind]
def calc_movement_unit_vector(track, frame_height=1520):
""" Calculate the unit vector pointing from first position
to last position in track with bottom left origin
track: track dict
frame_height: height of frame in pixels the tracks came from
"""
track = np.copy(track['track'])
track[:, 1] = frame_height - track[:, 1]
diff = track[-1] - track[0]
unit_position = diff / np.linalg.norm(diff)
return unit_position
def calculate_polarization(tracks):
""" Following Couzin et al. 2002 calculate polarization of all bats
in tracks.
"""
direction_unit_vectors = []
for track in tracks:
direction_unit_vectors.append(
calc_movement_unit_vector(track))
direction_sum = np.sum(np.array(direction_unit_vectors), axis=0)
direction_magnitude = np.linalg.norm(direction_sum)
polarization = direction_magnitude / len(tracks)
return polarization
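# Illustrative check on synthetic track dicts (only the 'track' key is needed here):
# identical headings give a polarization of 1, opposing headings give 0, following the
# Couzin et al. 2002 definition used above.
def _example_polarization():
    aligned = [{'track': np.array([[0.0, 0.0], [10.0, 10.0]])} for _ in range(3)]
    opposed = [{'track': np.array([[0.0, 0.0], [10.0, 0.0]])},
               {'track': np.array([[10.0, 0.0], [0.0, 0.0]])}]
    return calculate_polarization(aligned), calculate_polarization(opposed)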
def get_camera_color_dict(colormap=plt.cm.tab10):
""" For consistent camerar colors across plots."""
camera_colors = {'FibweParking2': colormap(0),
'FibweParking': colormap(0),
'Chyniangale': colormap(.1),
'BBC': colormap(.2),
'Sunset': colormap(.3),
'NotChyniangale': colormap(.4),
'MusoleParking': colormap(.5),
'MusolePath2': colormap(.6),
'MusolePath': colormap(.6),
'Puku': colormap(.7),
'FibwePublic': colormap(.8),
'MusoleTower': colormap(.9),
}
return camera_colors
| StarcoderdataPython |
3277682 | #
# @lc app=leetcode id=525 lang=python3
#
# [525] Contiguous Array
#
# https://leetcode.com/problems/contiguous-array/description/
#
# algorithms
# Medium (40.05%)
# Likes: 1821
# Dislikes: 106
# Total Accepted: 125.4K
# Total Submissions: 313.1K
# Testcase Example: '[0,1]'
#
# Given a binary array, find the maximum length of a contiguous subarray with
# equal number of 0 and 1.
#
#
# Example 1:
#
# Input: [0,1]
# Output: 2
# Explanation: [0, 1] is the longest contiguous subarray with equal number of 0
# and 1.
#
#
#
# Example 2:
#
# Input: [0,1,0]
# Output: 2
# Explanation: [0, 1] (or [1, 0]) is a longest contiguous subarray with equal
# number of 0 and 1.
#
#
#
# Note:
# The length of the given binary array will not exceed 50,000.
#
#
# @lc code=start
from typing import List


class Solution:
def findMaxLength(self, nums: List[int]) -> int:
save = {0: -1}
count = 0
max_l = 0
for i, n in enumerate(nums):
if n:
count += 1
else:
count -= 1
if count in save:
max_l = max(max_l, i - save[count])
else:
save[count] = i
return max_l
# @lc code=end
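# Quick illustrative trace (not part of the submitted solution): treating 1 as +1 and 0 as -1,
# two positions with the same running count bracket a balanced subarray. For [0, 1, 0] the
# running counts are -1, 0, -1; count -1 repeats between indices 0 and 2, so the answer is 2.
# Example usage: Solution().findMaxLength([0, 1, 0]) -> 2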
| StarcoderdataPython |
3268576 | <filename>apps/greencheck/views.py
from datetime import date
from datetime import timedelta
from django.conf import settings
from google.cloud import storage
from django.views.generic.base import TemplateView
class GreenUrlsView(TemplateView):
template_name = "green_url.html"
def fetch_urls(self):
client = storage.Client()
bucket_name = settings.PRESENTING_BUCKET
bucket = client.get_bucket(bucket_name)
blobs = bucket.list_blobs()
return [(b.name, b.public_url) for b in blobs]
@property
def urls(self):
'''
        Set the date two weeks in the future. Two weeks from now
        it will fetch the urls again.
'''
accessed_date = getattr(self, '_date', None)
if not accessed_date or self._date < date.today():
self._date = date.today() + timedelta(weeks=2)
self._urls = self.fetch_urls()
return self._urls
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['urls'] = self.urls
return context
| StarcoderdataPython |
172628 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from ._enums import *
__all__ = [
'GetAmiBlockDeviceMappingResult',
'GetAmiFilterResult',
'GetAmiIdsFilterResult',
'GetAmiProductCodeResult',
'GetAutoscalingGroupsFilterResult',
'GetAvailabilityZoneFilterResult',
'GetAvailabilityZonesFilterResult',
'GetElasticIpFilterResult',
'GetPrefixListFilterResult',
'GetRegionsFilterResult',
]
@pulumi.output_type
class GetAmiBlockDeviceMappingResult(dict):
def __init__(__self__, *,
device_name: str,
ebs: Mapping[str, str],
no_device: str,
virtual_name: str):
"""
:param str device_name: The physical name of the device.
:param Mapping[str, str] ebs: Map containing EBS information, if the device is EBS based. Unlike most object attributes, these are accessed directly (e.g. `ebs.volume_size` or `ebs["volume_size"]`) rather than accessed through the first element of a list (e.g. `ebs[0].volume_size`).
:param str no_device: Suppresses the specified device included in the block device mapping of the AMI.
:param str virtual_name: The virtual device name (for instance stores).
"""
pulumi.set(__self__, "device_name", device_name)
pulumi.set(__self__, "ebs", ebs)
pulumi.set(__self__, "no_device", no_device)
pulumi.set(__self__, "virtual_name", virtual_name)
@property
@pulumi.getter(name="deviceName")
def device_name(self) -> str:
"""
The physical name of the device.
"""
return pulumi.get(self, "device_name")
@property
@pulumi.getter
def ebs(self) -> Mapping[str, str]:
"""
Map containing EBS information, if the device is EBS based. Unlike most object attributes, these are accessed directly (e.g. `ebs.volume_size` or `ebs["volume_size"]`) rather than accessed through the first element of a list (e.g. `ebs[0].volume_size`).
"""
return pulumi.get(self, "ebs")
@property
@pulumi.getter(name="noDevice")
def no_device(self) -> str:
"""
Suppresses the specified device included in the block device mapping of the AMI.
"""
return pulumi.get(self, "no_device")
@property
@pulumi.getter(name="virtualName")
def virtual_name(self) -> str:
"""
The virtual device name (for instance stores).
"""
return pulumi.get(self, "virtual_name")
@pulumi.output_type
class GetAmiFilterResult(dict):
def __init__(__self__, *,
name: str,
values: Sequence[str]):
"""
:param str name: The name of the AMI that was provided during image creation.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the AMI that was provided during image creation.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def values(self) -> Sequence[str]:
return pulumi.get(self, "values")
@pulumi.output_type
class GetAmiIdsFilterResult(dict):
def __init__(__self__, *,
name: str,
values: Sequence[str]):
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def values(self) -> Sequence[str]:
return pulumi.get(self, "values")
@pulumi.output_type
class GetAmiProductCodeResult(dict):
def __init__(__self__, *,
product_code_id: str,
product_code_type: str):
pulumi.set(__self__, "product_code_id", product_code_id)
pulumi.set(__self__, "product_code_type", product_code_type)
@property
@pulumi.getter(name="productCodeId")
def product_code_id(self) -> str:
return pulumi.get(self, "product_code_id")
@property
@pulumi.getter(name="productCodeType")
def product_code_type(self) -> str:
return pulumi.get(self, "product_code_type")
@pulumi.output_type
class GetAutoscalingGroupsFilterResult(dict):
def __init__(__self__, *,
name: str,
values: Sequence[str]):
"""
:param str name: The name of the filter. The valid values are: `auto-scaling-group`, `key`, `value`, and `propagate-at-launch`.
:param Sequence[str] values: The value of the filter.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the filter. The valid values are: `auto-scaling-group`, `key`, `value`, and `propagate-at-launch`.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def values(self) -> Sequence[str]:
"""
The value of the filter.
"""
return pulumi.get(self, "values")
@pulumi.output_type
class GetAvailabilityZoneFilterResult(dict):
def __init__(__self__, *,
name: str,
values: Sequence[str]):
"""
:param str name: The name of the filter field. Valid values can be found in the [EC2 DescribeAvailabilityZones API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html).
:param Sequence[str] values: Set of values that are accepted for the given filter field. Results will be selected if any given value matches.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the filter field. Valid values can be found in the [EC2 DescribeAvailabilityZones API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html).
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def values(self) -> Sequence[str]:
"""
Set of values that are accepted for the given filter field. Results will be selected if any given value matches.
"""
return pulumi.get(self, "values")
@pulumi.output_type
class GetAvailabilityZonesFilterResult(dict):
def __init__(__self__, *,
name: str,
values: Sequence[str]):
"""
:param str name: The name of the filter field. Valid values can be found in the [EC2 DescribeAvailabilityZones API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html).
:param Sequence[str] values: Set of values that are accepted for the given filter field. Results will be selected if any given value matches.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the filter field. Valid values can be found in the [EC2 DescribeAvailabilityZones API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html).
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def values(self) -> Sequence[str]:
"""
Set of values that are accepted for the given filter field. Results will be selected if any given value matches.
"""
return pulumi.get(self, "values")
@pulumi.output_type
class GetElasticIpFilterResult(dict):
def __init__(__self__, *,
name: str,
values: Sequence[str]):
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def values(self) -> Sequence[str]:
return pulumi.get(self, "values")
@pulumi.output_type
class GetPrefixListFilterResult(dict):
def __init__(__self__, *,
name: str,
values: Sequence[str]):
"""
:param str name: The name of the filter field. Valid values can be found in the [EC2 DescribePrefixLists API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribePrefixLists.html).
:param Sequence[str] values: Set of values that are accepted for the given filter field. Results will be selected if any given value matches.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the filter field. Valid values can be found in the [EC2 DescribePrefixLists API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribePrefixLists.html).
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def values(self) -> Sequence[str]:
"""
Set of values that are accepted for the given filter field. Results will be selected if any given value matches.
"""
return pulumi.get(self, "values")
@pulumi.output_type
class GetRegionsFilterResult(dict):
def __init__(__self__, *,
name: str,
values: Sequence[str]):
"""
:param str name: The name of the filter field. Valid values can be found in the [describe-regions AWS CLI Reference][1].
:param Sequence[str] values: Set of values that are accepted for the given filter field. Results will be selected if any given value matches.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the filter field. Valid values can be found in the [describe-regions AWS CLI Reference][1].
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def values(self) -> Sequence[str]:
"""
Set of values that are accepted for the given filter field. Results will be selected if any given value matches.
"""
return pulumi.get(self, "values")
| StarcoderdataPython |
142004 | <reponame>zhengxiawu/XNAS
import torch
import numpy as np
from torch.autograd import Variable
def _concat(xs):
return torch.cat([x.view(-1) for x in xs])
class Architect(object):
def __init__(self, net, cfg):
self.network_momentum = cfg.OPTIM.MOMENTUM
self.network_weight_decay = cfg.OPTIM.WEIGHT_DECAY
self.net = net
if cfg.DRNAS.REG_TYPE == "l2":
weight_decay = cfg.DRNAS.REG_SCALE
elif cfg.DRNAS.REG_TYPE == "kl":
weight_decay = 0
self.optimizer = torch.optim.Adam(
self.net.arch_parameters(),
lr=cfg.DARTS.ALPHA_LR,
betas=(0.5, 0.999),
weight_decay=weight_decay,
)
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def _compute_unrolled_model(self, input, target, eta, network_optimizer):
loss = self.net._loss(input, target)
theta = _concat(self.net.parameters()).data
try:
moment = _concat(
network_optimizer.state[v]["momentum_buffer"]
for v in self.net.parameters()
).mul_(self.network_momentum)
except:
moment = torch.zeros_like(theta)
dtheta = (
_concat(torch.autograd.grad(loss, self.net.parameters())).data
+ self.network_weight_decay * theta
)
unrolled_model = self._construct_model_from_theta(
theta.sub(eta, moment + dtheta)
).to(self.device)
return unrolled_model
def unrolled_backward(
self,
input_train,
target_train,
input_valid,
target_valid,
eta,
network_optimizer,
unrolled,
):
self.optimizer.zero_grad()
if unrolled:
self._backward_step_unrolled(
input_train,
target_train,
input_valid,
target_valid,
eta,
network_optimizer,
)
else:
self._backward_step(input_valid, target_valid)
self.optimizer.step()
# def pruning(self, masks):
# for i, p in enumerate(self.optimizer.param_groups[0]['params']):
# if masks[i] is None:
# continue
# state = self.optimizer.state[p]
# mask = masks[i]
# state['exp_avg'][~mask] = 0.0
# state['exp_avg_sq'][~mask] = 0.0
def _backward_step(self, input_valid, target_valid):
loss = self.net._loss(input_valid, target_valid)
loss.backward()
def _backward_step_unrolled(
self,
input_train,
target_train,
input_valid,
target_valid,
eta,
network_optimizer,
):
unrolled_model = self._compute_unrolled_model(
input_train, target_train, eta, network_optimizer
)
unrolled_loss = unrolled_model._loss(input_valid, target_valid)
unrolled_loss.backward()
dalpha = [v.grad for v in unrolled_model.arch_parameters()]
vector = [v.grad.data for v in unrolled_model.parameters()]
implicit_grads = self._hessian_vector_product(vector, input_train, target_train)
for g, ig in zip(dalpha, implicit_grads):
g.data.sub_(eta, ig.data)
for v, g in zip(self.net.arch_parameters(), dalpha):
if v.grad is None:
v.grad = Variable(g.data)
else:
v.grad.data.copy_(g.data)
def _construct_model_from_theta(self, theta):
model_new = self.net.new()
model_dict = self.net.state_dict()
params, offset = {}, 0
for k, v in self.net.named_parameters():
v_length = np.prod(v.size())
params[k] = theta[offset : offset + v_length].view(v.size())
offset += v_length
assert offset == len(theta)
model_dict.update(params)
model_new.load_state_dict(model_dict)
return model_new
def _hessian_vector_product(self, vector, input, target, r=1e-2):
R = r / _concat(vector).norm()
for p, v in zip(self.net.parameters(), vector):
p.data.add_(R, v)
loss = self.net._loss(input, target)
grads_p = torch.autograd.grad(loss, self.net.arch_parameters())
for p, v in zip(self.net.parameters(), vector):
p.data.sub_(2 * R, v)
loss = self.net._loss(input, target)
grads_n = torch.autograd.grad(loss, self.net.arch_parameters())
for p, v in zip(self.net.parameters(), vector):
p.data.add_(R, v)
return [(x - y).div_(2 * R) for x, y in zip(grads_p, grads_n)]
| StarcoderdataPython |
81840 | """
Various utility functions.
"""
from __future__ import absolute_import
import re
import string
from Crypto.Random import random
HASH_REGEXP = re.compile(r'^{([A-Z0-9]+)}(.*)$')
def generate_password(length=12, ascii_lower=True, ascii_upper=True, punctuation=True,
digits=True, strip_ambiguous=False, strip_dangerous=True):
"""
This function will return a password consisting of a mixture of lower
and upper case letters, numbers, and non-alphanumberic characters
in a ratio defined by the parameters.
:param length: Length of generated password.
:type length: int
:param ascii_lower: Whether to include ASCII lowercase chars.
:type ascii_lower: bool
:param ascii_upper: Whether to include ASCII uppercase chars.
:type ascii_upper: bool
:param punctuation: Whether to include punctuation.
:type punctuation: bool
:param strip_ambiguous: Whether to remove easily-confused (LlOo0iI1) chars
from the password.
:type strip_ambiguous: bool
    :param strip_dangerous: Whether to remove some of the more 'dangerous' punctuation
(e.g. quotes) from the generated password.
:type strip_dangerous: bool
:returns: The generated password.
:rtype: str
"""
pool = []
if ascii_lower:
pool.extend(string.ascii_lowercase)
if ascii_upper:
pool.extend(string.ascii_uppercase)
if punctuation:
pool.extend(string.punctuation)
if digits:
pool.extend(string.digits)
if strip_ambiguous:
pool = set(pool) - set("LlOo0iI1")
pool = list(pool) # Turn it back into a list since random.choice() needs indexing.
if strip_dangerous:
pool = set(pool) - set(r'"\'`')
pool = list(pool)
if not pool:
raise ValueError("No character classes enabled for password generation.")
    # Draw 'length' characters uniformly at random from the allowed pool
return ''.join([random.choice(pool) for _i in range(length)]) | StarcoderdataPython |
10016 | <filename>metabot2txt/display.py<gh_stars>0
import os
def display_on_editor(text):
with open('.metabot2txt', 'w') as f:
f.write(text)
os.system('gedit .metabot2txt')
def display_list_on_editor(texts):
if os.path.isfile('.metabot2txt'):
os.remove('.metabot2txt')
for text in texts:
with open('.metabot2txt', 'a') as f:
f.write(text)
f.write('\n=====================================\n')
os.system('gedit .metabot2txt')
| StarcoderdataPython |
4802399 | """Fixtures for cutty.repositories.domain.providers."""
from collections.abc import Callable
from typing import Any
from typing import Optional
from cutty.filesystems.adapters.dict import DictFilesystem
from cutty.filesystems.domain.path import Path
from cutty.repositories.domain.locations import Location
from cutty.repositories.domain.providers import Provider
from cutty.repositories.domain.repository import Repository
from cutty.repositories.domain.revisions import Revision
pytest_plugins = ["tests.fixtures.repositories.domain.stores"]
ProviderFunction = Callable[[Location, Optional[Revision]], Optional[Repository]]
def provider(name: str) -> Callable[[ProviderFunction], Provider]:
"""Decorator to create a provider from a function."""
def _decorator(function: ProviderFunction) -> Provider:
class _Provider(Provider):
def __init__(self) -> None:
super().__init__(name)
def __call__(
self, location: Location, revision: Optional[Revision] = None
) -> Optional[Repository]:
return function(location, revision)
return _Provider()
return _decorator
nullprovider = Provider("null")
"""Provider that matches no location."""
def constprovider(name: str, repository: Repository) -> Provider:
"""Provider that returns the same repository always."""
@provider(name)
def _(location: Location, revision: Optional[Revision]) -> Optional[Repository]:
return repository
return _
def dictprovider(mapping: Optional[dict[str, Any]] = None) -> Provider:
"""Provider that matches every URL with a repository."""
@provider("dict")
def _(location: Location, revision: Optional[Revision]) -> Optional[Repository]:
filesystem = DictFilesystem(mapping or {})
path = Path(filesystem=filesystem)
return Repository(location.name, path, revision)
return _
| StarcoderdataPython |
165824 | <filename>demos/appengine/app.py
"""
Demonstration of Duo authentication on Google App Engine.
To use, edit duo.conf, set gae_domain to an appropriate email domain,
and visit /.
"""
import ConfigParser
import logging
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
import duo_web
import cookie
configfile = 'duo.conf'
cookie_secret = '<KEY>'
# Mutable globals, used because GAE will recycle them if the app is
# already running. These should only be written to from main().
_DEBUG = True
application = None
ikey = None
skey = None
akey = None
host = None
gae_domain = None
class BasePage(webapp.RequestHandler, cookie.RequestHandler):
cookie_secret = cookie_secret
def user_email_parts(self, user):
"""
Return a (local, domain) tuple for user.
"""
return user.email().split('@')
def both_logged_in(self):
"""
Return True if the user has been authenticated with both
Google and Duo.
"""
user = users.get_current_user()
if user:
(username, _) = self.user_email_parts(user)
if self.get_secure_cookie('logged_in') == username:
return True
return False
class AuthenticatedPage(BasePage):
def get(self):
if not self.both_logged_in():
self.response.out.write(
'Log in as a user with a %s email to continue: '
'<a href="%s">Login.</a>' %
(gae_domain, 'primary_auth'))
return
self.response.out.write(
'Logged in as %s. <a href="%s">Logout.</a>' %
(users.get_current_user(), users.create_logout_url('/')))
class PrimaryAuthPage(BasePage):
def get(self):
user = users.get_current_user()
if user:
(_, domain) = self.user_email_parts(user)
if domain == gae_domain:
self.redirect('/secondary_auth')
return
self.redirect(users.create_login_url(self.request.uri))
return
class SecondaryAuthPage(BasePage):
def get(self):
if self.both_logged_in():
self.redirect('/')
return
user = users.get_current_user()
if not user:
self.redirect('/')
return
(username, _) = self.user_email_parts(user)
sig_request = duo_web.sign_request(ikey, skey, akey, username)
self.response.out.write(
"<html>"
" <head>"
" <title>Duo Authentication</title>"
" <meta name='viewport' content='width=device-width, initial-scale=1'>"
" <meta http-equiv='X-UA-Compatible' content='IE=edge'>"
" <link rel='stylesheet' type='text/css' href='/static/Duo-Frame.css'>"
" </head>"
" <body>"
" <h1>Duo Authentication</h1>"
" <script src='/static/Duo-Web-v2.js'></script>"
" <iframe id='duo_iframe'"
" title='Two-Factor Authentication'"
" frameborder='0'"
" data-host='%(host)s'"
" data-sig-request='%(sig_request)s'"
" data-post-action='%(post_action)s'"
" >"
" </iframe>"
" </body>"
"</html>"
% {'host': host, 'sig_request': sig_request, 'post_action': self.request.uri})
def post(self):
"""
If we have a sig_response argument containing a Duo auth cookie
indicating that the current user has logged in to both Google and
Duo with matching usernames, write a login cookie and redirect to /.
Otherwise, indicate failure.
"""
user = users.get_current_user()
if not user:
self.redirect('/')
return
sig_response = self.request.get('sig_response', '')
duo_user = duo_web.verify_response(ikey, skey, akey, sig_response)
if duo_user is None:
self.write('Did not authenticate.')
return
# Note that since the secure cookie is never unset, Duo auth is not
# required again until it expires, even if Google auth is required.
# We can provide redirects on the logout URLs which we present to
# unset the cookie, but the user could find and use a Google logout URL
# without these.
self.set_secure_cookie('logged_in', duo_user)
self.redirect('/')
def initialize_globals(config):
"""
Initialize and set the WSGI application and other globals.
"""
global application
global ikey, skey, akey, host
global gae_domain
logging.basicConfig(
level=logging.DEBUG,
format="%(levelname)-8s: %(message)s - %(pathname)s:%(lineno)d")
logging.debug('start')
if application is None:
application = webapp.WSGIApplication(
[('/primary_auth', PrimaryAuthPage),
('/secondary_auth', SecondaryAuthPage),
('/', AuthenticatedPage),],
debug=_DEBUG)
if ikey is None or skey is None or host is None or gae_domain is None:
ikey = config['ikey']
skey = config['skey']
akey = config['akey']
host = config['host']
gae_domain = config['gae_domain']
def main():
config = ConfigParser.ConfigParser()
config.read(configfile)
c_d = dict(config.items('duo'))
c_d.update(dict(config.items('app')))
initialize_globals(c_d)
run_wsgi_app(application)
if __name__ == '__main__':
main()
| StarcoderdataPython |
1726398 | <gh_stars>0
my_name = 'scott'
def print_name():
global my_name
my_name = 'jen'
print('Name inside of the function is', my_name)
print_name()
print('Name outside of the function is', my_name) | StarcoderdataPython |
134059 | <reponame>f0k/matplotlib<filename>examples/pylab_examples/tripcolor_demo.py
"""
Pseudocolor plots of unstructured triangular grids.
"""
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import numpy as np
import math
# Creating a Triangulation without specifying the triangles results in the
# Delaunay triangulation of the points.
# First create the x and y coordinates of the points.
n_angles = 36
n_radii = 8
min_radius = 0.25
radii = np.linspace(min_radius, 0.95, n_radii)
angles = np.linspace(0, 2*math.pi, n_angles, endpoint=False)
angles = np.repeat(angles[...,np.newaxis], n_radii, axis=1)
angles[:,1::2] += math.pi/n_angles
x = (radii*np.cos(angles)).flatten()
y = (radii*np.sin(angles)).flatten()
z = (np.cos(radii)*np.cos(angles*3.0)).flatten()
# Create the Triangulation; no triangles so Delaunay triangulation created.
triang = tri.Triangulation(x, y)
# Mask off unwanted triangles.
xmid = x[triang.triangles].mean(axis=1)
ymid = y[triang.triangles].mean(axis=1)
mask = np.where(xmid*xmid + ymid*ymid < min_radius*min_radius, 1, 0)
triang.set_mask(mask)
# pcolor plot.
plt.figure()
plt.gca().set_aspect('equal')
plt.tripcolor(triang, z, shading='flat')
plt.colorbar()
plt.title('tripcolor of Delaunay triangulation: flat')
# Illustrate Gouraud shading.
plt.figure()
plt.gca().set_aspect('equal')
plt.tripcolor(triang, z, shading='gouraud')
plt.colorbar()
plt.title('tripcolor with Gouraud shading')
# You can specify your own triangulation rather than perform a Delaunay
# triangulation of the points, where each triangle is given by the indices of
# the three points that make up the triangle, ordered in either a clockwise or
# anticlockwise manner.
xy = np.asarray([
[-0.101,0.872],[-0.080,0.883],[-0.069,0.888],[-0.054,0.890],[-0.045,0.897],
[-0.057,0.895],[-0.073,0.900],[-0.087,0.898],[-0.090,0.904],[-0.069,0.907],
[-0.069,0.921],[-0.080,0.919],[-0.073,0.928],[-0.052,0.930],[-0.048,0.942],
[-0.062,0.949],[-0.054,0.958],[-0.069,0.954],[-0.087,0.952],[-0.087,0.959],
[-0.080,0.966],[-0.085,0.973],[-0.087,0.965],[-0.097,0.965],[-0.097,0.975],
[-0.092,0.984],[-0.101,0.980],[-0.108,0.980],[-0.104,0.987],[-0.102,0.993],
[-0.115,1.001],[-0.099,0.996],[-0.101,1.007],[-0.090,1.010],[-0.087,1.021],
[-0.069,1.021],[-0.052,1.022],[-0.052,1.017],[-0.069,1.010],[-0.064,1.005],
[-0.048,1.005],[-0.031,1.005],[-0.031,0.996],[-0.040,0.987],[-0.045,0.980],
[-0.052,0.975],[-0.040,0.973],[-0.026,0.968],[-0.020,0.954],[-0.006,0.947],
[ 0.003,0.935],[ 0.006,0.926],[ 0.005,0.921],[ 0.022,0.923],[ 0.033,0.912],
[ 0.029,0.905],[ 0.017,0.900],[ 0.012,0.895],[ 0.027,0.893],[ 0.019,0.886],
[ 0.001,0.883],[-0.012,0.884],[-0.029,0.883],[-0.038,0.879],[-0.057,0.881],
[-0.062,0.876],[-0.078,0.876],[-0.087,0.872],[-0.030,0.907],[-0.007,0.905],
[-0.057,0.916],[-0.025,0.933],[-0.077,0.990],[-0.059,0.993] ])
x = xy[:,0]*180/3.14159
y = xy[:,1]*180/3.14159
x0 = -5
y0 = 52
z = np.exp(-0.01*( (x-x0)*(x-x0) + (y-y0)*(y-y0) ))
triangles = np.asarray([
[67,66, 1],[65, 2,66],[ 1,66, 2],[64, 2,65],[63, 3,64],[60,59,57],
[ 2,64, 3],[ 3,63, 4],[ 0,67, 1],[62, 4,63],[57,59,56],[59,58,56],
[61,60,69],[57,69,60],[ 4,62,68],[ 6, 5, 9],[61,68,62],[69,68,61],
[ 9, 5,70],[ 6, 8, 7],[ 4,70, 5],[ 8, 6, 9],[56,69,57],[69,56,52],
[70,10, 9],[54,53,55],[56,55,53],[68,70, 4],[52,56,53],[11,10,12],
[69,71,68],[68,13,70],[10,70,13],[51,50,52],[13,68,71],[52,71,69],
[12,10,13],[71,52,50],[71,14,13],[50,49,71],[49,48,71],[14,16,15],
[14,71,48],[17,19,18],[17,20,19],[48,16,14],[48,47,16],[47,46,16],
[16,46,45],[23,22,24],[21,24,22],[17,16,45],[20,17,45],[21,25,24],
[27,26,28],[20,72,21],[25,21,72],[45,72,20],[25,28,26],[44,73,45],
[72,45,73],[28,25,29],[29,25,31],[43,73,44],[73,43,40],[72,73,39],
[72,31,25],[42,40,43],[31,30,29],[39,73,40],[42,41,40],[72,33,31],
[32,31,33],[39,38,72],[33,72,38],[33,38,34],[37,35,38],[34,38,35],
[35,37,36] ])
# Rather than create a Triangulation object, can simply pass x, y and triangles
# arrays to tripcolor directly. It would be better to use a Triangulation object
# if the same triangulation was to be used more than once to save duplicated
# calculations.
plt.figure()
plt.gca().set_aspect('equal')
plt.tripcolor(x, y, triangles, z, shading='flat', edgecolors='k')
plt.colorbar()
plt.title('tripcolor of user-specified triangulation')
plt.xlabel('Longitude (degrees)')
plt.ylabel('Latitude (degrees)')
plt.show()
| StarcoderdataPython |
160618 | from pathlib import Path
ROOT_DIR = Path(__file__).parent.parent.parent
DEFAULT_EMBED_COLOUR = 0x00CD99
# Dependant on above constants.
from .loc import CodeCounter
from .ready import Ready
| StarcoderdataPython |
163066 | import magma as m
from magma.testing import check_files_equal
import os
def test_inline_2d_array_interface():
class Main(m.Generator):
@staticmethod
def generate(width, depth):
class MonitorWrapper(m.Circuit):
io = m.IO(arr=m.In(m.Array[depth, m.Bits[width]]))
m.inline_verilog("""
monitor #(.WIDTH({width}), .DEPTH({depth})) monitor_inst(.arr({arr}));
""", width=width, depth=depth, arr=io.arr)
return MonitorWrapper
m.compile("build/test_inline_2d_array_interface",
Main.generate(8, 64))
assert check_files_equal(__file__,
f"build/test_inline_2d_array_interface.v",
f"gold/test_inline_2d_array_interface.v")
file_dir = os.path.abspath(os.path.dirname(__file__))
assert not os.system("verilator --lint-only "
f"{file_dir}/build/test_inline_2d_array_interface.v "
f"{file_dir}/vsrc/2d_array_interface.v "
"--top-module MonitorWrapper")
| StarcoderdataPython |
1694189 | """Helper functions for processing the schemas."""
from . import association
from . import backref
from . import clean
from . import iterate
from . import process
| StarcoderdataPython |
1670832 | <reponame>mrcbarbier/diffuseclique
from wagutils import *
import itertools
from statsmodels.nonparametric.smoothers_lowess import lowess
import pickle
from json import dump,load
import scipy.linalg as la
def reldist_type2(x, y):
xm, ym = np.mean(x), np.mean(y)
slope = np.mean((x - xm) * (y - ym) ** 2) / np.mean((x - xm) ** 2 * (y - ym))
return [slope, 1. / slope][int(np.abs(slope) > np.abs(1. / slope))]
def reldist_odr(x, y):
import scipy.odr as odr
def f(B, xx):
return B[0] * xx + B[1]
linear = odr.Model(f)
data = odr.Data(x, y, wd=1, we=1)
res = odr.ODR(data, linear, beta0=[.4, .0])
res = res.run()
b = res.beta[0]
b = np.array([b, 1 / b])[np.argmin(np.abs([b, 1 / b]))]
# print reldist_type2(x,y),b
return b
def reldist(x, y, boot=0, typ='', split=0, strip=1,**kwargs):
#
x,y=np.array(x),np.array(y)
idx = list(np.where((~np.isnan(x))&(~np.isnan(y)))[0])
if boot:
idx = list(np.random.choice(idx, replace=True, size=x.size))
x, y = x[idx], y[idx]
#
if strip:
idx = np.argsort(x)
z = int(np.floor(len(idx) / 20))
idx = idx[z:-z]
x, y = x[idx], y[idx]
idx = np.argsort(y)
return reldist(x[idx[z:-z]], y[idx[z:-z]], boot=0, strip=0, typ=typ,**kwargs)
if split:
idx = np.argsort(x)
return np.mean([reldist(x[idx], y[idx], boot=0, split=0, typ=typ,**kwargs) for idx in np.array_split(idx, split)])
#
if 'odr' in typ:
return reldist_odr(x, y)
if 'type2' in typ or (typ is None and 'type2' in sys.argv):
return reldist_type2(x, y)
if 'loglike' in typ:
if not 'var' in kwargs:
code_debugger()
v=kwargs.get('var',1)
if v is 1:
print 'No var found'
if len(v.shape)==2:
try:
v=v[np.ix_(idx,idx)]
except:
pass
if v.shape[0]==x.shape[0]:
# print 'Using covariance matrix'
return -np.dot((x-y).ravel(),np.dot(la.inv(v ),(x-y).ravel() ))/x.size
return -np.mean((x - y) ** 2 / v[idx])
#
# Relative distance as correlation rescaled by max variance - like correlation but penalizes != scalings
cov = np.cov(x, y)
return cov[0, 1] / np.max([cov[0, 0], cov[1, 1]])
def slopedist(x, y, etai, etaj, boot=0, debug=0, per_species=None, linearize=0,**kwargs):
if boot:
idx = list(np.random.choice(range(x.size), replace=True, size=x.size))
x, y = x[idx], y[idx]
if not per_species is None:
i, j, species = per_species
idx = [np.where(i == z)[0] for z in species]
slopy = [linregress(etaj[i], y[i])[0] if len(i)>2 else np.nan for i in idx]
slopx = [linregress(etaj[i], x[i])[0] if len(i)>2 else np.nan for i in idx]
#
else:
idx = np.argsort(etai)
idx = np.array_split(idx, 4)
slopy = [linregress(etaj[i], y[i])[0] if len(i)>2 else np.nan for i in idx]
slopx = [linregress(etaj[i], x[i])[0] if len(i)>2 else np.nan for i in idx]
loc = np.array([np.median(etai[i]) for i in idx])
#
slopx,slopy,loc=[np.array(z) for z in ( slopx,slopy,loc)]
if linearize:
good=(~np.isnan(slopx))&(~np.isnan(slopy))
slopx,slopy,loc=slopx[good],slopy[good],loc[good]
a = np.argsort(loc)
slopx, slopy = slopx[a], slopy[a]
sx, ix = linregress(loc, slopx)[:2]
sy, iy = linregress(loc, slopy)[:2]
slopx, slopy = loc * sx + ix, loc * sy + iy
#
if 'debug' in sys.argv or debug:
plt.close('all')
plot(loc, slopy, hold=1)
scatter(loc, slopx)
code_debugger()
kwargs.setdefault('strip',0)
if kwargs.get('return_all'):
return slopx,slopy,reldist(slopy, slopx, **kwargs)
return reldist(slopy, slopx, **kwargs)
def rowdist(x,y,i,**kwargs):
kwargs.setdefault('strip',0)
species=kwargs.get('species',np.unique(i))
meanx=np.array([np.mean(x[i==s]) if (i==s).any() else 0 for s in species])
meany=np.array([np.mean(y[i==s]) if (i==s).any() else 0 for s in species])
return reldist(meanx,meany,**kwargs)
def get_species(df):
return sorted(set(np.concatenate(df['composition'].values)))
def get_path(exp,return_suffix=0):
suffix = ''
if 'bug' in sys.argv:
suffix += '_debug'
if 'detrendK' in sys.argv:
suffix += '_detK'
elif 'detrendblock' in sys.argv:
suffix += '_detb'
elif 'detrend' in sys.argv:
suffix += '_det'
if 'cheat' in sys.argv:
suffix += '_cheat'
if 'bminfer' in sys.argv:
suffix += '_bminfer'
path = Path('data/' + exp + suffix)
path.mkdir()
if return_suffix:
return path,suffix
return path
def hyperplane_light(df,species,**kwargs):
df=df.copy()
from numpy.linalg import lstsq, norm as lanorm, inv as lainv
from scipy.optimize import least_squares
S = len(species)
mat = np.zeros((S, S)) #Returned matrix
for sidx,s in enumerate(species):
res=None
notsidx,others=[list(x) for x in zip(*[(o,oth) for o,oth in enumerate(species) if oth!=s])]
xs = df[df[s] != 0][species].values
xnomono=df[(df[s]!=0) & (np.max(df[others],axis=1)!=0 ) ]
if not xnomono.size:
print 'hyperplane skipping',s,'only present in monoculture'
mat[sidx,sidx]=-1
continue
xsT = xs.T
def costfeta(y,weights=None):
yy = -np.ones(S)
yy[notsidx] = y
if weights is None:
return (np.dot(yy, xsT) +1)
else:
return (np.dot(yy, np.sum(weights*xsT,axis=1)/np.sum(weights) ) +1)
res=least_squares(costfeta,-np.zeros(S-1) )
row=list(res.x)
row.insert(sidx, -1)
mat[sidx]=row
mat[sidx,np.sum(np.abs(xsT),axis=1)==0]=np.nan
return mat
def hyperplane(df,species,etamode=0,distances=0,use_var=0,missing=0,**kwargs):
df=df.copy()
debug=kwargs.get('debug')
from numpy.linalg import lstsq, norm as lanorm, inv as lainv
from scipy.optimize import least_squares
S = len(species)
mat = np.zeros((S, S)) #Returned matrix
    compmat=np.zeros((S,S)) #To compare between different methods
errmat=np.zeros((S,S)) #Matrix of stderr on coefficients
table=[]
sidx=-1
res=None
Kmono=kwargs.get("K",None)
if Kmono is None:
Kmono = np.array(
[df[np.logical_and(np.sum(df[species].values > 0, axis=1) == 1, df[s] > 0)][s].mean() for s in
species])
Kmono[np.isnan(Kmono)] = 10 ** -10
for s in species:
res,res2=None,None
sidx+=1
rsquared=0
notsidx = [z for z in range(S) if z != sidx]
xs = df[df[s] != 0][species].values
xnomono=df[(df[s]!=0) & (np.max(df[[oth for oth in species if oth!=s]],axis=1)!=0 ) ]
if not xnomono.size:
print 'hyperplane skipping',s,'only present in monoculture'
mat[sidx,sidx]=-1
dic={'species':s,'R2':0,'K':10**-10,'Kvar':0 }
table.append(dic)
continue
if etamode==1:
print 'basic eta mode'
xs = xs / np.atleast_1d(Kmono)
xs[:, np.where(np.isnan(Kmono))[0]] = 0
xsT = xs.T
weights=np.ones(xs.shape[0])
if 'weights' in kwargs:
weights*=[ kwargs['weights'].get(surv,0) for surv in np.sum(xs>0,axis=1)]
if distances or debug:
# print 'USING DISTANCES',distances,debug
dxs = np.concatenate([xs - x for x in xs]).T # [1:]-xs[0]
def costf(y,weights=None):
yy = -np.ones(S)
yy[notsidx] = y
return np.dot(yy, dxs)# -Kmono[sidx]
try:
res = least_squares(costf,- np.ones(S - 1))
if kwargs.get('weights',None) and not np.allclose(weights,1):
print 'Weights+distances not implemented yet'
res = least_squares(costf, res.x,kwargs={'weights':weights})
except Exception as e:
print 'Failed least_squares',e
code_debugger()
ai = list(res.x)
residuals=res.fun
ai.insert(sidx, -1)
mat[sidx] = ai
Ks=-np.dot(ai,xsT)
rsquared = 1 - np.sum(residuals ** 2) / np.sum((dxs-np.mean(dxs,axis=1).reshape((S,1)) )**2 )
if (not distances) or debug:
def costfeta(y,weights=None):
# return np.dot(y,xsT[notsidx])-xsT[sidx]+ifelse(etamode=='given',1,Kmono[sidx])
yy = -np.ones(S)
yy[notsidx] = y
if weights is None:
return (np.dot(yy, xsT) +1)
else:
return (np.dot(yy, np.sum(weights*xsT,axis=1)/np.sum(weights) ) +1)
def costfnoeta(y,weights=None):
# return np.dot(y,xsT[notsidx])-xsT[sidx]+ifelse(etamode=='given',1,Kmono[sidx])
yy = -np.ones(S)
yy[notsidx] = y[notsidx]
if weights is None:
return np.dot(yy, xsT) +y[sidx]
else:
raise Exception('NOT READY')
return (np.dot(yy, np.sum(weights*xsT,axis=1)/np.sum(weights) ) +1)
Ks = None
if etamode:
try:
res2=least_squares(costfeta,-np.ones(S-1) )
except:
code_debugger()
if kwargs.get('weights',None) and not np.allclose(weights,1):
# code_debugger()
res2 = least_squares(costfeta, res2.x,kwargs={'weights':weights})
comparison=list(res2.x)
residuals=costfeta(res2.x)
else:
x0=-np.ones(S)
x0[sidx]=1
res2=least_squares(costfnoeta,x0 )
if kwargs.get('weights',None) and not np.allclose(weights,1):
# code_debugger()
res2 = least_squares(costfnoeta, res2.x,kwargs={'weights':weights})
Ks=res2.x[sidx]
comparison=list(res2.x[notsidx])
residuals = costfnoeta(res2.x)
if use_var:
xvarT = df[df[s] != 0][[sp + '_var' for sp in species]].values.T
xvarT[np.isnan(xvarT)]=0
try:
def costd(yy):
dd,vv=xsT,np.clip(xvarT,10**-5,None)
tmpx=costfeta(yy)
tmpv=( np.dot(yy**2, vv[notsidx]) + vv[sidx] ) ** .5
# code_debugger()
# tmpv=( np.sum(vv,axis=0) ) ** .5
# tmpv=1
# print tmpv
return tmpx/tmpv
tmpres = list(
least_squares(costd, -np.ones(S - 1)).x)
tmpres2 = list(
least_squares(costd, comparison).x)
comparison=tmpres2
# print costd(np.array(tmpres)),costd(np.array(tmpres2))
except:
print 'Failure'
code_debugger()
# print 'Final',np.sum(costd(np.array(comparison))**2)
comparison.insert(sidx, -1)
if Ks is None:
Ks=-np.dot(comparison,xsT)
compmat[sidx]=comparison
rsquared = 1 - np.sum(residuals ** 2) / np.sum((xsT-np.mean(xsT,axis=1).reshape((S,1)) )**2 )
if np.isnan(rsquared).any():
code_debugger()
# rsquared = 1 - np.sum(residuals ** 2) / np.mean() np.var(xsT)#[sidx])
# if rsquared<0:
# code_debugger()
if debug:
code_debugger()
try:
def makerr(res):
from scipy.linalg import svd
tmp, s, VT = svd(res.jac, full_matrices=False)
threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
s = s[s > threshold]
VT = VT[:s.size]
pcov = np.dot(VT.T / s ** 2, VT)
return np.clip(np.diag(pcov)*2*res.cost/ np.clip(res.jac.shape[0] - res.jac.shape[1],10**-10,None),None,100)
fres=res
if fres is None:
fres=res2
if fres.jac.shape[1]==S:
errmat[sidx]=makerr(fres)**.5
else:
errmat[sidx,[si for si in range(S) if si != sidx]]=makerr(fres)**.5
except Exception as e:
print 'ERROR hyperplane:',e
dic={'species':s,'R2':rsquared,'K':np.mean(Ks),'Kvar':np.var(Ks) }
table.append(dic)
tab=pd.DataFrame(table)
Ks=np.array([tab.set_index('species')['K'].loc[s] for s in species ])
if not distances:
mat=compmat
np.fill_diagonal(errmat,0)
#
# DEAL WITH MISSING PAIRS
missingpairs=[(i,j) for i in species for j in species if not np.max(np.prod(df[[i,j]].values,axis=1))>0 ]
for i,j in missingpairs:
mat[species.index(i),species.index(j)]=np.nan
mat[species.index(j),species.index(i)]=np.nan
if missing=='mean':
mat[np.isnan(mat)]=np.mean(nonan(offdiag(mat)) )
else:
mat[np.isnan(mat)] = missing
if etamode:
# tab['Kdiff']=tab['K']
tab['K']=Kmono*tab['K']
# code_debugger()
beta=mat
alpha = mat / np.multiply.outer(Ks, 1. / Ks)
else:
alpha=mat
beta=mat*np.multiply.outer(Ks,1./Ks)
return alpha,beta,tab,errmat/(np.abs(mat)+10**-10)
def correlcalc(etafull,beta,gamma=0,return_all=1,pad=0,rank=0,**kwargs):
'''Compute plot of prediction versus theory for means and correlations'''
def bootstrap(x):
if not np.sum(x.shape)>0:
return x
return np.mean(np.random.choice(x, size=x.size))
beta=beta.copy()
S=etafull.shape[0]
etamean = np.array([bootstrap(etafull[i]) for i in range(S)])
bm, bv, betatheo = hebbian_getbstats(beta, etamean,**kwargs ) # betaerr=ana[i][j].get('beta_relerr', np.ones(beta.shape)))
if isinstance(gamma,basestring):
gamma=np.corrcoef(offdiag(beta),offdiag(beta.T))[0,1]
# print ' gamma',gamma
betatheo = bm + (betatheo - bm) + gamma * (betatheo.T - bm)
arange=np.arange(S)
mean_i=np.multiply.outer(np.arange(S),np.ones(S)).astype('int')
mean_j=mean_i.T
# code_debugger()
# bet=beta.copy()
# bet[bet == 0] = np.nan
# bet[np.abs(bet)>3.6]=np.nan
betadiff = beta - betatheo
diag = np.multiply.outer(np.ones(S), np.eye(S))
def removeself(mat):
S = mat.shape[0]
ss = range(S)
mat2 = [mat[i][np.ix_([s for s in ss if s != i], [s for s in ss if s != i])] for i in range(S)]
return np.array(mat2)
empirical = removeself(np.einsum('ij,ik->ijk', betadiff, betadiff))
var = np.array([np.nanmean(empirical[i][np.eye(S - 1) != 0]) for i in range(S)]).reshape((-1, 1, 1))
empirical /= var + 10 ** -15
empirical -= removeself(diag)
prediction = removeself(
- np.multiply.outer(1. / (np.sum(etamean ** 2) - etamean ** 2 + 0.0001), np.multiply.outer(etamean, etamean)))
def ms(x):
return np.concatenate([np.nanmean(x, axis=(1)), np.nanmean(x, axis=(2))])
corr_i=np.multiply.outer(arange,np.ones((S-1,S-1)) ).astype('int')
corr_j=removeself(np.multiply.outer(np.ones(S),np.multiply.outer(arange,np.ones(S)) ).astype('int'))
corr_k=removeself(np.multiply.outer(np.ones(S),np.multiply.outer(np.ones(S),arange) ).astype('int'))
# prediction,empirical=prediction[removeself(diag)==0],empirical[removeself(diag)==0]
# prediction,empirical=ms(prediction),ms(empirical) #Makes things significantly worse
if kwargs.get('remove_zeros',1):
beta[beta==0]=np.nan
results=dict( [('mean_theo', offdiag(betatheo)), ('mean_emp', offdiag(beta)), ('corr_theo', prediction.ravel()),
('corr_emp', empirical.ravel()),('mean_etai',etamean[list( offdiag(mean_i))] ),('mean_etaj',etamean[list( offdiag(mean_j))] ),
('corr_etai', etamean[list(corr_i.ravel())] ) ] )
# code_debugger()
for z in ('corr_i', 'corr_j', 'corr_k', 'mean_i', 'mean_j'):
val=locals()[z]
if 'mean' in z:
val=offdiag(val)
else:
val=val.ravel()
results[z]=val
if rank:
results={i:getranks(results[i]) for i in results}
results['bm']=bm
results['bv']=bv
from scipy.stats import sem, linregress, siegelslopes, theilslopes, ttest_1samp
try:
summary={v: linregress(results[v+'_theo'][~np.isnan( results[v+'_emp'])], results[v+'_emp'][~np.isnan( results[v+'_emp'])] )[0] for v in ('mean','corr')}
except Exception as e:
print e
summary={}
if return_all:
results.update(summary)
if pad:
for k in results:
if 'mean_' in k:
results[k]=np.concatenate([results[k], np.ones(len(results['corr_theo'])-len(results[k]) ) *np.nan ])
# else:
return results
return summary
def infer_bm(eta,meanfield=1,nmat=1,S=None,maxtrials=100,resolution=ifelse('bug' in sys.argv,3,15),use_noise=0, **kwargs):
from numpy.linalg import lstsq, norm as lanorm, inv as lainv
from scipy.special import erf
import time
Salive=eta.shape[0]
if S is None:
S=Salive
tstart=time.time()
eta=eta[np.argsort(np.mean(eta,axis=1))]
mneta=np.mean(np.median(eta,axis=1))
sdeta=np.std(np.median(eta,axis=1))
phieta=np.mean( eta>0) *Salive*1./S
if eta.shape[1] == 1 or Salive<2:
covmat = np.eye(3)
else:
var_mneta = np.array(np.mean([np.random.choice(eta[i], size=maxtrials) for i in range(Salive)], axis=0))
var_sdeta = np.array(np.std([np.random.choice(eta[i], size=maxtrials) for i in range(Salive)], axis=0))
var_phieta = np.array(np.mean([np.random.choice((eta[i] > 0), size=maxtrials) for i in range(Salive)], axis=0) *
(Salive - np.random.randint(0, 2, maxtrials)) * 1. / S)
covmat = np.cov([var_mneta, var_sdeta, var_phieta])
etavec=np.mean(eta,axis=1)
vare=np.mean(np.var(eta,axis=1)/etavec**1.5)
# bm_mf = (1. / mneta - 1) / S
bm_surv=bm=hebbian_stablebm(etavec)
bs_surv=bs= np.sqrt( (1- np.mean(etavec)**2/np.mean(etavec**2)) /S)
tab,learntab =None,None
gamma=0
if not meanfield:
#
if 'table' in kwargs and not kwargs['table'] is None:
tab = kwargs['table'].copy()
else:
def make_mats(bm,bs,gamma):
etas=[]
trial=0
while len(etas)<nmat and trial<maxtrials:
trial+=1
mat= -genpool(S,mu=bm,sigma=bs,gamma=gamma)[-1]
np.fill_diagonal(mat,1)
e=np.dot(lainv(mat),np.ones(S))
a=np.argsort(e)
mat=mat[np.ix_(a,a)]
if (e<0).any():
e=np.clip(e[a],0,None)
e=dosimu(-mat,np.ones(S),np.ones(S),tmax=100,noise=0,x0=e+.001)[-1][-1]
e[e<10**-5]=0
# print e
if vare>0 and use_noise:
noise=np.array([np.random.gamma(1./(vare*ee**1.5),(vare*ee**1.5) ) if ee >0 else 0 for ee in e ])
else:
noise=1
etas.append(e*noise )
return etas
#
learntab=[]
print 'CREATING TABLE FOR BM INFERENCE'
if 'nogamma' in sys.argv:
gammas=[0]
gammares=1
else:
gammas=[0,0.3,0.6]
gammares=9
fineres=3*resolution
for ix,x in enumerate(np.linspace(bm_surv*.7,2*bm_surv,resolution)):
print '{}/{}'.format(ix+1,resolution)
for y in np.linspace(bs_surv*.7,2.4*bs_surv,resolution):
for g in gammas:
etas=make_mats(x,y,g)
mns=[np.mean(e) for e in etas]
sds=[np.std(e) for e in etas]
phi=[np.mean(e>0) for e in etas]
learntab.append({'bm':x,'bs':y,'gamma':g,'mn':np.mean(mns),'sd':np.mean(sds),'phi':np.mean(phi),},)
learntab=pd.DataFrame(learntab)
#
XY=learntab[['bm','bs','gamma']].values
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV
# ax.plot_wireframe(X, Y, Z, rstride=2, cstride=2,color='k',alpha=.7)
clf = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
XYsmooth=np.array([x for x in itertools.product( np.linspace(XY[0,0],XY[-1,0],fineres),
np.linspace(XY[0,1],XY[-1,1],fineres) , np.linspace(gammas[0],gammas[-1],gammares) ) ])
tab = pd.DataFrame({'bm': XYsmooth[:, 0], 'bs': XYsmooth[:, 1], 'gamma': XYsmooth[:, 2]})
for label in ['mn','sd','phi']:
Z=learntab[label].values #bandwidth(XY) #kernel='gaussian', bandwidth=.2
clf.fit(XY,Z)
Zsmooth = clf.predict(XYsmooth)
tab[label]=Zsmooth
for tb in (tab,learntab):
if tb is None:
continue
try:
dist=[ np.sqrt( np.dot( (mneta-mn,sdeta-sd,phieta-phi),np.dot(lainv(covmat), (mneta-mn,sdeta-sd,phieta-phi) ) ) ) for mn,sd,phi in tb[['mn','sd','phi']].values]
except:
code_debugger()
# dist = np.abs(mneta*phieta-tb['mn'].values)
tb['dist']=dist
tb['p']=[(1-erf( dd/np.min(dist) ) )/2. for dd in dist]
# code_debugger()
if 'bug' in sys.argv:
for i in ('p','dist','phi','mn'):
plt.figure()
if not learntab is None:
gammas = learntab['gamma'].unique()
plt.subplot(121)
plt.imshow(learntab[learntab['gamma']==gammas[0]].pivot('bm', 'bs')[i]), plt.colorbar()
plt.subplot(122)
#
bms,bss=[['{:.2f}'.format(l) for l in ll] for ll in (np.sort(tab['bm'].unique()),np.sort(tab['bs'].unique()))]
gammas = tab['gamma'].unique()
plt.imshow( tab[tab['gamma']==gammas[0]].pivot('bm', 'bs')[i]), plt.colorbar()
ax = plt.gca()
dx,dy=ax.get_xticks(),ax.get_yticks()
def getidx(lst,z):
return [lst[int(i)] if i>=0 and i<len(lst) else '' for i in z]
ax.set_yticklabels(getidx(bms,dy)),ax.set_xticklabels(getidx(bss,dx)),plt.suptitle(i)
plt.show()
# code_debugger()
bm,bs,gamma=tab[tab['p']>.95 *tab['p'].max()][['bm','bs','gamma']].median()
tend=time.time()
# bloc = hebbian_convert(eta, bm, bs, forward=0)
# bm = bloc['bm_pool']
print 'Time',tend-tstart, 'bm,bs:',bm_surv,bs_surv, '->',bm,bs,'gamma',gamma#,'bs',bs,'<-',bs_surv
if np.isnan([bm,bs]).any():
code_debugger()
return bm,bs,gamma, tab
def make_groundtruth(S=8,species=None,noise=.08,sizes=(1,2,4,8),replicas=1,nplots=None,plots=None,**kwargs):
"""Create a fake experimental setup to test inference methods."""
from scipy.misc import comb
table=[]
if species is None:
import string
ref=string.ascii_lowercase+string.ascii_uppercase+''.join([str(i) for i in range(10)])
species=[ref[i] for i in range(S)]
species=np.array(species)
rs,Ks,beta=genpool(S,**kwargs)
rs=kwargs.pop('rs',rs)
Ks=kwargs.pop('Ks',Ks)
beta=kwargs.pop('beta',beta)
Aij=beta*np.multiply.outer(rs,1./Ks)
alpha=beta*np.multiply.outer(Ks,1./Ks)
def get_true_eq(compo,N=None):
bet=beta - np.eye(S)
if isinstance(compo[0],basestring):
sidx=[list(species).index(i) for i in compo]
else:
sidx=compo
bet=bet[np.ix_(sidx,sidx)]
eqs = find_eqs(bet, uninvadable=1, stable=1, largest=N is None)
if not eqs:
eqs = find_eqs(bet, uninvadable=0, stable=1, largest=N is None)
if not eqs:
eqs = find_eqs(bet, uninvadable=0, stable=0, largest=N is None)
eq= eqs[0]
if not N is None:
from scipy.linalg import norm
eq= eqs[np.argmin([ norm(N-eq) for eq in eqs ]) ]
val=np.zeros(S)
val[sidx]=eq
return val
if plots is None:
if sizes is None:
sizes = [2 ** x for x in range(int(np.floor(np.log(S * 1.001) / np.log(2)) + 1))]
if not S in sizes:
sizes += [S]
sizes = np.array(sizes)
if replicas is None:
replicas = [int(np.round(S / s)) for s in sizes]
else:
replicas = np.atleast_1d(replicas)
if replicas.shape[0] < sizes.shape[0]:
replicas = replicas * np.ones(sizes.shape)
if nplots is None:
nplots = np.array([min(10, comb(S, s)) if s > 1 else S for s in sizes]) * replicas
plots=[]
for size, nrep, npl in zip(sizes,replicas,nplots):
nsamp=max(1,npl/nrep)
if npl>comb(S,size):
samples=list(tuple(x) for x in itertools.combinations(range(int(S)),size))
elif comb(S,size)<5000:
allcombs=[tuple(x) for x in itertools.combinations(range(int(S)), size)]
samples = [allcombs[i] for i in np.random.choice(range(len(allcombs)),int(nsamp),replace=0 )]
else:
samples=[tuple(np.random.choice(range(int(S)),size,replace=0)) for i in range(int(nsamp))]
try:
nrep = max(1,int(min(nrep, npl / len(samples))))
except:
print 'ERROR', size,nrep,npl,nsamp,samples
code_debugger()
# print size, nrep,npl,samples, len(samples)==len(set(samples))
plots+=[species[list(sidx)] for sidx in samples for rep in range(nrep) ]
plotn=0
x0=kwargs.pop('x0',np.ones(len(species)))
for plot in plots:
plotn += 1
sidx=[list(species).index(s) for s in plot]
print 'Plot {} Species {}'.format(plotn, species[sidx])
years,results = dosimu(Aij[np.ix_(sidx, sidx)], Ks[sidx], rs[sidx],x0=x0[sidx], noise=noise, evol=1, print_msg=0, **kwargs)
#print results[-1]/Ks[sidx]
for year, res in zip(years,results):
total = np.sum(res)
dic = {'plot': plotn, 'total': total, 'total/m2': total, 'year': int(np.round(year)), 'richness': len(plot),
'composition': tuple(species[sidx]),#'equilibrium':get_true_eq(sidx,plotn)
}
basekeys=sorted(dic.keys())
abund = np.zeros(len(species))
# code_debugger()
abund[sidx] = res
dic.update({s: a for s, a in zip(species, abund)})
table.append(dic)
df=pd.DataFrame(table)
df=df[list(basekeys)+list(species)]
ground={}
ground.update(kwargs)
ground.update({'A':Aij-np.diag(rs/Ks),'beta':beta-np.eye(S),'alpha':alpha-np.eye(S),'Ks':Ks, 'rs':rs,'noise':noise,
'equilibrium':{compo:get_true_eq(compo) for compo in set(df['composition'].values) } }
)
return df,ground
| StarcoderdataPython |
4801213 | # Generated by Django 2.0.8 on 2018-11-19 13:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('transformer', '0003_auto_20181114_1407'),
]
operations = [
migrations.AlterField(
model_name='package',
name='process_status',
field=models.CharField(choices=[(10, 'Transfer saved'), (20, 'Accession record created'), (30, 'Grouping component created'), (40, 'Transfer component created'), (50, 'Digital object created'), (60, 'Updated data sent to Aurora')], max_length=50),
),
]
| StarcoderdataPython |
3396250 | <gh_stars>1-10
"""
Helper Controller Class
"""
import logging
import logging.config
import os
import multiprocessing
import platform
try:
import queue
except ImportError:
import Queue as queue
import signal
import sys
import time
from helper import config, __version__
LOGGER = logging.getLogger(__name__)
class Controller(object):
"""Extend this class to implement your core application controller. Key
methods to implement are Controller.setup, Controller.process and
Controller.cleanup.
If you do not want to use the sleep/wake structure but rather something
like a blocking IOLoop, overwrite the Controller.run method.
"""
APPNAME = sys.argv[0].split(os.sep)[-1]
VERSION = __version__
#: When shutting down, how long should sleeping block the interpreter while
#: waiting for the state to indicate the class is no longer active.
SLEEP_UNIT = 0.5
#: How often should :meth:`Controller.process` be invoked
WAKE_INTERVAL = 60
#: Initializing state is only set during initial object creation
STATE_INITIALIZING = 0x01
#: When helper has set the signal timer and is paused, it will be in the
#: sleeping state.
STATE_SLEEPING = 0x02
#: The idle state is available to implementing classes to indicate that
#: while they are not actively performing tasks, they are not sleeping.
#: Objects in the idle state can be shutdown immediately.
STATE_IDLE = 0x03
#: The active state should be set whenever the implementing class is
#: performing a task that can not be interrupted.
STATE_ACTIVE = 0x04
#: The stop requested state is set when a signal is received indicating the
#: process should stop. The app will invoke the :meth:`Controller.stop`
#: method which will wait for the process state to change from STATE_ACTIVE
STATE_STOP_REQUESTED = 0x05
#: Once the application has started to shutdown, it will set the state to
#: stopping and then invoke the :meth:`Controller.stopping` method.
STATE_STOPPING = 0x06
#: Once the application has fully stopped, the state is set to stopped.
STATE_STOPPED = 0x07
# For reverse lookup
_STATES = {0x00: 'None',
0x01: 'Initializing',
0x02: 'Sleeping',
0x03: 'Idle',
0x04: 'Active',
0x05: 'Stop Requested',
0x06: 'Stopping',
0x07: 'Stopped'}
# Default state
_state = 0x00
def __init__(self, args, operating_system):
"""Create an instance of the controller passing in the debug flag,
the options and arguments from the cli parser.
:param argparse.Namespace args: Command line arguments
:param str operating_system: Operating system name from helper.platform
"""
self.set_state(self.STATE_INITIALIZING)
self.args = args
try:
self.config = config.Config(args.config)
except ValueError:
sys.exit(1)
self.debug = args.foreground
logging.config.dictConfig(self.config.logging)
self.operating_system = operating_system
self.pending_signals = multiprocessing.Queue()
@property
def current_state(self):
"""Property method that return the string description of the runtime
state.
:rtype: str
"""
return self._STATES[self._state]
@property
def is_active(self):
"""Property method that returns a bool specifying if the process is
currently active.
:rtype: bool
"""
return self._state == self.STATE_ACTIVE
@property
def is_idle(self):
"""Property method that returns a bool specifying if the process is
currently idle.
:rtype: bool
"""
return self._state == self.STATE_IDLE
@property
def is_initializing(self):
"""Property method that returns a bool specifying if the process is
currently initializing.
:rtype: bool
"""
return self._state == self.STATE_INITIALIZING
@property
def is_running(self):
"""Property method that returns a bool specifying if the process is
currently running. This will return true if the state is active, idle
or initializing.
:rtype: bool
"""
return self._state in [self.STATE_ACTIVE,
self.STATE_IDLE,
self.STATE_INITIALIZING]
@property
def is_sleeping(self):
"""Property method that returns a bool specifying if the process is
currently sleeping.
:rtype: bool
"""
return self._state == self.STATE_SLEEPING
@property
def is_stopped(self):
"""Property method that returns a bool specifying if the process is
stopped.
:rtype: bool
"""
return self._state == self.STATE_STOPPED
@property
def is_stopping(self):
"""Property method that returns a bool specifying if the process is
stopping.
:rtype: bool
"""
return self._state == self.STATE_STOPPING
@property
def is_waiting_to_stop(self):
"""Property method that returns a bool specifying if the process is
waiting for the current process to finish so it can stop.
:rtype: bool
"""
return self._state == self.STATE_STOP_REQUESTED
def on_configuration_reloaded(self):
"""Override to provide any steps when the configuration is reloaded."""
LOGGER.debug('%s.on_configuration_reloaded() NotImplemented',
self.__class__.__name__)
def on_shutdown(self):
"""Override this method to cleanly shutdown the application."""
LOGGER.debug('%s.cleanup() NotImplemented', self.__class__.__name__)
def on_sigusr1(self):
"""Called when SIGUSR1 is received, does not have any attached
behavior. Override to implement a behavior for this signal.
"""
LOGGER.debug('%s.on_sigusr1() NotImplemented', self.__class__.__name__)
def on_sigusr2(self):
"""Called when SIGUSR2 is received, does not have any attached
behavior. Override to implement a behavior for this signal.
"""
LOGGER.debug('%s.on_sigusr2() NotImplemented', self.__class__.__name__)
def process(self):
"""To be implemented by the extending class. Is called after every
sleep interval in the main application loop.
"""
raise NotImplementedError
def process_signal(self, signum):
"""Invoked whenever a signal is added to the stack.
:param int signum: The signal that was added
"""
if signum == signal.SIGTERM:
LOGGER.info('Received SIGTERM, initiating shutdown')
self.stop()
elif signum == signal.SIGHUP:
LOGGER.info('Received SIGHUP')
if self.config.reload():
LOGGER.info('Configuration reloaded')
logging.config.dictConfig(self.config.logging)
self.on_configuration_reloaded()
elif signum == signal.SIGUSR1:
self.on_sigusr1()
elif signum == signal.SIGUSR2:
self.on_sigusr2()
def run(self):
"""The core method for starting the application. Will setup logging,
toggle the runtime state flag, block on loop, then call shutdown.
Redefine this method if you intend to use an IO Loop or some other
long running process.
"""
LOGGER.info('%s v%s started', self.APPNAME, self.VERSION)
self.setup()
while not any([self.is_stopping, self.is_stopped]):
self.set_state(self.STATE_SLEEPING)
try:
signum = self.pending_signals.get(True, self.wake_interval)
except queue.Empty:
pass
else:
self.process_signal(signum)
if any([self.is_stopping, self.is_stopped]):
break
self.set_state(self.STATE_ACTIVE)
self.process()
def start(self):
"""Important:
Do not extend this method, rather redefine Controller.run
"""
for signum in [signal.SIGHUP, signal.SIGTERM,
signal.SIGUSR1, signal.SIGUSR2]:
signal.signal(signum, self._on_signal)
self.run()
def set_state(self, state):
"""Set the runtime state of the Controller. Use the internal constants
to ensure proper state values:
- :attr:`Controller.STATE_INITIALIZING`
- :attr:`Controller.STATE_ACTIVE`
- :attr:`Controller.STATE_IDLE`
- :attr:`Controller.STATE_SLEEPING`
- :attr:`Controller.STATE_STOP_REQUESTED`
- :attr:`Controller.STATE_STOPPING`
- :attr:`Controller.STATE_STOPPED`
:param int state: The runtime state
:raises: ValueError
"""
if state == self._state:
return
elif state not in self._STATES.keys():
raise ValueError('Invalid state {}'.format(state))
# Check for invalid transitions
if self.is_waiting_to_stop and state not in [self.STATE_STOPPING,
self.STATE_STOPPED]:
LOGGER.warning('Attempt to set invalid state while waiting to '
'shutdown: %s ', self._STATES[state])
return
elif self.is_stopping and state != self.STATE_STOPPED:
LOGGER.warning('Attempt to set invalid post shutdown state: %s',
self._STATES[state])
return
elif self.is_running and state not in [self.STATE_ACTIVE,
self.STATE_IDLE,
self.STATE_SLEEPING,
self.STATE_STOP_REQUESTED,
self.STATE_STOPPING]:
LOGGER.warning('Attempt to set invalid post running state: %s',
self._STATES[state])
return
elif self.is_sleeping and state not in [self.STATE_ACTIVE,
self.STATE_IDLE,
self.STATE_STOP_REQUESTED,
self.STATE_STOPPING]:
LOGGER.warning('Attempt to set invalid post sleeping state: %s',
self._STATES[state])
return
LOGGER.debug('State changed from %s to %s',
self._STATES[self._state], self._STATES[state])
self._state = state
def setup(self):
"""Override to provide any required setup steps."""
LOGGER.debug('%s.setup() NotImplemented', self.__class__.__name__)
def shutdown(self):
"""Override to provide any required shutdown steps."""
LOGGER.debug('%s.shutdown() NotImplemented', self.__class__.__name__)
def stop(self):
"""Override to implement shutdown steps."""
LOGGER.info('Attempting to stop the process')
self.set_state(self.STATE_STOP_REQUESTED)
# Call shutdown for classes to add shutdown steps
self.shutdown()
# Wait for the current run to finish
while self.is_running and self.is_waiting_to_stop:
LOGGER.info('Waiting for the process to finish')
time.sleep(self.SLEEP_UNIT)
# Change the state to shutting down
if not self.is_stopping:
self.set_state(self.STATE_STOPPING)
# Call a method that may be overwritten to cleanly shutdown
self.on_shutdown()
# Change our state
self.set_state(self.STATE_STOPPED)
@property
def system_platform(self):
"""Return a tuple containing the operating system, python
implementation (CPython, pypy, etc), and python version.
:rtype: tuple(str, str, str)
"""
return (self.operating_system,
platform.python_implementation(),
platform.python_version())
@property
def wake_interval(self):
"""Property method that returns the wake interval in seconds.
:rtype: int
"""
return (self.config.application.get('wake_interval') or
self.WAKE_INTERVAL)
def _on_signal(self, signum, _frame):
"""Append the signal to the queue, to be processed by the main."""
self.pending_signals.put(signum)
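# --- Illustrative sketch (not part of the helper library): a minimal
# Controller subclass wired up the way the class docstring describes
# (override setup/process/shutdown, then call start()). The
# HeartbeatController name, the 'helper.yml' config path and the
# argparse.Namespace construction are assumptions made for this example.
class HeartbeatController(Controller):
    """Log a heartbeat message every wake interval."""

    def setup(self):
        LOGGER.info('heartbeat controller starting')

    def process(self):
        # Invoked once per wake_interval while the controller is running.
        LOGGER.info('still alive (state: %s)', self.current_state)

    def shutdown(self):
        LOGGER.info('heartbeat controller stopping')


if __name__ == '__main__':
    import argparse
    _args = argparse.Namespace(config='helper.yml', foreground=True)
    HeartbeatController(_args, 'linux').start()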
| StarcoderdataPython |
139766 | <filename>src/key.py
from dataclasses import dataclass
import uuid
import os
import re
import base58
from typing import Optional
from cryptography.hazmat.primitives.ciphers.aead import ChaCha20Poly1305
from cryptography.hazmat.primitives import hashes
@dataclass
class SymetricKeyProps:
alg: str
id: Optional[str] = None
nonce: bytes = os.urandom(12)
key: Optional[bytes] = None
hashSeed: str = str(uuid.uuid4())
key_length: int = 32
nonce_length: int = 12
class SymetricKey:
_id: Optional[str]
_hashSeed: str
_key: Optional[bytes]
_nonce: bytes
_key_length: int
_nonce_length: int
_alg = 'chacha20-poly1305'
def __init__(self, props: SymetricKeyProps):
self._id = props.id
self._key = props.key
self._hashSeed = props.hashSeed
self._key_length = props.key_length
self._nonce_length = props.nonce_length
self._nonce = props.nonce
@staticmethod
def create(props: SymetricKeyProps):
return SymetricKey(props).generate()
# def generate_nonce(self, length: Optional[int] = None) -> bytes:
# nonce_length = length if length else self._nonce_length
# self._nonce = os.urandom(nonce_length)
# return self._nonce
def generate(self):
if self._key is None:
self._key = os.urandom(self._key_length)
self._nonce = self._nonce if self._nonce else self.create_nonce()
if self._id is None:
digest = hashes.Hash(hashes.SHA256())
digest.update(self._key)
digest.update(self._nonce)
self._id=base58.b58encode(digest.finalize()).decode()
self._chacha = ChaCha20Poly1305(self._key)
return self
def create_nonce(self) -> bytes:
self._nonce = os.urandom(12)
return self.nonce
@property
def key(self) -> bytes:
if self._key is None:
raise Exception("key is not set")
return self._key
@property
def id(self) -> str:
if self._id is None:
raise Exception("key is not set")
return self._id
@property
def nonce(self) -> bytes:
if self._nonce is None:
raise Exception("nonce is not set")
return self._nonce
def encrypt(self, nonce: bytes, message: bytes, addition: Optional[bytes] = None ) -> bytes:
return self._chacha.encrypt(nonce, message, addition)
def decrypt(self, nonce: bytes, encrypted: bytes, addition: Optional[bytes] = None) -> bytes:
return self._chacha.decrypt(nonce, encrypted, addition)
def hash(self, enc: str)-> str:
hasher = hashes.Hash(hashes.SHA256())
        hasher.update(bytes(self._hashSeed, 'utf-8'))
stripped = re.sub(r'[\n\r\s\t]+','', enc.upper(),flags=(re.M))
hasher.update(stripped.encode())
# 'A' means sha256 upcase und replace
string_hash = base58.b58encode(hasher.finalize()).decode()
return "A{}".format(string_hash)
| StarcoderdataPython |
1710406 | # -*- coding: utf-8 -*-
import logging
import unittest
from clarifai.rest import ClarifaiApp
from clarifai.rest import Concept
from clarifai.rest import Image as ClImage
from clarifai.rest import ModelOutputInfo, ModelOutputConfig
urls = [
"https://samples.clarifai.com/metro-north.jpg",
'https://samples.clarifai.com/3o6gb3kkXfLvdKEZs4.gif',
]
class TestPredict(unittest.TestCase):
_multiprocess_can_split_ = True
to_cleanup = []
@classmethod
def setUpClass(cls):
cls.app = ClarifaiApp(log_level=logging.WARN)
def test_predict_image_url(self):
""" predict a single url """
# just by url
m = self.app.models.get('general-v1.3')
res = m.predict_by_url(urls[0])
def test_predict_image_url_min_value(self):
# url, with min_value
m = self.app.models.get('general-v1.3')
res = m.predict_by_url(urls[0], min_value=0.4)
for c in res['outputs'][0]['data']['concepts']:
self.assertGreaterEqual(c['value'], 0.4)
def test_predict_image_url_max_concepts(self):
# url, with max_concepts
m = self.app.models.get('general-v1.3')
res = m.predict_by_url(urls[0], max_concepts=5)
self.assertEqual(len(res['outputs'][0]['data']['concepts']), 5)
def test_predict_image_url_min_value_max_concepts(self):
# url, with both min_value and max_concepts
m = self.app.models.get('general-v1.3')
res = m.predict_by_url(urls[0], min_value=0.6, max_concepts=5)
for c in res['outputs'][0]['data']['concepts']:
self.assertGreaterEqual(c['value'], 0.6)
self.assertLessEqual(len(res['outputs'][0]['data']['concepts']), 5)
def test_predict_image_url_select_concepts(self):
# url, with select_concepts, by name
m = self.app.models.get('general-v1.3')
select_concepts = [Concept(concept_name='beer'), Concept(concept_name='vehicle')]
res = m.predict_by_url(urls[0], select_concepts=select_concepts)
self.assertEqual(len(res['outputs'][0]['data']['concepts']), 2)
# url, with select_concepts by id
select_concepts = [Concept(concept_id='ai_hK1KnTCJ'),
Concept(concept_id='ai_m52MdMR3'),
Concept(concept_id='ai_fN8NZ9JV')]
res = m.predict_by_url(urls[0], select_concepts=select_concepts)
self.assertEqual(len(res['outputs'][0]['data']['concepts']), 3)
def test_predict_video_url(self):
""" predict a single url """
# just by url
m = self.app.models.get('general-v1.3')
res = m.predict_by_url(urls[1], is_video=True)
def test_predict_video_url_min_value(self):
# url, with min_value
m = self.app.models.get('general-v1.3')
res = m.predict_by_url(urls[1], is_video=True, min_value=0.9)
for frame in res['outputs'][0]['data']['frames']:
for c in frame['data']['concepts']:
self.assertGreaterEqual(c['value'], 0.9)
def test_predict_video_url_max_concepts(self):
# url, with max_concepts
m = self.app.models.get('general-v1.3')
res = m.predict_by_url(urls[1], is_video=True, max_concepts=3)
for frame in res['outputs'][0]['data']['frames']:
self.assertEqual(len(frame['data']['concepts']), 3)
def test_predict_video_url_min_value_max_concepts(self):
# url, with both min_value and max_concepts
m = self.app.models.get('general-v1.3')
res = m.predict_by_url(urls[1], is_video=True, min_value=0.85, max_concepts=3)
for frame in res['outputs'][0]['data']['frames']:
for c in frame['data']['concepts']:
self.assertGreaterEqual(c['value'], 0.85)
for frame in res['outputs'][0]['data']['frames']:
self.assertLessEqual(len(frame['data']['concepts']), 3)
def test_bulk_with_min_value(self):
img = ClImage(url=urls[0])
m = self.app.models.get('general-v1.3')
model_output_info = ModelOutputInfo(output_config=ModelOutputConfig(min_value=0.96))
res = m.predict(inputs=[img, img, img], model_output_info=model_output_info)
for result in res['outputs']:
for c in result['data']['concepts']:
self.assertGreaterEqual(c['value'], 0.96)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
1780635 | <reponame>bhaving07/pyup<filename>venv/lib/python3.7/site-packages/gitlab/tests/objects/test_groups.py
"""
GitLab API: https://docs.gitlab.com/ce/api/groups.html
"""
import pytest
import responses
import gitlab
@pytest.fixture
def resp_groups():
content = {"name": "name", "id": 1, "path": "path"}
with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:
rsps.add(
method=responses.GET,
url="http://localhost/api/v4/groups/1",
json=content,
content_type="application/json",
status=200,
)
rsps.add(
method=responses.GET,
url="http://localhost/api/v4/groups",
json=[content],
content_type="application/json",
status=200,
)
rsps.add(
method=responses.POST,
url="http://localhost/api/v4/groups",
json=content,
content_type="application/json",
status=200,
)
yield rsps
@pytest.fixture
def resp_create_import(accepted_content):
with responses.RequestsMock() as rsps:
rsps.add(
method=responses.POST,
url="http://localhost/api/v4/groups/import",
json=accepted_content,
content_type="application/json",
status=202,
)
yield rsps
def test_get_group(gl, resp_groups):
data = gl.groups.get(1)
assert isinstance(data, gitlab.v4.objects.Group)
assert data.name == "name"
assert data.path == "path"
assert data.id == 1
def test_create_group(gl, resp_groups):
name, path = "name", "path"
data = gl.groups.create({"name": name, "path": path})
assert isinstance(data, gitlab.v4.objects.Group)
assert data.name == name
assert data.path == path
def test_create_group_export(group, resp_export):
export = group.exports.create()
assert export.message == "202 Accepted"
@pytest.mark.skip("GitLab API endpoint not implemented")
def test_refresh_group_export_status(group, resp_export):
export = group.exports.create()
export.refresh()
assert export.export_status == "finished"
def test_download_group_export(group, resp_export, binary_content):
export = group.exports.create()
download = export.download()
assert isinstance(download, bytes)
assert download == binary_content
def test_import_group(gl, resp_create_import):
group_import = gl.groups.import_group("file", "api-group", "API Group")
assert group_import["message"] == "202 Accepted"
@pytest.mark.skip("GitLab API endpoint not implemented")
def test_refresh_group_import_status(group, resp_groups):
group_import = group.imports.get()
group_import.refresh()
assert group_import.import_status == "finished"
| StarcoderdataPython |
128948 | <filename>data/genuine/purge.py<gh_stars>1-10
#!/usr/bin/env python3
import argparse
import itertools
import logging
import os
import csv
import re
from data.genuine.utils import check_lang
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
if __name__ == "__main__":
"""
Remove articles which are not in ground truth file.
"""
parser = argparse.ArgumentParser(description='Retain only data with specified language and GUIDs from list.')
parser.add_argument('-l', '--lang', help='language not to purge', type=check_lang)
    parser.add_argument('-t', '--test', dest='test', action='store_true',
help='will not delete files')
parser.set_defaults(lang='en', test=False)
args = parser.parse_args()
processed_filename_pattern = re.compile('[0-9]{10}.[0-9]-[a-z]{2}-[0-9A-Fa-f]{32}-[0-9A-Fa-f]{32}.q.job.xml')
dir_path = os.path.dirname(os.path.realpath(__file__))
heldout_dir = os.path.join(dir_path, "heldout")
test_dir = os.path.join(dir_path, "test")
filename = os.path.join(dir_path, "2017-10-gold.csv")
language = args.lang
if not os.path.exists(filename):
raise ValueError("File '%s' not found" % filename)
ids = set()
cluster_ids = set()
# Read valid GUIDs
with open(filename, 'r') as csvfile:
reader = csv.reader(csvfile)
# Skip header
it = iter(reader)
next(it)
for row in it:
# Skip documents in different languages
if language is not None and language != row[1]:
continue
# Keep track of cluster ids
cluster_ids.add(row[6])
ids.add(row[0])
counter = 0
purged = 0
for dirpath, subdirs, files in itertools.chain(os.walk(heldout_dir), os.walk(test_dir)):
logging.info("Purging files in directory '%s'" % dirpath)
for filename in files:
# Construct old file path
file_path = os.path.join(dirpath, filename)
# Check if it is an article
processed_result = processed_filename_pattern.match(filename)
if processed_result is None:
logging.warning("Skipping file '%s'." % file_path)
continue
metadata = filename.split("-")
guid = metadata[2]
counter += 1
if guid not in ids:
# logging.info("Purging '%s'" % os.path.join(dirpath, filename))
purged += 1
if not args.test:
os.remove(file_path)
N = counter - purged
K = len(cluster_ids)
logging.info("%d files were purged, %d files remain." % (purged, N))
logging.info("%d clusters found, average cluster size = %f" % (K, N / K))
| StarcoderdataPython |
1780342 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Dense(object):
pass
class Compressed(object):
pass
class DenseFormat(object):
def __eq__(self, other):
if isinstance(other, DenseFormat):
return True
else:
return False
class SparseFormat(object):
""" Sparse tensor data format.
Parameters
----------
mode_formats: a list containing the format of each mode ("dense" or "compressed")
mode_order: (optional) a list specifying the order in which modes should be stored
"""
def __init__(self, mode_formats, mode_order=None):
for mode in mode_formats:
assert isinstance(mode, (Dense, Compressed))
self.mode_formats = mode_formats
if mode_order is None:
self.mode_order = list(range(len(mode_formats)))
else:
self.mode_order = mode_order
def __eq__(self, other):
if isinstance(other, DenseFormat):
return False
if self.mode_order != other.mode_order:
return False
return all(
type(mode1) == type(mode2)
for mode1, mode2 in zip(self.mode_formats, other.mode_formats))
dense = Dense()
compressed = Compressed()
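# --- Illustrative sketch (not part of the original file): building format
# descriptors with the singletons above. Reading Dense+Compressed as a
# CSR-like layout is an assumption about typical usage, not something this
# file defines.
if __name__ == "__main__":
    csr_like = SparseFormat([dense, compressed])                     # row-major modes
    csc_like = SparseFormat([dense, compressed], mode_order=[1, 0])  # column-major modes
    assert csr_like == SparseFormat([dense, compressed])
    assert csr_like != csc_like       # same mode formats, different mode order
    assert DenseFormat() == DenseFormat()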
| StarcoderdataPython |
1740719 | <reponame>edupyter/EDUPYTER38
import sys
assert sys.platform == "win32"
from ctypes import byref, windll
from ctypes.wintypes import DWORD, HANDLE
from typing import Any, Optional, TextIO
from prompt_toolkit.data_structures import Size
from prompt_toolkit.win32_types import STD_OUTPUT_HANDLE
from .base import Output
from .color_depth import ColorDepth
from .vt100 import Vt100_Output
from .win32 import Win32Output
__all__ = [
"Windows10_Output",
]
# See: https://msdn.microsoft.com/pl-pl/library/windows/desktop/ms686033(v=vs.85).aspx
ENABLE_PROCESSED_INPUT = 0x0001
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
class Windows10_Output:
"""
Windows 10 output abstraction. This enables and uses vt100 escape sequences.
"""
def __init__(
self, stdout: TextIO, default_color_depth: Optional[ColorDepth] = None
) -> None:
self.win32_output = Win32Output(stdout, default_color_depth=default_color_depth)
self.vt100_output = Vt100_Output(
stdout, lambda: Size(0, 0), default_color_depth=default_color_depth
)
self._hconsole = HANDLE(windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE))
def flush(self) -> None:
"""
Write to output stream and flush.
"""
original_mode = DWORD(0)
# Remember the previous console mode.
windll.kernel32.GetConsoleMode(self._hconsole, byref(original_mode))
# Enable processing of vt100 sequences.
windll.kernel32.SetConsoleMode(
self._hconsole,
DWORD(ENABLE_PROCESSED_INPUT | ENABLE_VIRTUAL_TERMINAL_PROCESSING),
)
try:
self.vt100_output.flush()
finally:
# Restore console mode.
windll.kernel32.SetConsoleMode(self._hconsole, original_mode)
@property
def responds_to_cpr(self) -> bool:
return False # We don't need this on Windows.
def __getattr__(self, name: str) -> Any:
if name in (
"get_size",
"get_rows_below_cursor_position",
"enable_mouse_support",
"disable_mouse_support",
"scroll_buffer_to_prompt",
"get_win32_screen_buffer_info",
"enable_bracketed_paste",
"disable_bracketed_paste",
"get_default_color_depth",
):
return getattr(self.win32_output, name)
else:
return getattr(self.vt100_output, name)
Output.register(Windows10_Output)
def is_win_vt100_enabled() -> bool:
"""
Returns True when we're running Windows and VT100 escape sequences are
supported.
"""
if sys.platform != "win32":
return False
hconsole = HANDLE(windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE))
# Get original console mode.
original_mode = DWORD(0)
windll.kernel32.GetConsoleMode(hconsole, byref(original_mode))
try:
# Try to enable VT100 sequences.
result: int = windll.kernel32.SetConsoleMode(
hconsole, DWORD(ENABLE_PROCESSED_INPUT | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
)
return result == 1
finally:
windll.kernel32.SetConsoleMode(hconsole, original_mode)
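# --- Illustrative sketch (not part of prompt_toolkit): how a caller might
# choose between the two Windows output implementations using the helper
# above. The _choose_windows_output name is made up for this example;
# prompt_toolkit's real output factory lives elsewhere.
def _choose_windows_output(stdout: TextIO) -> Output:
    """Prefer vt100 escape sequences on Windows 10, else legacy Win32 calls."""
    if is_win_vt100_enabled():
        return Windows10_Output(stdout)
    return Win32Output(stdout)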
| StarcoderdataPython |
151489 | <reponame>xSakix/bayesian_analyses
# multivariate lin regresion of heights vs weights
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import norm, uniform, multivariate_normal
from scipy.interpolate import griddata
import pymc3 as pm
d = pd.read_csv('../../rethinking/data/WaffleDivorce.csv', sep=';', header=0)
plt.plot(d.MedianAgeMarriage, d.Divorce, 'C0o')
plt.xlabel('median age marriage')
plt.ylabel('divorce')
plt.show()
plt.plot(d.Marriage, d.Divorce, 'C0o')
plt.xlabel('marriage')
plt.ylabel('divorce')
plt.show()
d['Marriage_s'] = (d.Marriage - np.mean(d.Marriage)) / np.std(d.Marriage)
d['MedianAgeMarriage_s'] = (d.MedianAgeMarriage - np.mean(d.MedianAgeMarriage)) / np.std(d.MedianAgeMarriage)
with pm.Model() as model:
sigma = pm.Uniform('sigma', lower=0, upper=10)
bA = pm.Normal('bA', mu=0, sd=1)
bR = pm.Normal('bR', mu=0, sd=1)
a = pm.Normal('a', mu=10, sd=10)
mu = pm.Deterministic('mu', a + bR * d.Marriage_s + bA * d.MedianAgeMarriage_s)
divorce = pm.Normal('divorce', mu=mu, sd=sigma, observed=d.Divorce)
trace_model = pm.sample(1000, tune=1000)
varnames = ['a', 'bA', 'bR', 'sigma']
pm.traceplot(trace_model, varnames)
plt.show()
print(pm.summary(trace_model, varnames, alpha=0.11))
pm.forestplot(trace_model, varnames=varnames)
plt.show()
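# --- Illustrative follow-up sketch (not in the original script): a quick
# posterior check comparing observed divorce rates with the posterior mean
# of mu, which the model already stores as a Deterministic in the trace.
mu_mean = trace_model['mu'].mean(axis=0)
plt.plot(d.Divorce, mu_mean, 'C0o')
lims = [d.Divorce.min(), d.Divorce.max()]
plt.plot(lims, lims, 'k--')  # perfect-prediction reference line
plt.xlabel('observed divorce')
plt.ylabel('posterior mean of mu')
plt.show()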
| StarcoderdataPython |
3220171 | def if_chuck_says_so(): return not True | StarcoderdataPython |
1785742 | <reponame>gabrielmpp/climate_indices
from subprocess import call
import os
import requests
import numpy as np
import pandas as pd
from pandas.errors import EmptyDataError
from datetime import datetime
from copy import deepcopy
SOURCES = ['NOAA', 'CPC']
PID = os.getpid()
TMP_FILE_PATH = os.environ['HOME'] + f'/temp_file_climIndices_{PID}.txt'
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
def exists(URL):
r = requests.head(URL)
return r.status_code == requests.codes.ok
def create_url(index, source):
"""
Return the valid URL for download
:param variable: string
:param level: string
:param date: datetime
:return: sring
"""
if source == 'NOAA':
if 'nina' in index:
base_url = 'https://psl.noaa.gov/data/correlation/{index}.anom.data'
else:
base_url = 'https://psl.noaa.gov/data/correlation/{index}.data'
elif source == 'CPC':
base_url = 'https://www.cpc.ncep.noaa.gov/data/indices/req{index}.for'
else:
raise ValueError("Source not supported")
base_url = base_url.format(index=index)
return base_url
def format_data(df, index, string_nan):
colnames=['year']
[colnames.append(i) for i in range(1,13)]
df.columns = colnames
df = df.set_index('year')
df = df.unstack()
df = df.reset_index()
df.columns = ['month','year','value']
df = df.sort_values(['year','month'])
#df = df.replace('-99.99', np.NaN)
#df = df.dropna()
indexes = pd.date_range(start='{year:0d}-{month}-01'.format(year=int(df['year'].iloc[0]), month=int(df['month'].iloc[0])),
end='{year:0d}-{month}-31'.format(year=int(df['year'].iloc[-1]), month=int(df['month'].iloc[-1])),freq='M')
df['time']=indexes
df = df.set_index('time')
df = df.drop(['month','year'], axis=1)
df.columns = [index]
df[index] = df[index].astype(float)
df = df.replace(float(string_nan), np.NaN)
df = df.dropna()
return df
def get_data(indices, source='NOAA'):
def download_df(index, source):
URL = create_url(index, source)
if not exists(URL):
print(URL)
raise ValueError(f"URL does not exist for index {index}")
call(["curl", "-s", "-o", TMP_FILE_PATH, URL], stdout=open(os.devnull, 'wb'))
assert source in SOURCES, f'source {source} not valid.'
_sources = deepcopy(SOURCES)
df_list = []
def format_datetime(x):
return pd.Timestamp(datetime(day=1, month=x.month, year=x.year))
for index in indices:
for source in _sources:
print(f'Trying source {source}')
download_df(index, source)
try:
df_temp = pd.read_csv(TMP_FILE_PATH, sep='\s+', skiprows=[0], header=None)
except (EmptyDataError, FileNotFoundError):
print("Data is empty, trying another source")
else:
break
try:
df_temp
except NameError:
raise Exception(f'ClimIndices could not download index {index}')
try:
call(['rm', TMP_FILE_PATH])
except:
print('Could not remove temp file.')
if source == 'CPC':
string_nan = '999.9'
else:
df_nan = df_temp[df_temp.isnull().any(1)]
string_nan = df_nan.iloc[0,0]
df = df_temp.dropna()
df = format_data(df, index, string_nan)
df.index = [format_datetime(x) for x in df.index]
df_list.append(df)
df = pd.concat(df_list, axis=1)
return df
if __name__=='__main__':
import matplotlib.pyplot as plt
plt.style.use('bmh')
df = get_data(['nina34', 'soi'])
# df.plot(subplots=True, sharex=True, title='Climate indices', legend='False', figsize=[10, 10])
# plt.savefig('../figs/example.png')
# plt.close() | StarcoderdataPython |
3367830 | print "How old are you brother ?"
age = raw_input() # will get some text ;def
print "How tall are you ?"
height = raw_input()
print "do you eat enough ?"
eat = raw_input()
print "So, you're a %r years old and %r tall guy that says : '%r' to the food, right ?" % (age, height, eat)
# Nb: to get a number from the return stuff, 'x = int(raw_input())'
| StarcoderdataPython |
1669910 | <reponame>killua4564/2019-AIS3-preexam
from pwn import *
conn = remote("pre-exam-pwn.ais3.org", "10000")
conn.recvuntil(".\n")
payload = b"A" * 48 + p64(0x400687)
conn.sendline(payload)
conn.interactive() | StarcoderdataPython |
3321275 | # coding: utf-8
# Distributed under the terms of the MIT license.
""" This file implements classes to store and manipulate electronic and
vibrational DOS, with or without projection data.
"""
import warnings
import copy
import functools
import numpy as np
import scipy.integrate
import scipy.interpolate
from matador.orm.orm import DataContainer
from matador.utils.chem_utils import KELVIN_TO_EV, INVERSE_CM_TO_EV
from .dispersion import Dispersion
EPS = 1e-6
MIN_PHONON_FREQ = -0.01
FREQ_CUTOFF = 1e-12
class DensityOfStates(Dispersion, DataContainer):
""" Generic class for density of states. """
required_keys = ['dos', 'energies']
def __init__(self, *args, **kwargs):
""" Initialise the DOS and trim the DOS data arrays.
Parameters:
data (dict/Dispersion): dictionary containing the phonon dos data, or
a dispersion object to convert.
"""
if kwargs.get('gaussian_width') is not None:
self.gaussian_width = kwargs['gaussian_width']
if args and isinstance(args[0], dict):
data = args[0]
else:
data = kwargs
# as we can also construct a DOS from arbitarary kpoint/energy data,
# check that we've been passed this first
if (isinstance(data, Dispersion)
or (isinstance(data, dict) and not any(key in data for key in ["spin_dos", "dos", "pdos"]))):
data = self._from_dispersion(data)
elif isinstance(data, DensityOfStates):
            data = copy.deepcopy(data._data)
# Attempted workaround for old OPTADOS bug where spin-polarized PDOS would
# ignore set_efermi_zero. Requires any other DOS data to have been computed
# on same grid with OptaDOS, and assumes Fermi level is the same for each spin.
# https://github.com/optados-developers/optados/issues/24
if "pdos" in data and len(set(proj[2] for proj in data["pdos"]["projectors"])) == 2:
# check for energies either side of 0, if none are found, try to shift to other fermi values
if (len(data["pdos"]["energies"]) == len(data.get("energies", []))
and (np.max(data["pdos"]["energies"]) < 0 or np.min(data["pdos"]["energies"]) > 0)):
correction = np.max(data["pdos"]["energies"]) - np.max(data["energies"])
data["pdos"]["energies"] -= correction
warnings.warn(
f"""Corrected PDOS energies with difference between total DOS and PDOS grid.
Guessed Fermi level = {correction:4.3f} eV, assumed constant for both spins.
In the future, please use an updated version of OptaDOS.
This correction is to account for the OptaDOS bug #24:
https://github.com/optados-developers/optados/issues/24
"""
)
# trigger generation of dos key from spin dos
if 'dos' not in data and 'spin_dos' in data:
data["dos"] = np.asarray(data["spin_dos"]["up"]) + np.asarray(data["spin_dos"]["down"])
if 'dos' not in data and 'pdos' in data:
data["dos"] = np.sum(data["pdos"]["pdos"][proj] for proj in data["pdos"]["projectors"])
data["energies"] = data["pdos"]["energies"]
warnings.warn("Total DOS created from sum of projected DOS, which may not be at all reliable.")
if 'spin_dos' not in data and 'pdos' in data:
spin_channels = set(proj[2] for proj in data["pdos"]["projectors"])
if len(spin_channels) == 2:
data["spin_dos"] = {}
for channel in spin_channels:
data["spin_dos"][channel] = np.sum(
data["pdos"]["pdos"][proj] for proj in data["pdos"]["projectors"] if proj[2] == channel
)
data["energies"] = data["pdos"]["energies"]
warnings.warn("Total spin DOS created from sum of projected DOS, which may not be at all reliable.")
super().__init__(data)
self._trim_dos()
def _trim_dos(self):
""" Trim the density of states/frequencies to only include the non-zero
section of the DOS.
"""
dos = self._data['dos']
first_index = np.argmax(dos > EPS)
last_index = len(dos) - np.argmax(dos[::-1] > EPS)
self._trimmed_dos = dos[first_index:last_index]
self._trimmed_energies = self._data['energies'][first_index:last_index]
def _from_dispersion(self, data, **kwargs):
""" Convert a Dispersion instance to a DOS. """
_data = {}
dos, energies = self.bands_as_dos(data, gaussian_width=self.gaussian_width)
for key in data:
_data[key] = data[key]
_data['dos'] = dos
_data['energies'] = energies
warnings.warn("Loaded DOS from .bands file with naive Gaussian smearing.")
return _data
@property
def sample_dos(self):
""" Return the calculated density of states, trimmed at each end to
only include non-zero values.
"""
return self._trimmed_dos
@property
def sample_energies(self):
""" Return the energies corresponding to the trimmed DOS. """
return self._trimmed_energies
def plot_dos(self, **kwargs):
""" Plot the density of states. """
from matador.plotting.spectral_plotting import plot_spectral
_kwargs = {
"plot_dos": True,
"plot_pdos": "pdos" in self,
"plot_bandstructure": False,
"phonons": "Vibrational" in self.__class__.__name__
}
_kwargs.update(kwargs)
plot_spectral(
self,
**_kwargs
)
@staticmethod
def bands_as_dos(bands, gaussian_width=0.1):
""" Convert bands data to DOS data. """
if 'eigs_s_k' in bands:
eigs_key = 'eigs_s_k'
elif 'eigs_q' in bands:
eigs_key = 'eigs_q'
else:
raise RuntimeError('Missing eigenvalue keys from bands data.')
raw_eigs = np.asarray(bands[eigs_key]) - bands.get('fermi_energy', 0)
raw_weights = np.ones_like(raw_eigs)
if 'kpoint_weights' in bands:
for sind, _ in enumerate(bands[eigs_key]):
for kind, _ in enumerate(bands[eigs_key][sind][0]):
raw_weights[sind, :, kind] = bands['kpoint_weights'][kind]
if len(raw_weights) != 1:
if len(raw_weights) > 2:
raise NotImplementedError('Non-collinear spin not supported')
spin_dos = dict()
keys = ['up', 'down']
for sind, _ in enumerate(raw_weights):
spin_dos[keys[sind]], energies = DensityOfStates._cheap_broaden(
bands[eigs_key][sind].flatten(),
weights=raw_weights[sind].flatten(),
gaussian_width=gaussian_width
)
if 'spin_fermi_energy' in bands:
energies -= bands['spin_fermi_energy'][0]
return spin_dos, energies
dos, energies = DensityOfStates._cheap_broaden(
raw_eigs.flatten(), weights=raw_weights.flatten(), gaussian_width=gaussian_width
)
if 'fermi_energy' in bands:
energies -= bands['fermi_energy']
return dos, energies
@staticmethod
def _cheap_broaden(eigs, weights=None, gaussian_width=None):
""" Quickly broaden and bin a set of eigenvalues.
Parameters:
eigs (numpy.ndarray): eigenvalue array.
weights (numpy.ndarray): array of weights.
Keyword arguments:
gaussian_width (float): width of gaussian broadening
to apply.
Returns:
Two arrays containing the DOS and energies.
"""
if gaussian_width is None:
gaussian_width = 0.1
hist, energies = np.histogram(eigs, weights=weights, bins=1001)
if gaussian_width == 0:
return hist, energies
# shift bin edges to bin centres
energies -= energies[1] - energies[0]
energies = energies[:-1]
new_energies = np.reshape(energies, (1, len(energies)))
new_energies = new_energies - np.reshape(energies, (1, len(energies))).T
dos = np.sum(hist * np.exp(-(new_energies)**2 / gaussian_width), axis=1)
dos = np.divide(dos, np.sqrt(2 * np.pi * gaussian_width**2))
return dos, energies
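# --- Illustrative sketch (not part of matador): exercise the _cheap_broaden
# helper above on a handful of made-up eigenvalues. The values and the
# gaussian width are assumptions chosen only for the demonstration.
def _demo_cheap_broaden():
    """Broaden five synthetic eigenvalues and return the position of the peak."""
    fake_eigs = np.array([-1.0, -0.5, 0.2, 0.2, 1.3])
    dos, energies = DensityOfStates._cheap_broaden(fake_eigs, gaussian_width=0.05)
    # dos and energies share the 1001-point grid built by np.histogram above.
    assert dos.shape == energies.shape
    # The tallest peak sits near the doubly degenerate eigenvalue at 0.2 eV.
    return energies[np.argmax(dos)]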
class VibrationalDOS(DensityOfStates):
""" Specific class for phonon DOS data, including free energy integration. """
gaussian_width = 10 * INVERSE_CM_TO_EV
@property
def debye_temperature(self):
""" Returns the Debye temperature in K. """
return self.debye_freq / KELVIN_TO_EV
@property
def debye_freq(self):
""" Returns the Debye frequency in eV. """
return np.max(self.eigs)
@property
def zpe(self):
""" The zero-point energy per atom as computed from frequency data. """
if 'zero_point_energy_per_atom' not in self._data:
if 'eigs_q' not in self._data:
raise RuntimeError('Unable to compute ZPE without frequency data.')
zpe = self._compute_zero_point_energy(self.eigs, self.num_kpoints, kpoint_weights=self.kpoint_weights)
self['zero_point_energy'] = zpe
self['zero_point_energy_per_atom'] = zpe / (self.num_modes / 3)
return self._data['zero_point_energy_per_atom']
@staticmethod
def _compute_zero_point_energy(eigs, num_kpoints, kpoint_weights=None):
""" Computes and returns the zero-point energy of the cell
in eV from frequency data.
Parameters:
eigs (np.ndarray): phonon eigenvalues (in eV) array
in any shape. If `kpoint_weights` is passed, then
at least one axis of eigs must match the length
of `kpoint_weights`.
num_kpoints (int): the number of kpoints at which
these eigenvalues were calculated.
Keyword arguments:
kpoint_weights (np.ndarray): array of weights to use
for each kpoint.
"""
min_energy = np.min(eigs)
if min_energy < MIN_PHONON_FREQ:
            warnings.warn(
                'Imaginary frequency phonons found in this structure (minimum frequency '
                '{:.1f} eV); the ZPE calculation will be unreliable, using 0 eV as the '
                'lower limit of integration.'.format(min_energy)
            )
if kpoint_weights is not None:
# if kpoint weights are all the same, discard them
# and just normalise by number of kpoints
if len(np.unique(kpoint_weights)) == 1:
kpoint_weights = None
else:
eigs_shape = np.shape(eigs)
if not any(len(kpoint_weights) == axis for axis in eigs_shape):
raise RuntimeError(
'Unable to match eigs with shape {} with kpoint weights of length {}'
.format(eigs_shape, len(kpoint_weights))
)
_eigs = np.copy(eigs)
if kpoint_weights is not None:
_eigs = _eigs * kpoint_weights
else:
_eigs /= num_kpoints
return 0.5 * np.sum(np.ma.masked_where(_eigs < 0.0, _eigs, copy=False))
def vibrational_free_energy(self, temperatures=None):
""" Computes and returns the vibrational contribution to the free
energy, including zero-point energy, from the phonon frequencies.
Parameters:
temperatures (list): list or array of temperatures to compute
G(T) at.
Returns:
(np.ndarray, np.ndarray): temperature and energy array.
"""
if temperatures is None:
temperatures = np.linspace(0, 600, num=5)
try:
_ = len(temperatures)
except TypeError:
temperatures = [temperatures]
if 'eigs_q' not in self._data:
raise RuntimeError('Unable to compute free energies without frequency data.')
temperatures = np.asarray(temperatures)
free_energy = np.zeros_like(temperatures, dtype=np.float64)
min_energy = np.min(self._data['eigs_q'][0])
if min_energy < MIN_PHONON_FREQ:
            warnings.warn(
                'Imaginary frequency phonons found in this structure (minimum frequency '
                '{:.1f} eV); the free energy calculation will be unreliable, using {} eV '
                'as the lower limit of integration.'.format(min_energy, FREQ_CUTOFF)
            )
for ind, temperature in enumerate(temperatures):
free_energy[ind] = self.compute_free_energy(temperature)
if len(temperatures) == 1:
return free_energy[0]
return temperatures, free_energy
@functools.lru_cache(100)
def compute_free_energy(self, temperature):
""" Compute the vibrational free energy at the given temperature, using
lru_cache to avoid doing much extra work. Uses minimum temperature cutoff
of 1e-9, below which it returns just the ZPE (unless T < 0 K).
Raises:
RuntimeError: if temperature is < 0 K.
Returns:
float: vibrational free energy per atom, including ZP correction.
"""
free_energy = 0.0
kT = KELVIN_TO_EV * temperature
if temperature < 0.0:
raise RuntimeError('Not calculating free energies at T = {} K < 0 K'.format(temperature))
if temperature < 1e-9:
return self.zpe
for mode_ind in range(self.num_modes):
for qpt_ind in range(self.num_qpoints):
freq = self._data['eigs_q'][0][mode_ind][qpt_ind]
if freq > FREQ_CUTOFF and freq / kT < 32:
contrib = kT * np.log(1 - np.exp(-freq/kT))
if 'kpoint_weights' in self._data:
contrib *= self.kpoint_weights[qpt_ind]
else:
contrib /= self.num_qpoints
free_energy += contrib
# normalize by number of atoms
free_energy /= (self.num_modes / 3)
# add on zpe per atom
free_energy += self.zpe
return free_energy
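    # Added note (not part of the original source): the loop above evaluates the standard
    # harmonic free energy per atom,
    #     F_vib(T) = ZPE + (3 / num_modes) * sum_{q, nu} w_q * kT * ln(1 - exp(-omega_{q,nu} / kT)),
    # where w_q are the k-point weights when available (otherwise 1 / num_qpoints); modes with
    # omega <= FREQ_CUTOFF are excluded, and modes with omega / kT >= 32 are skipped because
    # their contribution is negligible.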
def vibrational_free_energy_from_dos(self, temperatures=None):
""" Computes the vibrational contribution to the free energy
at a given set of temperatures.
Keyword arguments:
            temperatures (list): list, array or float of temperatures.
"""
if temperatures is None:
temperatures = np.linspace(0, 600, num=5)
temperatures = np.asarray(temperatures)
free_energy = np.zeros_like(temperatures)
errs = np.zeros_like(free_energy)
min_energy = self.sample_energies[0]
max_energy = self.sample_energies[-1]
if min_energy < 0.01:
            warnings.warn(
                'Imaginary frequency phonons found in this structure (minimum frequency '
                '{:.1f} eV); the free energy calculation will be unreliable, using {} eV '
                'as the lower limit of integration.'.format(min_energy, FREQ_CUTOFF),
                Warning
            )
min_energy = FREQ_CUTOFF
for ind, temperature in enumerate(temperatures):
# if 0 K is requested, return 0 and move on
if temperature == 0:
free_energy[ind] = 0.0
errs[ind] = 0.0
continue
kT = KELVIN_TO_EV * temperature
def integrand(omega):
return self.vdos_function(omega) * np.log(1 - np.exp(-omega/kT))
result = scipy.integrate.quad(
integrand,
min_energy,
max_energy
)
free_energy[ind] = kT * result[0]
errs[ind] = result[1]
if len(temperatures) == 1:
return free_energy[0]
return temperatures, free_energy
@property
def vdos_function(self):
""" From the data arrays :attr:`sample_energies` and :attr:`sample_dos`,
return an interpolated function to integrate.
"""
return scipy.interpolate.interp1d(
self.sample_energies,
self.sample_dos,
fill_value=(0, 0),
bounds_error=False,
copy=False
)
def plot_free_energy(self, temperatures=None, ax=None, **kwargs):
""" Plot G(T) on the array of given temperatures. Default T is [0, 800].
Keyword arguments:
temperatures (list/np.ndarray): list or array of temperatures to plot.
If the array/list has length 2, use these as the start and endpoints
with 21 plotting points.
ax (matplotlib.pyplot.Axis): axis object to plot onto.
"""
from matador.plotting.temperature_plotting import plot_free_energy
return plot_free_energy(self, temperatures=temperatures, ax=ax, **kwargs)
class ElectronicDOS(DensityOfStates):
""" Specific class for electronic DOS data. """
gaussian_width = 0.01
#!/usr/bin/env python
# coding:utf-8
"""merge_json.py"""
import logging
import time
import os
import json, csv
import xlwt, xlrd
from datetime import datetime
from xlrd import xldate_as_tuple
def get_logger(logname):
"""Config the logger in the module
Arguments:
logname {str} -- logger name
Returns:
logging.Logger -- the logger object
"""
logger = logging.getLogger(logname)
formater = logging.Formatter(
fmt='%(asctime)s - %(filename)s : %(levelname)-5s :: %(message)s',
# filename='./log.log',
# filemode='a',
datefmt='%m/%d/%Y %H:%M:%S')
stream_hdlr = logging.StreamHandler()
stream_hdlr.setFormatter(formater)
logger.addHandler(stream_hdlr)
logger.setLevel(logging.DEBUG)
return logger
json_path = "json/"
__logger__ = get_logger('merge_json.py')
cur_date = time.strftime("%Y-%m-%d", time.localtime())
state_json_list = []
date_list = []
processed_json_data = 0  # ! the merged data after processing
def print_state_data(state_data):
for province in state_data:
print(province["name"])
print(province["state"])
print(province["cities"])
def read_json_files():
global state_json_list
global json_path
global date_list
json_file_list = sorted(os.listdir(json_path), reverse=True)
date_list = list(map(lambda x: x[0:10], json_file_list))
for file_name in json_file_list:
json_data = ""
with open(json_path + file_name, 'r') as json_file:
json_data = json.load(json_file)
state_json_list.append(json_data)
# __logger__.debug(state_json_list)
def init_processed_json_data():
global processed_json_data
global state_json_list
processed_json_data = state_json_list[0]
# __logger__.debug(processed_json_data)
for province in processed_json_data:
province["confirmedCount"] = [province["confirmedCount"]]
province["suspectedCount"] = [province["suspectedCount"]]
province["curedCount"] = [province["curedCount"]]
province["deadCount"] = [province["deadCount"]]
for city in province["cities"]:
city["confirmedCount"] = [city["confirmedCount"]]
city["suspectedCount"] = [city["suspectedCount"]]
city["curedCount"] = [city["curedCount"]]
city["deadCount"] = [city["deadCount"]]
# __logger__.debug(json.dumps(processed_json_data[0], ensure_ascii=False))
def merge_cities(processed_json_cities, add_cities, day_index):
missing_city_list = list(map(lambda x: x["cityName"], processed_json_cities))
for city in add_cities:
city_list = list(filter(lambda x: x["cityName"] in city["cityName"], processed_json_cities))
if len(city_list) == 0:
            # ! add new city data on top of the existing data
# city["confirmedCount"] = [city["confirmedCount"]] + [0] * day_index
# city["suspectedCount"] = [city["suspectedCount"]] + [0] * day_index
# city["curedCount"] = [city["curedCount"]] + [0] * day_index
# city["deadCount"] = [city["deadCount"]] + [0] * day_index
# processed_json_cities.append(city)
pass
else:
            # ! refresh the existing city data
processed_json_data_cur_city = city_list[0]
processed_json_data_cur_city["confirmedCount"].insert(0, city["confirmedCount"])
processed_json_data_cur_city["suspectedCount"].insert(0, city["suspectedCount"])
processed_json_data_cur_city["curedCount"].insert(0, city["curedCount"])
processed_json_data_cur_city["deadCount"].insert(0, city["deadCount"])
if processed_json_data_cur_city["cityName"] not in missing_city_list:
                # ! "锡林郭勒盟" (Xilingol League) can cause a problem here
pass
else:
missing_city_list.remove(processed_json_data_cur_city["cityName"])
for city_name in missing_city_list:
        # ! fill in the missing existing city data
miss_city = list(filter(lambda x: x["cityName"] == city_name, processed_json_cities))[0]
miss_city["confirmedCount"].insert(0, 0)
miss_city["suspectedCount"].insert(0, 0)
miss_city["curedCount"].insert(0, 0)
miss_city["deadCount"].insert(0, 0)
def sort_json():
global processed_json_data
sort_flag_index = len(processed_json_data[0]["confirmedCount"]) - 1
processed_json_data.sort(
key=lambda k: (k.get("confirmedCount", 0)[sort_flag_index]), reverse=True)
for province in processed_json_data:
province["cities"].sort(
key=lambda k: (k.get("confirmedCount", 0)[sort_flag_index]), reverse=True)
def merge_state_json_list():
global processed_json_data
global state_json_list
global date_list
init_processed_json_data()
# days_sum = len(date_list)
for state_json_index in range(1, len(date_list)):
        existing_province_list = list(map(lambda x: x["provinceShortName"], processed_json_data))
        add_province_list = list(
            map(lambda x: x["provinceShortName"], state_json_list[state_json_index]))
        update_province_list = list(set(add_province_list) & set(existing_province_list))
        new_province_list = list(set(add_province_list) - set(existing_province_list))
        missing_province_list = list(set(existing_province_list) - set(add_province_list))
        # __logger__.debug("updated %s" % update_province_list)
        # __logger__.debug("added %s" % new_province_list)
        # __logger__.debug("missing %s" % missing_province_list)
        # ! refresh the existing province data
for province_name in update_province_list:
processed_json_data_cur_province = list(
filter(lambda x: x["provinceShortName"] == province_name, processed_json_data))[0]
province = list(
filter(lambda x: x["provinceShortName"] == province_name,
state_json_list[state_json_index]))[0]
processed_json_data_cur_province["confirmedCount"].insert(0, province["confirmedCount"])
processed_json_data_cur_province["suspectedCount"].insert(0, province["suspectedCount"])
processed_json_data_cur_province["curedCount"].insert(0, province["curedCount"])
processed_json_data_cur_province["deadCount"].insert(0, province["deadCount"])
merge_cities(processed_json_data_cur_province["cities"], province["cities"], state_json_index)
        # ! add new province data on top of the existing data
# for province_name in new_province_list:
# new_province = list(
# filter(lambda x: x["provinceShortName"] == province_name,
# state_json_list[state_json_index]))[0]
# new_province["confirmedCount"] = [new_province["confirmedCount"]] + [0] * state_json_index
# new_province["suspectedCount"] = [new_province["suspectedCount"]] + [0] * state_json_index
# new_province["curedCount"] = [new_province["curedCount"]] + [0] * state_json_index
# new_province["deadCount"] = [new_province["deadCount"]] + [0] * state_json_index
# for city in new_province["cities"]:
# city["confirmedCount"] = [city["confirmedCount"]] + [0] * state_json_index
# city["suspectedCount"] = [city["suspectedCount"]] + [0] * state_json_index
# city["curedCount"] = [city["curedCount"]] + [0] * state_json_index
# city["deadCount"] = [city["deadCount"]] + [0] * state_json_index
# processed_json_data.append(new_province)
        # ! fill in the missing existing province data
for province_name in missing_province_list:
miss_province = list(
filter(lambda x: x["provinceShortName"] == province_name, processed_json_data))[0]
miss_province["confirmedCount"].insert(0, 0)
miss_province["suspectedCount"].insert(0, 0)
miss_province["curedCount"].insert(0, 0)
miss_province["deadCount"].insert(0, 0)
for city in miss_province["cities"]:
city["confirmedCount"].insert(0, 0)
city["suspectedCount"].insert(0, 0)
city["curedCount"].insert(0, 0)
city["deadCount"].insert(0, 0)
sort_json()
processed_json_data.insert(0, date_list)
def save_json(file_path):
global processed_json_data
with open(file_path, "w") as json_file:
json.dump(processed_json_data, json_file, ensure_ascii=False)
json_file.close()
def save_csv():
global processed_json_data
global date_list
cnt_state = ["确诊", "疑似", "治愈", "死亡"]
cnt_name = ["confirmedCount", "suspectedCount", "curedCount", "deadCount"]
with open("history-areas.csv", "w") as csv_file:
csv_write = csv.writer(csv_file)
csv_head = ["省份", "区域", "状态"] + date_list
csv_write.writerow(csv_head)
for province in processed_json_data[1:]:
province_name = province["provinceShortName"]
for index in range(0, len(cnt_state)):
row = [province_name, province_name, cnt_state[index]] + province[cnt_name[index]]
csv_write.writerow(row)
for city in province["cities"]:
for index in range(0, len(cnt_state)):
row = [province_name, city["cityName"], cnt_state[index]] + city[cnt_name[index]]
csv_write.writerow(row)
return
def compress_state_json():
global processed_json_data
for p_idx in range(4, len(processed_json_data)):
processed_json_data[p_idx]["confirmedCount"] = processed_json_data[p_idx]["confirmedCount"][-1:]
processed_json_data[p_idx]["suspectedCount"] = processed_json_data[p_idx]["suspectedCount"][-1:]
processed_json_data[p_idx]["curedCount"] = processed_json_data[p_idx]["curedCount"][-1:]
processed_json_data[p_idx]["deadCount"] = processed_json_data[p_idx]["deadCount"][-1:]
for c_idx in range(0, len(processed_json_data[p_idx]["cities"])):
processed_json_data[p_idx]["cities"][c_idx]["confirmedCount"] = processed_json_data[p_idx][
"cities"][c_idx]["confirmedCount"][-1:]
processed_json_data[p_idx]["cities"][c_idx]["suspectedCount"] = processed_json_data[p_idx][
"cities"][c_idx]["suspectedCount"][-1:]
processed_json_data[p_idx]["cities"][c_idx]["curedCount"] = processed_json_data[p_idx][
"cities"][c_idx]["curedCount"][-1:]
processed_json_data[p_idx]["cities"][c_idx]["deadCount"] = processed_json_data[p_idx][
"cities"][c_idx]["deadCount"][-1:]
def read_csv_file(file_path):
global processed_json_data
with open(file_path, 'r') as csvFile:
reader = csv.reader(csvFile)
all_china = {}
rows = [row for row in reader]
all_china["date"] = rows[0][2:]
all_china["confirmedCount"] = [int(x) for x in rows[1][2:]]
all_china["suspectedCount"] = [int(x) for x in rows[2][2:]]
all_china["curedCount"] = [int(x) for x in rows[3][2:]]
all_china["deadCount"] = [int(x) for x in rows[4][2:]]
processed_json_data.insert(0, all_china)
return
def read_xlsx_file(file_path, sheet_name):
json_from_xlsx = {}
workbook = xlrd.open_workbook(file_path)
cur_sheet = workbook.sheet_by_name(sheet_name)
ncols = cur_sheet.ncols
date_list = []
for col_idx in range(1, ncols):
date_list.append(
datetime(*xldate_as_tuple(cur_sheet.cell_value(0, col_idx), workbook.datemode)).strftime(
"%Y-%m-%d"))
json_from_xlsx["date"] = date_list
row_idx = 1
for category_name in ["confirmedIncr", "suspectedIncr", "curedIncr", "deadIncr"]:
data_list = []
for col_idx in range(1, ncols):
data_list.append(int(cur_sheet.cell_value(row_idx, col_idx)))
json_from_xlsx[category_name] = data_list
row_idx += 1
return json_from_xlsx
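# Added note (illustrative values only, not taken from the real spreadsheet): the dict
# returned above mirrors the sheet layout, e.g.
#     {"date": ["2020-01-24", "2020-01-25", ...],
#      "confirmedIncr": [259, 444, ...], "suspectedIncr": [...],
#      "curedIncr": [...], "deadIncr": [...]}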
def load_json_file(file_path):
with open(file_path, 'r') as json_file:
json_data = json.load(json_file)
return json_data
def main():
"""Main function"""
global processed_json_data
file_path = "history-areas.json"
read_json_files()
merge_state_json_list()
for idx in range(2, len(processed_json_data)):
if processed_json_data[idx]["provinceName"] == "待明确地区":
processed_json_data[idx]["provinceName"] = "其他"
if processed_json_data[idx]["provinceShortName"] == "待明确地区":
processed_json_data[idx]["provinceShortName"] = "其他"
save_csv()
read_csv_file("history.csv")
nation_state_json = load_json_file("nation_state.json")
incr_json = read_xlsx_file("WJW数据.xlsx", "增量")
processed_json_data.insert(0, incr_json)
processed_json_data.insert(0, nation_state_json)
save_json("history-areas-all.json")
compress_state_json()
save_json(file_path)
if __name__ == '__main__':
# ! Uncomment the next line to read args from cmd-line
main()
"""
Divergence metric between two scores based on size of subgraph isomorphism. If
two DAGs are the exact same, the subgraph isomorphism will be of maximum size
and node divergence and edge divergence will be zero.
"""
import sys
import os
import json
import argparse
import numpy as np
import networkx as nx
def get_flow(f):
return json.load(f)["flow"]
def simplify_flow(flow):
if isinstance(flow, str):
flow = json.loads(flow)
s_flow = {}
for node in flow:
s_node = dict(**node)
s_node["wires"] = s_node.get("wires", [])
if len(s_node["wires"]) > 0 and isinstance(s_node["wires"][0], list):
s_node["wires"] = sum(s_node["wires"], [])
s_flow[s_node["id"]] = s_node
return s_flow
def num_nodes(flow):
return len(flow.keys())
def num_edges(flow):
return sum(len(v["wires"]) for v in flow.values())
def has_edge(flow, k1, k2):
return k2 in flow[k1]["wires"]
def edge_to_string(k1, k2):
return " --> ".join([k1, k2])
def string_to_edge(s):
return tuple(s.split(" --> "))
def get_node_similarity(node1, node2):
if node1["type"] != node2["type"]:
return 0
__skip_compares__ = set(
[
"id",
"x",
"y",
"z",
"wires",
"type",
"endpointUrl", # for bot-intent type nodes
"name", # for ui_ nodes
"group", # for ui_ nodes
"tab", # for all nodes
"label", # for tab nodes
]
)
num = 0
den = 0
inc = 0
for x in node1.keys():
if x in __skip_compares__:
continue
den += 1
inc = 1
val1 = node1.get(x, None)
val2 = node2.get(x, None)
if (val1 is None) ^ (val2 is None):
inc = 0
        elif not isinstance(val1, type(val2)) and not isinstance(val2, type(val1)):  # type mismatch (the original compared val1 against its own type)
inc = 0
elif val1 != val2:
inc = 0
num += inc
if den == 0 or num == den:
return 1
else:
return num / den
def mapping_weight(node1, node2):
# only makes sense to compare nodes of the same type
# can add additional conditions here if needed
try:
mnode1 = {k: v for k, v in node1.items() if k != "wires"}
mnode2 = {k: v for k, v in node2.items() if k != "wires"}
ans = get_node_similarity(mnode1, mnode2)
except Exception as e:
print("Comparison Exception:", e)
print(
"comparing",
json.dumps(node1, indent=2),
"\nand\n",
json.dumps(node2, indent=2),
)
ans = 0
return ans
def get_nodemap(flow1, flow2):
nodemap = []
for k1, v1 in flow1.items():
for k2, v2 in flow2.items():
wt = mapping_weight(v1, v2)
if wt > 0:
nodemap.append((k1, k2, wt))
nodemap.sort(key=lambda x: (
len(flow1[x[0]]["wires"]) + len(flow2[x[1]]["wires"])))
return nodemap
def create_product_graph(nmap, flow1, flow2):
prodgraph = set()
for k1a, k2a, wta in nmap:
for k1b, k2b, wtb in nmap:
# assert one-to-one mapping
if k1a == k1b or k2a == k2b:
continue
# is there is an edge between the two nodes in flow1?
e_a = has_edge(flow1, k1a, k1b)
# is there is an edge between the corresponding two nodes in flow2?
e_b = has_edge(flow2, k2a, k2b)
if not (e_a ^ e_b):
# if (k1a, k1b) ⇔ (k2a, k2b), AND
# the mapped nodes are of the same type,
# add edge to product graph
ind1 = nmap.index((k1a, k2a, wta))
ind2 = nmap.index((k1b, k2b, wtb))
edge = (min(ind1, ind2), max(ind1, ind2))
prodgraph.add(edge)
return list(prodgraph)
def density(pgraph, nmap):
return (2 * len(pgraph)) / (len(nmap) * (len(nmap) - 1))
def check_clique(pgraph, clq):
for i in clq:
for j in clq:
if (i != j) and (i, j) not in pgraph:
return False
return True
def large_graph_corr(pgraph, nmap, flow1, flow2):
pg_arr = np.array(pgraph, dtype=np.uint64) + 1
# runtime error if vertex numbers has 0, so add 1 and subtract when finding subset
import cliquematch
G = cliquematch.Graph.from_edgelist(pg_arr, len(nmap))
exact = True
dens = density(pgraph, nmap)
if dens > 0.7:
# highly dense graphs => node mapping is not strict enough,
# (too many nodes of same type) so computing the exact value is SLOW
# hence approximate via heuristic (some form of penalty)
clique0 = G.get_max_clique(use_heuristic=True, use_dfs=False)
# note that the approximate clique is <= the exact clique
exact = False
else:
clique0 = G.get_max_clique(use_heuristic=True, use_dfs=True)
clique = max(
G.all_cliques(size=len(clique0)), key=setup_weighted_clique(nmap, flow1, flow2)
)
subset = [nmap[i - 1] for i in clique]
return subset, exact
def setup_weighted_clique(nmap, flow1, flow2):
def clique_wt(clq):
wts = [nmap[x - 1][2] for x in clq]
return sum(wts)
return clique_wt
def small_graph_corr(pgraph, nmap, flow1, flow2):
G = nx.Graph()
G.add_nodes_from(i + 1 for i in range(len(nmap)))
G.add_edges_from([(a + 1, b + 1) for a, b in pgraph])
clique = max(
nx.algorithms.clique.find_cliques(G),
key=setup_weighted_clique(nmap, flow1, flow2),
)
subset = [nmap[x - 1] for x in clique]
return subset, True
def find_correspondence(pgraph, nmap, flow1, flow2):
if len(pgraph) == 0 and len(nmap) == 0:
return [], True
elif len(pgraph) < 2000:
return small_graph_corr(pgraph, nmap, flow1, flow2)
else:
return large_graph_corr(pgraph, nmap, flow1, flow2)
def get_mapped_edges(subset, flow1, flow2):
mapped_edges = {}
for k1a, k2a, wta in subset:
for k1b, k2b, wtb in subset:
if k1a == k1b or k2a == k2b:
continue
# is there is an edge between the two nodes in flow1?
e_a = has_edge(flow1, k1a, k1b)
# is there is an edge between the corresponding two nodes in flow2?
e_b = has_edge(flow2, k2a, k2b)
if e_a and e_b:
# successfully mapped the edge
mapped_edges[edge_to_string(
k1a, k1b)] = edge_to_string(k2a, k2b)
return mapped_edges
def edge_similarity(edgemap, nodemap, flow1, flow2):
if num_edges(flow1) != 0 and num_edges(flow2) != 0:
return (len(edgemap) / num_edges(flow1)) * (len(edgemap) / num_edges(flow2))
else:
return 0
def node_similarity(subset, nodemap, flow1, flow2):
if num_nodes(flow1) != 0 and num_nodes(flow2) != 0:
score = sum(x[2] for x in subset)
answer = (score / num_nodes(flow1)) * (score / num_nodes(flow2))
return answer
else:
return 0
def get_divergence(full1, full2, edges_only=True):
flow1 = simplify_flow(full1)
flow2 = simplify_flow(full2)
nmap = get_nodemap(flow1, flow2)
pg = create_product_graph(nmap, flow1, flow2)
corr, exact = find_correspondence(pg, nmap, flow1, flow2)
emap = get_mapped_edges(corr, flow1, flow2)
# print(f"{num_nodes(flow1)} nodes, {num_edges(flow1)} edges in flow1")
# print(f"{num_nodes(flow2)} nodes, {num_edges(flow2)} edges in flow2")
# print(len(emap), "edges mapped")
ns = node_similarity(corr, nmap, flow1, flow2)
es = edge_similarity(emap, nmap, flow1, flow2)
if edges_only:
return 1 - es
else:
return (1 - ns, 1 - es, exact)
def node_divergence(flow1, flow2):
return get_divergence(flow1, flow2, False)[0]
def edge_divergence(flow1, flow2):
    return get_divergence(flow1, flow2, False)[1]  # with edges_only=True get_divergence returns a bare float, so request the tuple and take the edge term
def runner(file1, file2):
divergence = get_divergence(get_flow(file1), get_flow(file2), False)[0]
return divergence
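# --- Usage sketch (added for illustration; the command-line interface below is hypothetical) ---
# `runner` expects two open file objects whose JSON carries a top-level "flow" key, as in a
# Node-RED export. A minimal driver could look like this:
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Node/edge divergence between two flows")
    parser.add_argument("flow1", help="path to the first flow JSON file")
    parser.add_argument("flow2", help="path to the second flow JSON file")
    args = parser.parse_args()
    with open(args.flow1) as f1, open(args.flow2) as f2:
        print(runner(f1, f2))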
# Repository: sony-si/pytorch-CycleGAN-and-pix2pix
import os.path
from data.base_dataset import BaseDataset, scale_width_and_crop_height_func
from data.image_folder import make_dataset
from PIL import Image
class ViewUnpairedDataset(BaseDataset):
"""
    This dataset class loads unpaired datasets for a single view.
    It requires a pair of image folders, one for each person.
"""
def __init__(self, opt, subdir = ''):
"""
Parameters:
opt (an Option class) -- contains experiment flags
"""
BaseDataset.__init__(self, opt)
self.dir_A_images = os.path.join(opt.dataroot, subdir, opt.personA)
self.dir_B_images = os.path.join(opt.dataroot, subdir, opt.personB)
self.A_paths = sorted(make_dataset(self.dir_A_images, opt.max_dataset_size))
self.B_paths = sorted(make_dataset(self.dir_B_images, opt.max_dataset_size))
self.A_size = len(self.A_paths) # get the size of dataset A
self.B_size = len(self.B_paths) # get the size of dataset B
def __getitem__(self, index_A, index_B):
A_path = self.A_paths[index_A % self.A_size]
B_path = self.B_paths[index_B]
A_img = Image.open(A_path).convert('RGB')
B_img = Image.open(B_path).convert('RGB')
[A, crop_pos_A] = scale_width_and_crop_height_func(A_img, self.opt)
[B, crop_pos_B] = scale_width_and_crop_height_func(B_img, self.opt)
return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path, 'crop_pos_A': crop_pos_A, 'crop_pos_B': crop_pos_B}
def __len__(self):
return max(self.A_size, self.B_size)
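# Added note (hypothetical option values): the expected directory layout, derived from
# __init__ above, is
#     <opt.dataroot>/<subdir>/<opt.personA>/*.jpg
#     <opt.dataroot>/<subdir>/<opt.personB>/*.jpg
# and `opt` must also carry `max_dataset_size` plus the scale/crop settings consumed by
# `scale_width_and_crop_height_func`.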
from .Job import Job, JobIdError
from .ExpandedJob import ExpandedJob
from .CollapsedJob import CollapsedJob
import os
import pygraphviz as pgv
import pytest
data_path = os.path.join('tardis', 'plasma', 'tests', 'data')
def test_write_dot(tmpdir, simulation_verysimple):
fname = str(tmpdir.mkdir('test_dot').join('plasma.dot'))
simulation_verysimple.plasma.write_to_dot(fname)
actual = pgv.AGraph(fname).to_string()
expected = pgv.AGraph(os.path.join(data_path, 'plasma_ref.dot')).to_string()
assert actual == expected
def test_write_tex(tmpdir, simulation_verysimple):
fname = str(tmpdir.mkdir('test_tex').join('plasma.tex'))
simulation_verysimple.plasma.write_to_tex(fname)
with open(fname, 'r') as fp1, open(os.path.join(data_path, 'plasma_ref.tex'), 'r') as fp2:
assert fp1.readline() ==fp2.readline()
from st2common.runners.base_action import Action
import paho.mqtt.publish as publish
import paho.mqtt.client as paho
class PublishAction(Action):
def __init__(self, config):
super(PublishAction, self).__init__(config)
# Sensor/Action Mismatch
self._config = self.config
self._client = None
self._hostname = self._config.get('hostname', None)
self._port = self._config.get('port', 1883)
# self._protocol = self._config.get('protocol', 'MQTTv311')
self._protocol = self._config.get('protocol', paho.MQTTv311)
self._client_id = self._config.get('client_id', None)
self._userdata = self._config.get('userdata', None)
self._username = self._config.get('username', None)
self._password = self._config.get('password', None)
self._subscribe = self._config.get('subscribe', None)
self._ssl = self._config.get('ssl', False)
self._ssl_cacert = self._config.get('ssl_cacert', None)
self._ssl_cert = self._config.get('ssl_cert', None)
self._ssl_key = self._config.get('ssl_key', None)
self._ssl_payload = None
self._auth_payload = None
def run(self, topic, message=None, qos=0, retain=False):
if self._username:
self._auth_payload = {
'username': self._username,
'password': self._password,
}
if self._ssl:
if not self._ssl_cacert:
raise ValueError('Missing "ssl_cacert" config option')
if not self._ssl_cert:
raise ValueError('Missing "ssl_cert" config option')
if not self._ssl_key:
raise ValueError('Missing "ssl_key" config option')
self._ssl_payload = {
'ca_certs': self._ssl_cacert,
'certfile': self._ssl_cert,
'keyfile': self._ssl_key,
}
publish.single(topic, payload=message, qos=qos, retain=retain,
hostname=self._hostname, port=self._port,
client_id=self._client_id, keepalive=60,
auth=self._auth_payload, tls=self._ssl_payload,
protocol=self._protocol)
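# Added usage note (illustrative values; not part of the original pack): with a pack config
# such as {'hostname': 'broker.example.org', 'port': 1883, 'username': 'st2', 'password': '...'},
# StackStorm would run this action roughly as
#     PublishAction(config).run('sensors/livingroom/temp', message='21.5', qos=1)
# which hands a single message to paho's publish.single helper.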
import datetime
import utils
import glob
import os
import numpy as np
import pandas as pd
if __name__ == '__main__':
loaddir = "E:/Data/h5/"
labels = ['https', 'netflix']
max_packet_length = 1514
for label in labels:
print("Starting label: " + label)
savedir = loaddir + label + "/"
now = datetime.datetime.now()
savename = "payload_%s-%.2d%.2d_%.2d%.2d" % (label, now.day, now.month, now.hour, now.minute)
filelist = glob.glob(loaddir + label + '*.h5')
# Try only one of each file
fullname = filelist[0]
# for fullname in filelist:
load_dir, filename = os.path.split(fullname)
print("Loading: {0}".format(filename))
df = utils.load_h5(load_dir, filename)
packets = df['bytes'].values
payloads = []
labels = []
filenames = []
for packet in packets:
if len(packet) == max_packet_length:
# Extract the payload from the packet should have length 1460
payload = packet[54:]
                p = np.frombuffer(payload, dtype=np.uint8)  # np.fromstring is deprecated for binary input
payloads.append(p)
labels.append(label)
filenames.append(filename)
d = {'filename': filenames, 'bytes': payloads, 'label': labels}
dataframe = pd.DataFrame(data=d)
key = savename.split('-')[0]
dataframe.to_hdf(savedir + savename + '.h5', key=key, mode='w')
# utils.saveextractedheaders(loaddir, savedir, savename, num_headers=headersize)
print("Done with label: " + label)
# -*- coding: utf-8 -*-
# Copyright (c) 2004-2015 Odoo S.A.
# Copyright 2018-2019 <NAME> <https://it-projects.info/team/KolushovAlexandr>
# License MIT (https://opensource.org/licenses/MIT).
from odoo import api, fields, models
class BaseConfigSettings(models.TransientModel):
_inherit = "base.config.settings"
group_attendance_use_pin = fields.Selection(
[
(
0,
'Partners do not need to enter their PIN to check in manually in the "Kiosk Mode".',
),
(
1,
'Partners must enter their PIN to check in manually in the "Kiosk Mode".',
),
],
string="Partner PIN",
help="Enable or disable partner PIN identification at check in",
implied_group="base_attendance.group_hr_attendance_use_pin",
)
shift_autocheckout = fields.Integer(
"Autocheckout ", help="Maximum Shift Time in Minutes"
)
hex_scanner_is_used = fields.Boolean(
"HEX Scanner",
default=False,
help="Some devices scan regular barcodes as hexadecimal. "
"This option decode those types of barcodes",
)
@api.multi
def set_shift_autocheckout(self):
self.env["ir.config_parameter"].set_param(
"base_attendance.shift_autocheckout", self.shift_autocheckout or "0"
)
self.checkout_shifts()
@api.multi
def get_default_shift_autocheckout(self, fields):
shift_autocheckout = self.env["ir.config_parameter"].get_param(
"base_attendance.shift_autocheckout", default=0
)
return {"shift_autocheckout": int(shift_autocheckout)}
@api.model
def checkout_shifts(self):
cron_record = self.env.ref("base_attendance.base_attendance_autocheckout")
if self.shift_autocheckout == 0:
cron_record.write({"active": False})
else:
cron_record.write({"active": True})
@api.multi
def set_hex_scanner_is_used(self):
self.env["ir.config_parameter"].set_param(
"base_attendance.hex_scanner_is_used", self.hex_scanner_is_used
)
@api.multi
def get_default_hex_scanner_is_used(self, fields):
hex_scanner_is_used = self.env["ir.config_parameter"].get_param(
"base_attendance.hex_scanner_is_used", default=False
)
return {"hex_scanner_is_used": hex_scanner_is_used}
"""
lights.py
Code based upon: https://github.com/artem-smotrakov/esp32-weather-google-sheets
class Lights controls LEDs that report the following:
WiFi connection, error, high temperature level, discomfort exceeded
2021-0817 PP added discomfort, removed test
"""
import time
from machine import Pin
import lolin_d1mini as board
class Lights(object):
# initializes a new instance
def __init__(self, leds_config):
# define LEDS
led_pins = [leds_config.get(i)[0] for i in range(1, len(leds_config)+1)]
self._leds = [Pin(pin, Pin.OUT) for pin in led_pins]
# assign meaning to specific LEDs:
for i in range(1, len(leds_config)+1):
if 'reserve' in leds_config.get(i):
#DEBUG: print(led_pins[i-1], self._leds[i-1])
#OK: self._leds[i-1].value(0) if led_pins[i-1] == board.D0 else self._leds[i-1].value(0)
self._leds[i-1].off()
if 'wifi' in leds_config.get(i):
self.wifi_led = self._leds[i-1]
if 'error' in leds_config.get(i):
self.error_led = self._leds[i-1]
if 'discomfort' in leds_config.get(i):
self.discomfort_led = self._leds[i-1]
if 'high_threshold' in leds_config.get(i):
self.high_threshold_led = self._leds[i-1]
if 'low_threshold' in leds_config.get(i):
self.low_threshold_led = self._leds[i-1]
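    # Added example (hypothetical pin assignments; the role strings must match those parsed above):
    #     leds_config = {
    #         1: (board.D1, 'wifi'),
    #         2: (board.D2, 'error'),
    #         3: (board.D3, 'discomfort'),
    #         4: (board.D4, 'high_threshold'),
    #         5: (board.D5, 'low_threshold'),
    #     }
    #     lights = Lights(leds_config)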
def set_wifi_led(self, idx):
self.wifi_led = self._leds[idx]
def set_error_led(self, idx):
self.error_led = self._leds[idx]
def set_discomfort_led(self, idx):
self.discomfort_led = self._leds[idx]
def set_high_threshold_led(self, idx):
self.high_threshold_led = self._leds[idx]
def set_low_threshold_led(self, idx):
self.low_threshold_led = self._leds[idx]
# turn on the LED for WiFi
def wifi_on(self):
self.wifi_led.on()
# turn off the LED for WiFi
def wifi_off(self):
self.wifi_led.off()
# turn on the LED that reports an error
def error_on(self):
self.error_led.on()
# turn off the LED that reports an error
def error_off(self):
self.error_led.off()
# turn on the LED for discomfort
def discomfort_on(self):
self.discomfort_led.on()
# turn off the LED for discomfort
def discomfort_off(self):
self.discomfort_led.off()
# turn on the LED that reports high threshold level
def high_threshold_on(self):
self.high_threshold_led.on()
# turn off the LED that reports high threshold level
def high_threshold_off(self):
self.high_threshold_led.off()
# turn on the LED that reports low threshold level
def low_threshold_on(self):
self.low_threshold_led.on()
# turn off the LED that reports low threshold level
def low_threshold_off(self):
self.low_threshold_led.off()
# turn off all LEDs
def off(self):
self.wifi_off()
self.error_off()
self.discomfort_led.off()
self.high_threshold_off()
self.low_threshold_off()
""" 2021-0812 PP added test
def test(self, dt=1):
self.off()
time.sleep(dt)
print("Wifi connected...")
self.wifi_on()
time.sleep(dt)
print("Error condition...")
self.error_on()
time.sleep(dt)
print("Temperature > high_threshold...")
self.high_threshold_on()
time.sleep(dt)
print("Temperature < low_threshold...")
self.low_threshold_on()
time.sleep(dt*5)
print("Wifi disconnected...")
self.wifi_off()
time.sleep(dt)
print("No error condition...")
self.error_off()
time.sleep(dt)
print("Temperature < high_threshold...")
self.high_threshold_off()
time.sleep(dt)
print("Temperature > low_threshold...")
self.low_threshold_off()
"""
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
BASE_PATH = os.path.dirname(os.path.abspath('manage.py'))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'botforms.settings')
app = Celery('botforms')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
    print('Request: {0!r}'.format(self.request))
import tensorflow as tf
import numpy as np
from net.layers import _conv
class ResNet():
def __init__(self, arch, layers, base_filters=64, is_training=False, use_bn=True):
if arch == 'resnet18' or arch == 'resnet34':
self.block = self.BasicBlock
elif arch == 'resnet50' or arch =='resnet101':
self.block = self.Bottleneck
else:
raise ValueError('only support resnet18 34 50 101')
self.layers = layers
self.base_filters = base_filters
if self.block == self.BasicBlock:
assert self.base_filters == 64
self.is_training = is_training
self.use_bn = use_bn
self.inplanes = 64
def BasicBlock(self, inputs, filters, strides=1, is_training=False, use_bn=True):
expansion = 1
conv1_bn_relu = _conv(inputs, filters, [3,3], strides, 'same', activation=tf.nn.relu, is_training=self.is_training, use_bn=self.use_bn)
conv2_bn = _conv(conv1_bn_relu, filters, [3,3], 1, 'same', activation=None, is_training=self.is_training, use_bn=self.use_bn)
if strides != 1 or self.inplanes != filters*expansion:
inputs = _conv(inputs, filters, [1,1], strides, 'valid', activation=None, is_training=self.is_training, use_bn=self.use_bn)
self.inplanes = filters*expansion
out = tf.nn.relu(conv2_bn+inputs)
return out
def Bottleneck(self, inputs, filters, strides=1, is_training=False, use_bn=True):
expansion = 4
conv1_bn_relu = _conv(inputs, filters, [1,1], 1, 'valid', activation=tf.nn.relu, is_training=self.is_training, use_bn=self.use_bn)
conv2_bn_relu = _conv(conv1_bn_relu, filters, [3,3], strides, 'same', activation=tf.nn.relu, is_training=self.is_training, use_bn=self.use_bn)
conv3_bn = _conv(conv2_bn_relu, filters*expansion, [1,1], 1, 'valid', activation=None, is_training=self.is_training, use_bn=self.use_bn)
if strides != 1 or self.inplanes != filters*expansion:
inputs = _conv(inputs, filters*expansion, [1,1], strides, 'valid', activation=None, is_training=self.is_training, use_bn=self.use_bn)
self.inplanes = filters*expansion
out = tf.nn.relu(conv3_bn+inputs)
return out
def _make_layer(self, x, num_channels, layers, strides=1):
for i in range(layers):
if i == 0:
x = self.block(x, num_channels, strides=strides)
else:
x = self.block(x, num_channels)
return x
def _layer0(self, inputs, filters, kernel_size=(7, 7)):
outputs = _conv(inputs, filters, [7,7], 2, 'same', activation=tf.nn.relu, is_training=self.is_training, use_bn=self.use_bn)
outputs = tf.layers.max_pooling2d(outputs, pool_size=3, strides=2, padding='same')
return outputs
def forward(self, inputs):
self.layer0 = self._layer0(inputs, self.inplanes, (7, 7))
self.layer1 = self._make_layer(self.layer0, self.base_filters, self.layers[0])
self.layer2 = self._make_layer(self.layer1, 2 * self.base_filters, self.layers[1], 2)
self.layer3 = self._make_layer(self.layer2, 4 * self.base_filters, self.layers[2], 2)
self.layer4 = self._make_layer(self.layer3, 8 * self.base_filters, self.layers[3], 2)
return self.layer1, self.layer2, self.layer3,self.layer4
def load_weights(sess,path):
pretrained = np.load(path,allow_pickle=True).item()
for variable in tf.trainable_variables():
for key in pretrained.keys():
            key2 = variable.name.split(':')[0]  # drop the ':0' output suffix; rstrip(':0') would also strip trailing '0's from layer names
if (key == key2):
sess.run(tf.assign(variable, pretrained[key]))
def _resnet(block, layers, **kwargs):
model = ResNet(block, layers, **kwargs)
return model
def resnet18(**kwargs):
return _resnet('resnet18', [2, 2, 2, 2], **kwargs)
def resnet34(**kwargs):
return _resnet('resnet34', [3, 4, 6, 3], **kwargs)
def resnet50(**kwargs):
return _resnet('resnet50', [3, 4, 6, 3], **kwargs)
def resnet101(**kwargs):
return _resnet('resnet101', [3, 4, 23, 3], **kwargs)
if __name__ =='__main__':
inputs = tf.placeholder(shape=[None,300,300,3],dtype=tf.float32)
net = resnet18(is_training=True).forward(inputs)
for variable in tf.trainable_variables():
        print(variable.name,variable.shape)
import os
import discord
import sqlite3
from environs import Env
from bgg import BGGCog
from meetup import Meetup
from discord.ext import commands
from boardgamegeek import BGGClient
from codenames import Codenames
import logging
from music import Music
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("boardgame.helper")
env = Env()
env.read_env()
token = env.str("DISCORD_TOKEN")
def main():
bot = commands.Bot(command_prefix=commands.when_mentioned_or('!'),)
async def on_ready():
logger.info(f'{bot.user} has connected to Discord!')
bot.add_listener(on_ready)
bot.add_check(commands.guild_only())
bot.add_cog(BGGCog(bot))
#bot.add_cog(Meetup(bot))
#bot.add_cog(Codenames(bot))
bot.add_cog(Music(bot))
bot.run(token)
if __name__ == "__main__":
main()
T = int(input())
for i in range(T):
input()
cs, ec = [int(x) for x in input().split()]
total = cs + ec
IQs = [int(x) for x in input().split()]
while len(IQs)<total:
IQs += [int(x) for x in input().split()]
csIQs = IQs[0:cs]
ecIQs = IQs[cs:len(IQs)]
total = 0
for x in csIQs:
if (sum(csIQs)/len(csIQs) < (sum(csIQs)-x)/(len(csIQs)-1)) and ( sum(ecIQs)/len(ecIQs) < (sum(ecIQs) + x) / (len(ecIQs)+1) ):
total += 1
print(total)
#!/usr/bin/python3
import os
# Run a filesystem scan every day unless one is in progress.
os.system("echo \"$(($RANDOM % 60)) $(($RANDOM % 24)) * * * /scan.sh 2>&1 >> /logs/fimscan.log \" > /root.crontab")
os.system("fcrontab -u root /root.crontab")
os.system("rm /root.crontab")
# Perform a bootup Scan
os.system("/scan.sh")
# Run cron
os.system("/usr/sbin/fcron -f -d")
# Repository: isabella232/ALM-SF-DX-Python-Tools
''' Bitbucket Server Interface '''
import urllib
from modules.utils import INFO_TAG, WARNING_TAG, ERROR_LINE, SUCCESS_LINE, print_key_value_list
from modules.git_server_callout import http_request
from modules.comment_operations import get_last_comment, append_new_comments, save_comment_to_file
class GitlabHandler():
def __init__(self, host, projectId):
self.host = host
self.projectId = projectId
def create_branch(self, sslVerify, token, branchName, commitHash, **kwargs):
''' Method for creating new branch '''
url = ( f'{self.host}/api/v4/projects/{self.projectId}/repository/branches' )
headers = { 'Private-Token' : token }
payload = { 'branch' : branchName, 'ref' : commitHash }
data = urllib.parse.urlencode( payload ).encode( "utf-8" )
print_key_value_list( f'{INFO_TAG} Creating branch:', [
( 'Remote URL', self.host ), ( 'Project Id', self.projectId ),
( 'Branch Name', branchName ), ( 'Source Ref', commitHash ), ( 'Endpoint', f'{url}' )
] )
response = http_request( url, data, headers, 'POST', sslVerify )
if response.statusCode == 201:
print( f'{INFO_TAG} Branch \'{branchName}\' created' )
else:
print( f'{WARNING_TAG} Branch \'{branchName}\' not created. Status code: {response.statusCode}' )
def create_tag(self, sslVerify, token, tagName, commitHash, **kwargs):
''' Method for creating new tag '''
message = kwargs[ 'message' ]
releaseDescription = kwargs[ 'releaseDescription' ]
url = ( f'{self.host}/api/v4/projects/{self.projectId}/repository/tags' )
headers = { 'Private-Token' : token }
payload = { 'tag_name' : tagName, 'ref' : commitHash, 'message' : message, 'release_description' : releaseDescription }
data = urllib.parse.urlencode( payload ).encode( "utf-8" )
print_key_value_list( f'{INFO_TAG} Creating tag with:', [
( 'Remote URL', self.host ), ( 'Project Id', self.projectId ), ( 'Tag Name', tagName ),
( 'Ref', commitHash ), ( 'Message', message ), ( 'Description', releaseDescription ), ( 'Endpoint', f'{url}' )
] )
response = http_request( url, data, headers, 'POST', sslVerify )
if response.statusCode == 201:
print( f'{INFO_TAG} Tag Created' )
else:
print( f'{WARNING_TAG} TAG \'{tagName}\' not created. Status code: {response.statusCode}' )
def update_commit_status(self, sslVerify, token, commitHash, status, buildUrl, **kwargs):
''' Updates the commit status '''
jobName = kwargs[ 'jobName' ]
url = ( f'{self.host}/api/v4/projects/{self.projectId}/statuses/{commitHash}' )
headers = { 'Private-Token' : token }
payload = { 'state' : status, 'target_url' : buildUrl, 'name' : jobName }
data = urllib.parse.urlencode( payload ).encode( 'utf-8' )
print_key_value_list( f'{INFO_TAG} Updating commit status:', [
( 'Host URL', self.host ), ( 'Commmit SHA', commitHash ), ( 'Status', status ),
( 'Build URL', buildUrl ), ( 'Endpoint', url )
] )
response = http_request( url, data, headers, 'POST', sslVerify )
if response.statusCode == 200:
print( f'{SUCCESS_LINE} Commit status updated Successfully' )
else:
print( f'{ERROR_LINE} Could not update commit status' )
def add_comment(self, sslVerify, token, mergeRequestId, newComments, buildId, workspace, **kwargs):
''' Adds a new comment to the merge request '''
commentBody = ' '.join( newComments )
url = ( f'{self.host}/api/v4/projects/{self.projectId}/merge_requests/{mergeRequestId}/notes' )
payload = { 'body': commentBody }
headers = { 'Private-Token': token }
data = urllib.parse.urlencode( payload ).encode( 'utf-8' )
print_key_value_list( f'{INFO_TAG} Adding new Comment to:', [
( 'Host URL', self.host ), ( 'Project Id', self.projectId ),
( 'Target Endpoint', url), ( 'Comment', commentBody ), ( 'MergeRequest Id', mergeRequestId )
] )
response = http_request( url, data, headers, 'POST', sslVerify )
if response.statusCode == 201:
commentId = str( response.responseBody[ 'id' ] )
save_comment_to_file( commentBody, buildId, commentId, workspace )
print( f'{SUCCESS_LINE} Comment created succesfully with id \'{commentId}\', saved to ./{buildId}-comment.txt' )
else:
print( f'{ERROR_LINE} Could not create comment on merge request ({response.responseBody} -- {response.statusCode})' )
def edit_comment(self, sslVerify, token, mergeRequestId, newComments, buildId, workspace, **kwargs):
''' Appends message to the merge request's comments '''
commentId, lastComments = get_last_comment( workspace, buildId )
commentBody = append_new_comments( newComments, lastComments )
url = ( f'{self.host}/api/v4/projects/{self.projectId}/merge_requests/{mergeRequestId}/notes/{commentId}' )
payload = { 'body': commentBody }
headers = { 'Private-Token': token }
data = urllib.parse.urlencode( payload ).encode( 'utf-8' )
        print_key_value_list( f'{INFO_TAG} Editing Comment to:', [
( 'Host URL', self.host ), ( 'Project Id', self.projectId ),
( 'Target Endpoint', url), ( 'Comment', commentBody ), ( 'MergeRequest Id', mergeRequestId )
] )
response = http_request( url, data, headers, 'PUT', sslVerify )
if response.statusCode == 200:
commentId = str( response.responseBody[ 'id' ] )
save_comment_to_file( commentBody, buildId, commentId, workspace )
print( f'{SUCCESS_LINE} Comment created succesfully with id \'{commentId}\', saved to ./{buildId}-comment.txt' )
else:
            print( f'{ERROR_LINE} Could not edit comment on merge request ({response.responseBody} -- {response.statusCode})' )
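# Added usage sketch (hypothetical host, project id and token; not part of the original tool):
#     handler = GitlabHandler('https://gitlab.example.com', projectId='42')
#     handler.create_branch(sslVerify=True, token='<private-token>',
#                           branchName='feature/deploy', commitHash='master')
# Each method wraps one GitLab v4 REST endpoint and reports the outcome on stdout.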
from django.db import models
# Create your models here.
class Image(models.Model):
image = models.ImageField(upload_to = 'gallery/')
name = models.CharField(max_length=30)
description = models.CharField(max_length=100)
location = models.ForeignKey('location',on_delete = models.CASCADE)
category = models.ForeignKey('category',on_delete = models.CASCADE)
@classmethod
def images(cls):
images = cls.objects.all()
return images
@classmethod
def search_by_category(cls,search_term):
images = cls.objects.filter(category__name__icontains=search_term)
return images
@classmethod
def filter_by_location(cls,location):
images = cls.objects.filter(location__name__icontains=location)
return images
@classmethod
def get_image_by_id(cls,id):
image_id = cls.objects.get(id = id)
return image_id
def save_image(self):
self.save()
def delete_image(self):
self.delete()
def __str__(self):
return self.name
class Category(models.Model):
name = models.CharField(max_length =30)
def save_category(self):
self.save()
def __str__(self):
return self.name
class Location(models.Model):
name = models.CharField(max_length =30)
@classmethod
def location(cls):
location = cls.objects.all()
return location
def save_location(self):
self.save()
def __str__(self):
        return self.name
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
class Feed(models.Model):
user = models.ForeignKey('auth.User', related_name='actions',
on_delete=models.CASCADE, db_index=True)
activity = models.CharField(max_length=150)
target_model = models.ForeignKey(ContentType, blank=True, null=True,
related_name='target_obj', on_delete=models.CASCADE)
target_id = models.PositiveIntegerField(null=True, blank=True)
target = GenericForeignKey('target_model','target_id')
created = models.DateTimeField(auto_now_add=True, db_index=True)
class Meta:
        ordering = ('-created',)
# Repository: rakesh-chinta/Blender_Renderer
import os
import OpenGL
OpenGL.ERROR_CHECKING = False
from OpenGL.GL import *
from OpenGL.GL import shaders
from OpenGL.GL.ARB.bindless_texture import *
class Voxelizer:
src_dir = os.path.dirname(os.path.realpath(__file__))
shader_clear = 0
shader_voxelize = 0
shader_voxelize_scene = 0
tex_counter = 0
tex_scene_voxel_data = 0
tex_scene_voxel_list = 0
scene_aabb = [[0, 0, 0], [0, 0, 0]]
scene_resolution = [1, 1, 1]
@classmethod
def init(cls):
        # Setup voxel clear compute shader
cshader = glCreateShader(GL_COMPUTE_SHADER)
src = cls.src_dir + "/shaders/comp_clear_voxels.glsl"
with open(src, 'r') as fin:
src = fin.read()
comp = shaders.compileShader(src, GL_COMPUTE_SHADER)
cls.shader_clear = shaders.compileProgram(comp)
glDeleteShader(cshader)
        # Setup voxelize compute shader
cshader = glCreateShader(GL_COMPUTE_SHADER)
src = cls.src_dir + "/shaders/comp_voxelize.glsl"
with open(src, 'r') as fin:
src = fin.read()
comp = shaders.compileShader(src, GL_COMPUTE_SHADER)
cls.shader_voxelize = shaders.compileProgram(comp)
glDeleteShader(cshader)
        # Setup voxelize scene compute shader
cshader = glCreateShader(GL_COMPUTE_SHADER)
src = cls.src_dir + "/shaders/comp_voxelize_scene.glsl"
with open(src, 'r') as fin:
src = fin.read()
comp = shaders.compileShader(src, GL_COMPUTE_SHADER)
cls.shader_voxelize_scene = shaders.compileProgram(comp)
glDeleteShader(cshader)
# Setup texture counter data
cls.tex_counter = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, cls.tex_counter)
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32UI, 1, 1, 0, GL_RED_INTEGER,
GL_UNSIGNED_INT, None)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glBindTexture(GL_TEXTURE_2D, 0)
# Setup textures for scene voxel data
cls.tex_scene_voxel_data, cls.tex_scene_voxel_list = glGenTextures(2)
glBindTexture(GL_TEXTURE_3D, cls.tex_scene_voxel_data)
glTexImage3D(GL_TEXTURE_3D, 0, GL_R32UI,
cls.scene_resolution[0], cls.scene_resolution[1],
cls.scene_resolution[2], 0, GL_RED_INTEGER, GL_UNSIGNED_INT, None)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
cls.hnd_scene_voxel_data = glGetImageHandleARB(cls.tex_scene_voxel_data, 0,
GL_FALSE, 0, GL_R32UI)
glMakeImageHandleResidentARB(cls.hnd_scene_voxel_data, GL_READ_WRITE)
glBindTexture(GL_TEXTURE_2D, cls.tex_scene_voxel_list)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RG32UI, 1024, 1024, 0, GL_RG_INTEGER,
GL_UNSIGNED_INT, None)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glBindTexture(GL_TEXTURE_2D, 0)
cls.hnd_scene_voxel_list = glGetImageHandleARB(cls.tex_scene_voxel_list, 0,
GL_FALSE, 0, GL_RG32UI)
glMakeImageHandleResidentARB(cls.hnd_scene_voxel_list, GL_READ_WRITE)
@classmethod
def voxelize_scene(cls, meshes):
'''Assumes the mesh buffer is bound to buffer base 0'''
glBindImageTexture(2, cls.tex_counter, 0, GL_FALSE, 0,
GL_READ_WRITE, GL_R32UI)
glBindTexture(GL_TEXTURE_2D, cls.tex_counter)
data = (ctypes.c_uint32*1)(0)
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 1, 1, GL_RED_INTEGER,
GL_UNSIGNED_INT, data)
glUseProgram(cls.shader_clear)
loc = glGetUniformLocation(cls.shader_clear, "voxels")
glUniformHandleui64ARB(loc, cls.hnd_scene_voxel_data)
glDispatchCompute(*cls.scene_resolution)
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT)
mesh_bounds = [mesh.aabb for mesh in meshes]
cls.scene_aabb[0][0] = min(mesh_bounds, key=lambda b: b[0][0])[0][0] * 1.1
cls.scene_aabb[0][1] = min(mesh_bounds, key=lambda b: b[0][1])[0][1] * 1.1
cls.scene_aabb[0][2] = min(mesh_bounds, key=lambda b: b[0][2])[0][2] * 1.1
cls.scene_aabb[1][0] = max(mesh_bounds, key=lambda b: b[1][0])[1][0] * 1.1
cls.scene_aabb[1][1] = max(mesh_bounds, key=lambda b: b[1][1])[1][1] * 1.1
cls.scene_aabb[1][2] = max(mesh_bounds, key=lambda b: b[1][2])[1][2] * 1.1
dimensions = (
cls.scene_aabb[1][0] - cls.scene_aabb[0][0],
cls.scene_aabb[1][1] - cls.scene_aabb[0][1],
cls.scene_aabb[1][2] - cls.scene_aabb[0][2]
)
glUseProgram(cls.shader_voxelize_scene)
loc = glGetUniformLocation(cls.shader_voxelize_scene, "u_res")
glUniform3f(loc, *cls.scene_resolution)
loc = glGetUniformLocation(cls.shader_voxelize_scene, "u_size")
glUniform3f(loc, *dimensions)
loc = glGetUniformLocation(cls.shader_voxelize_scene, "u_aabb[0]")
glUniform3f(loc, *cls.scene_aabb[0])
loc = glGetUniformLocation(cls.shader_voxelize_scene, "u_aabb[1]")
glUniform3f(loc, *cls.scene_aabb[1])
loc = glGetUniformLocation(cls.shader_voxelize_scene, "num_meshes")
glUniform1i(loc, len(meshes))
loc = glGetUniformLocation(cls.shader_voxelize_scene, "voxels")
glUniformHandleui64ARB(loc, cls.hnd_scene_voxel_data)
loc = glGetUniformLocation(cls.shader_voxelize_scene, "link_list")
glUniformHandleui64ARB(loc, cls.hnd_scene_voxel_list)
glDispatchCompute(len(meshes), 1, 1)
glMemoryBarrier(GL_TEXTURE_FETCH_BARRIER_BIT)
# glBindTexture(GL_TEXTURE_3D, cls.tex_scene_voxel_data);
# data = (ctypes.c_uint32*8)()
# glGetTexImage(GL_TEXTURE_3D, 0, GL_RED_INTEGER, GL_UNSIGNED_INT, data)
# print('ures:', cls.scene_resolution)
# print('u_size:', dimensions)
# print('u_aabb', cls.scene_aabb)
# print(['END' if i == 4294967295 else i for i in data])
glUseProgram(0)
@classmethod
def voxelize_mesh(cls, mesh):
# start = time.perf_counter()
glBindImageTexture(2, cls.tex_counter, 0, GL_FALSE, 0,
GL_READ_WRITE, GL_R32UI)
glBindTexture(GL_TEXTURE_2D, cls.tex_counter)
data = (ctypes.c_uint32*1)(0)
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 1, 1, GL_RED_INTEGER,
GL_UNSIGNED_INT, data)
glUseProgram(cls.shader_clear)
loc = glGetUniformLocation(cls.shader_clear, "voxels")
glUniformHandleui64ARB(loc, mesh.hnd_voxel_data)
glDispatchCompute(*mesh.voxel_resolution)
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT)
glUseProgram(cls.shader_voxelize)
loc = glGetUniformLocation(cls.shader_voxelize, "u_res")
glUniform3f(loc, *mesh.voxel_resolution)
loc = glGetUniformLocation(cls.shader_voxelize, "u_size")
glUniform3f(loc, *mesh.dimensions)
loc = glGetUniformLocation(cls.shader_voxelize, "u_aabb[0]")
glUniform3f(loc, *mesh.aabb[0])
loc = glGetUniformLocation(cls.shader_voxelize, "u_aabb[1]")
glUniform3f(loc, *mesh.aabb[1])
loc = glGetUniformLocation(cls.shader_voxelize, "u_count")
glUniform1i(loc, mesh.count)
loc = glGetUniformLocation(cls.shader_voxelize, "tri_buffer")
glUniformHandleui64ARB(loc, mesh.hnd_indices)
loc = glGetUniformLocation(cls.shader_voxelize, "vert_buffer")
glUniformHandleui64ARB(loc, mesh.hnd_positions)
loc = glGetUniformLocation(cls.shader_voxelize, "voxels")
glUniformHandleui64ARB(loc, mesh.hnd_voxel_data)
loc = glGetUniformLocation(cls.shader_voxelize, "link_list")
glUniformHandleui64ARB(loc, mesh.hnd_voxel_list)
glDispatchCompute(mesh.count, 1, 1)
glMemoryBarrier(GL_TEXTURE_FETCH_BARRIER_BIT)
glUseProgram(0)
# glActiveTexture(GL_TEXTURE2)
# glGetTexImage(GL_TEXTURE_2D, 0, GL_RED_INTEGER, GL_UNSIGNED_INT, data)
# print("Voxelization time: %.2fms\n" % (time.perf_counter() - start) * 1000)
# print(data[0])
#! /usr/bin/env python3
# <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
from PoPs.quantities import quantity as quantityModule
from PoPs.quantities import mass as massModule
from PoPs.quantities import spin as spinModule
from PoPs.quantities import parity as parityModule
from PoPs.quantities import charge as chargeModule
from PoPs.quantities import halflife as halflifeModule
from PoPs.families import lepton as leptonModule
electron = leptonModule.particle( 'e-', generation = 'electronic' )
mass = massModule.double( 'base', 5.48579909070e-4, quantityModule.stringToPhysicalUnit( 'amu' ) )
electron.mass.add( mass )
charge = chargeModule.integer( 'base', -1, quantityModule.stringToPhysicalUnit( 'e' ) )
electron.charge.add( charge )
halflife = halflifeModule.string( 'base', 'stable', quantityModule.stringToPhysicalUnit( 's' ) )
electron.halflife.add( halflife )
spin = spinModule.fraction( 'base', spinModule.fraction.toValueType( '1/2' ), quantityModule.stringToPhysicalUnit( 'hbar' ) )
electron.spin.add( spin )
parity = parityModule.integer( 'base', 1, quantityModule.stringToPhysicalUnit( '' ) )
electron.parity.add( parity )
print( electron.toXML( ) )
print()
electron2 = leptonModule.particle.parseXMLStringAsClass( electron.toXML( ) )
if( electron.toXML( ) != electron2.toXML( ) ) : raise Exception( 'Fix me' )
suite = leptonModule.suite( )
suite.add( electron )
print( suite.toXML( ) )
suite2 = leptonModule.suite.parseXMLStringAsClass( suite.toXML( ) )
if( suite2.toXML( ) != suite.toXML( ) ) : raise Exception( 'Fix me' )
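# A hedged extension, not part of the original test: the same construction
# pattern can populate the suite with a second lepton. The id 'mu-', the
# generation label 'muonic', and the mass value below are assumptions --
# verify them against the PoPs data before relying on this sketch.
muon = leptonModule.particle( 'mu-', generation = 'muonic' )
muonMass = massModule.double( 'base', 0.1134289259, quantityModule.stringToPhysicalUnit( 'amu' ) )     # approximate muon mass
muon.mass.add( muonMass )
muonCharge = chargeModule.integer( 'base', -1, quantityModule.stringToPhysicalUnit( 'e' ) )
muon.charge.add( muonCharge )
muonSpin = spinModule.fraction( 'base', spinModule.fraction.toValueType( '1/2' ), quantityModule.stringToPhysicalUnit( 'hbar' ) )
muon.spin.add( muonSpin )
suite.add( muon )
print( suite.toXML( ) )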
#!/usr/bin/python
import subprocess
import json
steps = {
'1a1': """
all A ( is_only_in_itself(A) | is_only_in_another(A) ).
""",
# Note: the "exists" of predicate logic is not the "is" of Spinoza's
# philosophy. I am having trouble explaining how I use prover9's "exists",
# then. This could be a sign that Spinoza's philosophy is ultimately not
# translatable into a contemporary predicate logic, which has too many
# contemporary assumptions built into it. Perhaps what could be said in defense
# of this project is that it will make plainer---in the things that couldn't be
# captured--exactly where Spinoza departs from today's "common sense".
'1a4': """
all e all c (
% if c causes e...
causes(c, e) ->
% ...then...
(
% ...there is a cognition of the effect that depends on and
% involves the cognition of its cause.
exists cog_e exists cog_c (
is_cognition_of(cog_e, e) &
is_cognition_of(cog_c, c) &
depends_on(cog_e, cog_c) &
involves(cog_e, cog_c)
)
)
).
""",
# 1a5: Things that have nothing in common with one another also cannot be
# understood through one another, or the concept of the one does not involve
# the concept of the other.
'1a5': """
all a all b (
% If two things have nothing in common with one another...
-(exists t ( has(a, t) & has(b, t) ))
% ...then....
->
(
neither_is_understood_through_the_other(a, b)
& the_concept_of_the_one_does_not_involve_the_concept_of_the_other(a, b)
)
).
""",
# Note that this claim---"Things have have nothing in common with one
# another..."---is a generic, which I am interpreting as two universal
# quantifiers.
# I take "cannot" to mean "ipso facto not".
# 1a7: If a thing can be conceived as not existing, its essence does
# not involve existence.
'1a7': """
all X (
conceivably_does_not_exist(X)
->
-essence_involves_existence(X)
).
""",
# Definition of causa sui
'1def1': """
all X (
causes(X,X) <-> essence_involves_existence(X)
).
""",
# 1def3: By substance I understand what is in itself and is conceived
# through itself, i.e., that whose concept does not require the concept of
# another thing, from which it must be formed.
'1def3': """
all S (
is_substance(S) <-> (
% S is the only thing it is in
all T ( is_in(S,T) <-> (S=T) )
)
).
all S (
is_substance(S) <-> (
all T ( is_conceived_through(S,T) <-> (S=T) )
% &
% % It's not the case that...
% -(
% exists sc exists t exists tc (
% % ...any concept of the substance...
% is_concept_of(tc,c) &
% % ...requires any concept...
% requires(sc,tc) &
% is_concept_of(sc,s) &
% % ...of a distinct thing.
% -(s=t)
% )
% )
)
).
% The definition seems to be used in this way too.
all S (
is_substance(S) <-> is_in(S,S)
).
% TODO: Because this is a real definition, it states not only the
% biconditional, but also that the biconditional makes plain the essence of
% the thing. I'm not sure how to write that in prover9.
% TODO: Haven't encoded the "I understand" bit.
""",
# 1def4: By attribute I understand what the intellect perceives of a substance,
# as constituting its essence.
'1def4': """
% Will leave this blank and try to fill in in various ways.
""",
# 1def5: By mode I understand the affections of a substance, or that which is
# in another through which it is also conceived.
'1def5': """
all M (
is_mode(M) -> (
exists S (
is_substance(S) &
is_mode_of(M, S) &
is_in(M, S) &
is_conceived_through(M, S)
)
)
).
all M all S (
( is_mode(M) & is_mode_of(M, S) & is_substance(S) )
->
(is_in(M, S) & is_conceived_through(M, S))
).
    all M all S ( is_in(M, S) -> is_mode_of(M, S) ).
% TODO: Haven't encoded the "I understand" bit.
""",
# 1def6: By God I understand a being absolutely infinite, i.e., a substance
# consisting of an infinity of attributes, of which each one expresses an
# eternal and infinite essence.
'1def6': """
% God is a substance.
is_substance(God).
% God has all the attributes.
all A ( is_attribute(A) -> ( exists G ( God=G & is_attribute_of(A,G) ) ) ).
% Each attribute of God expresses an eternal and infinite essence.
all A (
is_attribute_of(A, God)
->
exists E (
is_essence(E) &
expresses(A,E) &
is_eternal(E) &
is_infinite(E)
)
).
""",
# 1def8: By eternity I understand existence itself, insofar as it is conceived
# to follow necessarily from the definition alone of the eternal thing.
'1def8': """
% Paraphrasing this as something like: if the existence of something
% follows from its definition then its existence is eternity.
all x (follows_from(existence(x),definition(x)) -> (existence(x) = Eternity)).
% TODO: Incorporate the "insofar as" bit.
""",
'1p1': """
% 1p1: A substance is prior in nature to its affections.
all S all A (
( is_substance(S) & is_affection_of(A, S) )
->
is_prior_in_nature_to(S, A)
).
% Noticed while doing this: This proposition is ambiguous between "a
% substance is prior in nature to all of its affections taken together" and
% "a substance is prior in nature to each of its affections."
% Noticed while doing this: It's not clear why 1def3 is needed in the
% demonstration.
""",
# 1p11: God, or a substance consisting of infinite attributes, each of
# which expresses eternal and infinite essence, necessarily exists.
#
# Dem.: If you deny this, conceive, if you can, that God does not
# exist. Therefore (by 1a7) his essence does not involve existence.
# But this (by 1p7) is absurd. Therefore God necessarily exists, q.e.d.
'1p11': """
% Treating existence like a predicate.
necessarily_exists(God).
""",
# 1p14: Except God, no substance can be or be conceived.
#
# Dem.: Since God is an absolutely infinite being, of whom no attribute which
# expresses an essence of substance can be denied (by 1def6), and he
# necessarily exists (by 1p11), if there were any substance except God, it
# would have to be explained through some attribute of God, and so two
# substances of the same attribute would exist, which (by 1p5) is absurd. And
# so except God, no substance can be or, consequently, be conceived. For if it
# could be conceived, it would have to be conceived as existing. But this (by
# the first part of this demonstration) is absurd. Therefore, except for God no
# substance can be or be conceived, q.e.d.
'1p14': """
% Except God, no substance can be or be conceived.
all s ( is_substance(s) -> (s=God) ).
""",
# 1p15: Whatever is, is in God, and nothing can be or be conceived without God.
#
# Dem.: Except for God, there neither is, nor can be conceived, any substance
# (by 1p14), i.e. (by 1def3), thing that is in itself and is conceived through
# itself. But modes (by 1def5) can neither be nor be conceived without
# substance. So they can be in the divine nature alone, and can be conceived
# through it alone. But except for substances and modes [II/57] there is
# nothing (by 1a1). Therefore, [NS: everything is in God and] nothing can be
# or be conceived without God, q.e.d.
'1p15': """
all X ( is_in(X,God) ). %& -can_be_without(X,God) & -can_be_conceived_without(God) ).
""",
# 1p16: From the necessity of the divine nature there must follow infinitely
# many things in infinitely many modes, (i.e., everything which can fall under
# an infinite intellect.)
#
# Dem.: This Proposition must be plain to anyone, provided he attends to the
# fact that the intellect infers from the given definition of any thing a
# number of properties that really do follow necessarily from it (i.e., from
# the very essence of the thing); and that it infers more properties the more
# the definition of the thing expresses reality, i.e., the more reality the
# essence of the defined thing involves. But since the divine nature has
# absolutely infinite attributes (by 1def6), each of which also expresses an
# essence infinite in its own kind, from its necessity there must follow
# infinitely many things in infinite modes (i.e., everything which can fall
# under an infinite intellect), q.e.d.
'1p16': """
% The number of modes of each attribute is infinite.
%all a ( is_attribute(a) -> number(modes_of(a), Infinity) ).
exists properties (
(properties = modes_of(God)) &
(a_infers_b_from_c(Intellect, properties, definition(God))) &
infinite(properties)
).
% All modes follow from the divine nature.
%number(DivineAttributes, Infinity).
all m ( is_mode(m) -> follows_from(m, DivineNature) ).
""",
'1p19': """
% 1p19: God is eternal, or all God's attributes are eternal.
eternal(God).
""",
'1p2': """
% 1p2: Two substances having different attributes have nothing in common with
% one another.
%
% Dem.: This also evident from 1def3. For each must be in itself and be
% conceived through itself, or the concept of the one does not involve the
% concept of the other.
all s1 all s2 (
% If there are two substances...
( is_substance(s1) & is_substance(s2) & -(s1=s2) )
% ...then...
->
% ...there is no attribute they have in common.
-( exists a ( is_attribute_of(a, s1) & is_attribute_of(a, s2) ) )
).
""",
# 1p21: All the things which follow from the absolute nature of any of God's
# attributes have always had to exist and be infinite, or are, through the same
# attribute, eternal and infinite.
'1p21': """
% 1p21: All the things which follow from the absolute nature of any of God's
% attributes
all t (
(
% simplify for now
follows_from(t, God)
%exists nature exists a (
% is_attribute_of(a, God) &
% is_absolute_nature_of(nature, a) &
% follows_from(t, nature)
%)
)
->
% have always had to exist and be infinite, or are, through the same
% attribute, eternal and infinite.
(
is_infinite(t)
%has_always_existed(t) & has_always_been_infinite(t) &
%eternal_through(t, a) &
%infinite_through(t, a)
)
).
""",
'1p22': """
% 1p22: Whatever follows from some attribute of God
% insofar as it is modified by a modification which,
% through the same attribute, exists necessarily and
% is infinite, must also exist necessarily and be
% infinite.
% Whatever follows from some attribute of God insofar as it is modified by
% a modification which, through the same attribute, exists necessarily and
% is infinite
    all x all mod all attribute (
(
% Note simplification here: to follow from an attribute insofar as
% it is modified is to follow from a modification of that attribute
follows_from(x, mod) &
is_modification_of(mod, attribute) &
exists_necessarily(mod) &
is_infinite(mod)
)
->
(
exists_necessarily(x) & is_infinite(x)
)
).
""",
# 1p24: The essence of things produced by God does not involve
# existence.
'1p24': """
% The phrase 'things produced by God' implicitly excludes
% God.
all x (
(produced_by(x,God) & -(God=x))
->
-essence_involves_existence(x)
).
""",
# 1p24c: From this it follows that God is not only the cause of things'
# beginning to exist, but also of their persevering in existing, *or* (to use a
# Scholastic term) God is the cause of the being of things. For -- whether the
# things [NS: produced] exist or not -- so long as we attend to their essence,
# we shall find that it involves neither existence nor duration. So their
# essence can be the cause neither of their existence nor of their duration,
# but only God, to whose nature alone it pertains to exist[, can be the cause]
# (by 1p14c1).
'1p24c': """
% God is...the cause of things' persevering in existing.
all t exists b ( is_being_of(b, t) & partial_cause(God, b) ).
% Noticed while doing this: we can't translate this as "God is *the* cause
% of the being of things" because there are other causes. So we must
% translate it as "God is a cause"
""",
# 1p25c: Particular things are nothing but affections of God's attributes, *or*
# modes by which God's attributes are expressed in a certain and determinate
# way. The demonstration is evident from 1p15 and 1def5.
'1p25c': """
% Paraphrasing as: each particular thing is nothing but an affection of an
% attribute of God and this affection is a mode that expresses an attribute
% of God.
all t (
is_particular_thing(t) ->
(
            exists attribute exists affection (
                is_nothing_but(t, affection) &
                is_affection_of(affection, attribute) &
                is_attribute_of(attribute, God) &
                is_mode(affection) &
                expresses_in_a_certain_and_determinate_way(affection, attribute)
            )
)
).
""",
# 1p26: A thing which has been determined to produce an effect has necessarily
# been determined in this way by God; and one which has not been determined by
# God cannot determine itself to produce an effect.
'1p26': """
% Simplification: doesn't subselect things in particular
    all t all e (
        determined_to_produce(t, e)
        ->
        x_determines_y_to_produce_z(God, t, e)
).
""",
# 1p28: Every singular thing, or any thing which is finite and has a
# determinate existence, can neither exist nor be determined to produce an
# effect unless it is determined to exist and produce an effect by another
# cause, which is also finite and has a determinate existence; and again, this
# cause also can neither exist nor be determined to produce an effect unless it
# is determined to exist and produce an effect by another, which is also finite
# and has a determinate existence, and so on, to infinity.
'1p28': """
%all x (is_finite(x) -> exists y (causes(y,x))).
%all x all y ((causes(x,y) & infinite(x)) -> infinite(y)).
all x (infinite(x)).
% For simplicity, ignoring "exists" and just handling "determined to exist"
%all y (
% % Every singular thing, or any thing which is finite and has a
% % determinate existence,
% (
% %is_singular(y) &
% is_finite(y) &
% %has_determinate_existence(y) &
% %determined_to_produce_effect(y)
% )
% % can neither exist nor be determined to produce an
% % effect unless
% ->
% % it is determined to exist and produce an effect by another cause,
% % which is also finite and has a determinate existence;
% (
% exists x ( %exists z
% is_infinite(x) %determines_to_exist(x, y) &
% %x_determines_y_to_produce_z(x, y, z) &
% %is_finite(x) %&
% %has_determinate_existence(x)
% )
% )
%).
""",
# intermediary step on the way to 1p28
'1p28-i': """
all x (
is_finite(x) -> -causes(God, x)
).
""",
'1p2': """
% 1p2: Two substances having different attributes have nothing in common with
% one another.
%
% Dem.: This also evident from 1def3. For each must be in itself and be
% conceived through itself, or the concept of the one does not involve the
% concept of the other.
all s1 all s2 (
% If there are two substances with different attributes...
(
( is_substance(s1) & is_substance(s2) & -(s1=s2) )
            & -( exists a ( is_attribute_of(a, s1) & is_attribute_of(a, s2) ) )
)
% ...then...
->
% ...there is no thing that each has.
        -( exists t ( has(s1, t) & has(s2, t) ) )
).
""",
'1p3': """
% 1p3: If things have nothing in common with one another, one of them cannot be
% the cause of the other.
%
% Dem.: If they have nothing in common with one another, then (by 1a5) they
% cannot be understood through one another, and so (by 1a4) one cannot be the
% cause of the other, q.e.d.
% Used in 1p6
% Given two things,
all a all b (
% If there is no thing that they both have, ...
-(exists t ( has(a, t) & has(b, t) ))
% ...then...
->
% ...neither is understood through nor causes the other.
(
neither_is_understood_through_the_other(a, b)
&
neither_causes_the_other(a, b)
)
).
% Note that "they cannot be understood through one another" must mean "neither
% can be understood through the another", although strictly speaking it could
% mean that it is impossible to (simultaneously?) understand each through the
% other.
""",
'1p4': """
% 1p4: Two or more distinct things are distinguished from one another, either
% by a difference in the attributes of the substances or by a difference in
% their affections.
%
% Dem.: Whatever is, is either in itself or in another (by 1a1), i.e. (by 1def3
% and 1def5), outside the intellect there is nothing except substances and
% their affections. Therefore, there is nothing outside the intellect through
% which a number of things can be distinguished from one another except
% substances, or what is the same (by 1def4), their attributes, and their
% affections, q.e.d.
% For simplicity, I will just stick to two things.
% all a all b all c (
% distinguished_by(a, b, c)
% ->
% (
% (
% is_a_difference_in_attributes(c, a, b)
% & is_substance(a)
% & is_substance(b)
% )
% |
% is_a_difference_in_affections(c, a, b)
% )
% ).
% Paraphrase: For any distinct x and y, there is either an attribute that
% exactly one of them has, or an affection that exactly one of them has.
all x all y (
(
-(x=y)
)
->
(
exists a (
( is_attribute_of(a, x) & -is_attribute_of(a, y) )
|
( -is_attribute_of(a, x) & is_attribute_of(a, y) )
|
( is_affection_of(a, x) & -is_affection_of(a, y) )
|
( -is_affection_of(a, x) & is_affection_of(a, y) )
)
)
).
""",
'1p5': """
% 1p5: In nature there cannot be two or more substances of the same
% nature or attribute.
%
% Dem.: If there were two or more distinct substances, they would have
% to be distinguished from one another either by a difference in their
% attributes, or by a difference in their affections (by 1p4). If only
% by a difference in their attributes, then it will be conceded that
% there is only one of the same attribute. But if by a difference in
% their affections, then since a substance is prior in nature to its
% affections (by 1p1), if the affections are put to one side and [the
% substance] is considered in itself, i.e. (by 1def3 and 1a6),
% considered truly, one cannot be conceived to be distinguished from
% another, i.e. (by 1p4), there cannot be many, but only one [of the
% same nature or attribute], q.e.d.
% Rewriting the above.
all s all t all a (
% If an attribute belongs to two substances...
( is_substance(s) & is_substance(t) & is_attribute_of(a, s) & is_attribute_of(a, t) )
->
% ...then they are identical
(s=t)
).
""",
'1p6': """
% 1p6: One substance cannot be produced by another substance.
%
% Dem.: In nature there cannot be two substances of the same attribute (by
% 1p5), i.e. (by 1p2), which have something in common with each other.
% Therefore (by 1p3) one cannot be the cause of the other, or cannot be
% produced by the other, q.e.d.
all s (
is_substance(s)
->
-( exists s2 ( -(s=s2) & produces(s2, s) ) )
).
% I'm hoping that the formula above could be written like this one day:
% "For all s, if s is a substance, then it is not the case that there exists
% something s2 such that s and s2 are distinct and s2 produces s."
""",
'1p7': """
% 1p7: It pertains to the nature of a substance to exist.
%
% Dem.: A substance cannot be produced by anything else (by 1p6c); therefore it
% will be the cause of itself, i.e. (by 1def1), its essence necessarily
% involves existence, or it pertains to its nature to exist, q.e.d.
all s ( is_substance(s) -> pertains_to_its_nature_to_exist(s) ).
""",
'2a1': """
% 2a1'' (G II/99)
% "All modes by which a body is affected by another body follow both
% from the nature of the body affected at at the same time from the
% nature of the affecting body..."
% I'm ignoring this bit, which I take to just be a gloss:
% "so that one and the same body may be moved differently according to
% differences in the nature of the bodies moving it. And conversely,
% different bodies may be moved differently by one and the same body."
    all mode all body all body2
    (
        (
            % All modes of any given body...
            mode_of(mode,body) &
            is_body(body) &
            % ...that have an external cause...
            cause(body2,mode) & -(body2=body)
        )
        -> % ...are such that...
        (
            all n all n2
            (
                (
                    % ...the nature of the affected body
                    nature_of(n,body) &
                    % ...and the nature of the affecting body
                    nature_of(n2,body2)
                )
                -> % ...are such that...
                (
                    % ...the mode follows from both natures.
                    follow_from(mode,n) & follow_from(mode,n2)
                )
            )
        )
    ).
""",
# 2p16: The idea of any mode in which the human body is affected by external
# bodies must involve the nature of the human body and at the same time the
# nature of the external body.
'2p16': """
all mode all body (
% for each mode of the human body...
(
is_human(body) &
mode_of(mode,body)
) ->
% ...all of that mode's external causes
all body2 (
( cause(body2,mode) & -(body2 = body) )
-> % are such that
all i all n all n2 (
% the idea of that mode involves the external bodies' natures
( idea_of(i,body) & nature_of(n,body) & nature_of(n2,body2) )
->
( involves(i,n) & involves(i,n2) )
)
)
).
""",
# 2p7: The order and connection of ideas is the same as the order and
# connection of things.
'2p7': """
% Two ideas are connected (implicitly: by a relation of dependence)
(
exists i exists j
(
is_idea(i) &
is_idea(j) &
depends_on(j, i)
)
)
% ...if and only if two things are connected (implicitly: by a causal relation)
<->
(
exists t exists u (
causes(t, u)
)
).
% Noticed: this seems to require that causation, dependence and involvement are
% the only ways of being connected.
""",
# Some of Spinoza's phrases involve
# generics. If we switch to a generic
# interpretation of them, what happens to
# the book?
"Assumption: Anything that's an idea of something is an idea":
"all I all O ( is_idea_of(I,O) -> is_idea(I) ).",
"Assumption: Anything that's a cognition of something is an idea of that thing.":
"all I all O ( is_cognition_of(I,O) <-> is_idea_of(I,O) ).",
"Assumption: Ideas are concepts":
"all I all X ( is_idea_of(I,X) <-> is_concept_of(I,X) ).",
"Assumption: Whatever we end up talking about is a 'thing'.":
"all X is_thing(X).",
"Assumption: There is an idea of each thing. (Needed for 2p7)":
"all T ( exists I ( is_idea_of(I, T) ) ).",
"Assumption: Definition of essential involvement":
"all X all F ( involves_essentially(X,F) <-> all E ( is_essence_of(E,X) -> involves(E,F) )).",
# We need either this or 'everything has a cause'
"Assumption: Everything is conceived through something":
"all X (exists Y conceived_through(X,Y)).",
"Everything has a cause":
"all X (exists Y causes(Y,X)).",
"To not conceivably have is to necessarily lack.":
"all X all F ( -conceivably_has(X,F) <-> necessarily_lacks(X,F) ).",
"To necessarily lack is to not possibly have":
"all X all F ( necessarily_lacks(X,F) <-> -possibly_has(X,F) ).",
"To conceivably lack is to not necessarily have":
"all X all F ( conceivably_lacks(X,F) <-> -necessarily_has(X,F) ).",
# Seemingly at least one of these is needed for 1p1
"What something is conceived through is prior in nature to it":
"all X all Y ( is_conceived_through(Y, X) -> is_prior_in_nature_to(X, Y) ).",
"What something is in is prior in nature to it":
"all X all Y ( is_in(Y, X) -> is_prior_in_nature_to(X, Y) ).",
"If A is an affection of S then A is an affection and S has A":
"all A all S ( is_affection_of(A, S) -> ( is_affection(A) & has(S, A) ) ).",
"If A is an attribute of S then A is an attribute and S has A":
"all A all S ( is_attribute_of(A, S) -> ( is_attribute(A) & has(S, A) ) ).",
"Assumption: Your modes are your affections":
"all X all Y ( is_mode_of(X,Y) <-> is_affection_of(X,Y) ).",
"Assumption: Modes are affections and vice versa":
"all X ( is_mode(X) <-> is_affection(X) ).",
"Assumption: Your modes are modes and you have them.":
"all X all Y ( is_mode_of(X,Y) <-> ( is_mode(X) & has(Y, X) ) ).",
"Assumption: If you're conceived through something then its concept involves your concept":
"""
    all A all B (
is_conceived_through(A, B)
->
(
exists CA exists CB (
is_concept_of(CA, A) &
is_concept_of(CB, B) &
involves(CB, CA)
)
)
).
""",
"Assumption: Grammar of 'neither of two things is understood through the other'":
"""
all A all B (
neither_is_understood_through_the_other(A, B)
<->
(
-is_understood_through(A, B)
& -is_understood_through(B, A)
)
).
""",
"Assumption: Grammar of 'neither causes the other'":
"""
all A all B (
neither_causes_the_other(A, B)
<->
(
-causes(A, B)
& -causes(B, A)
)
).
""",
"Assumption: Grammar of 'the concept of the one does not involve the concept of the other":
"""
all A all B (
the_concept_of_the_one_does_not_involve_the_concept_of_the_other(A, B)
<->
(
all CA all CB (
(
is_concept_of(CA, A)
& is_concept_of(CB, B)
)
->
(
-involves(CA, CB)
& -involves(CB, CA)
)
)
)
).
""",
"Assumption: You are understood through your causes.":
"all A all B ( causes(A, B) -> is_understood_through(B, A) ). ",
"Assumption: What is understood through something is conceived through it":
"all A all B ( is_understood_through(A, B) <-> is_conceived_through(A, B) ).",
# This is to bring everything under the scope of 1a1
"Assumption: Everything is":
"all X ( is(X) ).",
# Controversial but I think it's needed for 1p4. Talk of attributes is just
# a way of talking about how a substance is conceived and it can be
# regimented out.
"Assumption: Attributes are substances":
"all A ( is_attribute(A) <-> is_substance(A) ).",
"Assumption: What produces something causes it (but not necessarily vice versa)":
"all X all Y ( produces(X, Y) -> causes(X, Y)).",
"Assumption: If something is only in itself then anything it is in is actually itself":
"all A ( is_only_in_itself(A) <-> ( all B ( is_in(A,B) -> (B=A) ))).",
"Assumption: If something is only in another then it is not in itself but something else":
"""
all A (
is_only_in_another(A)
% better: is only in exactly one, distinct, thing
<->
(
exists B ( -(B=A) & is_in(A,B) ) &
-is_in(A,A)
)
).
""",
"Assumption: A causes B iff B is in A":
"all A all B ( causes(A, B) <-> is_in(B, A) ).",
#all A all B ( -produces(A, B) ). % just for debugging 1p6
# If we write 2p7 in a way that involves the idea of a "connection", then we might need these.
# Each dependency relation is a "connection" in a graph.
#all X all Y ( depends_on(X,Y) <-> is_connected_to(X,Y) ).
#all X all Y ( depends_on(X,Y) <-> is_connected_to(Y,X) ). % perhaps?
#all X all Y ( involves(X,Y) <-> is_connected_to(X,Y) ).
#all X all Y ( involves(X,Y) <-> is_connected_to(Y,X) ). % perhaps?
#all X all Y ( causes(X,Y) <-> is_connected_to(X,Y) ).
#all X all Y ( causes(X,Y) <-> depends_on(Y,X) ). % perhaps?
#all X all Y ( is_connected_to(X,Y) -> causes(X,Y) ). % might make 2p7 work
#all X all Y ( is_connected_to(X,Y) -> ( causes(X,Y) | depends_on(X,Y) | involves(X,Y) ) ). % needed for 2p7?
"""Assumption: If it pertains to the nature of something to exist, then its
essence involves existence""":
"all S ( pertains_to_its_nature_to_exist(S) <-> essence_involves_existence(S) ).",
"""Assumption: If something does not conceivably fail to exist then it
necessarily exists""":
"all S ( -conceivably_does_not_exist(S) <-> necessarily_exists(S) ).",
"Assumption: If something necessarily exists then it necessarily has existence":
"all X ( necessarily_exists(X) <-> ( all E ( is_existence(E) -> necessarily_has(X, E) ) ) ).",
#Needed for 1p14.
"Assumption: Each substance has an attribute.":
"all S ( ( is_substance(S) & -is_attribute(S) ) -> ( exists A ( is_attribute_of(A, S) ) ) ).",
# I've included the clause `& -is_attribute(S)`, which is strictly speaking
# false. If we don't include it, this line combines with the formula stating
# that each attribute is a substance to produce an infinite loop.
"If an attribute belongs to two substances then they are identical":
"""
% Rewriting the above.
all S all T all A (
% If an attribute belongs to two substances...
( is_substance(S) & is_substance(T) & is_attribute_of(A, S) & is_attribute_of(A, T) )
->
% ...then they are identical
(S = T)
).
""",
"Assumption: If there is a substance other than God then they do not share attributes":
"""
exists S ( is_substance(S) & -(S=God) )
->
exists S exists T exists A (
-(S=T) & is_attribute_of(A,S) & is_attribute_of(A,T)
).
""",
#all X ( is_in(X,God) & -can_be_without(X,God) & -can_be_conceived_without(X,God) ).
"Assumption: Something cannot be without its causes":
"all X all Y ( causes(X,Y) -> -can_be_without(Y,X) ).",
"Assumption: Something cannot be conceived without what it is conceived through":
"all X all Y ( is_conceived_through(X,Y) -> -can_be_conceived_without(Y,X) ).",
# the intellect infers from the given definition of any thing a number of
# properties that really do follow necessarily from it (i.e., from the very
# essence of the thing); (1p16)
# "the intellect...infers more properties the more the definition of the
# thing expresses reality, i.e., the more reality the essence of the
# defined thing involves." (1p16)
# Paraphrase this as: if the definition of a thing expresses an infinite
# amount of reality, then the intellect infers an infinite number of
# properties from it.
"""Assumption: if the definition of a thing expresses an infinite amount of
reality, then the intellect infers an infinite number of properties from it.""":
"""
all x (
( exists amount_of_reality (
expresses(definition(x),amount_of_reality) &
infinite(amount_of_reality)
))
->
exists properties (
(properties = modes_of(x)) &
(a_infers_b_from_c(Intellect, properties, definition(x))) &
infinite(properties)
)
).
""",
"Assumption: the definition of God expresses an infinite amount of reality":
"""
exists amount_of_reality (
expresses(definition(God), amount_of_reality) &
infinite(amount_of_reality)
).
""",
"""Assumption: If something expresses an infinite essence then the intellect
infers an infinite set of properties from it.""":
"""
all x all y (
(
expresses(x,y) & is_essence_of(y,x) & infinite(y)
)
->
% ...then the intellect infers an infinite set of properties from it.
exists properties (
(properties = modes_of(x)) &
(a_infers_b_from_c(Intellect, properties, definition(x))) &
infinitely_large_set(properties)
)
).
""",
"Assumption: any infinitely large set numbers infinity":
"all x (infinitely_large_set(x) <-> number(x, Infinity)).",
"Assumption: equivalence of is_infinite() and infinite()":
"all x (is_infinite(x) <-> infinite(x)).",
"Assumption: the existence of God follows from his definition":
"follows_from(existence(God),definition(God)).",
"Assumption: If the existence of something is eternity then it is eternal.":
"all x ((existence(x) = Eternity) -> eternal(x)).",
"Assumption: If B is produced by A, then A produces B":
"all A all B (produced_by(B, A) <-> produces(A, B)).",
"Assumption: The divine nature is the nature of God":
"is_the_nature_of(DivineNature, God).",
# Possibly incorrect.
"Assumption: If x is nothing but y, then x equals y":
"all x all y ( is_nothing_but(x,y) <-> x=y ).",
# Helps 1p25c go through.
"Assumption: A mode of a substance is a mode of one of its attributes.":
"""
all M all S (
is_mode_of(M, S)
->
exists A (is_attribute_of(A, S) & is_mode_of(M, A))
).
""",
# Helps 1p25c to go through.
"""Assumption: If you express something you express it in a certain and
determinate way""":
"all x all y (expresses(x,y) -> expresses_in_a_certain_and_determinate_way(x,y)).",
"Assumption: Your modes express you":
"all x all y (is_mode_of(x,y) -> expresses(x,y)).",
# all x (has_always_existed(x) <-> sempiternal(x)).
# all x (has_always_been_infinite(x) <-> is_infinite(x)).
# all x all y (eternal_through(x, y) -> is_eternal(x)).
# all x all y (infinite_through(x, y) -> is_infinite(x)).
# all x ( is_finite(x) <-> -is_infinite(x) ).
"Assumption: what follows from something is caused by it":
"all x all y (follows_from(y,x) <-> causes(x,y)).",
"Assumption: What follows from something is determined by it to produce an effect":
"all x all y (follows_from(y,x) <-> determines_to_produce_effect(x,y)).",
"Assumption: What follows from something is determined by it to exist":
"all x all y (follows_from(y,x) <-> determines_to_exist(x,y)).",
"""Assumption: X determines y to produce an effect iff there is something z
such that x determines y to produce z""":
"all x all y (determines_to_produce_effect(x,y) <-> exists z (x_determines_y_to_produce_z(x,y,z))).",
"Assumption: What is finite has a determinate existence":
"all x (finite(x) <-> has_determinate_existence(x)).",
"Assumption: A substance is conceived through itself":
"all s ( is_substance(s) -> is_conceived_through(s,s) ).",
"Assumption: A substance exists in itself":
"all s ( is_substance(x) -> exists_in(s,s) ).",
"Assumption: God is a substance":
"all s ( is_god(s) -> is_substance(s) ).",
"Assumption: If it pertains to the nature of something to exist, then its essence involves existence":
"all s ( pertains_to_its_nature_to_exist(s) -> essence_involves_existence(s) ).",
"""Assumption: If something conceivably does not exist then its essence does not involve existence""":
"all s ( conceivably_does_not_exist(s) -> -essence_involves_existence(s) ).",
"Assumption: If something conceivably does not exist then it conceivably lacks existence":
"all s ( conceivably_does_not_exist(s) <-> ( all e ( is_existence(e) -> conceivably_lacks(s,e) ) ) ).",
"Assumption: What something conceivably lacks it possibly lacks":
"all x all F ( conceivably_lacks(x, F) <-> possibly_lacks(x, F) ).",
"Assumption: What something possibly lacks it does not necessarily have":
"all x all F ( possibly_lacks(x, F) <-> -necessarily_has(x, F) ).",
"Assumption: If something necessarily exists then it necessarily has existence":
"all x ( necessarily_exists(x) <-> ( all e ( is_existence(e) -> necessarily_has(x, e) ) ) ).",
"Assumption: If something is god it necessarily exists":
"all s ( is_god(s) -> necessarily_exists(s) )."
}
demos = {
'1p1': '1def3 1def5',
#'1p1': '1def3 1def5',
#'1p1': '1def3 1def5',
#'1p2': '1def3',
#'1p2v2': '1def3',
#'1p3': '1a4 1a5',
#'1p4': '1a1 1def3 1def4 1def5',
#'1p4': '1a1 1def3 1def5',
#'1p6': '1p5 1p2 1p3 1def3',
#'2p7': '1a4',
#'1p11': '1a7 1p7 1def6',
#'1p14': '1def6 1p11 1p5',
#'1p15': '1p14 1def3 1def5 1a1',
#'1p16.1': '1def6',
#'1p16.2': '1def6',
#'1p19': '1def6 1p7 1def8 1def4',
#'1p24': '1def1 1def3',
#'1p25c': '1p15 1def5',
#'1p28': '1p21',
#'1p28.1': '1a1 1def3 1def5 1p21 1p22 1p24c 1p25c 1p26',
#'1p28': '1a1 1def3 1def5 1p21 1p22 1p24c 1p25c 1p26',
}
for (conclusion_key, premise_keys) in demos.items():
def escape(x):
return x.replace('"','\\\"')
premises = "\n".join([escape(steps[key]) for key in premise_keys.split(" ")])
cmd = """
echo "
set(prolog_style_variables).
formulas(goals).
%s
end_of_list.
formulas(assumptions).
%s
end_of_list.
" | prover9
"""
# > /dev/null 2>&1
cmd = cmd % (escape(steps[conclusion_key]), premises)
#print cmd
output = subprocess.check_output(cmd, shell=True)
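    # Hedged addition, not in the original script: prover9 reports success by
    # printing "THEOREM PROVED", so a substring check gives a per-step summary.
    # Note that check_output above already raises CalledProcessError when
    # prover9 exits nonzero (e.g. a failed search), so this mainly confirms and
    # summarizes a successful run.
    status = "proved" if b"THEOREM PROVED" in output else "not proved"
    print("%s from [%s]: %s" % (conclusion_key, premise_keys, status))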
import logging

import numpy as np

from .sgd_optimization import *
from .utils import *
from .data_plotter import *
"""
pythonw -m ad_examples.common.test_sgd_optimization
"""
def generate_data(p=11, n=400):
"""
Generates non-linear multivariate data of dimension 'p'.
The data is linear in parameters of the type:
y = b0 + b1 * x + b2*x^2 + ... + bp * x^p
Args:
:param p: int
dimensions
:param n: int
number of samples
    Returns: tuple of (X, y, true_params, x, y_true, e)
"""
true_params = np.random.uniform(low=0.0, high=1.0, size=p+1)
x = np.sort(np.random.uniform(low=-1.0, high=1.0, size=n))
X = np.zeros(shape=(n, p+1), dtype=float)
X[:, 0] = 1.
for i in range(p):
X[:, i+1] = x ** (i+1)
# logger.debug("X:\n%s" % str(list(X)))
e = np.random.normal(loc=0.0, scale=0.2, size=n)
y_true = X.dot(true_params)
y = y_true + e
# logger.debug("y:\n%s" % str(list(y)))
return X, y, true_params, x, y_true, e
def f(w, x, y):
loss = np.mean(0.5 * ((x.dot(w) - y) ** 2))
return loss
def g(w, x, y):
grad = np.multiply(x, np.transpose([x.dot(w) - y]))
mean_grad = np.mean(grad, axis=0)
# logger.debug(mean_grad.shape)
return mean_grad
def get_loss_grad():
return f, g
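def check_gradient(w, x, y, eps=1e-6):
    """
    A sanity-check sketch, not part of the original module: compares the
    analytic gradient g() against a central finite difference of f() and
    returns the maximum absolute discrepancy across coordinates.
    """
    analytic = g(w, x, y)
    numeric = np.zeros_like(w)
    for i in range(len(w)):
        w_plus = w.copy()
        w_minus = w.copy()
        w_plus[i] += eps
        w_minus[i] -= eps
        numeric[i] = (f(w_plus, x, y) - f(w_minus, x, y)) / (2 * eps)
    return np.max(np.abs(analytic - numeric))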
if __name__ == "__main__":
logger = logging.getLogger(__name__)
args = get_command_args(debug=True, debug_args=["--log_file=temp/sgd.log", "--debug"])
configure_logger(args)
np.random.seed(42)
X, y, w_true, x, y_true, e = generate_data(p=11, n=400)
# logger.debug("True params:\n%s" % str(list(w_true)))
# logger.debug("Num batches: %d" % get_num_batches(400, 25))
w0 = np.zeros(len(w_true), dtype=float)
# w = sgd(w0, X, y, f, g, learning_rate=0.01, batch_size=25, max_epochs=1000)
# w = sgdRMSPropNestorov(w0, X, y, f, g, learning_rate=0.01, alpha=0.9, ro=0.9, batch_size=25, max_epochs=1000)
# w = sgdMomentum(w0, X, y, f, g, learning_rate=0.01, alpha=0.9, batch_size=25, max_epochs=1000)
w = sgdRMSProp(w0, X, y, f, g, learning_rate=0.01, ro=0.9, batch_size=25, max_epochs=1000)
# w = sgdAdam(w0, X, y, f, g, learning_rate=0.01, ro1=0.9, ro2=0.999, batch_size=25, max_epochs=1000)
logger.debug("Inferred params:\n%s" % str(list(w)))
y_pred = X.dot(w)
pdfpath = "temp/sgd_test.pdf"
dp = DataPlotter(pdfpath=pdfpath, rows=1, cols=1)
pl = dp.get_next_plot()
pl.plot(x, y_true, 'b-')
pl.plot(x, y_pred, 'g-')
pl.plot(x, y, 'r.')
dp.close()
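    # Hedged addition, not in the original script: log a numeric summary of the
    # fit against the noise-free curve alongside the plot written above.
    rmse = np.sqrt(np.mean((y_pred - y_true) ** 2))
    logger.debug("RMSE of y_pred vs. y_true: %f" % rmse)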