Dataset schema:

| column | dtype | lengths / classes / range |
| --- | --- | --- |
| blob_id | string | lengths 40 to 40 |
| directory_id | string | lengths 40 to 40 |
| path | string | lengths 3 to 616 |
| content_id | string | lengths 40 to 40 |
| detected_licenses | sequence | lengths 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5 to 115 |
| snapshot_id | string | lengths 40 to 40 |
| revision_id | string | lengths 40 to 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M (nullable) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | lengths 3 to 10.2M |
| authors | sequence | lengths 1 to 1 |
| author_id | string | lengths 1 to 132 |
cee359fe3b6a52b85b80a84045288cefe202b0df | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/iothub/azure-mgmt-iothubprovisioningservices/azure/mgmt/iothubprovisioningservices/aio/_iot_dps_client.py | 5fde1ad85d8fa119ba629237d54b5a8c495d85a4 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 4,443 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import IotDpsClientConfiguration
from .operations import Operations
from .operations import DpsCertificateOperations
from .operations import IotDpsResourceOperations
from .. import models
class IotDpsClient(object):
"""API for using the Azure IoT Hub Device Provisioning Service features.
:ivar operations: Operations operations
:vartype operations: azure.mgmt.iothubprovisioningservices.aio.operations.Operations
:ivar dps_certificate: DpsCertificateOperations operations
:vartype dps_certificate: azure.mgmt.iothubprovisioningservices.aio.operations.DpsCertificateOperations
:ivar iot_dps_resource: IotDpsResourceOperations operations
:vartype iot_dps_resource: azure.mgmt.iothubprovisioningservices.aio.operations.IotDpsResourceOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The subscription identifier.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = IotDpsClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.dps_certificate = DpsCertificateOperations(
self._client, self._config, self._serialize, self._deserialize)
self.iot_dps_resource = IotDpsResourceOperations(
self._client, self._config, self._serialize, self._deserialize)
async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "IotDpsClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
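# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated SDK file): constructing and
# closing the client using only the signatures defined above; the credential
# object and subscription id are placeholders.
#
#     client = IotDpsClient(credential, "<subscription-id>")  # base_url defaults to https://management.azure.com
#     ...
#     await client.close()
#
# or, equivalently, via the async context manager implemented above:
#
#     async with IotDpsClient(credential, "<subscription-id>") as client:
#         ...
# --------------------------------------------------------------------------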
| ["[email protected]"] | |

d385cc2f8288b4c5aa47248859593ff5ec03611b | 67797ff7e63cd4dcafc5bd958f61fc872a5a6449 | /tests/test_builds.py | 0e4b5a7ec6a810fee438b7f94bc85f7871071168 | [
"MIT"
] | permissive | frigg/frigg-worker | 394c23149c7109a914207de4a06608ec7884fe99 | 8c215cd8f5a27ff9f5a4fedafe93d2ef0fbca86c | refs/heads/master | 2020-04-06T07:05:21.348689 | 2016-05-03T05:13:37 | 2016-05-03T05:13:37 | 25,468,616 | 4 | 3 | null | 2017-10-15T10:14:29 | 2014-10-20T13:51:38 | Python | UTF-8 | Python | false | false | 6,527 | py | # -*- coding: utf8 -*-
import unittest
from unittest import mock
from docker.helpers import ProcessResult
from docker.manager import Docker
from frigg_settings.model import FriggSettings
from frigg_worker.builds import Build
from frigg_worker.errors import GitCloneError
DATA = {
'id': 1,
'branch': 'master',
'sha': 'superbhash',
'clone_url': 'https://github.com/frigg/test-repo.git',
'owner': 'frigg',
'name': 'test-repo',
}
BUILD_SETTINGS_WITH_NO_SERVICES = FriggSettings({
'setup_tasks': [],
'tasks': ['tox'],
'services': [],
'coverage': {'path': 'coverage.xml', 'parser': 'python'}
})
BUILD_SETTINGS_ONE_SERVICE = FriggSettings({
'setup_tasks': [],
'tasks': ['tox'],
'services': ['redis-server'],
'coverage': None,
})
BUILD_SETTINGS_FOUR_SERVICES = FriggSettings({
'setup_tasks': [],
'tasks': ['tox'],
'services': ['redis-server', 'postgresql', 'nginx', 'mongodb'],
'coverage': None,
})
BUILD_SETTINGS_SERVICES_AND_SETUP = FriggSettings({
'setup_tasks': ['apt-get install nginx'],
'tasks': ['tox'],
'services': ['redis-server', 'postgresql', 'nginx', 'mongodb'],
'coverage': None,
})
BUILD_SETTINGS_WITH_AFTER_TASKS = FriggSettings({
'tasks': {
'tests': ['tox'],
'after_success': ['success_task'],
'after_failure': ['failure_task'],
},
})
WORKER_OPTIONS = {
'dispatcher_url': 'http://example.com/dispatch',
'dispatcher_token': 'tokened',
'hq_url': 'http://example.com/hq',
'hq_token': 'tokened',
}
GIT_ERROR = GitCloneError('UNKNOWN', '', '', True)
class BuildTests(unittest.TestCase):
def setUp(self):
self.docker = Docker()
self.build = Build(1, DATA, self.docker, WORKER_OPTIONS)
@mock.patch('docker.manager.Docker.start')
@mock.patch('docker.manager.Docker.stop')
@mock.patch('frigg_worker.builds.parse_coverage')
@mock.patch('frigg_worker.builds.Build.clone_repo')
@mock.patch('frigg_worker.builds.Build.run_task')
@mock.patch('docker.manager.Docker.read_file')
@mock.patch('frigg_worker.builds.Build.report_run', lambda *x: None)
@mock.patch('frigg_worker.jobs.build_settings', lambda *x: BUILD_SETTINGS_WITH_NO_SERVICES)
def test_run_tests(self, mock_read_file, mock_run_task, mock_clone_repo,
mock_parse_coverage, mock_docker_stop, mock_docker_start):
self.build.run_tests()
mock_run_task.assert_called_once_with('tox')
self.assertTrue(mock_clone_repo.called)
mock_read_file.assert_called_once_with('~/builds/1/coverage.xml')
self.assertTrue(mock_parse_coverage.called)
self.assertTrue(self.build.succeeded)
self.assertTrue(self.build.finished)
@mock.patch('frigg_worker.builds.Build.clone_repo')
@mock.patch('frigg_worker.builds.Build.run_task', side_effect=OSError())
@mock.patch('frigg_worker.builds.Build.report_run', lambda *x: None)
@mock.patch('frigg_worker.jobs.build_settings', lambda *x: BUILD_SETTINGS_WITH_NO_SERVICES)
def test_run_tests_fail_task(self, mock_run_task, mock_clone_repo):
self.build.run_tests()
self.assertTrue(mock_clone_repo.called)
mock_run_task.assert_called_once_with('tox')
self.assertFalse(self.build.succeeded)
self.assertTrue(self.build.finished)
@mock.patch('frigg_worker.builds.Build.run_task')
@mock.patch('frigg_worker.builds.Build.clone_repo', side_effect=GIT_ERROR)
def test_run_tests_fail_clone(self, mock_clone, mock_run_task):
self.build.run_tests()
self.assertFalse(mock_run_task.called)
self.assertFalse(self.build.succeeded)
@mock.patch('frigg_worker.api.APIWrapper.report_run')
@mock.patch('frigg_worker.builds.Build.serializer', lambda *x: {})
@mock.patch('frigg_worker.jobs.build_settings', lambda *x: {})
def test_report_run(self, mock_report_run):
self.build.report_run()
mock_report_run.assert_called_once_with('Build', 1, '{}')
@mock.patch('docker.manager.Docker.directory_exist')
@mock.patch('docker.manager.Docker.run')
def test_delete_working_dir(self, mock_local_run, mock_directory_exist):
self.build.delete_working_dir()
self.assertTrue(mock_directory_exist.called)
mock_local_run.assert_called_once_with('rm -rf ~/builds/1')
@mock.patch('docker.manager.Docker.run')
@mock.patch('frigg_worker.builds.Build.delete_working_dir', lambda x: True)
@mock.patch('frigg_worker.builds.Build.clone_repo', lambda x: True)
@mock.patch('frigg_worker.builds.Build.parse_coverage', lambda x: True)
@mock.patch('frigg_worker.builds.Build.report_run', lambda x: None)
@mock.patch('frigg_worker.jobs.build_settings', lambda *x: BUILD_SETTINGS_SERVICES_AND_SETUP)
def test_build_setup_steps(self, mock_docker_run):
self.build.run_tests()
mock_docker_run.assert_has_calls([
mock.call('sudo service redis-server start'),
mock.call('sudo service postgresql start'),
mock.call('sudo service nginx start'),
mock.call('sudo service mongodb start'),
mock.call('apt-get install nginx', self.build.working_directory),
mock.call('tox', self.build.working_directory),
])
def test_run_build_should_call_after_success_on_successful_build(mocker):
mocker.patch('frigg_worker.builds.Build.clone_repo')
mocker.patch('frigg_worker.builds.Build.run_task')
mocker.patch('frigg_worker.builds.Build.report_run',)
mocker.patch('frigg_worker.jobs.build_settings', return_value=BUILD_SETTINGS_WITH_AFTER_TASKS)
mock_run_after = mocker.patch('frigg_worker.builds.Build.run_after_task')
build = Build(1, DATA, Docker(), WORKER_OPTIONS)
build.run_tests()
mock_run_after.assert_called_once_with('success_task')
def test_run_build_should_call_after_failure_on_failed_build(mocker):
result = ProcessResult('tox')
result.return_code = 1
mocker.patch('frigg_worker.builds.Build.clone_repo')
mocker.patch('frigg_worker.builds.Build.run_task')
mocker.patch('frigg_worker.builds.Build.report_run')
mocker.patch('frigg_worker.builds.Build.succeeded', False)
mocker.patch('frigg_worker.jobs.build_settings', return_value=BUILD_SETTINGS_WITH_AFTER_TASKS)
mock_run_after = mocker.patch('frigg_worker.builds.Build.run_after_task')
build = Build(1, DATA, Docker(), WORKER_OPTIONS)
build.run_tests()
mock_run_after.assert_called_once_with('failure_task')
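# Hedged note: the two module-level tests above take a `mocker` argument, the
# fixture supplied by the pytest-mock plugin, so a plausible way to run this
# file is:
#
#     pip install pytest pytest-mock
#     pytest tests/test_builds.py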
| ["[email protected]"] | |

87f2f3fc21a0db478f1cd45aa29417581ea8d007 | a0c53168a4bdcfb0aa917d6d2c602f0999443a10 | /DPSPipeline/widgets/projectviewwidget/shotTreeWidgetItem.py | 8e1ce528e98d88da4f527aa93dfc0ecae9dfdf61 | [] | no_license | kanooshka/DPS_PIPELINE | 8067154c59ca5c8c9c09740969bb6e8537021903 | df2fcdecda5bce98e4235ffddde1e99f334562cc | refs/heads/master | 2021-05-24T04:32:03.457648 | 2018-09-07T13:25:11 | 2018-09-07T13:25:11 | 29,938,064 | 3 | 2 | null | 2020-07-23T23:06:37 | 2015-01-27T22:26:01 | Python | UTF-8 | Python | false | false | 3,598 | py | from PyQt4 import QtCore,QtGui
from DPSPipeline.widgets import taskProgressButton
from DPSPipeline.widgets import userLabel
import sharedDB
import operator
class ShotTreeWidgetItem(QtGui.QTreeWidgetItem):
def __init__(self,shotWidget = '', shotPhaseNames = [], shot = "", project = [], phases = []):
super(QtGui.QTreeWidgetItem, self).__init__()
self.shotWidget = shotWidget
self.shotPhaseNames = shotPhaseNames
self.shot = shot
self.phases = phases
self.project = project
self.btns = []
if shotWidget is not None:
self.shotWidget.addTopLevelItem(self)
#shot id
self.setText(0,(str(self.shot._idshots)))
#shot name
self.setText(1,(str(self.shot._number)))
#if tasklist less than lenshotphasenames - 2
columnIndex = 2
#sort phases
#sortedPhases = self.phases.values()
#sortedPhases.sort(key=operator.attrgetter('_startdate'))
self.setToolTip(1,("ShotID: "+str(self.shot._idshots)))
for phase in self.phases:
if phase._taskPerShot:
currentTask = None
if self.shot._tasks is not None:
for task in self.shot._tasks.values():
if task._idphases == phase._idphases:
currentTask = task
#if task didn't exist, create task
'''
if currentTask is None and sharedDB.autoCreateShotTasks:
currentTask = sharedDB.tasks.Tasks(_idphaseassignments = phase._idphaseassignments, _idprojects = self.project._idprojects, _idshots = shot._idshots, _idphases = phase._idphases, _new = 1)
self.shot._tasks[str(currentTask.id())] = (currentTask)
currentTask.Save()
#sharedDB.myTasks.append(currentTask)
'''
#create button for currentTask
#btn = self.AddProgressButton(shotWidgetItem,columnIndex,85,currentTask._status)
btn = taskProgressButton.TaskProgressButton(_task=currentTask,_shot = self.shot, _forPhase = phase._idphases)
uLabel = userLabel.UserLabel(task = currentTask)
btn.stateChanged.connect(uLabel.getUserFromTask)
taskBtnWidget = QtGui.QWidget()
vLayout = QtGui.QHBoxLayout()
taskBtnWidget.setLayout(vLayout)
taskBtnWidget._btn = btn
taskBtnWidget._uLabel = uLabel
vLayout.addWidget(btn)
vLayout.addWidget(uLabel)
self.shotWidget.setItemWidget(self,columnIndex,taskBtnWidget)
self.btns.append(btn)
#connect button state changed signal to task
#print "Connecting statechange to: "+str(currentTask._idtasks)
#btn.stateChanged.connect(currentTask.setShit)
#btn.stateChanged.connect(self.test)
columnIndex +=1
def deselect(self):
self.shotWidget.UpdateBackgroundColors()
def select(self):
try:
bgc = QtGui.QColor(250,250,0)
for col in range(0,self.shotWidget.columnCount()):
self.setBackground(col,bgc)
        except Exception:
print "Unable to change color on shot item, sequence was removed from list" | [
"[email protected]"
] | |
a7f58b1d085e989f8ebfde64f6717ef978130d69 | 9b6f36f544af5a2c1c042b18dda920c78fd11331 | /omsBackend/apps/process/views.py | 44b2afb8c52ee8a722fefa1cda93256917efa29b | [] | no_license | Nikita-stels/MyOms | a946f08b4ba7abfa8392e98c579320b501a7ca2a | fdaf9d5a2a29b5386c1a86fcf89a2c0d5527687a | refs/heads/master | 2022-09-17T20:40:45.228067 | 2020-01-08T14:41:04 | 2020-01-08T14:41:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,817 | py | from django.shortcuts import render
from rest_framework import viewsets
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.filters import SearchFilter
from apps.process.filters import ProcessFilterBackend, IPPortBindingFilterBackend
from apps.process.serializers import ProcessSerlizer, IPPortSerializer
from apps.process.models import Process, IPPortBinding
# Create your views here.
class ProcessViewSet(viewsets.ModelViewSet):
filter_backends = (SearchFilter, DjangoFilterBackend, ProcessFilterBackend)
serializer_class = ProcessSerlizer
queryset = Process.objects.all()
filter_fields = ['process_name', 'process_version', 'process_default_boot', 'process_type', 'process_quantity',
'process_enable_monitoring', 'process_charge', 'process_port', 'process_binding_IP',
'process_port_type', 'process_agreement', 'process_affiliated_process', 'process_is_enabled',
'process_server', 'process_vmserver']
search_fields = ['process_name', 'process_version', 'process_default_boot', 'process_type', 'process_quantity',
'process_enable_monitoring', 'process_charge', 'process_port', 'process_binding_IP',
'process_port_type', 'process_agreement', 'process_affiliated_process', 'process_is_enabled',
'process_server', 'process_vmserver']
class IPPortBindingViewSets(viewsets.ModelViewSet):
filter_backends = (SearchFilter,DjangoFilterBackend,IPPortBindingFilterBackend)
queryset = IPPortBinding.objects.all()
serializer_class = IPPortSerializer
filter_fields = ['ip_ip', 'ip_port', 'ip_process', 'ip_server', 'ip_vmserver']
search_fields = ['ip_ip', 'ip_port', 'ip_process', 'ip_server', 'ip_vmserver']
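# Hedged sketch (not part of this file): DRF ViewSets are only reachable once
# registered with a router, typically in a urls.py; the URL prefixes below are
# illustrative, not taken from this project.
#
#     from rest_framework.routers import DefaultRouter
#     from apps.process.views import ProcessViewSet, IPPortBindingViewSets
#
#     router = DefaultRouter()
#     router.register(r'process', ProcessViewSet)
#     router.register(r'ipportbinding', IPPortBindingViewSets)
#     urlpatterns = router.urls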
| ["[email protected]"] | |

1ae050f68b58556641dae5383d76864138ae3f4d | cd4bbecc3f713b0c25508d0c5674d9e103db5df4 | /toontown/coghq/DistributedCogKartAI.py | 3e086524d2d0f41d184b2f3a51523165283ae197 | [] | no_license | peppythegod/ToontownOnline | dce0351cfa1ad8c476e035aa3947fdf53de916a6 | 2e5a106f3027714d301f284721382cb956cd87a0 | refs/heads/master | 2020-04-20T05:05:22.934339 | 2020-01-02T18:05:28 | 2020-01-02T18:05:28 | 168,646,608 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,621 | py | from direct.directnotify import DirectNotifyGlobal
from toontown.safezone import DistributedGolfKartAI
from toontown.building import DistributedElevatorExtAI
from toontown.building import ElevatorConstants
from toontown.toonbase import ToontownGlobals
class DistributedCogKartAI(DistributedElevatorExtAI.DistributedElevatorExtAI):
notify = DirectNotifyGlobal.directNotify.newCategory(
'DistributedCogKartAI')
def __init__(self, air, index, x, y, z, h, p, r, bldg, minLaff):
self.posHpr = (x, y, z, h, p, r)
DistributedElevatorExtAI.DistributedElevatorExtAI.__init__(
self, air, bldg, minLaff=minLaff)
self.type = ElevatorConstants.ELEVATOR_COUNTRY_CLUB
self.courseIndex = index
if self.courseIndex == 0:
self.countryClubId = ToontownGlobals.BossbotCountryClubIntA
elif self.courseIndex == 1:
self.countryClubId = ToontownGlobals.BossbotCountryClubIntB
elif self.courseIndex == 2:
self.countryClubId = ToontownGlobals.BossbotCountryClubIntC
else:
self.countryClubId = 12500
def getPosHpr(self):
return self.posHpr
def elevatorClosed(self):
numPlayers = self.countFullSeats()
if numPlayers > 0:
players = []
for i in self.seats:
if i not in [None, 0]:
players.append(i)
continue
countryClubZone = self.bldg.createCountryClub(
self.countryClubId, players)
for seatIndex in range(len(self.seats)):
avId = self.seats[seatIndex]
if avId:
self.sendUpdateToAvatarId(
avId, 'setCountryClubInteriorZone', [countryClubZone])
self.clearFullNow(seatIndex)
continue
else:
self.notify.warning('The elevator left, but was empty.')
self.fsm.request('closed')
def sendAvatarsToDestination(self, avIdList):
if len(avIdList) > 0:
countryClubZone = self.bldg.createCountryClub(
self.countryClubId, avIdList)
for avId in avIdList:
if avId:
self.sendUpdateToAvatarId(
avId, 'setCountryClubInteriorZoneForce',
[countryClubZone])
continue
def getCountryClubId(self):
return self.countryClubId
def enterClosed(self):
DistributedElevatorExtAI.DistributedElevatorExtAI.enterClosed(self)
self.fsm.request('opening')
| ["[email protected]"] | |

c511b2f3500d3da95d7f4d3f4d79227c85982803 | 287f810559d6669ab566abb82d52f7673ddc5248 | /virtual/bin/sqlformat | 361c2d24afeb3d2456296ff97d9f4d4eef9cd901 | [
"MIT"
] | permissive | Mariga123/studious-octo-giggle | dc70b52605a5b4dacd8b529b6d2a402fc880076f | 3acc7d5e6a4814fae7744a5fb8524f810d891a65 | refs/heads/master | 2023-02-21T08:31:26.862617 | 2021-01-20T08:24:08 | 2021-01-20T08:24:08 | 330,034,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | #!/home/moringa/Documents/instagram/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
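# Hedged note: this file has the shape of a setuptools-generated console-script
# shim; the re.sub() strips a trailing "-script.pyw" or ".exe" suffix from
# argv[0] (a Windows packaging convention) before delegating to sqlparse's CLI.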
| ["[email protected]"] | ||

410beadaabe051765f1ac70355a622299ed919a8 | fd7598754b87536d3072edee8e969da2f838fa03 | /chapter5_programming11.py | ffba02a5ad2e212b0444d81702c221abbb78ca22 | [] | no_license | dorabelme/Python-Programming-An-Introduction-to-Computer-Science | 7de035aef216b2437bfa43b7d49b35018e7a2153 | 3c60c9ecfdd69cc9f47b43f4a8e6a13767960301 | refs/heads/master | 2020-05-02T23:19:44.573072 | 2019-03-28T21:27:20 | 2019-03-28T21:27:20 | 178,261,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | # File: chaos_modified.py
# A simple program illustrating chaotic behavior.
def main():
print("This program illustrates chaotic function")
num1 = eval(input("Enter a number between 0 and 1: "))
num2 = eval(input("Enter a number between 0 and 1: "))
# Display table
print('\n{0} {1:^8} {2:^8}'.format('index', num1, num2))
print('-' * 27)
for i in range(1,11):
num1 = 3.9 * num1 * (1 - num1)
num2 = 3.9 * num2 * (1 - num2)
print('\n{0:^5} {1:8.6f} {2:8.6f}'.format(i, num1, num2))
main()
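# Worked example (illustration only), two iterations of the map for seed 0.5:
#   x1 = 3.9 * 0.5   * (1 - 0.5)   = 0.975
#   x2 = 3.9 * 0.975 * (1 - 0.975) = 0.0950625
# Nearby seeds separate within a few iterations, which is what the two-column
# table printed by main() makes visible.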
| ["[email protected]"] | |

7f1d01efbf844df24be9e5b7c02597594675641d | 8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a | /html_parsing/get_game_genres/genre_translate_file/load.py | 1eae3f325e8e436432fd7a4efb27dd080bb8d960 | [
"CC-BY-4.0"
] | permissive | stepik/SimplePyScripts | 01092eb1b2c1c33756427abb2debbd0c0abf533f | 3259d88cb58b650549080d6f63b15910ae7e4779 | refs/heads/master | 2023-05-15T17:35:55.743164 | 2021-06-11T22:59:07 | 2021-06-11T22:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import json
from pathlib import Path
FILE_NAME_GENRE_TRANSLATE = str(Path(__file__).parent.resolve() / 'data' / 'genre_translate.json')
def load(file_name: str = FILE_NAME_GENRE_TRANSLATE) -> dict:
try:
genre_translate = json.load(
open(file_name, encoding='utf-8')
)
    except Exception:
genre_translate = dict()
return genre_translate
if __name__ == '__main__':
genre_translate = load()
print(f'Genre_translate ({len(genre_translate)}): {genre_translate}')
print()
# Print all undefined genres without '{' / '}' and indent
genre_null_translate = {
k: v
for k, v in genre_translate.items()
if v is None
}
print(f'Genre null translate ({len(genre_null_translate)}):')
json_text = json.dumps(genre_null_translate, ensure_ascii=False, indent=4)
lines = json_text.splitlines()[1:-1]
for i, line in enumerate(lines):
print(line.strip())
if i > 0 and i % 40 == 0:
print()
| ["[email protected]"] | |

33f8f508eb2e8185c685dd2b5b388d1f9e079a0a | 53818da6c5a172fe8241465dcbbd34fba382820d | /PythonProgram/chapter_05/5-5.py | ab0c75778706842c11cba10bdc52e8e4bede383c | [] | no_license | Lethons/PythonExercises | f4fec3bcbfea4c1d8bc29dfed5b770b6241ad93b | 81d588ffecf543ec9de8c1209c7b26c3d6a423b3 | refs/heads/master | 2021-04-15T11:36:08.991028 | 2018-07-07T09:20:40 | 2018-07-07T09:20:40 | 126,686,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | alien_color = 'green'
if alien_color == 'green':
print("You get 5 points.")
elif alien_color == 'yellow':
print("You get 10 points.")
else:
print("You get 15 points.")
| ["[email protected]"] | |

8f26b73981652e2db2c64f29c1ac6de561ee78d0 | 3b786d3854e830a4b46ee55851ca186becbfa650 | /SystemTesting/pylib/nsx/vsm/virtual_wire/schema/virtual_wire_schema.py | 454889206cae5de5297c0f26a74ec9af814fad7c | [] | no_license | Cloudxtreme/MyProject | d81f8d38684333c22084b88141b712c78b140777 | 5b55817c050b637e2747084290f6206d2e622938 | refs/heads/master | 2021-05-31T10:26:42.951835 | 2015-12-10T09:57:04 | 2015-12-10T09:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,209 | py | import base_schema
from type_schema import TypeSchema
from vds_context_with_backing_schema import VdsContextWithBackingSchema
class VirtualWireSchema(base_schema.BaseSchema):
"""This schema is not used for configuration
This will be filled in during GET calls
"""
_schema_name = "virtualWire"
def __init__(self, py_dict=None):
""" Constructor to create VirtualWireSchema object
@param py_dict : python dictionary to construct this object
"""
super(VirtualWireSchema, self).__init__()
self.set_data_type('xml')
self.objectId = None
self.objectTypeName = None
self.vsmUuid = None
self.revision = None
self.type = TypeSchema()
self.name = None
self.description = None
self.extendedAttributes = None
self.clientHandle = None
self.tenantId = None
self.vdnScopeId = None
self.vdsContextWithBacking = VdsContextWithBackingSchema()
self.vdnId = None
self.multicastAddr = None
self.controlPlaneMode = None
self.isUniversal = None
self.vsmUuid = None
self.universalRevision = None
self.ctrlLsUuid = None
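# Hedged sketch: typical use is to let a GET response populate this object,
# e.g. vw = VirtualWireSchema(py_dict=response_dict); the exact mapping rules
# live in the (not shown) base_schema.BaseSchema implementation.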
| ["[email protected]"] | |

157173d15f010222521b1518dd61e2ed944ee316 | 0d7ad3520c8e039cc47dff664c7e661a6df933b4 | /blog/migrations/0001_initial.py | 7cc537495cd902d1123df02690d7c224ca3ec31d | [] | no_license | skyjan0428/MyBlog | c61f710802bc3b599b9449ccb60ae00ed171e4ee | a9a6e209087b557530600d03168cf77e6de53feb | refs/heads/master | 2022-12-10T01:47:06.397172 | 2021-01-06T18:29:41 | 2021-01-06T18:29:41 | 197,189,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,965 | py | # Generated by Django 2.2.3 on 2019-07-26 10:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=10)),
('email', models.CharField(max_length=30)),
('password', models.CharField(max_length=100)),
('description', models.TextField(null=True)),
],
),
migrations.CreateModel(
name='Token',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(auto_now=True, verbose_name='Datetime')),
('token', models.CharField(max_length=255, null=True, unique=True)),
('user', models.ForeignKey(default='', on_delete=django.db.models.deletion.PROTECT, related_name='token_user', to='blog.User')),
],
),
migrations.CreateModel(
name='Relationship',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(auto_now=True, verbose_name='Datetime')),
('value', models.IntegerField()),
('user1', models.ForeignKey(default='', on_delete=django.db.models.deletion.PROTECT, related_name='self', to='blog.User')),
('user2', models.ForeignKey(default='', on_delete=django.db.models.deletion.PROTECT, related_name='other', to='blog.User')),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(auto_now=True, verbose_name='Datetime')),
('content', models.TextField()),
('attach', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='attach_to_post', to='blog.Post')),
('user', models.ForeignKey(default='', on_delete=django.db.models.deletion.PROTECT, related_name='post_user_id', to='blog.User')),
],
),
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photo', models.ImageField(upload_to='photo/')),
('is_sticker', models.BooleanField(default=False)),
('date', models.DateTimeField(auto_now=True, verbose_name='Datetime')),
('post', models.ForeignKey(default='', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='photo_post_id', to='blog.Post')),
('user', models.ForeignKey(default='', on_delete=django.db.models.deletion.PROTECT, related_name='photo_user_id', to='blog.User')),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(auto_now=True, verbose_name='Datetime')),
('text', models.TextField(null=True)),
('reciever', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='reciever', to='blog.User')),
('sender', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='sender', to='blog.User')),
],
),
migrations.CreateModel(
name='LikePost',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('post', models.ForeignKey(default='', on_delete=django.db.models.deletion.PROTECT, related_name='like_post_id', to='blog.Post')),
('user', models.ForeignKey(default='', on_delete=django.db.models.deletion.PROTECT, related_name='like_user_id', to='blog.User')),
],
),
migrations.CreateModel(
name='Client',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(auto_now=True, verbose_name='Datetime')),
('channel_name', models.CharField(max_length=100)),
('user_id', models.ForeignKey(default='', on_delete=django.db.models.deletion.PROTECT, related_name='client_user_id', to='blog.User')),
],
),
]
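# Hedged note: Django applies an auto-generated migration such as this with the
# standard management command, e.g.:
#
#     python manage.py migrate blog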
| ["[email protected]"] | |

d061fd184220c552f6982d0420258b026e555e2e | da053e9a63434f7b7a53faef07f6d7d2800214e4 | /zerver/tests/test_webhooks_common.py | 8af83cb0057edd8def96cadface3c285c442a539 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | BackGroundC/zulip | 3f8ecd4ca98f05c4bcb9be0034140404d7f187f1 | 2bd6d275a70a7683986edc72fa8585726e976604 | refs/heads/master | 2020-05-30T21:19:36.799304 | 2019-06-02T22:00:16 | 2019-06-02T22:00:16 | 189,969,512 | 4 | 0 | Apache-2.0 | 2019-06-03T08:53:52 | 2019-06-03T08:53:51 | null | UTF-8 | Python | false | false | 5,759 | py | # -*- coding: utf-8 -*-
from django.http import HttpRequest
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.exceptions import InvalidJSONError, JsonableError
from zerver.lib.test_classes import ZulipTestCase, WebhookTestCase
from zerver.lib.webhooks.common import \
validate_extract_webhook_http_header, \
MISSING_EVENT_HEADER_MESSAGE, MissingHTTPEventHeader, \
INVALID_JSON_MESSAGE
from zerver.models import get_user, get_realm, UserProfile
from zerver.lib.users import get_api_key
from zerver.lib.send_email import FromAddress
from zerver.lib.test_helpers import HostRequestMock
class WebhooksCommonTestCase(ZulipTestCase):
def test_webhook_http_header_header_exists(self) -> None:
webhook_bot = get_user('[email protected]', get_realm('zulip'))
request = HostRequestMock()
request.META['HTTP_X_CUSTOM_HEADER'] = 'custom_value'
request.user = webhook_bot
header_value = validate_extract_webhook_http_header(request, 'X_CUSTOM_HEADER',
'test_webhook')
self.assertEqual(header_value, 'custom_value')
def test_webhook_http_header_header_does_not_exist(self) -> None:
webhook_bot = get_user('[email protected]', get_realm('zulip'))
webhook_bot.last_reminder = None
notification_bot = self.notification_bot()
request = HostRequestMock()
request.user = webhook_bot
request.path = 'some/random/path'
exception_msg = "Missing the HTTP event header 'X_CUSTOM_HEADER'"
with self.assertRaisesRegex(MissingHTTPEventHeader, exception_msg):
validate_extract_webhook_http_header(request, 'X_CUSTOM_HEADER',
'test_webhook')
msg = self.get_last_message()
expected_message = MISSING_EVENT_HEADER_MESSAGE.format(
bot_name=webhook_bot.full_name,
request_path=request.path,
header_name='X_CUSTOM_HEADER',
integration_name='test_webhook',
support_email=FromAddress.SUPPORT
).rstrip()
self.assertEqual(msg.sender.email, notification_bot.email)
self.assertEqual(msg.content, expected_message)
def test_notify_bot_owner_on_invalid_json(self) -> None:
@api_key_only_webhook_view('ClientName', notify_bot_owner_on_invalid_json=False)
def my_webhook_no_notify(request: HttpRequest, user_profile: UserProfile) -> None:
raise InvalidJSONError("Malformed JSON")
@api_key_only_webhook_view('ClientName', notify_bot_owner_on_invalid_json=True)
def my_webhook_notify(request: HttpRequest, user_profile: UserProfile) -> None:
raise InvalidJSONError("Malformed JSON")
webhook_bot_email = '[email protected]'
webhook_bot_realm = get_realm('zulip')
webhook_bot = get_user(webhook_bot_email, webhook_bot_realm)
webhook_bot_api_key = get_api_key(webhook_bot)
request = HostRequestMock()
request.POST['api_key'] = webhook_bot_api_key
request.host = "zulip.testserver"
expected_msg = INVALID_JSON_MESSAGE.format(webhook_name='ClientName')
last_message_id = self.get_last_message().id
with self.assertRaisesRegex(JsonableError, "Malformed JSON"):
my_webhook_no_notify(request) # type: ignore # mypy doesn't seem to apply the decorator
# First verify that without the setting, it doesn't send a PM to bot owner.
msg = self.get_last_message()
self.assertEqual(msg.id, last_message_id)
self.assertNotEqual(msg.content, expected_msg.strip())
# Then verify that with the setting, it does send such a message.
with self.assertRaisesRegex(JsonableError, "Malformed JSON"):
my_webhook_notify(request) # type: ignore # mypy doesn't seem to apply the decorator
msg = self.get_last_message()
self.assertNotEqual(msg.id, last_message_id)
self.assertEqual(msg.sender.email, self.notification_bot().email)
self.assertEqual(msg.content, expected_msg.strip())
class MissingEventHeaderTestCase(WebhookTestCase):
STREAM_NAME = 'groove'
URL_TEMPLATE = '/api/v1/external/groove?stream={stream}&api_key={api_key}'
# This tests the validate_extract_webhook_http_header function with
# an actual webhook, instead of just making a mock
def test_missing_event_header(self) -> None:
self.subscribe(self.test_user, self.STREAM_NAME)
result = self.client_post(self.url, self.get_body('ticket_state_changed'),
content_type="application/x-www-form-urlencoded")
self.assert_json_error(result, "Missing the HTTP event header 'X_GROOVE_EVENT'")
webhook_bot = get_user('[email protected]', get_realm('zulip'))
webhook_bot.last_reminder = None
notification_bot = self.notification_bot()
msg = self.get_last_message()
expected_message = MISSING_EVENT_HEADER_MESSAGE.format(
bot_name=webhook_bot.full_name,
request_path='/api/v1/external/groove',
header_name='X_GROOVE_EVENT',
integration_name='Groove',
support_email=FromAddress.SUPPORT
).rstrip()
if msg.sender.email != notification_bot.email: # nocoverage
# This block seems to fire occasionally; debug output:
print(msg)
print(msg.content)
self.assertEqual(msg.sender.email, notification_bot.email)
self.assertEqual(msg.content, expected_message)
def get_body(self, fixture_name: str) -> str:
return self.webhook_fixture_data("groove", fixture_name, file_type="json")
| ["[email protected]"] | |

13a149b3ba1a58fb0f39d1ad41efb1703271525b | a1a43879a2da109d9fe8d9a75f4fda73f0d7166b | /api/tests/cross_entropy.py | ba5e4f8834015d4ed91cf6316b10f03edd472423 | [] | no_license | PaddlePaddle/benchmark | a3ed62841598d079529c7440367385fc883835aa | f0e0a303e9af29abb2e86e8918c102b152a37883 | refs/heads/master | 2023-09-01T13:11:09.892877 | 2023-08-21T09:32:49 | 2023-08-21T09:32:49 | 173,032,424 | 78 | 352 | null | 2023-09-14T05:13:08 | 2019-02-28T03:14:16 | Python | UTF-8 | Python | false | false | 5,138 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common_import import *
@benchmark_registry.register("cross_entropy")
class CrossEntropyConfig(APIConfig):
def __init__(self):
super(CrossEntropyConfig, self).__init__("cross_entropy")
def init_from_json(self, filename, config_id=0, unknown_dim=16):
super(CrossEntropyConfig, self).init_from_json(filename, config_id,
unknown_dim)
input_rank = len(self.input_shape)
if not hasattr(self, "axis") or self.axis == input_rank - 1:
self.axis = -1
self.num_classes = self.input_shape[self.axis]
self.feed_spec = [
{
"range": [0, 1]
}, # input
{
"range": [0, self.num_classes]
} # label
]
if self.label_dtype in ['float32', 'float64'] or self.axis != -1:
self.run_tf = False
if self.soft_label or self.axis != -1:
print(
"Warning:\n"
" 1. PyTorch does not have soft_label param, it only support hard label.\n"
)
self.run_torch = False
else:
if input_rank != 2:
self.input_shape = [
np.prod(self.input_shape[0:input_rank - 1]),
self.input_shape[-1]
]
label_rank = len(self.label_shape)
if label_rank != 2:
self.label_shape = [
np.prod(self.label_shape[0:label_rank - 1]), 1
]
def to_pytorch(self):
torch_config = super(CrossEntropyConfig, self).to_pytorch()
if self.label_shape[-1] == 1:
label_rank = len(self.label_shape)
torch_config.label_shape = [
np.prod(self.label_shape[0:label_rank - 1])
]
return torch_config
def to_tensorflow(self):
tf_config = super(CrossEntropyConfig, self).to_tensorflow()
label_rank = len(tf_config.label_shape)
if tf_config.label_shape[label_rank - 1] == 1:
tf_config.label_shape = tf_config.label_shape[0:label_rank - 1]
return tf_config
@benchmark_registry.register("cross_entropy")
class PaddleCrossEntropy(PaddleOpBenchmarkBase):
def build_graph(self, config):
input = self.variable(
name="input", shape=config.input_shape, dtype=config.input_dtype)
label = self.variable(
name="label",
shape=config.label_shape,
dtype=config.label_dtype,
stop_gradient=True)
result = paddle.nn.functional.cross_entropy(
input=input,
label=label,
weight=None,
ignore_index=config.ignore_index,
soft_label=config.soft_label,
use_softmax=True,
axis=config.axis,
reduction="none")
self.feed_list = [input, label]
self.fetch_list = [result]
if config.backward:
self.append_gradients(result, [input])
@benchmark_registry.register("cross_entropy")
class TorchCrossEntropy(PytorchOpBenchmarkBase):
def build_graph(self, config):
input = self.variable(
name="input", shape=config.input_shape, dtype=config.input_dtype)
label = self.variable(
name='label',
shape=config.label_shape,
dtype=config.label_dtype,
stop_gradient=True)
result = torch.nn.functional.cross_entropy(
input=input,
target=label,
weight=None,
ignore_index=config.ignore_index,
reduction="none")
self.feed_list = [input, label]
self.fetch_list = [result]
if config.backward:
self.append_gradients(result, [input])
@benchmark_registry.register("cross_entropy")
class TFCrossEntropy(TensorflowOpBenchmarkBase):
def build_graph(self, config):
input = self.variable(
name='input', shape=config.input_shape, dtype=config.input_dtype)
label = self.variable(
name='label', shape=config.label_shape, dtype=config.label_dtype)
onehot_label = tf.one_hot(indices=label, depth=config.num_classes)
result = tf.compat.v1.losses.softmax_cross_entropy(
            onehot_labels=onehot_label, logits=input, reduction='none')
self.feed_list = [input, label]
self.fetch_list = [result]
if config.backward:
self.append_gradients(result, [input])
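# Note: for the hard-label configurations exercised by all three frameworks,
# each graph computes the same per-sample quantity with no reduction:
#     loss_i = -log(softmax(input_i)[label_i])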
| ["[email protected]"] | |

91f176c7a6e6def0b43003d56c17e90b754666b2 | b59372692c912ba17ec2e6812983663a6deccdaf | /.history/bsServer/views_20200503165259.py | 143f159593a5a9133cc1f224d21ef41dff7767ae | [] | no_license | nanjigirl/bs-server-project | 2d7c240ddf21983ed0439829a7995bde94082467 | 7863aed279b233d359c540c71fdd08ce8633976b | refs/heads/master | 2022-08-02T17:33:48.201967 | 2020-05-25T15:18:34 | 2020-05-25T15:18:34 | 261,204,713 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | import json
from django.http import HttpResponse,JsonResponse
from .models import *
# Create your views here.
def index(request):
    try:
        booklist = list(BookInfo.objects.values('id'))
    except Exception:
        booklist = []
    return JsonResponse({"status": "200", "list": booklist, "msg": "query articles success."})
| ["[email protected]"] | |

d879b881b4190309369829a872ce622950608251 | a9c43c4b1a640841f1c9b13b63e39422c4fc47c2 | /test/tests/import_target.py | e7cf80e2bef7875d664ecdefab6405a10e10eaac | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] | permissive | lovejavaee/pyston | be5bd8393462be17259bcc40bf8f745e157d9793 | e8f0d9667c35db043add2f07a0ea7d23e290dd80 | refs/heads/master | 2023-05-01T17:42:35.616499 | 2015-04-07T08:10:44 | 2015-04-07T08:10:44 | 33,535,295 | 0 | 0 | NOASSERTION | 2023-04-14T02:16:28 | 2015-04-07T09:53:36 | Python | UTF-8 | Python | false | false | 260 | py | print "starting import of", __name__
import import_nested_target
x = 1
def foo():
print "foo()"
# def k():
# print x
# foo()
class C(object):
pass
_x = 1
z = 2
__all__ = ['x', u'z']
def letMeCallThatForYou(f, *args):
return f(*args)
| ["[email protected]"] | |

6722ed3d726a927b17bcea9c41278855f5335c1a | 2455062787d67535da8be051ac5e361a097cf66f | /Producers/BSUB/TrigProd_amumu_a5_dR5/trigger_amumu_producer_cfg_TrigProd_amumu_a5_dR5_655.py | e4071b59278c64f6a03c66fc4e7ed1dde4ff1dfa | [] | no_license | kmtos/BBA-RecoLevel | 6e153c08d5ef579a42800f6c11995ee55eb54846 | 367adaa745fbdb43e875e5ce837c613d288738ab | refs/heads/master | 2021-01-10T08:33:45.509687 | 2015-12-04T09:20:14 | 2015-12-04T09:20:14 | 43,355,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,360 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("PAT")
#process.load("BBA/Analyzer/bbaanalyzer_cfi")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.load('Configuration.EventContent.EventContent_cff')
process.load("Configuration.Geometry.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("PhysicsTools.PatAlgos.producersLayer1.patCandidates_cff")
process.load("PhysicsTools.PatAlgos.selectionLayer1.selectedPatCandidates_cff")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'MCRUN2_71_V1::All', '')
process.load("Configuration.StandardSequences.MagneticField_cff")
####################
# Message Logger
####################
process.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32(100)
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
## switch to uncheduled mode
process.options.allowUnscheduled = cms.untracked.bool(True)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(500)
)
####################
# Input File List
####################
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('root://eoscms//eos/cms/store/user/ktos/RECO_Step3_amumu_a5/RECO_Step3_amumu_a5_655.root'),
secondaryFileNames = cms.untracked.vstring()
)
############################################################
# Defining matching in DeltaR, sorting by best DeltaR
############################################################
process.mOniaTrigMatch = cms.EDProducer("PATTriggerMatcherDRLessByR",
src = cms.InputTag( 'slimmedMuons' ),
matched = cms.InputTag( 'patTrigger' ), # selections of trigger objects
matchedCuts = cms.string( 'type( "TriggerMuon" ) && path( "HLT_Mu16_TkMu0_dEta18_Onia*")' ), # input does not yet have the 'saveTags' parameter in HLT
maxDPtRel = cms.double( 0.5 ), # no effect here
maxDeltaR = cms.double( 0.3 ), #### selection of matches
maxDeltaEta = cms.double( 0.2 ), # no effect here
resolveAmbiguities = cms.bool( True ),# definition of matcher output
resolveByMatchQuality = cms.bool( True )# definition of matcher output
)
# talk to output module
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string("file:RECO_Step3_amumu_a5_TrigProd_655.root"),
outputCommands = process.MINIAODSIMEventContent.outputCommands
)
process.out.outputCommands += [ 'drop *_*_*_*',
'keep *_*slimmed*_*_*',
'keep *_pfTausEI_*_*',
'keep *_hpsPFTauProducer_*_*',
'keep *_hltTriggerSummaryAOD_*_*',
'keep *_TriggerResults_*_HLT',
'keep *_patTrigger*_*_*',
'keep *_prunedGenParticles_*_*',
'keep *_mOniaTrigMatch_*_*'
]
################################################################################
# Running the matching and setting the the trigger on
################################################################################
from PhysicsTools.PatAlgos.tools.trigTools import *
switchOnTrigger( process ) # This is optional and can be omitted.
switchOnTriggerMatching( process, triggerMatchers = [ 'mOniaTrigMatch'
])
process.outpath = cms.EndPath(process.out)
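# Hedged note: a CMSSW configuration like this is normally executed with the
# framework driver, e.g.:
#
#     cmsRun trigger_amumu_producer_cfg_TrigProd_amumu_a5_dR5_655.py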
| ["[email protected]"] | |

adc1e2d2a5ead83ef17625fb14851d3ba023884d | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnsocorro.py | 7c7cb74edde7c1de3cb412fbedaebf72f23f17be | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 45 | py | ii = [('FitzRNS3.py', 2), ('MackCNH2.py', 3)] | [
"[email protected]"
] | |
ea3b22f393e04ed27a6ffedc6c3aaa95258b18ab | 3bda645720e87bba6c8f960bbc8750dcea974cb0 | /data/phys/fill_6170/xangle_150/DoubleEG/input_files.py | 985d3dcf0e70b9d93931efead2318476c82c0a8e | [] | no_license | jan-kaspar/analysis_ctpps_alignment_2017_preTS2 | 0347b8f4f62cf6b82217935088ffb2250de28566 | 0920f99080a295c4e942aa53a2fe6697cdff0791 | refs/heads/master | 2021-05-10T16:56:47.887963 | 2018-01-31T09:28:18 | 2018-01-31T09:28:18 | 118,592,149 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | import FWCore.ParameterSet.Config as cms
input_files = cms.vstring(
"root://eostotem.cern.ch//eos/totem/data/ctpps/reconstruction/2017/preTS2_alignment_data/version1/fill6170_xangle150_DoubleEG.root"
)
| ["[email protected]"] | |

a3b5b67bccd916f09f37fb18587f6d9f64adf8da | b2fef77e77f77b6cfd83da4ec2f89cbe73330844 | /monai/networks/nets/flexible_unet.py | ac2124b5f943c51ea9b50e1cbcb8896e4589bbaf | [
"Apache-2.0"
] | permissive | Project-MONAI/MONAI | 8ef2593cc5fd1cd16e13464f927fe563fe3f5bac | e48c3e2c741fa3fc705c4425d17ac4a5afac6c47 | refs/heads/dev | 2023-09-02T00:21:04.532596 | 2023-09-01T06:46:45 | 2023-09-01T06:46:45 | 214,485,001 | 4,805 | 996 | Apache-2.0 | 2023-09-14T15:19:30 | 2019-10-11T16:41:38 | Python | UTF-8 | Python | false | false | 14,147 | py | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
from collections.abc import Sequence
from pydoc import locate
from typing import Any
import torch
from torch import nn
from monai.networks.blocks import BaseEncoder, UpSample
from monai.networks.layers.factories import Conv
from monai.networks.layers.utils import get_act_layer
from monai.networks.nets import EfficientNetEncoder
from monai.networks.nets.basic_unet import UpCat
from monai.utils import InterpolateMode, optional_import
__all__ = ["FlexibleUNet", "FlexUNet", "FLEXUNET_BACKBONE", "FlexUNetEncoderRegister"]
class FlexUNetEncoderRegister:
"""
    A registry for registering backbones for the flexible unet. All backbones can be found
    in register_dict. Note that each output of the backbone must be 2x downsampled in the
    spatial dimension relative to the previous output. For example, given a 512x256 2D image
    and a backbone with 4 outputs, the spatial sizes of the encoder outputs should be
    256x128, 128x64, 64x32 and 32x16.
"""
def __init__(self):
self.register_dict = {}
def register_class(self, name: type[Any] | str):
"""
Register a given class to the encoder dict. Please notice that input class must be a
subclass of BaseEncoder.
"""
if isinstance(name, str):
tmp_name, has_built_in = optional_import("monai.networks.nets", name=f"{name}") # search built-in
if not has_built_in:
tmp_name = locate(f"{name}") # search dotted path
name = tmp_name
if not isinstance(name, type):
raise ValueError(f"Cannot find {name} class.")
if not issubclass(name, BaseEncoder):
warnings.warn(
f"{name} would better be derived from monai.networks.blocks.BaseEncoder "
"or implement all interfaces specified by it."
)
name_string_list = name.get_encoder_names()
feature_number_list = name.num_outputs()
feature_channel_list = name.num_channels_per_output()
parameter_list = name.get_encoder_parameters()
assert len(name_string_list) == len(feature_number_list) == len(feature_channel_list) == len(parameter_list)
for cnt, name_string in enumerate(name_string_list):
cur_dict = {
"type": name,
"feature_number": feature_number_list[cnt],
"feature_channel": feature_channel_list[cnt],
"parameter": parameter_list[cnt],
}
self.register_dict[name_string] = cur_dict
FLEXUNET_BACKBONE = FlexUNetEncoderRegister()
FLEXUNET_BACKBONE.register_class(EfficientNetEncoder)
class UNetDecoder(nn.Module):
"""
UNet Decoder.
This class refers to `segmentation_models.pytorch
<https://github.com/qubvel/segmentation_models.pytorch>`_.
Args:
spatial_dims: number of spatial dimensions.
encoder_channels: number of output channels for all feature maps in encoder.
`len(encoder_channels)` should be no less than 2.
decoder_channels: number of output channels for all feature maps in decoder.
`len(decoder_channels)` should equal to `len(encoder_channels) - 1`.
act: activation type and arguments.
norm: feature normalization type and arguments.
dropout: dropout ratio.
bias: whether to have a bias term in convolution blocks in this decoder.
upsample: upsampling mode, available options are
``"deconv"``, ``"pixelshuffle"``, ``"nontrainable"``.
pre_conv: a conv block applied before upsampling.
Only used in the "nontrainable" or "pixelshuffle" mode.
interp_mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``}
Only used in the "nontrainable" mode.
align_corners: set the align_corners parameter for upsample. Defaults to True.
Only used in the "nontrainable" mode.
is_pad: whether to pad upsampling features to fit the encoder spatial dims.
"""
def __init__(
self,
spatial_dims: int,
encoder_channels: Sequence[int],
decoder_channels: Sequence[int],
act: str | tuple,
norm: str | tuple,
dropout: float | tuple,
bias: bool,
upsample: str,
pre_conv: str | None,
interp_mode: str,
align_corners: bool | None,
is_pad: bool,
):
super().__init__()
if len(encoder_channels) < 2:
raise ValueError("the length of `encoder_channels` should be no less than 2.")
if len(decoder_channels) != len(encoder_channels) - 1:
raise ValueError("`len(decoder_channels)` should equal to `len(encoder_channels) - 1`.")
in_channels = [encoder_channels[-1]] + list(decoder_channels[:-1])
skip_channels = list(encoder_channels[1:-1][::-1]) + [0]
halves = [True] * (len(skip_channels) - 1)
halves.append(False)
blocks = []
for in_chn, skip_chn, out_chn, halve in zip(in_channels, skip_channels, decoder_channels, halves):
blocks.append(
UpCat(
spatial_dims=spatial_dims,
in_chns=in_chn,
cat_chns=skip_chn,
out_chns=out_chn,
act=act,
norm=norm,
dropout=dropout,
bias=bias,
upsample=upsample,
pre_conv=pre_conv,
interp_mode=interp_mode,
align_corners=align_corners,
halves=halve,
is_pad=is_pad,
)
)
self.blocks = nn.ModuleList(blocks)
def forward(self, features: list[torch.Tensor], skip_connect: int = 4):
skips = features[:-1][::-1]
features = features[1:][::-1]
x = features[0]
for i, block in enumerate(self.blocks):
if i < skip_connect:
skip = skips[i]
else:
skip = None
x = block(x, skip)
return x
class SegmentationHead(nn.Sequential):
"""
Segmentation head.
This class refers to `segmentation_models.pytorch
<https://github.com/qubvel/segmentation_models.pytorch>`_.
Args:
spatial_dims: number of spatial dimensions.
in_channels: number of input channels for the block.
out_channels: number of output channels for the block.
kernel_size: kernel size for the conv layer.
act: activation type and arguments.
scale_factor: multiplier for spatial size. Has to match input size if it is a tuple.
"""
def __init__(
self,
spatial_dims: int,
in_channels: int,
out_channels: int,
kernel_size: int = 3,
act: tuple | str | None = None,
scale_factor: float = 1.0,
):
conv_layer = Conv[Conv.CONV, spatial_dims](
in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=kernel_size // 2
)
up_layer: nn.Module = nn.Identity()
if scale_factor > 1.0:
up_layer = UpSample(
spatial_dims=spatial_dims,
scale_factor=scale_factor,
mode="nontrainable",
pre_conv=None,
interp_mode=InterpolateMode.LINEAR,
)
if act is not None:
act_layer = get_act_layer(act)
else:
act_layer = nn.Identity()
super().__init__(conv_layer, up_layer, act_layer)
class FlexibleUNet(nn.Module):
"""
A flexible implementation of UNet-like encoder-decoder architecture.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
backbone: str,
pretrained: bool = False,
decoder_channels: tuple = (256, 128, 64, 32, 16),
spatial_dims: int = 2,
norm: str | tuple = ("batch", {"eps": 1e-3, "momentum": 0.1}),
act: str | tuple = ("relu", {"inplace": True}),
dropout: float | tuple = 0.0,
decoder_bias: bool = False,
upsample: str = "nontrainable",
pre_conv: str = "default",
interp_mode: str = "nearest",
is_pad: bool = True,
) -> None:
"""
        A flexible implementation of UNet, in which the backbone/encoder can be replaced with
any efficient network. Currently the input must have a 2 or 3 spatial dimension
and the spatial size of each dimension must be a multiple of 32 if is_pad parameter
is False.
        Note that each output of the backbone must be 2x downsampled in the spatial dimension
        relative to the previous output. For example, given a 512x256 2D image and a backbone
        with 4 outputs, the spatial sizes of the encoder outputs should be 256x128, 128x64,
        64x32 and 32x16.
Args:
in_channels: number of input channels.
out_channels: number of output channels.
backbone: name of backbones to initialize, only support efficientnet right now,
can be from [efficientnet-b0,..., efficientnet-b8, efficientnet-l2].
pretrained: whether to initialize pretrained ImageNet weights, only available
for spatial_dims=2 and batch norm is used, default to False.
decoder_channels: number of output channels for all feature maps in decoder.
`len(decoder_channels)` should equal to `len(encoder_channels) - 1`,default
to (256, 128, 64, 32, 16).
spatial_dims: number of spatial dimensions, default to 2.
norm: normalization type and arguments, default to ("batch", {"eps": 1e-3,
"momentum": 0.1}).
act: activation type and arguments, default to ("relu", {"inplace": True}).
dropout: dropout ratio, default to 0.0.
decoder_bias: whether to have a bias term in decoder's convolution blocks.
upsample: upsampling mode, available options are``"deconv"``, ``"pixelshuffle"``,
``"nontrainable"``.
pre_conv:a conv block applied before upsampling. Only used in the "nontrainable" or
"pixelshuffle" mode, default to `default`.
interp_mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``}
Only used in the "nontrainable" mode.
is_pad: whether to pad upsampling features to fit features from encoder. Default to True.
If this parameter is set to "True", the spatial dim of network input can be arbitrary
size, which is not supported by TensorRT. Otherwise, it must be a multiple of 32.
"""
super().__init__()
if backbone not in FLEXUNET_BACKBONE.register_dict:
raise ValueError(
f"invalid model_name {backbone} found, must be one of {FLEXUNET_BACKBONE.register_dict.keys()}."
)
if spatial_dims not in (2, 3):
raise ValueError("spatial_dims can only be 2 or 3.")
encoder = FLEXUNET_BACKBONE.register_dict[backbone]
self.backbone = backbone
self.spatial_dims = spatial_dims
encoder_parameters = encoder["parameter"]
if not (
("spatial_dims" in encoder_parameters)
and ("in_channels" in encoder_parameters)
and ("pretrained" in encoder_parameters)
):
raise ValueError("The backbone init method must have spatial_dims, in_channels and pretrained parameters.")
encoder_feature_num = encoder["feature_number"]
if encoder_feature_num > 5:
            raise ValueError("Flexible unet can accept no more than 5 encoder feature maps.")
decoder_channels = decoder_channels[:encoder_feature_num]
self.skip_connect = encoder_feature_num - 1
encoder_parameters.update({"spatial_dims": spatial_dims, "in_channels": in_channels, "pretrained": pretrained})
encoder_channels = tuple([in_channels] + list(encoder["feature_channel"]))
encoder_type = encoder["type"]
self.encoder = encoder_type(**encoder_parameters)
self.decoder = UNetDecoder(
spatial_dims=spatial_dims,
encoder_channels=encoder_channels,
decoder_channels=decoder_channels,
act=act,
norm=norm,
dropout=dropout,
bias=decoder_bias,
upsample=upsample,
interp_mode=interp_mode,
pre_conv=pre_conv,
align_corners=None,
is_pad=is_pad,
)
self.segmentation_head = SegmentationHead(
spatial_dims=spatial_dims,
in_channels=decoder_channels[-1],
out_channels=out_channels,
kernel_size=3,
act=None,
)
def forward(self, inputs: torch.Tensor):
"""
Do a typical encoder-decoder-header inference.
Args:
            inputs: input should have spatially N dimensions ``(Batch, in_channels, dim_0[, dim_1, ..., dim_N])``,
                N is defined by `spatial_dims`.
Returns:
A torch Tensor of "raw" predictions in shape ``(Batch, out_channels, dim_0[, dim_1, ..., dim_N])``.
"""
x = inputs
enc_out = self.encoder(x)
decoder_out = self.decoder(enc_out, self.skip_connect)
x_seg = self.segmentation_head(decoder_out)
return x_seg
FlexUNet = FlexibleUNet
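# Illustrative usage sketch (not part of the original module; the backbone name
# and tensor shape below are assumptions for demonstration):
#
#   net = FlexibleUNet(in_channels=3, out_channels=2, backbone="efficientnet-b0")
#   x = torch.randn(1, 3, 64, 64)   # (Batch, in_channels, H, W), multiples of 32
#   y = net(x)                      # -> (1, 2, 64, 64) raw predictions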
| [
"[email protected]"
] | |
abacba136794a358da8c60b230c31e0840e0247c | 105212e4d2d2175d5105e05552e29b300375e039 | /TensorFlow_tutorials/TensorFlow_train_own_data/tensorflow_train_owndata_detector/xml_to_csv.py | 6919392daf578857478fc763bed6d969d0f08fc6 | [] | no_license | Asher-1/AI | 84f0c42651c0b07e6b7e41ebb354258db64dd0d1 | a70f63ebab3163f299f7f9d860a98695c0a3f7d5 | refs/heads/master | 2022-11-26T07:24:37.910301 | 2019-05-30T13:04:31 | 2019-05-30T13:04:31 | 160,031,310 | 7 | 1 | null | 2022-11-21T22:02:53 | 2018-12-02T09:19:03 | Jupyter Notebook | UTF-8 | Python | false | false | 1,775 | py | import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
import random
train_csv_path = 'my_data/toy_train_labels.csv'
val_csv_path = 'my_data/toy_val_labels.csv'
# train/validation split ratio (fraction of the examples used for training)
train_val_rate = 0.7
def xml_to_csv(examples_list):
xml_list = []
for xml_file in examples_list:
tree = ET.parse(xml_file)
root = tree.getroot()
for member in root.findall('object'):
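            # Assumed Pascal VOC layout: <size> holds (width, height, depth) and
            # member[4] is the <bndbox> element ordered (xmin, ymin, xmax, ymax).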
value = (root.find('filename').text,
int(root.find('size')[0].text),
int(root.find('size')[1].text),
member[0].text,
int(member[4][0].text),
int(member[4][1].text),
int(member[4][2].text),
int(member[4][3].text)
)
xml_list.append(value)
column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
xml_df = pd.DataFrame(xml_list, columns=column_name)
return xml_df
def main():
image_path = os.path.join(os.getcwd(), 'my_data', 'annotations')
examples_list = glob.glob(image_path + '/*.xml')
random.seed(42)
random.shuffle(examples_list)
num_examples = len(examples_list)
num_train = int(train_val_rate * num_examples)
train_examples_list = examples_list[:num_train]
val_examples_list = examples_list[num_train:]
    # convert the training-set annotations
xml_df = xml_to_csv(train_examples_list)
xml_df.to_csv(train_csv_path, index=None)
print('Successfully converted xml to %s.' % train_csv_path)
    # convert the validation-set annotations
xml_df = xml_to_csv(val_examples_list)
xml_df.to_csv(val_csv_path, index=None)
print('Successfully converted xml to %s.' % val_csv_path)
if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
bda86386777469912ab81722f4c03c92ed25e5d0 | 16cc8f796eac98e9a475da11e4bc0aa26317e894 | /AOJ/ITP1_9_A.py | d688dbd021d385c3894b020d918e26b2bc588028 | [] | no_license | amaguri0408/AtCoder-python | 2f3fcdd82c52f5ddee88627fb99466c9e003164f | ab8ec04b8e434939e9f7035f3a280b30c0682427 | refs/heads/master | 2022-10-30T00:07:03.560011 | 2020-06-13T10:41:36 | 2020-06-13T10:41:36 | 271,954,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | W = input()
T = []
while True:
a = input().split()
if a == ["END_OF_TEXT"]:
break
else:
for i in a:
T.append(i)
cnt = 0
for word in T:
    if word.lower() == W:
cnt += 1
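# Example (hypothetical run): with W = "computer", the loop above collects every
# whitespace-separated token until END_OF_TEXT, and cnt holds the number of
# case-insensitive matches.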
print(cnt) | [
"[email protected]"
] | |
bc75714c5f84b0cdd6f1ee2695cef3b2a6d507a9 | 6e060e9730b58e4d7819335438f915179504e72c | /bit_hr_payroll_ec/report/hr_payslip_sal_ingresos.py | 695dbd818afa84203920308d4415bfff6a9eec8e | [] | no_license | missionpetroleumgit/addons_missiongit | 4dcdf1d0e79da982670c573d59574a939d1636c0 | 714514719d5d4d96f371dd529a70ac282070c43b | refs/heads/master | 2023-03-10T20:34:10.154050 | 2021-02-24T23:50:20 | 2021-02-24T23:50:20 | 342,027,534 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,750 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
from openerp.report import report_sxw
from .. import hr_payroll
from openerp.addons.decimal_precision import decimal_precision as dp
from string import upper
from time import strftime
import base64
import StringIO
import cStringIO
import time
from psycopg2.errorcodes import SUBSTRING_ERROR
from decimal import Decimal
from unicodedata import decimal
import csv
import mx.DateTime
from mx.DateTime import RelativeDateTime
import datetime
import xlwt as pycel #Libreria que Exporta a Excel
class hr_payslip_ing_salidas(osv.Model):
_name = "hr.payslip.ing.salidas"
_description = 'Ingreso y Salidas'
def get_lines_report_wage(self, cr, uid, form):
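        # Builds one dict per contract for the report: sequence number, company,
        # ID number, employee name, job, business unit, plus the leaving date and
        # reason for 'salida' reports or the wage for 'ingreso' reports.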
res = []
date_from = form.get('date_from', False)
date_to = form.get('date_to', False)
tipo = form.get('type_hr', False)
hr_employee_obj = self.pool.get('hr.employee')
hr_contract_obj = self.pool.get('hr.contract')
if tipo == 'salida':
hr_contract = self.pool.get('hr.contract')
contract_ids = hr_contract.search(cr, uid, [('date_end', '>=', date_from), ('date_end', '<=', date_to)], order='company_id, name')
contract_data = hr_contract.browse(cr, uid, contract_ids)
sec = 1
for contrato in contract_data:
salidas = []
data = {}
data['sec'] = sec
data['comp'] = contrato.employee_id.company_id.name
data['ced'] = contrato.employee_id.identification_id
data['nom'] = contrato.employee_id.name_related
data['car'] = contrato.employee_id.job_id.name
data['neg'] = contrato.employee_id.business_unit_id.name
data['fing'] = contrato.employee_id.contract_id.date_start
data['fsal'] = contrato.employee_id.contract_id.date_end
hr_liquidacion = self.pool.get('hr.employee.liquidation')
liquidacion_ids = hr_liquidacion.search(cr, uid, [('employee_id', '=', contrato.employee_id.id)])
                liquidacion_data = hr_liquidacion.browse(cr, uid, liquidacion_ids)
                for liquidacion in liquidacion_data:
                    if liquidacion.type == 'renuncia':
                        data['mtv'] = 'Renuncia Voluntaria'
                    if liquidacion.type == 'intem_fijo':
                        data['mtv'] = 'Por Despido Intempestivo'
                    if liquidacion.type == 'intem_faltantes':
                        data['mtv'] = 'Terminación de contrato periodo de prueba'
                    if liquidacion.type == 'deshaucio':
                        data['mtv'] = 'Deshaucio'
                    data['obs'] = liquidacion.observation
                # advance the row sequence once per contract row
                sec += 1
                res.append(data)
elif tipo == 'ingreso':
hr_contract = self.pool.get('hr.contract')
contract_ids = hr_contract.search(cr, uid, [('date_start', '>=', date_from), ('date_start', '<=', date_to)], order='company_id, name')
contract_data = hr_contract.browse(cr, uid, contract_ids)
sec = 1
for contrato in contract_data:
ingresos = []
data = {}
data['sec'] = sec
data['comp'] = contrato.employee_id.company_id.name
data['ced'] = contrato.employee_id.identification_id
data['nom'] = contrato.employee_id.name_related
data['car'] = contrato.employee_id.job_id.name
data['neg'] = contrato.employee_id.business_unit_id.name
data['fing'] = contrato.employee_id.contract_id.date_start
data['vctr'] = contrato.employee_id.contract_id.wage
sec += 1
res.append(data)
return res
def _format_date(self, date):
if date:
campos = date.split('-')
date = datetime.date(int(campos[0]), int(campos[1]), int(campos[2]))
return date
def get_days(self, cr, uid, date_start, date_now):
#date_now = time.strftime("%Y-%m-%d")
days = (self._format_date(date_now) - self._format_date(date_start)).days
return days
def get_days_before(self, cr, uid, date_start, date_stop):
days = (self._format_date(date_stop) - self._format_date(date_start)).days
return days
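    # e.g. get_days_before(cr, uid, '2017-01-01', '2017-01-31') -> 30; a plain
    # calendar-day difference (the example dates are illustrative)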
def action_excel(self, cr, uid, ids, context=None):
if not ids:
return {'type_hr': 'ir.actions.act_window_close'}
if context is None:
context = {}
form = self.read(cr, uid, ids)[0]
date_from = form.get('date_from')
date_to = form.get('date_to')
type_hr = form.get('type_hr')
company_id = form.get('company_id', False)
        # Excel sheet formatting
wb = pycel.Workbook(encoding='utf-8')
style_cabecera = pycel.easyxf('font: colour black, bold True;'
'align: vertical center, horizontal center;'
)
# style_titulo = pycel.easyxf('font: colour blue, bold True, 16px;'
# 'align: vertical center, horizontal center;'
# )
style_cabecerader = pycel.easyxf('font: bold True;'
'align: vertical center, horizontal right;'
)
style_cabeceraizq = pycel.easyxf('font: bold True;'
'align: vertical center, horizontal left;'
)
style_header = pycel.easyxf('font: bold True;'
'align: vertical center, horizontal center, wrap on;'
'borders: left 1, right 1, top 1, bottom 1;')
linea = pycel.easyxf('borders:bottom 1;')
linea_center = pycel.easyxf('font: colour black, height 140;'
'align: vertical center, horizontal center, wrap on;'
)
linea_izq = pycel.easyxf('font: colour black, height 140;'
'align: vertical center, horizontal left, wrap on;'
)
linea_izq_n = pycel.easyxf('font: colour black, height 140;'
'align: vertical center, horizontal left, wrap on;'
)
linea_der = pycel.easyxf('font: colour black, height 140;'
'align: vertical center, horizontal right;'
)
if type_hr == 'salida':
title = 'REPORTE DE SALIDA AL '
elif type_hr == 'ingreso':
title = 'REPORTE DE INGRESO AL'
ws = wb.add_sheet(title)
ws.show_grid = False
ws.header_str = u"&LFecha de Impresion: &D Hora: &T&RPagina &P de &N"
ws.footer_str = u""
compania = self.pool.get('res.users').browse(cr, uid, uid).company_id
print "compania", compania.name
print "direccion", compania.partner_id
x0 = 11
ws.write_merge(1, 1, 1, x0, compania.name, style_cabecera)
ws.write_merge(2, 2, 1, x0, 'Direccion: ' + compania.partner_id.street, style_cabecera)
ws.write_merge(3, 3, 1, x0, 'Ruc: ' + compania.partner_id.part_number, style_cabecera)
ws.write_merge(5, 5, 1, x0, title + time.strftime('%d de %B del %Y', time.strptime(date_to, '%Y-%m-%d')).upper(), style_cabecera)
ws.fit_num_pages = 1
ws.fit_height_to_pages = 0
ws.fit_width_to_pages = 1
ws.portrait = 1
align = pycel.Alignment()
align.horz = pycel.Alignment.HORZ_RIGHT
align.vert = pycel.Alignment.VERT_CENTER
font1 = pycel.Font()
font1.colour_index = 0x0
font1.height = 140
linea_izq_n.width = 150
        # number format
style = pycel.XFStyle()
style.num_format_str = '#,##0.00'
style.alignment = align
style.font = font1
        # number format for the balance column
font = pycel.Font()
font.bold = True
font.colour_index = 0x27
style1 = pycel.XFStyle()
style1.num_format_str = '#,##0.00'
style1.alignment = align
style1.font = font
font2 = pycel.Font()
font2.bold = True
font2.colour_index = 0x0
style2 = pycel.XFStyle()
style2.num_format_str = '#,##0.00'
style2.alignment = align
style2.font = font2
style3 = pycel.XFStyle()
style3.num_format_str = '0'
style3.alignment = align
style3.font = font1
#info = self.get_payroll(cr, uid, form)
        xi = 8 # client header row
sec = 1
if type_hr == 'salida':
ws.write(xi, 1, 'SECUENCIAL', style_header)
ws.write(xi, 2, 'COMPANIA', style_header)
ws.write(xi, 3, 'EMPLEADO', style_header)
ws.write(xi, 4, 'No CEDULA', style_header)
ws.write(xi, 5, 'CARGO', style_header)
ws.write(xi, 6, 'UNIDAD DE NEGOCIO', style_header)
ws.write(xi, 7, 'FECHA DE INGRESO', style_header)
ws.write(xi, 8, 'FECHA DE SALIDA', style_header)
ws.write(xi, 9, 'MOTIVO', style_header)
ws.write(xi, 10, 'OBSERVACION', style_header)
if type_hr == 'ingreso':
ws.write(xi, 1, 'SECUENCIAL', style_header)
ws.write(xi, 2, 'COMPANIA', style_header)
ws.write(xi, 3, 'EMPLEADO', style_header)
ws.write(xi, 4, 'No CEDULA', style_header)
ws.write(xi, 5, 'CARGO', style_header)
ws.write(xi, 6, 'UNIDAD DE NEGOCIO', style_header)
ws.write(xi, 7, 'FECHA DE INGRESO', style_header)
ws.write(xi, 8, 'SUELDO', style_header)
xi += 1
rf = rr = ri = 0
amount_base = amount_calculate = 0.00
if type_hr == 'salida':
lineas = self.get_lines_report_wage(cr, uid, form)
print "***lineas: ", lineas
for linea in lineas:
ws.write(xi, 1, linea['sec'], linea_center)
ws.write(xi, 2, linea.get('comp', ''), linea_izq)
ws.write(xi, 3, linea.get('nom', ''), linea_izq)
ws.write(xi, 4, linea.get('ced', ''), linea_izq)
ws.write(xi, 5, linea.get('car', ''), linea_izq)
ws.write(xi, 6, linea.get('neg', ''), linea_izq)
ws.write(xi, 7, linea.get('fing', ''), linea_der)
ws.write(xi, 8, linea.get('fsal', ''), linea_der)
ws.write(xi, 9, linea.get('mtv', ''), linea_izq)
ws.write(xi, 10, linea.get('obs', ''), linea_izq)
xi += 1
if type_hr == 'ingreso':
lineas = self.get_lines_report_wage(cr, uid, form)
print "***lineas: ", lineas
for linea in lineas:
ws.write(xi, 1, linea['sec'], linea_center)
ws.write(xi, 2, linea.get('comp', ''), linea_izq)
ws.write(xi, 3, linea.get('nom', ''), linea_izq)
ws.write(xi, 4, linea.get('ced', ''), linea_izq)
ws.write(xi, 5, linea.get('car', ''), linea_izq)
ws.write(xi, 6, linea.get('neg', ''), linea_izq)
ws.write(xi, 7, linea.get('fing', ''), linea_der)
ws.write(xi, 8, linea.get('vctr', ''), linea_der)
xi += 1
ws.col(0).width = 2000
ws.col(1).width = 3800
ws.col(2).width = 9900
ws.col(3).width = 5000
ws.col(4).width = 6900
ws.col(5).width = 3250
ws.col(6).width = 2500
ws.col(7).width = 2500
ws.col(8).width = 8500
ws.col(9).width = 9500
ws.col(10).width = 6500
ws.row(8).height = 750
buf = cStringIO.StringIO()
wb.save(buf)
out = base64.encodestring(buf.getvalue())
buf.close()
return self.write(cr, uid, ids, {'data':out, 'txt_filename':'Reporte_RR_HH.xls', 'name':'Reporte_RR_HH.xls'})
_columns = {
'name' : fields.char('Descripcion', size=16,required=False, readonly=False),
'date_from': fields.date('Fecha Desde'),
'date_to': fields.date('Fecha Hasta'),
'txt_filename': fields.char(),
'type_hr':fields.selection([('salida', 'Salida'), ('ingreso', 'Ingreso')], 'Tipo De Reporte', required=True),
'data':fields.binary('Archivo', filters=None),
}
hr_payslip_ing_salidas()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
] | |
b50fa8b164038273f0f2510a5fa6d6b70bf5e855 | 0206ac23a29673ee52c367b103dfe59e7733cdc1 | /src/util/geo/misc.py | d49ea24c1dc86ad4e3c2233fae7cbe746f7c5725 | [] | no_license | guziy/RPN | 2304a93f9ced626ae5fc8abfcc079e33159ae56a | 71b94f4c73d4100345d29a6fbfa9fa108d8027b5 | refs/heads/master | 2021-11-27T07:18:22.705921 | 2021-11-27T00:54:03 | 2021-11-27T00:54:03 | 2,078,454 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | def deg_min_sec_to_deg(d, m, s, hem="N"):
if hem in ["S", "W"]:
mul = -1
else:
mul = 1
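    # e.g. deg_min_sec_to_deg(45, 30, 0) -> 45.5 and deg_min_sec_to_deg(45, 30, 0, "W") -> -45.5
    # (the float result assumes Python 3 division semantics)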
return mul * (d + m / 60 + s / 3600) | [
"[email protected]"
] | |
a193f8c7e319471422ad1e32ddfd1539b1cf5509 | 0469f9c57df4081527c7c1447881b23543fcd4d7 | /migrations/versions/e854ed68756a_init_commit.py | 883c4de77a1f5a4a80a00820bfa4b994218bcc69 | [] | no_license | Fajaragst/open-vote-api | 6585934977e5d0bc1c7d399b4212142c670a8380 | 011acf09ebd6493792d32bcb7410840ad97ca092 | refs/heads/master | 2023-03-24T20:55:45.751666 | 2019-07-21T14:11:12 | 2019-07-21T14:11:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,327 | py | """init commit
Revision ID: e854ed68756a
Revises:
Create Date: 2019-03-07 12:01:41.964280
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'e854ed68756a'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('election',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('name', sa.String(length=144), nullable=True),
sa.Column('images', sa.String(length=255), nullable=True),
sa.Column('description', sa.String(length=255), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('status', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('id')
)
op.create_table('candidate',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('name', sa.String(length=144), nullable=True),
sa.Column('description', sa.String(length=255), nullable=True),
sa.Column('images', sa.String(length=255), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('status', sa.Integer(), nullable=True),
sa.Column('election_id', postgresql.UUID(as_uuid=True), nullable=True),
sa.ForeignKeyConstraint(['election_id'], ['election.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('id')
)
op.create_table('user',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('username', sa.String(length=144), nullable=True),
sa.Column('identity_id', sa.String(length=144), nullable=True),
sa.Column('name', sa.String(length=144), nullable=True),
sa.Column('msisdn', sa.String(length=12), nullable=True),
sa.Column('email', sa.String(length=144), nullable=True),
sa.Column('images', sa.String(length=144), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('password', sa.String(length=128), nullable=True),
sa.Column('status', sa.Integer(), nullable=True),
sa.Column('role', sa.Integer(), nullable=True),
sa.Column('candidate_id', postgresql.UUID(as_uuid=True), nullable=True),
sa.ForeignKeyConstraint(['candidate_id'], ['candidate.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('id'),
sa.UniqueConstraint('identity_id'),
sa.UniqueConstraint('msisdn'),
sa.UniqueConstraint('username')
)
op.create_table('vote',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('candidate_id', postgresql.UUID(as_uuid=True), nullable=True),
sa.Column('user_id', postgresql.UUID(as_uuid=True), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['candidate_id'], ['candidate.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('vote')
op.drop_table('user')
op.drop_table('candidate')
op.drop_table('election')
# ### end Alembic commands ###
| [
"[email protected]"
] | |
0a5de77e71250c0d77aa944487df2813a9bc55b6 | 423af56f8003dfd4c42971bdd622e50dee5bdd0a | /Ntuples/test/stoppedHSCPMuonTree_promptreco.py | fb3042762f07b9b230425bd1c154afa58acc75aa | [] | no_license | jalimena/StoppedHSCPMuon | 06c150af36173e83da4b49ff25ce203fb7900dfd | 263fa98d9d3a1e2547fc8925dd5c4bcac1fc4dd0 | refs/heads/master | 2016-09-16T15:17:04.765768 | 2016-05-02T09:22:59 | 2016-05-02T09:22:59 | 16,558,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,329 | py | #
# produce stopped HSCPMuon TTree
# assumes RAW, HLT, RECO data present
# Run this on StoppedHSCPMuon Skim
#
import FWCore.ParameterSet.Config as cms
import os, re
version = os.environ['CMSSW_VERSION']
#from StoppedHSCPMuon.Ntuples.stoppedHSCPMuonTree_MC_2012_cfg import *
from StoppedHSCPMuon.Ntuples.stoppedHSCPMuonTree_RECO_2012_cfg import *
process.MessageLogger.cerr.INFO.limit = cms.untracked.int32(-1)
# change Global Tag
#process.GlobalTag.globaltag = 'GR_P_V32::All'
if re.match( r'CMSSW_4_2_', version):
process.GlobalTag.globaltag = 'START42_V11::All' #for 428 signal MC
elif re.match( r'CMSSW_5_2_', version):
process.GlobalTag.globaltag = 'GR_P_V39_AN1::All' #for Run2012A and 2012B prompt reco (52X)
#elif re.match( r'CMSSW_5_3_', version):
#process.GlobalTag.globaltag = 'GR_P_V40_AN1::All' #for Run2012C prompt reco(53X)
elif re.match( r'CMSSW_5_3_', version):
process.GlobalTag.globaltag = 'GR_P_V41_AN2::All' #for Run2012C-v2 prompt reco, Run2012D prompt reco (53X)
else:
# unsupported version
pass
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
#process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1000) )
# input files
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
process.source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/190/456/5E1A8343-F380-E111-8FFD-003048D2BA82.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/190/456/A24CD3A0-0681-E111-AFE8-001D09F2AD4D.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/190/456/BA028006-F480-E111-BAA5-BCAEC5329727.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/190/465/4EF8B039-0181-E111-98CC-003048F024DC.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/190/465/92040779-0281-E111-AE7F-BCAEC518FF40.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/190/465/E8032D7B-0281-E111-8B44-0015C5FDE067.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/04859027-1C88-E111-B1A2-BCAEC518FF67.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/04B48DA3-CE87-E111-B787-003048D2C01A.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/06EFAF4D-D987-E111-B276-001D09F29524.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/08F5C40B-EA87-E111-9C52-5404A63886A2.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/10B2858C-0688-E111-9E53-5404A63886BD.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/1A847CF4-1188-E111-94BE-001D09F28D54.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/1E1A3046-1888-E111-A6A7-BCAEC518FF8E.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/202145C2-0588-E111-8FBB-0030486780AC.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/222802CB-EB87-E111-B03F-002481E0D646.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/22F0F138-DA87-E111-95A0-001D09F2A690.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/24957545-1188-E111-8938-00237DDBE49C.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/265965C4-FF87-E111-9AC6-BCAEC5329700.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/280346E8-E087-E111-8FB8-5404A63886D6.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/28066F34-CD87-E111-B63D-003048D2BF1C.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/2A1A649B-FB87-E111-B63C-BCAEC532970F.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/2A2B9A43-0C88-E111-AE8E-0019B9F72D71.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/2E494DA1-1088-E111-A685-003048D37524.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/30D253D1-E587-E111-A273-0025B32034EA.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/321AB090-DF87-E111-BA97-5404A63886B9.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/342DEBC8-1F88-E111-99D2-0025901D625A.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/3E7085B6-F087-E111-848C-001D09F28EA3.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/405D695E-D587-E111-AB4E-5404A638868F.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/407350BF-D687-E111-9856-E0CB4E55365C.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/46440CB0-0C88-E111-9ADF-BCAEC53296FB.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/48B6DFC4-0588-E111-AC64-001D09F23C73.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/4C61278C-ED87-E111-8B23-0025901D62A0.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/522AD5E4-E087-E111-A0C1-5404A63886D4.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/56771E6B-CA87-E111-96F1-003048D3C90E.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/6028199D-D887-E111-9875-E0CB4E4408E3.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/627EA527-0A88-E111-A67B-BCAEC53296FB.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/62EA198C-0688-E111-8E92-5404A63886A5.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/62EB7C36-DA87-E111-B407-001D09F295FB.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/64D7886F-F287-E111-BA3E-BCAEC518FF68.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/6804CADA-0F88-E111-9726-003048D3751E.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/6A0D8ACA-EB87-E111-B460-00215AEDFD74.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/6AB35653-DC87-E111-AC05-003048678110.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/6C565368-F387-E111-B37F-0025901D5C80.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/6E3B525A-1B88-E111-A05F-0019B9F4A1D7.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/7827A72B-0488-E111-A981-BCAEC518FF80.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/7CC33674-0988-E111-9605-001D09F29597.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/7CF4A4E2-1488-E111-A8F7-001D09F28D4A.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/7E6437C5-0588-E111-80A4-001D09F28F25.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/7E83470F-D387-E111-854A-0025901D5D78.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/7EFEC135-DA87-E111-8C1F-BCAEC518FF76.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/823845DA-0F88-E111-B63B-003048D3C944.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/86AB8797-DF87-E111-9560-0025901D627C.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/86C0F40F-D687-E111-A149-001D09F24D8A.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/86D3D54A-F787-E111-8616-002481E0D73C.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/86EE80FD-E787-E111-8227-0025B32445E0.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/88FE99F3-1188-E111-A36F-0030486733B4.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/8E17B2CB-EE87-E111-AC0E-5404A63886B6.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/8E4CD2A7-F487-E111-B627-5404A63886B7.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/8EEA0994-E287-E111-9F94-5404A63886A0.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/90525F47-D987-E111-AE3B-5404A63886BB.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/96E1736B-CA87-E111-AB2D-0030486780EC.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/9837BC7F-E387-E111-A2EF-0025901D624E.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/9C92F898-0B88-E111-B745-5404A6388699.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/9EF4B1F2-0788-E111-90BC-003048D2BD66.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/A6C8639D-0B88-E111-A370-001D09F2B30B.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/A6FE6FFB-F787-E111-BD4F-003048D3756A.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/AC7F7563-F987-E111-83C2-0025901D6268.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/ACE20099-F887-E111-9E18-BCAEC5329709.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/ACEDCE11-0B88-E111-A4E7-001D09F290BF.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/AE4CA351-DC87-E111-868B-001D09F2960F.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/B40D9AE2-1488-E111-9048-001D09F2437B.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/BA81C5C1-D687-E111-8E6E-5404A63886C5.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/BAF0AF49-D287-E111-9FB2-003048D3C90E.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/C04A54C3-D687-E111-881D-BCAEC5329702.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/C0AAE27C-E387-E111-94EC-BCAEC5364C42.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/C6D63CC2-DA87-E111-9507-BCAEC5329709.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/C6F7E23A-D787-E111-A34F-5404A63886AE.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/C82AF6E9-1688-E111-A55A-BCAEC518FF52.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/CAFAE555-1B88-E111-94FA-001D09F24EE3.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/CC1484F1-0188-E111-B321-BCAEC53296F3.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/D06A475D-F187-E111-B10C-5404A638869E.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/D2CB1162-E787-E111-8478-0025901D5D90.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/D6D69A9E-DB87-E111-AC4B-BCAEC518FF76.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/DAAAAD71-0988-E111-ABCF-0019B9F4A1D7.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/DE5A0817-F687-E111-903B-001D09F2841C.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/E008424D-F487-E111-9707-001D09F248F8.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/E076794D-F487-E111-9DA9-001D09F2B2CF.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/E4638EF2-DF87-E111-B595-E0CB4E5536AE.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/E8E9E961-0388-E111-BA9C-5404A63886B6.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/EAF22770-F287-E111-9A1A-5404A638869E.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/FC44603B-2188-E111-8505-001D09F295FB.root',
#'/store/data/Run2012A/SingleMu/RECO/PromptReco-v1/000/191/226/FE158D53-0E88-E111-84C0-E0CB4E5536AE.root',
#cosmic data
#'/store/data/Run2012B/NoBPTX/RECO/PromptReco-v1/000/195/155/2E8E0BB2-C0AA-E111-8562-5404A638869B.root',
#'/store/data/Run2012B/NoBPTX/RECO/PromptReco-v1/000/195/155/6C7AAC49-BDAA-E111-96C0-003048F117EA.root'
#Run2012C, run 198230, all LS good
#'rfio:/castor/cern.ch/cms/store/data/Run2012C/NoBPTX/RECO/PromptReco-v1/000/198/230/0AD096D0-A4C7-E111-A193-001D09F2447F.root',
#'/store/data/Run2012C/NoBPTX/RECO/PromptReco-v1/000/198/230/A4D557CA-A4C7-E111-A8B8-0025B32034EA.root',
#'rfio:/castor/cern.ch/cms/store/data/Run2012C/NoBPTX/RECO/PromptReco-v1/000/198/230/A8DABA86-9FC7-E111-ABF9-BCAEC53296F7.root'
#'root://eoscms//eos/cms/store/data/Run2012C/NoBPTX/RAW-RECO/LogError-PromptSkim-v2/000/198/230/00000/A6B42FB7-35C8-E111-8CA0-D8D385FF4A7C.root'
#Run2012C-v2, gives seg fault
#'file:/tmp/jalimena/Run2012C-v2/705074C5-51EE-E111-A904-5404A63886CE.root'
#'root://eoscms//eos/cms/store/data/Run2012D/NoBPTX/RAW-RECO/LogError-PromptSkim-v1/000/207/452/00000/6EC6926F-4432-E211-8B9C-002590200A54.root'
'root://eoscms//eos/cms/store/data/Run2012B/NoBPTX/RAW-RECO/PromptSkim-v1/0000/B64E29A2-23A6-E111-AC1C-001E673986B5.root'
] );
process.source.lumisToProcess = cms.untracked.VLuminosityBlockRange(
#'201535:1-201535:300'
#'190456:1-190456:184',
#'190465:68-190465:110'
#'190465:1-190465:110'
#'190465:122-190465:127'
#'193834:1-194713:1' #Run2012B, HLT_L2Mu20_NoVertex_NoBPTX3BX_v1
#'194735:1-197756:1' #Run2012B, HLT_L2Mu20_NoVertex_NoBPTX3BX_NoHalo_v3
#'197770:1-200000:1' #Run2012B, HLT_L2Mu20_NoVertex_2Cha_NoBPTX3BX_NoHalo_v1, last run is filler number
)
| [
"[email protected]"
] | |
59f9852b28460e6d5f9fbd669f9b8feed10e8782 | 396b3046c70a871b7fe5efe668c7cfc3d02cdc73 | /EE-data-analysis/input_data_processing/takeout_valid_ids.py | baf3b2306612c31e6725fb4ddc5657911f0a2325 | [] | no_license | imbornagainer/Python_Project | 01e22532c7504d40534ff6217e91a8f44c53ab17 | 08332af7341846bceb7b410a2f59d7f51bba7d70 | refs/heads/master | 2021-05-12T06:15:40.793504 | 2018-02-26T02:26:45 | 2018-02-26T02:26:45 | 117,215,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,898 | py |
# -*- coding: utf-8 -*-
# Author : jeonghoonkang , https://github.com/jeonghoonkang
# Author : jeongmoon417 , https://github.com/jeongmoon417
# reference URLs
# https://code.tutsplus.com/ko/tutorials/how-to-work-with-excel-documents-using-python--cms-25698
# http://egloos.zum.com/mcchae/v/11120944
import datetime
import openpyxl
import xlsxwriter
import sys
# sys.path.insert(0, '../doc_design')
# (to do) have to find how to add different location directory path and file
# now just using same dir location file
# from openpyxl.workbook import Workbook
# from openpyxl.writer.excel import ExcelWriter
# (error) from openpyxl.cell import get_column_letter
# from openpyxl import load_workbook
class excell_class :
__ofile = None
def __init__(self):
pass
#@staticmethod
def open_exc_doc(self):
# using unicode file name with u syntax
__ofile = openpyxl.load_workbook(u"_test__1.xlsx")
return __ofile
def read_vertical(self, sheet, __start, __end):
__vertical = []
print " ... Please use column[n]:column[m], vertical read "
cell_of_col = sheet[__start:__end]
for row in cell_of_col:
for cell in row:
v = cell.value
if v == None:
continue # do nothing below code, back to next for loop step
                __vertical.append(v) # append the ID to the __vertical list
        return __vertical #__cnt, __cnt_n # vertical cell data, count of values, count of Nones
    # save the input list to an Excel file
def save_exc(self, __vdata, __fname):
__t = str(datetime.datetime.now())
workbook = xlsxwriter.Workbook(__fname + __t + '.xlsx')
worksheet = workbook.add_worksheet()
row = 0
col = 0
for item in (__vdata):
worksheet.write(row, col, item)
row += 1
workbook.close()
def save_vdata(__vdata, fname):
__t = str(datetime.datetime.now())
__odata = fname + '='
print str(__vdata)
__odata = __odata + str(__vdata)
filename = fname + '.py'
__ofile = open(filename,"w")
__ofile.write(__odata)
__ofile.close()
# Checks the smart-meter IDs collected by EE: flags IDs whose '-' placement is
# off (the expected format has a single '-' at index 2).
# A typically valid ID looks like 00-250060021 (most follow this format);
# suspicious-looking IDs look like 06-25-0071186, etc.
def check_id(__buf):
    __itter = len(__buf) # length of the buf list
    __err_list = [] # list buffer to return
    for __i in range(__itter) :
        print " check it is OK ? " + __buf[__i]
        try:
            if __buf[__i].index('-',0) != 2 : # enter if the first '-' is not at index 2
                # print the malformed ID and append it to the list
                print __buf[__i]
                __err_list.append(__buf[__i])
            # ('-' is at index 2, settled by the previous if)
            # also enter if another '-' sits at index 5
            elif __buf[__i].index('-',5) == 5 :
                # print the malformed ID and append it to the list
                print __buf[__i]
                __err_list.append(__buf[__i])
        except:
            # properly formatted ID (.index() raised ValueError: no stray '-')
            print" Proper ID format"# __buf[__i]
            #__err_list.append(__buf[__i])
    return __err_list
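# Example (illustrative): check_id(["00-250060021", "06-25-0071186"]) prints its
# progress and returns ["06-25-0071186"], the ID with a second '-'.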
if __name__ == "__main__":
    # open the Excel file
eclass = excell_class()
op = eclass.open_exc_doc()
sheets = op.get_sheet_names()
print " sheets = ", sheets
sh1 = op.get_sheet_by_name(sheets[0])
print " name =", sh1
buf = eclass.read_vertical(sh1,'b1','b541')
save_vdata(buf,"result_ids")
eclass.save_exc(buf,"result_ids")
exit (" ...congrats, finish")
| [
"BePious@DESKTOP-MT17J3L"
] | BePious@DESKTOP-MT17J3L |
d012c815cc66d570ca728e875aa1f6c642c7eb59 | 703a60185bb6d7607d3bff5afda2bbadfa96229c | /contabilidade/wsgi.py | 2534c73dc9bf771dcbebbf2e302e272d920c203a | [] | no_license | Eduardo-Lucas/contabilidade | 07b07f63272e564034fcd418310222a78b28c3c1 | c42d1ee2178d70a7301b57fe68329e732621abed | refs/heads/master | 2021-09-07T23:47:16.142421 | 2018-03-03T14:09:17 | 2018-03-03T14:09:17 | 102,148,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | """
WSGI config for contabilidade project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "contabilidade.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
aac39ead1e326cd55a04c437ae750dd7b38ff439 | 6359831db732f929409adbb8270092c7e9cca8d5 | /Q003_sub-array_with_maximum_sum.py | dea797f01d26dff8ae4cc1146a6a658e5c09d456 | [] | no_license | latika18/interviewbit | 11237219d982c98a22f0098be8248ef7a5b9246f | a065b19dc368136101dafbbbdab9b664fed0bf35 | refs/heads/master | 2020-03-15T15:20:30.002201 | 2018-08-22T07:39:21 | 2018-08-22T07:39:21 | 132,209,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 828 | py | def maxSubArray(A):
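    # Kadane's algorithm: grow a running sum; whenever it beats the best so far,
    # record the window [start, stop]; whenever it drops below zero, restart the
    # window at the next index.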
    start, stop = 0, 0
curr = 0
max_sum = A[0]
current_sum = 0
for i in range(len(A)):
current_sum += A[i]
if max_sum < current_sum:
max_sum = current_sum
start = curr
stop = i
if current_sum < 0:
current_sum = 0
curr = i + 1
    # `stop` is inclusive, so the slice needs stop + 1 to contain the last element
    return max_sum, A[start:stop + 1]
print maxSubArray([-2,-3,-4,-5])
print maxSubArray([-1])
print maxSubArray([-5, 1, -3, 7, -1, 2, 1, -4, 6])
print maxSubArray([-5, 1, -3, 7, -1, 2, 1, -6, 5])
print maxSubArray( [6, -3, -2, 7, -5, 2, 1, -7, 6])
print maxSubArray([-5, -2, -1, -4, -7])
print maxSubArray( [4, 1, 1, 4, -4, 10, -4, 10, 3, -3, -9, -8, 2, -6, -6, -5, -1, -7, 7, 8])
print maxSubArray([4, -5, -1, 0, -2, 20, -4, -3, -2, 8, -1, 10, -1, -1 ])
| [
"[email protected]"
] | |
cdab5fe6af763d9086bf84c60fb5baee3d83ced5 | 9efa07e8b0d63fc107124a8387fbe29cfc44ced9 | /konkord/apps/users/migrations/0002_auto_20170124_1003.py | 7e28bb774084e7e88638b6a2ba356d0f63c6a354 | [] | no_license | phonxis/konkord | ef7be9325bddeaceb28f1a70c094d3c7a8ea6b9b | cd564a41f1b0ac0665a7dee0e3f26730c7377154 | refs/heads/master | 2021-07-20T01:33:42.221782 | 2017-10-26T14:02:48 | 2017-10-26T14:02:48 | 105,538,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,223 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-24 10:03
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import users.models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterModelManagers(
name='user',
managers=[
('objects', users.models.UserManager()),
],
),
migrations.AlterField(
model_name='email',
name='email',
field=models.CharField(max_length=255, verbose_name='Email'),
),
migrations.AlterField(
model_name='email',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='emails', to=settings.AUTH_USER_MODEL, verbose_name='User'),
),
migrations.AlterField(
model_name='phone',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='phones', to=settings.AUTH_USER_MODEL, verbose_name='User'),
),
]
| [
"[email protected]"
] | |
8f99e0a8b26cfb8845d447c795dc5d253ea25304 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/25/usersdata/132/11887/submittedfiles/av1_3.py | 611ca6bc5565b10c48ce258b48df5fbbec519a95 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
a=input('digite o valor de a:')
b=input('digite o valor de b:')
i=a
x=b
c=0
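# Presumably counts the remainder steps of Euclid's algorithm for gcd(a, b)
# (interpretation inferred from the variable roles; not stated in the original).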
while True:
    if i % x != 0:
        c = c + 1
    if i % x == 0:
        break
    # Euclid step: the divisor becomes the dividend and the remainder the new divisor
    i, x = x, i % x
print(c) | [
"[email protected]"
] | |
578f222d31e790f108a9e57c8d6fddaccdbd549e | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_mandible.py | 72cd876319bde71fb9a98f2eeed130011b2c8db0 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py |
# class header
class _MANDIBLE():
def __init__(self,):
self.name = "MANDIBLE"
self.definitions = [u'in a person or animal, the lower jaw bone', u'in insects, one of the two parts of the mouth used for biting and cutting food']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
f8a4a253473bf3585a96fcfad8f0c972ee8a9423 | d72af5ad2b8f42b4faff296df0342dc3fbbd91ac | /tensorflow_estimator/python/estimator/head/base_head_test.py | bde6c898195caa1e958aefd6f2ac23a942e02548 | [
"Apache-2.0"
] | permissive | CheukNgai/estimator | 22eacf5b0a366d43206e441c9d0cbb096ab12614 | 673a50bd5ffa70d0672ce47e40f5075f1cbe0a62 | refs/heads/master | 2020-04-01T23:09:12.173802 | 2018-11-13T10:19:22 | 2018-11-13T10:19:22 | 153,744,529 | 0 | 0 | Apache-2.0 | 2018-11-13T10:19:23 | 2018-10-19T07:38:19 | Python | UTF-8 | Python | false | false | 6,502 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for base_head.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
from tensorflow_estimator.python.estimator import model_fn
from tensorflow_estimator.python.estimator.head import base_head
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
def _initialize_variables(test_case, scaffold):
scaffold.finalize()
test_case.assertIsNone(scaffold.init_feed_dict)
test_case.assertIsNone(scaffold.init_fn)
scaffold.init_op.run()
scaffold.ready_for_local_init_op.eval()
scaffold.local_init_op.run()
scaffold.ready_op.eval()
test_case.assertIsNotNone(scaffold.saver)
def _assert_simple_summaries(test_case, expected_summaries, summary_str,
                             tol=1e-6):
  """Assert summary has the specified simple values.
Args:
test_case: test case.
expected_summaries: Dict of expected tags and simple values.
summary_str: Serialized `summary_pb2.Summary`.
tol: Tolerance for relative and absolute.
"""
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
test_case.assertAllClose(expected_summaries, {
v.tag: v.simple_value for v in summary.value
}, rtol=tol, atol=tol)
def _assert_no_hooks(test_case, spec):
test_case.assertAllEqual([], spec.training_chief_hooks)
test_case.assertAllEqual([], spec.training_hooks)
class CreateEstimatorSpecTest(test.TestCase):
class _HeadWithTPUSupport(base_head.Head):
"""Head that overrides _create_tpu_estimator_spec."""
def name(self):
return 'HeadWithTPUSupport'
def logits_dimension(self):
return None
def loss_reduction(self):
return None
def loss(self, features, mode, logits, labels):
return None
def predictions(self, logits):
return None
def metrics(self, regularization_losses=None):
return None
def update_metrics(self, eval_metrics, features, logits, labels,
mode=None, regularization_losses=None):
return None
def _create_tpu_estimator_spec(self, features, mode, logits, labels=None,
optimizer=None, train_op_fn=None,
regularization_losses=None):
return model_fn._TPUEstimatorSpec(
mode=model_fn.ModeKeys.EVAL,
loss=constant_op.constant(0.0, dtype=dtypes.float32))
class _HeadWithOutTPUSupport(base_head.Head):
"""Head that overrides create_estimator_spec."""
def name(self):
return 'HeadWithOutTPUSupport'
def logits_dimension(self):
return None
def loss_reduction(self):
return None
def loss(self, features, mode, logits, labels):
return None
def predictions(self, logits):
return None
def metrics(self, regularization_losses=None):
return None
def update_metrics(self, eval_metrics, features, logits, labels,
mode=None, regularization_losses=None):
return None
def create_estimator_spec(self, features, mode, logits, labels=None,
optimizer=None, train_op_fn=None,
regularization_losses=None):
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.EVAL,
loss=constant_op.constant(0.0, dtype=dtypes.float32))
class _InvalidHead(base_head.Head):
"""Head that overrides neither estimator_spec functions."""
def name(self):
return 'InvalidHead'
def logits_dimension(self):
return None
def loss_reduction(self):
return None
def loss(self, features, mode, logits, labels):
return None
def predictions(self, logits):
return None
def metrics(self, regularization_losses=None):
return None
def update_metrics(self, eval_metrics, features, logits, labels,
mode=None, regularization_losses=None):
return None
def test_head_override_tpu_estimator_spec(self):
"""Test for `_Head` that overrides _create_tpu_estimator_spec."""
head = self._HeadWithTPUSupport()
tpu_spec = head._create_tpu_estimator_spec(
features=None, mode=None, logits=None)
self.assertTrue(isinstance(tpu_spec, model_fn._TPUEstimatorSpec))
est_spec = head.create_estimator_spec(
features=None, mode=None, logits=None)
self.assertTrue(isinstance(est_spec, model_fn.EstimatorSpec))
def test_head_override_estimator_spec(self):
"""Test for `_Head` that overrides create_estimator_spec."""
head = self._HeadWithOutTPUSupport()
with self.assertRaisesRegexp(
NotImplementedError,
'TPUEstimatorSpec not available for this model head.'):
_ = head._create_tpu_estimator_spec(
features=None, mode=None, logits=None)
est_spec = head.create_estimator_spec(
features=None, mode=None, logits=None)
self.assertTrue(isinstance(est_spec, model_fn.EstimatorSpec))
def test_invalid_head_class(self):
head = self._InvalidHead()
with self.assertRaisesRegexp(
NotImplementedError,
'TPUEstimatorSpec not available for this model head.'):
_ = head._create_tpu_estimator_spec(
features=None, mode=None, logits=None)
with self.assertRaisesRegexp(
NotImplementedError,
r'Subclasses of Head must implement `create_estimator_spec\(\)` or '
r'_create_tpu_estimator_spec\(\).'):
_ = head.create_estimator_spec(
features=None, mode=None, logits=None)
if __name__ == '__main__':
test.main()
| [
"[email protected]"
] | |
6329126341964ef466c626b546a074040716bc53 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/insights/v20150501/export_configuration.py | 629905544c15b3e41801d1b73dcce0c71093e324 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,540 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['ExportConfigurationArgs', 'ExportConfiguration']
@pulumi.input_type
class ExportConfigurationArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
resource_name: pulumi.Input[str],
destination_account_id: Optional[pulumi.Input[str]] = None,
destination_address: Optional[pulumi.Input[str]] = None,
destination_storage_location_id: Optional[pulumi.Input[str]] = None,
destination_storage_subscription_id: Optional[pulumi.Input[str]] = None,
destination_type: Optional[pulumi.Input[str]] = None,
export_id: Optional[pulumi.Input[str]] = None,
is_enabled: Optional[pulumi.Input[str]] = None,
notification_queue_enabled: Optional[pulumi.Input[str]] = None,
notification_queue_uri: Optional[pulumi.Input[str]] = None,
record_types: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ExportConfiguration resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] resource_name: The name of the Application Insights component resource.
:param pulumi.Input[str] destination_account_id: The name of destination storage account.
:param pulumi.Input[str] destination_address: The SAS URL for the destination storage container. It must grant write permission.
:param pulumi.Input[str] destination_storage_location_id: The location ID of the destination storage container.
:param pulumi.Input[str] destination_storage_subscription_id: The subscription ID of the destination storage container.
:param pulumi.Input[str] destination_type: The Continuous Export destination type. This has to be 'Blob'.
:param pulumi.Input[str] export_id: The Continuous Export configuration ID. This is unique within a Application Insights component.
:param pulumi.Input[str] is_enabled: Set to 'true' to create a Continuous Export configuration as enabled, otherwise set it to 'false'.
:param pulumi.Input[str] notification_queue_enabled: Deprecated
:param pulumi.Input[str] notification_queue_uri: Deprecated
:param pulumi.Input[str] record_types: The document types to be exported, as comma separated values. Allowed values include 'Requests', 'Event', 'Exceptions', 'Metrics', 'PageViews', 'PageViewPerformance', 'Rdd', 'PerformanceCounters', 'Availability', 'Messages'.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "resource_name", resource_name)
if destination_account_id is not None:
pulumi.set(__self__, "destination_account_id", destination_account_id)
if destination_address is not None:
pulumi.set(__self__, "destination_address", destination_address)
if destination_storage_location_id is not None:
pulumi.set(__self__, "destination_storage_location_id", destination_storage_location_id)
if destination_storage_subscription_id is not None:
pulumi.set(__self__, "destination_storage_subscription_id", destination_storage_subscription_id)
if destination_type is not None:
pulumi.set(__self__, "destination_type", destination_type)
if export_id is not None:
pulumi.set(__self__, "export_id", export_id)
if is_enabled is not None:
pulumi.set(__self__, "is_enabled", is_enabled)
if notification_queue_enabled is not None:
pulumi.set(__self__, "notification_queue_enabled", notification_queue_enabled)
if notification_queue_uri is not None:
pulumi.set(__self__, "notification_queue_uri", notification_queue_uri)
if record_types is not None:
pulumi.set(__self__, "record_types", record_types)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="resourceName")
def resource_name(self) -> pulumi.Input[str]:
"""
The name of the Application Insights component resource.
"""
return pulumi.get(self, "resource_name")
@resource_name.setter
def resource_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_name", value)
@property
@pulumi.getter(name="destinationAccountId")
def destination_account_id(self) -> Optional[pulumi.Input[str]]:
"""
The name of destination storage account.
"""
return pulumi.get(self, "destination_account_id")
@destination_account_id.setter
def destination_account_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "destination_account_id", value)
@property
@pulumi.getter(name="destinationAddress")
def destination_address(self) -> Optional[pulumi.Input[str]]:
"""
The SAS URL for the destination storage container. It must grant write permission.
"""
return pulumi.get(self, "destination_address")
@destination_address.setter
def destination_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "destination_address", value)
@property
@pulumi.getter(name="destinationStorageLocationId")
def destination_storage_location_id(self) -> Optional[pulumi.Input[str]]:
"""
The location ID of the destination storage container.
"""
return pulumi.get(self, "destination_storage_location_id")
@destination_storage_location_id.setter
def destination_storage_location_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "destination_storage_location_id", value)
@property
@pulumi.getter(name="destinationStorageSubscriptionId")
def destination_storage_subscription_id(self) -> Optional[pulumi.Input[str]]:
"""
The subscription ID of the destination storage container.
"""
return pulumi.get(self, "destination_storage_subscription_id")
@destination_storage_subscription_id.setter
def destination_storage_subscription_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "destination_storage_subscription_id", value)
@property
@pulumi.getter(name="destinationType")
def destination_type(self) -> Optional[pulumi.Input[str]]:
"""
The Continuous Export destination type. This has to be 'Blob'.
"""
return pulumi.get(self, "destination_type")
@destination_type.setter
def destination_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "destination_type", value)
@property
@pulumi.getter(name="exportId")
def export_id(self) -> Optional[pulumi.Input[str]]:
"""
The Continuous Export configuration ID. This is unique within a Application Insights component.
"""
return pulumi.get(self, "export_id")
@export_id.setter
def export_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "export_id", value)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> Optional[pulumi.Input[str]]:
"""
Set to 'true' to create a Continuous Export configuration as enabled, otherwise set it to 'false'.
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter(name="notificationQueueEnabled")
def notification_queue_enabled(self) -> Optional[pulumi.Input[str]]:
"""
Deprecated
"""
return pulumi.get(self, "notification_queue_enabled")
@notification_queue_enabled.setter
def notification_queue_enabled(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "notification_queue_enabled", value)
@property
@pulumi.getter(name="notificationQueueUri")
def notification_queue_uri(self) -> Optional[pulumi.Input[str]]:
"""
Deprecated
"""
return pulumi.get(self, "notification_queue_uri")
@notification_queue_uri.setter
def notification_queue_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "notification_queue_uri", value)
@property
@pulumi.getter(name="recordTypes")
def record_types(self) -> Optional[pulumi.Input[str]]:
"""
The document types to be exported, as comma separated values. Allowed values include 'Requests', 'Event', 'Exceptions', 'Metrics', 'PageViews', 'PageViewPerformance', 'Rdd', 'PerformanceCounters', 'Availability', 'Messages'.
"""
return pulumi.get(self, "record_types")
@record_types.setter
def record_types(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "record_types", value)
class ExportConfiguration(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
destination_account_id: Optional[pulumi.Input[str]] = None,
destination_address: Optional[pulumi.Input[str]] = None,
destination_storage_location_id: Optional[pulumi.Input[str]] = None,
destination_storage_subscription_id: Optional[pulumi.Input[str]] = None,
destination_type: Optional[pulumi.Input[str]] = None,
export_id: Optional[pulumi.Input[str]] = None,
is_enabled: Optional[pulumi.Input[str]] = None,
notification_queue_enabled: Optional[pulumi.Input[str]] = None,
notification_queue_uri: Optional[pulumi.Input[str]] = None,
record_types: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Properties that define a Continuous Export configuration.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] destination_account_id: The name of the destination storage account.
:param pulumi.Input[str] destination_address: The SAS URL for the destination storage container. It must grant write permission.
:param pulumi.Input[str] destination_storage_location_id: The location ID of the destination storage container.
:param pulumi.Input[str] destination_storage_subscription_id: The subscription ID of the destination storage container.
:param pulumi.Input[str] destination_type: The Continuous Export destination type. This has to be 'Blob'.
:param pulumi.Input[str] export_id: The Continuous Export configuration ID. This is unique within an Application Insights component.
:param pulumi.Input[str] is_enabled: Set to 'true' to create a Continuous Export configuration as enabled, otherwise set it to 'false'.
:param pulumi.Input[str] notification_queue_enabled: Deprecated
:param pulumi.Input[str] notification_queue_uri: Deprecated
:param pulumi.Input[str] record_types: The document types to be exported, as comma-separated values. Allowed values include 'Requests', 'Event', 'Exceptions', 'Metrics', 'PageViews', 'PageViewPerformance', 'Rdd', 'PerformanceCounters', 'Availability', 'Messages'.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] resource_name_: The name of the Application Insights component resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ExportConfigurationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Properties that define a Continuous Export configuration.
:param str resource_name: The name of the resource.
:param ExportConfigurationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ExportConfigurationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
destination_account_id: Optional[pulumi.Input[str]] = None,
destination_address: Optional[pulumi.Input[str]] = None,
destination_storage_location_id: Optional[pulumi.Input[str]] = None,
destination_storage_subscription_id: Optional[pulumi.Input[str]] = None,
destination_type: Optional[pulumi.Input[str]] = None,
export_id: Optional[pulumi.Input[str]] = None,
is_enabled: Optional[pulumi.Input[str]] = None,
notification_queue_enabled: Optional[pulumi.Input[str]] = None,
notification_queue_uri: Optional[pulumi.Input[str]] = None,
record_types: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ExportConfigurationArgs.__new__(ExportConfigurationArgs)
__props__.__dict__["destination_account_id"] = destination_account_id
__props__.__dict__["destination_address"] = destination_address
__props__.__dict__["destination_storage_location_id"] = destination_storage_location_id
__props__.__dict__["destination_storage_subscription_id"] = destination_storage_subscription_id
__props__.__dict__["destination_type"] = destination_type
__props__.__dict__["export_id"] = export_id
__props__.__dict__["is_enabled"] = is_enabled
__props__.__dict__["notification_queue_enabled"] = notification_queue_enabled
__props__.__dict__["notification_queue_uri"] = notification_queue_uri
__props__.__dict__["record_types"] = record_types
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if resource_name_ is None and not opts.urn:
raise TypeError("Missing required property 'resource_name_'")
__props__.__dict__["resource_name"] = resource_name_
__props__.__dict__["application_name"] = None
__props__.__dict__["container_name"] = None
__props__.__dict__["export_status"] = None
__props__.__dict__["instrumentation_key"] = None
__props__.__dict__["is_user_enabled"] = None
__props__.__dict__["last_gap_time"] = None
__props__.__dict__["last_success_time"] = None
__props__.__dict__["last_user_update"] = None
__props__.__dict__["permanent_error_reason"] = None
__props__.__dict__["resource_group"] = None
__props__.__dict__["storage_name"] = None
__props__.__dict__["subscription_id"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:insights:ExportConfiguration")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ExportConfiguration, __self__).__init__(
'azure-native:insights/v20150501:ExportConfiguration',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ExportConfiguration':
"""
Get an existing ExportConfiguration resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ExportConfigurationArgs.__new__(ExportConfigurationArgs)
__props__.__dict__["application_name"] = None
__props__.__dict__["container_name"] = None
__props__.__dict__["destination_account_id"] = None
__props__.__dict__["destination_storage_location_id"] = None
__props__.__dict__["destination_storage_subscription_id"] = None
__props__.__dict__["destination_type"] = None
__props__.__dict__["export_id"] = None
__props__.__dict__["export_status"] = None
__props__.__dict__["instrumentation_key"] = None
__props__.__dict__["is_user_enabled"] = None
__props__.__dict__["last_gap_time"] = None
__props__.__dict__["last_success_time"] = None
__props__.__dict__["last_user_update"] = None
__props__.__dict__["notification_queue_enabled"] = None
__props__.__dict__["permanent_error_reason"] = None
__props__.__dict__["record_types"] = None
__props__.__dict__["resource_group"] = None
__props__.__dict__["storage_name"] = None
__props__.__dict__["subscription_id"] = None
return ExportConfiguration(resource_name, opts=opts, __props__=__props__)
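    # Example lookup of an existing resource (the resource ID below is an
    # illustrative Azure ID, not a real one):
    #   imported = ExportConfiguration.get(
    #       "imported-export",
    #       id="/subscriptions/<sub>/resourceGroups/<rg>/providers/microsoft.insights/components/<component>/exportconfiguration/<exportId>")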
@property
@pulumi.getter(name="applicationName")
def application_name(self) -> pulumi.Output[str]:
"""
The name of the Application Insights component.
"""
return pulumi.get(self, "application_name")
@property
@pulumi.getter(name="containerName")
def container_name(self) -> pulumi.Output[str]:
"""
The name of the destination storage container.
"""
return pulumi.get(self, "container_name")
@property
@pulumi.getter(name="destinationAccountId")
def destination_account_id(self) -> pulumi.Output[str]:
"""
The name of the destination account.
"""
return pulumi.get(self, "destination_account_id")
@property
@pulumi.getter(name="destinationStorageLocationId")
def destination_storage_location_id(self) -> pulumi.Output[str]:
"""
The destination account location ID.
"""
return pulumi.get(self, "destination_storage_location_id")
@property
@pulumi.getter(name="destinationStorageSubscriptionId")
def destination_storage_subscription_id(self) -> pulumi.Output[str]:
"""
The destination storage account subscription ID.
"""
return pulumi.get(self, "destination_storage_subscription_id")
@property
@pulumi.getter(name="destinationType")
def destination_type(self) -> pulumi.Output[str]:
"""
The destination type.
"""
return pulumi.get(self, "destination_type")
@property
@pulumi.getter(name="exportId")
def export_id(self) -> pulumi.Output[str]:
"""
The unique ID of the export configuration inside an Application Insights component. It is auto-generated when the Continuous Export configuration is created.
"""
return pulumi.get(self, "export_id")
@property
@pulumi.getter(name="exportStatus")
def export_status(self) -> pulumi.Output[str]:
"""
This indicates the current Continuous Export configuration status. The possible values are 'Preparing', 'Success', 'Failure'.
"""
return pulumi.get(self, "export_status")
@property
@pulumi.getter(name="instrumentationKey")
def instrumentation_key(self) -> pulumi.Output[str]:
"""
The instrumentation key of the Application Insights component.
"""
return pulumi.get(self, "instrumentation_key")
@property
@pulumi.getter(name="isUserEnabled")
def is_user_enabled(self) -> pulumi.Output[str]:
"""
This will be 'true' if the Continuous Export configuration is enabled, otherwise it will be 'false'.
"""
return pulumi.get(self, "is_user_enabled")
@property
@pulumi.getter(name="lastGapTime")
def last_gap_time(self) -> pulumi.Output[str]:
"""
The last time the Continuous Export configuration started failing.
"""
return pulumi.get(self, "last_gap_time")
@property
@pulumi.getter(name="lastSuccessTime")
def last_success_time(self) -> pulumi.Output[str]:
"""
The last time data was successfully delivered to the destination storage container for this Continuous Export configuration.
"""
return pulumi.get(self, "last_success_time")
@property
@pulumi.getter(name="lastUserUpdate")
def last_user_update(self) -> pulumi.Output[str]:
"""
Last time the Continuous Export configuration was updated.
"""
return pulumi.get(self, "last_user_update")
@property
@pulumi.getter(name="notificationQueueEnabled")
def notification_queue_enabled(self) -> pulumi.Output[Optional[str]]:
"""
Deprecated
"""
return pulumi.get(self, "notification_queue_enabled")
@property
@pulumi.getter(name="permanentErrorReason")
def permanent_error_reason(self) -> pulumi.Output[str]:
"""
This is the reason the Continuous Export configuration started failing. It can be 'AzureStorageNotFound' or 'AzureStorageAccessDenied'.
"""
return pulumi.get(self, "permanent_error_reason")
@property
@pulumi.getter(name="recordTypes")
def record_types(self) -> pulumi.Output[Optional[str]]:
"""
The comma-separated list of document types that will be exported. The possible values include 'Requests', 'Event', 'Exceptions', 'Metrics', 'PageViews', 'PageViewPerformance', 'Rdd', 'PerformanceCounters', 'Availability', 'Messages'.
"""
return pulumi.get(self, "record_types")
@property
@pulumi.getter(name="resourceGroup")
def resource_group(self) -> pulumi.Output[str]:
"""
The resource group of the Application Insights component.
"""
return pulumi.get(self, "resource_group")
@property
@pulumi.getter(name="storageName")
def storage_name(self) -> pulumi.Output[str]:
"""
The name of the destination storage account.
"""
return pulumi.get(self, "storage_name")
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> pulumi.Output[str]:
"""
The subscription of the Application Insights component.
"""
return pulumi.get(self, "subscription_id")
| [
"[email protected]"
] | |
afcbab42b80184c81c6eedbcb945ba989acb12dc | 5736fa4213981815bce5b2527b1db69b9405a9e3 | /tools/pe-clr.permissions.py | aa15c2711586ec54bd9104ab85c43ea5ab66c656 | [
"BSD-2-Clause"
] | permissive | mmg1/syringe-1 | a4f399f9eaf31fb784a34715aeb8dfb230c56920 | 4c38701756504de32282e95d6c4d76eb194fa46f | refs/heads/master | 2020-03-28T09:04:20.664636 | 2018-07-28T03:46:10 | 2018-07-28T03:46:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,951 | py | import logging, time
#logging.root.setLevel(logging.INFO)
import pecoff, ptypes
from ptypes import pstruct, dyn
from pecoff.portable import clr
class CustomField(pstruct.type):
_fields_ = [
(clr.ELEMENT_TYPE, 'FieldOrProp'),
(clr.ELEMENT_TYPE, 'FieldOrPropType'),
(clr.SerString, 'FieldOrPropName'),
(lambda s: clr.ElementType.lookup(s['FieldOrPropType'].li.int()), 'Value'),
]
class Fields(pstruct.type):
_fields_ = [
(clr.CInt, 'Count'),
(lambda s: dyn.array(CustomField, s['Count'].li.Get()), 'Fields'),
]
def log(stdout):
start = ts = time.time()
while True:
message = (yield)
ts = time.time()
print >>stdout, "{:.3f} : {:s}".format(ts - start, message)
return
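# log() follows the primed-coroutine pattern: the caller advances the generator
# once with next() so execution parks on (yield), after which each .send(msg)
# prints an elapsed-time-stamped line. Minimal demo (stream choice is just an
# example):
#   L = log(sys.stderr)
#   next(L)           # prime the coroutine
#   L.send("ready")   # -> "0.000 : ready"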
def strify(value):
if isinstance(value, (int, long)):
return "{:d}".format(value)
elif isinstance(value, basestring):
return "{:s}".format(value)
return "{!r}".format(value)
if __name__ == '__main__':
import sys, os
import ptypes, pecoff
if len(sys.argv) != 2:
print >>sys.stderr, "Usage: {:s} file".format(sys.argv[0] if len(sys.argv) else 'test')
sys.exit(1)
filename = sys.argv[1]
L = log(sys.stderr); next(L)
ptypes.setsource(ptypes.prov.file(filename, mode='r'))
L.send("Loading executable for {:s}".format(os.path.basename(filename)))
z = pecoff.Executable.File()
z = z.l
dd = z['next']['header']['datadirectory'][14]
    if dd['address'].int() == 0:
        L.send("No IMAGE_COR20_HEADER found in executable {:s}!".format(os.path.basename(filename)))
        sys.exit(2)
comdd = dd['address'].d.l
meta = comdd['MetaData']['Address'].d.l
strings = meta['StreamHeaders'].Get('#Strings')['Offset'].d
#userstrings = meta['StreamHeaders'].Get('#US')['Offset'].d
guids = meta['StreamHeaders'].Get('#GUID')['Offset'].d
blobs = meta['StreamHeaders'].Get('#Blob')['Offset'].d
htables = meta['StreamHeaders'].Get('#~')['Offset'].d
ts = time.time()
L.send("Loading heap \"{:s}\"".format('#~'))
htables.l
L.send("Loading heap \"{:s}\"".format('#Strings'))
strings.l
L.send("Loading heap \"{:s}\"".format('#GUID'))
guids.l
L.send("Loading heap \"{:s}\"".format('#Blob'))
blobs.l
L.send("Finished loading heaps in {:.3f}".format(time.time()-ts))
tables = htables['tables']
# output modules
L.send("Enumerating {:d} modules.".format(len(tables['Module'])))
modules = []
for i, m in enumerate(tables['Module']):
res = strings.field(m['Name'].int())
if m['Mvid'].int():
g = guids.Get(m['Mvid'].int())
print >>sys.stdout, '{:s} {:s}'.format(res.str(), g.str())
modules.append((res.str(), g))
else:
print >>sys.stdout, '{:s}'.format(res.str())
# collect assemblies
L.send("Enumerating {:d} assemblies.".format(len(tables['Assembly'])))
assembly = {}
for i, a in enumerate(tables['Assembly']):
res = strings.field(a['Name'].int())
assembly[i+1] = res.str()
# for each permission that points to an Assembly
perms = ((p['Parent'].Index(), p['Action'].str(), p['PermissionSet'].int()) for p in tables['DeclSecurity'] if p['Parent']['Tag'].Get() == 'Assembly')
L.send("Listing properties from each permission.")
for mi, ma, bi in perms:
permset = blobs.field(bi)['data'].cast(clr.PermissionSet)
attributes = []
for attr in permset['attributes']:
props = attr['Properties']['data'].cast(Fields)
res = {}
for f in props['Fields']:
res[ f['FieldOrPropName'].str() ] = f['Value'].get()
attributes.append(res)
res = {}
map(res.update, attributes)
print >>sys.stdout, '\t{:s} : {:s} : {:s}'.format(assembly[mi], ma, ', '.join('{:s}={:s}'.format(k, strify(v)) for k, v in res.viewitems()))
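# Expected invocation (binary path is illustrative):
#   python pe-clr.permissions.py managed_binary.exe
# Output: one line per module (name plus MVID when present), then one indented
# line per assembly-level DeclSecurity entry showing the assembly name, the
# security action, and the decoded permission-set properties.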
| [
"[email protected]"
] | |
3c38afc3258b22e70b1d643c45f3a7b04163dd71 | 328afd873e3e4fe213c0fb4ce6621cb1a450f33d | /W3School/conditional_statement_loops/5.py | 9a8d154a3dfbafb59247588b50c5ac24bffeb6c0 | [] | no_license | TorpidCoder/Python | 810371d1bf33c137c025344b8d736044bea0e9f5 | 9c46e1de1a2926e872eee570e6d49f07dd533956 | refs/heads/master | 2021-07-04T08:21:43.950665 | 2020-08-19T18:14:09 | 2020-08-19T18:14:09 | 148,430,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | word = input("Enter the word : ")
if(word == ""):
print("you have not entered any word ")
else:
print(word[::-1])
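# word[::-1] uses slice notation with a step of -1, which walks the string
# backwards and returns a reversed copy.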
| [
"[email protected]"
] | |
0969d72a4236cb5feccfde67e764dd2271af3c61 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/J/Julian_Todd/python-png-header-test.py | 36dbf301d83f5b219c9b36fed197cfeb24851ab4 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,316 | py | import scraperwiki
import urllib
import base64
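# httpresponseheader sets the Content-Type of the scraper's web response, and
# dumpMessage with encoding="base64" pushes the binary PNG body through
# ScraperWiki Classic's JSON console protocol, which is why the payload is
# base64-wrapped.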
scraperwiki.utils.httpresponseheader("Content-Type", "image/PNG")
url = 'http://scraperwiki.com/media/images/intro_slides/intro4.png'
pngbin = urllib.urlopen(url).read()
scraperwiki.dumpMessage({"content":base64.encodestring(pngbin), "message_type":"console", "encoding":"base64"})
| [
"[email protected]"
] | |
dc1b5ad0564691cc8fb717a8bfc29860996e7e8f | d78989a8ce52a98f48d77228c4ea893f7aae31f7 | /symbolic_expressions/sample25-virt-max-merge-lenght-20.py | add5f4f590b7cb83f8750aaa25befa480edb54ef | [] | no_license | llyuer/Tigress_protection | 78ead2cf9979a7b3287175cd812833167d520244 | 77c68c4c949340158b855561726071cfdd82545f | refs/heads/master | 2020-06-17T11:16:40.078433 | 2019-04-16T09:27:29 | 2019-04-16T09:27:29 | 195,908,093 | 1 | 0 | null | 2019-07-09T01:14:06 | 2019-07-09T01:14:06 | null | UTF-8 | Python | false | false | 8,794 | py | #!/usr/bin/env python2
## -*- coding: utf-8 -*-
import sys
def sx(bits, value):
sign_bit = 1 << (bits - 1)
return (value & (sign_bit - 1)) - (value & sign_bit)
SymVar_0 = int(sys.argv[1])
ref_263 = SymVar_0
ref_278 = ref_263 # MOV operation
ref_5424 = ref_278 # MOV operation
ref_5456 = ref_5424 # MOV operation
ref_5470 = ((ref_5456 << (0xD & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_5880 = ref_278 # MOV operation
ref_5912 = ref_5880 # MOV operation
ref_5926 = (ref_5912 >> (0x33 & 0x3F)) # SHR operation
ref_5963 = ref_5470 # MOV operation
ref_5975 = ref_5926 # MOV operation
ref_5977 = (ref_5975 | ref_5963) # OR operation
ref_6016 = ref_5977 # MOV operation
ref_6709 = ref_278 # MOV operation
ref_6735 = ref_6709 # MOV operation
ref_6751 = ((((0x0) << 64 | ref_6735) / 0x6) & 0xFFFFFFFFFFFFFFFF) # DIV operation
ref_6796 = ref_6751 # MOV operation
ref_6810 = (((sx(0x40, 0xFA0000000002C90C) * sx(0x40, ref_6796)) & 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF) & 0xFFFFFFFFFFFFFFFF) # IMUL operation
ref_6846 = ref_6810 # MOV operation
ref_7472 = ref_6016 # MOV operation
ref_7746 = ref_6846 # MOV operation
ref_7778 = ref_7472 # MOV operation
ref_7790 = ref_7746 # MOV operation
ref_7792 = (ref_7790 | ref_7778) # OR operation
ref_8304 = ref_278 # MOV operation
ref_8336 = ref_8304 # MOV operation
ref_8348 = ref_7792 # MOV operation
ref_8350 = ((ref_8348 + ref_8336) & 0xFFFFFFFFFFFFFFFF) # ADD operation
ref_8390 = ref_8350 # MOV operation
ref_9052 = ref_6016 # MOV operation
ref_9102 = ref_9052 # MOV operation
ref_9116 = ((ref_9102 - 0x2ED5CD7E) & 0xFFFFFFFFFFFFFFFF) # SUB operation
ref_9124 = ref_9116 # MOV operation
ref_9152 = ref_9124 # MOV operation
ref_9154 = ((0x28E5FC28 - ref_9152) & 0xFFFFFFFFFFFFFFFF) # SUB operation
ref_9162 = ref_9154 # MOV operation
ref_9180 = ref_9162 # MOV operation
ref_9194 = (ref_9180 >> (0x2 & 0x3F)) # SHR operation
ref_9241 = ref_9194 # MOV operation
ref_9247 = (0x7 & ref_9241) # AND operation
ref_9294 = ref_9247 # MOV operation
ref_9300 = (0x1 | ref_9294) # OR operation
ref_9685 = ref_6846 # MOV operation
ref_10114 = ref_278 # MOV operation
ref_10146 = ref_10114 # MOV operation
ref_10158 = ref_9685 # MOV operation
ref_10160 = ((ref_10158 + ref_10146) & 0xFFFFFFFFFFFFFFFF) # ADD operation
ref_10198 = ref_10160 # MOV operation
ref_10210 = ref_9300 # MOV operation
ref_10212 = (ref_10198 >> ((ref_10210 & 0xFF) & 0x3F)) # SHR operation
ref_10251 = ref_10212 # MOV operation
ref_10895 = ref_10251 # MOV operation
ref_10919 = ref_10895 # MOV operation
ref_10927 = (ref_10919 >> (0x1 & 0x3F)) # SHR operation
ref_10934 = ref_10927 # MOV operation
ref_10968 = ref_10934 # MOV operation
ref_10982 = (0x7 & ref_10968) # AND operation
ref_11021 = ref_10982 # MOV operation
ref_11035 = (0x1 | ref_11021) # OR operation
ref_11400 = ref_10251 # MOV operation
ref_11424 = ref_11400 # MOV operation
ref_11428 = ref_11035 # MOV operation
ref_11430 = (ref_11428 & 0xFFFFFFFF) # MOV operation
ref_11432 = (ref_11424 >> ((ref_11430 & 0xFF) & 0x3F)) # SHR operation
ref_11439 = ref_11432 # MOV operation
ref_11465 = ref_11439 # MOV operation
ref_11467 = ((ref_11465 >> 56) & 0xFF) # Byte reference - MOV operation
ref_11468 = ((ref_11465 >> 48) & 0xFF) # Byte reference - MOV operation
ref_11469 = ((ref_11465 >> 40) & 0xFF) # Byte reference - MOV operation
ref_11470 = ((ref_11465 >> 32) & 0xFF) # Byte reference - MOV operation
ref_11471 = ((ref_11465 >> 24) & 0xFF) # Byte reference - MOV operation
ref_11472 = ((ref_11465 >> 16) & 0xFF) # Byte reference - MOV operation
ref_11473 = ((ref_11465 >> 8) & 0xFF) # Byte reference - MOV operation
ref_11474 = (ref_11465 & 0xFF) # Byte reference - MOV operation
ref_12525 = ref_8390 # MOV operation
ref_12955 = ref_6016 # MOV operation
ref_13005 = ref_12955 # MOV operation
ref_13019 = (0x7 & ref_13005) # AND operation
ref_13056 = ref_13019 # MOV operation
ref_13070 = ((ref_13056 << (0x2 & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_13107 = ref_12525 # MOV operation
ref_13119 = ref_13070 # MOV operation
ref_13121 = (ref_13119 | ref_13107) # OR operation
ref_13160 = ref_13121 # MOV operation
ref_13738 = ((((ref_11467) << 8 | ref_11468) << 8 | ref_11469) << 8 | ref_11470) # MOV operation
ref_13760 = (ref_13738 & 0xFFFFFFFF) # MOV operation
ref_14796 = ((((ref_11471) << 8 | ref_11472) << 8 | ref_11473) << 8 | ref_11474) # MOV operation
ref_14818 = (ref_14796 & 0xFFFFFFFF) # MOV operation
ref_15392 = (ref_13760 & 0xFFFFFFFF) # MOV operation
ref_15414 = (ref_15392 & 0xFFFFFFFF) # MOV operation
ref_16588 = ref_13160 # MOV operation
ref_17018 = ref_13160 # MOV operation
ref_17068 = ref_17018 # MOV operation
ref_17082 = (0x7 & ref_17068) # AND operation
ref_17119 = ref_17082 # MOV operation
ref_17133 = ((ref_17119 << (0x2 & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_17170 = ref_16588 # MOV operation
ref_17182 = ref_17133 # MOV operation
ref_17184 = (ref_17182 | ref_17170) # OR operation
ref_17223 = ref_17184 # MOV operation
ref_17801 = (ref_14818 & 0xFFFFFFFF) # MOV operation
ref_17823 = (ref_17801 & 0xFFFFFFFF) # MOV operation
ref_18859 = (ref_15414 & 0xFFFFFFFF) # MOV operation
ref_18881 = (ref_18859 & 0xFFFFFFFF) # MOV operation
ref_18883 = (((ref_18881 & 0xFFFFFFFF) >> 24) & 0xFF) # Byte reference - MOV operation
ref_18884 = (((ref_18881 & 0xFFFFFFFF) >> 16) & 0xFF) # Byte reference - MOV operation
ref_18885 = (((ref_18881 & 0xFFFFFFFF) >> 8) & 0xFF) # Byte reference - MOV operation
ref_18886 = ((ref_18881 & 0xFFFFFFFF) & 0xFF) # Byte reference - MOV operation
ref_19455 = (ref_17823 & 0xFFFFFFFF) # MOV operation
ref_19477 = (ref_19455 & 0xFFFFFFFF) # MOV operation
ref_19479 = (((ref_19477 & 0xFFFFFFFF) >> 24) & 0xFF) # Byte reference - MOV operation
ref_19480 = (((ref_19477 & 0xFFFFFFFF) >> 16) & 0xFF) # Byte reference - MOV operation
ref_19481 = (((ref_19477 & 0xFFFFFFFF) >> 8) & 0xFF) # Byte reference - MOV operation
ref_19482 = ((ref_19477 & 0xFFFFFFFF) & 0xFF) # Byte reference - MOV operation
ref_20941 = ref_17223 # MOV operation
ref_21215 = ((((((((ref_18883) << 8 | ref_18884) << 8 | ref_18885) << 8 | ref_18886) << 8 | ref_19479) << 8 | ref_19480) << 8 | ref_19481) << 8 | ref_19482) # MOV operation
ref_21259 = ref_21215 # MOV operation
ref_21273 = (((sx(0x40, 0x4E1A7F2) * sx(0x40, ref_21259)) & 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF) & 0xFFFFFFFFFFFFFFFF) # IMUL operation
ref_21407 = ref_20941 # MOV operation
ref_21411 = ref_21273 # MOV operation
ref_21413 = (ref_21411 ^ ref_21407) # XOR operation
ref_21460 = ref_21413 # MOV operation
ref_21466 = (0xF & ref_21460) # AND operation
ref_21513 = ref_21466 # MOV operation
ref_21519 = (0x1 | ref_21513) # OR operation
ref_21904 = ref_6016 # MOV operation
ref_22178 = ref_6846 # MOV operation
ref_22204 = ref_21904 # MOV operation
ref_22216 = ref_22178 # MOV operation
ref_22218 = (((sx(0x40, ref_22216) * sx(0x40, ref_22204)) & 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF) & 0xFFFFFFFFFFFFFFFF) # IMUL operation
ref_22244 = ref_22218 # MOV operation
ref_22248 = ref_21519 # MOV operation
ref_22250 = (ref_22248 & 0xFFFFFFFF) # MOV operation
ref_22252 = ((ref_22244 << ((ref_22250 & 0xFF) & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_22259 = ref_22252 # MOV operation
ref_22657 = ref_17223 # MOV operation
ref_22931 = ((((((((ref_18883) << 8 | ref_18884) << 8 | ref_18885) << 8 | ref_18886) << 8 | ref_19479) << 8 | ref_19480) << 8 | ref_19481) << 8 | ref_19482) # MOV operation
ref_22975 = ref_22931 # MOV operation
ref_22989 = (((sx(0x40, 0x4E1A7F2) * sx(0x40, ref_22975)) & 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF) & 0xFFFFFFFFFFFFFFFF) # IMUL operation
ref_23123 = ref_22657 # MOV operation
ref_23127 = ref_22989 # MOV operation
ref_23129 = (ref_23127 ^ ref_23123) # XOR operation
ref_23176 = ref_23129 # MOV operation
ref_23182 = (0xF & ref_23176) # AND operation
ref_23229 = ref_23182 # MOV operation
ref_23235 = (0x1 | ref_23229) # OR operation
ref_23284 = ref_23235 # MOV operation
ref_23286 = ((0x40 - ref_23284) & 0xFFFFFFFFFFFFFFFF) # SUB operation
ref_23294 = ref_23286 # MOV operation
ref_23674 = ref_6016 # MOV operation
ref_23948 = ref_6846 # MOV operation
ref_23974 = ref_23674 # MOV operation
ref_23986 = ref_23948 # MOV operation
ref_23988 = (((sx(0x40, ref_23986) * sx(0x40, ref_23974)) & 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF) & 0xFFFFFFFFFFFFFFFF) # IMUL operation
ref_24014 = ref_23988 # MOV operation
ref_24018 = ref_23294 # MOV operation
ref_24020 = (ref_24018 & 0xFFFFFFFF) # MOV operation
ref_24022 = (ref_24014 >> ((ref_24020 & 0xFF) & 0x3F)) # SHR operation
ref_24029 = ref_24022 # MOV operation
ref_24055 = ref_22259 # MOV operation
ref_24059 = ref_24029 # MOV operation
ref_24061 = (ref_24059 | ref_24055) # OR operation
ref_24100 = ref_24061 # MOV operation
ref_24347 = ref_24100 # MOV operation
ref_24349 = ref_24347 # MOV operation
print ref_24349 & 0xffffffffffffffff
| [
"[email protected]"
] | |
abe0ed484c1bcfa8535da2651e1123f7e700260f | 0178e6a705ee8aa6bb0b0a8512bf5184a9d00ded | /Sungjin/Math/n11050/11050.py | 5428a370db68b311085325f58386ff3cee0933cd | [] | no_license | comojin1994/Algorithm_Study | 0379d513abf30e3f55d6a013e90329bfdfa5adcc | 965c97a9b858565c68ac029f852a1c2218369e0b | refs/heads/master | 2021-08-08T14:55:15.220412 | 2021-07-06T11:54:33 | 2021-07-06T11:54:33 | 206,978,984 | 0 | 1 | null | 2020-05-14T14:06:46 | 2019-09-07T14:23:31 | Python | UTF-8 | Python | false | false | 340 | py | import sys
input = sys.stdin.readline
fac = {0:1, 1:1, 2:2, 3:6}
N, K = map(int, input().strip().split())
def facto(n):
if n in fac.keys():
return fac[n]
result = n * facto(n-1)
fac[n] = result
return result
facto(10)
def com(n,k):
result = facto(n)//(facto(k)*facto(n-k))
return result
print(com(N,K)) | [
"[email protected]"
] | |
8647c5a51442aadd068dcd58c6a1fb7470e8818a | 80b7f2a10506f70477d8720e229d7530da2eff5d | /uhd_restpy/testplatform/sessions/ixnetwork/globals/protocolstack/eapoudpglobals/nacsettings/nactlv/apptyperef/apptyperef.py | 43ad9c9e986be6cfddf60fcd98fab1edd7c4fb48 | [
"MIT"
] | permissive | OpenIxia/ixnetwork_restpy | 00fdc305901aa7e4b26e4000b133655e2d0e346a | c8ecc779421bffbc27c906c1ea51af3756d83398 | refs/heads/master | 2023-08-10T02:21:38.207252 | 2023-07-19T14:14:57 | 2023-07-19T14:14:57 | 174,170,555 | 26 | 16 | MIT | 2023-02-02T07:02:43 | 2019-03-06T15:27:20 | Python | UTF-8 | Python | false | false | 5,524 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from uhd_restpy.base import Base
from uhd_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class AppTypeRef(Base):
"""TLV Application Type
The AppTypeRef class encapsulates a required appTypeRef resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'appTypeRef'
_SDM_ATT_MAP = {
'Name': 'name',
'ObjectId': 'objectId',
'Value': 'value',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(AppTypeRef, self).__init__(parent, list_op)
@property
def NacApps(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.protocolstack.eapoudpglobals.nacsettings.nactlv.apptyperef.nacapps.nacapps.NacApps): An instance of the NacApps class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.protocolstack.eapoudpglobals.nacsettings.nactlv.apptyperef.nacapps.nacapps import NacApps
if len(self._object_properties) > 0:
if self._properties.get('NacApps', None) is not None:
return self._properties.get('NacApps')
return NacApps(self)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: AppType Name.
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def ObjectId(self):
# type: () -> str
"""
Returns
-------
- str: Unique identifier for this object
"""
return self._get_attribute(self._SDM_ATT_MAP['ObjectId'])
@property
def Value(self):
# type: () -> int
"""
Returns
-------
- number: AppType ID.
"""
return self._get_attribute(self._SDM_ATT_MAP['Value'])
@Value.setter
def Value(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['Value'], value)
def update(self, Name=None, Value=None):
# type: (str, int) -> AppTypeRef
"""Updates appTypeRef resource on the server.
Args
----
- Name (str): AppType Name.
- Value (number): AppType ID.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(self, Name=None, ObjectId=None, Value=None):
# type: (str, str, int) -> AppTypeRef
"""Finds and retrieves appTypeRef resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve appTypeRef resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all appTypeRef resources from the server.
Args
----
- Name (str): AppType Name.
- ObjectId (str): Unique identifier for this object
- Value (number): AppType ID.
Returns
-------
- self: This instance with matching appTypeRef resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of appTypeRef data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the appTypeRef resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
| [
"[email protected]"
] | |
aa9f33b52226fb2e8f1ef81ab0dddd5b904536b7 | 24bc6f0a0a7b4a04c3289fe96368de81a7bd5191 | /scdaily.py | 7a1704c6456a06aceecf2de6e7a99f2e2d082a8d | [] | no_license | xiaol/foreign_news_crawler | b0b39a22f498a21d303664715a33645306ac28cb | f7471a5e6f884408a6fe451dfe293bf92b61db3c | refs/heads/master | 2021-03-24T10:18:33.989684 | 2016-03-21T06:48:30 | 2016-03-21T06:48:30 | 40,290,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,972 | py | # -*- coding: utf-8 -*-
# 繁体
from crawler_framework.page import get_page
from crawler_framework.Logger import INFO, DBG, ERR
from lxml import etree
from StringIO import StringIO
import traceback
import redis
import time
from Cleaners.langconv import *
r = redis.StrictRedis(host='localhost', port=6379)
source = u"美南新闻"
def scdaily_crawler(url):
text = get_page(url)
parser = etree.HTMLParser()
tree = etree.parse(StringIO(text), parser)
story_links = tree.xpath('.//td[@class="kuang2"]//a')
for story_link in story_links:
try:
story_text_link = "http://www.scdaily.com/" + story_link.get("href")
except:
continue
try:
if r.sismember('duplicates', story_text_link) == True:
continue
story_title = story_link.text.strip()
story_title = Converter('zh-hans').convert(story_title)
story_info = get_text(story_text_link, story_title)
story_text = story_info['content']
if len(story_text) == 0:
continue
r.sadd('duplicates', story_text_link)
r.rpush('stories', story_info)
except:
print traceback.format_exc(),url
pass
def get_text(url, story_title):
text = get_page(url)
parser = etree.HTMLParser()
tree = etree.parse(StringIO(text), parser)
create_time = time.strftime('%Y-%m-%d %H:%M:%S')
story_text = []
count = 0
imgnum = 0
for x in tree.find('.//td[@align="center"]').iter():
try:
if x.tag == "p":
t = x.text.strip()
t = Converter('zh-hans').convert(t)
if len(t) != 0:
dict = {}
dict[str(count)] = {}
dict[str(count)]["txt"] = t
count += 1
story_text.append(dict)
if x.tag == "br":
t = x.tail.strip()
if len(t) != 0:
dict = {}
dict[str(count)] = {}
dict[str(count)]["txt"] = t
count += 1
story_text.append(dict)
if x.tag == "img":
dict = {}
dict[str(count)] = {}
dict[str(count)]["img"] = x.get("src")
count += 1
story_text.append(dict)
imgnum += 1
except:
pass
story_info = {
'content': story_text,
'source': source,
'title': story_title,
'url': url,
'create_time': create_time,
'imgnum': imgnum,
'source_url': url,
'sourceSiteName': source
}
return story_info
if __name__ == "__main__":
scdaily_crawler(url="http://www.scdaily.com/Newslist_more.aspx?Bid=48&Cid=28")
scdaily_crawler(url="http://www.scdaily.com/Newslist_more.aspx?Bid=48&Cid=34") | [
"[email protected]"
] | |
c5eaba36db5bd95c4036ed8903b2d165a8b19ca4 | 56d7cb3fd8911c5821115e145b3f03ca9240fcd0 | /mico/wpi_jaco/build/moveit_ros/visualization/catkin_generated/pkg.installspace.context.pc.py | f4c3063ea2489e6c9bc087c074d104def4dd368e | [] | no_license | Wisc-HCI/therblig-motion-library | d0194da3d54c55491e95bb3ee6e0c594347222db | 47a33b5bb2e1fbd4b8e552ec0126fdd2f89b2092 | refs/heads/master | 2020-04-04T06:17:08.151880 | 2017-12-01T22:05:48 | 2017-12-01T22:05:48 | 52,892,979 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,024 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/hcilab/Documents/John/nri-authoring-environment/wpi_jaco/install/include".split(';') if "/home/hcilab/Documents/John/nri-authoring-environment/wpi_jaco/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "moveit_ros_planning_interface;moveit_ros_robot_interaction".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lmoveit_rviz_plugin_render_tools;-lmoveit_robot_state_rviz_plugin_core;-lmoveit_motion_planning_rviz_plugin_core;-lmoveit_trajectory_rviz_plugin_core;-lmoveit_planning_scene_rviz_plugin_core".split(';') if "-lmoveit_rviz_plugin_render_tools;-lmoveit_robot_state_rviz_plugin_core;-lmoveit_motion_planning_rviz_plugin_core;-lmoveit_trajectory_rviz_plugin_core;-lmoveit_planning_scene_rviz_plugin_core" != "" else []
PROJECT_NAME = "moveit_ros_visualization"
PROJECT_SPACE_DIR = "/home/hcilab/Documents/John/nri-authoring-environment/wpi_jaco/install"
PROJECT_VERSION = "0.6.5"
| [
"[email protected]"
] | |
b3e9fc513baf6b7485ab74e36b4b174fc32fa36e | b22b0760b29d24cff24eda9d1c114094fd1a588f | /Python/Easy/1160. Find Words That Can Be Formed by Characters.py | eda11a25b8d2c586204944db2b82eddef85448f7 | [] | no_license | MridulGangwar/Leetcode-Solutions | bbbaa06058a7b3e7621fc54050e344c06a256080 | d41b1bbd762030733fa271316f19724d43072cd7 | refs/heads/master | 2022-03-07T12:20:33.485573 | 2022-02-21T07:22:38 | 2022-02-21T07:22:38 | 231,700,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | class Solution(object):
def create_dic(self,char):
d={}
for i in char:
if i not in d:
d[i]=1
else:
d[i]+=1
return d
def countCharacters(self, words, chars):
"""
:type words: List[str]
:type chars: str
:rtype: int
"""
dic_chars=self.create_dic(chars)
result=0
for word in words:
dic_word = self.create_dic(word)
count=0
for key in dic_word:
if key in dic_chars and dic_word[key] <= dic_chars[key]:
count+=1
if count==len(dic_word):
result+=len(word)
return result | [
"[email protected]"
] | |
cdaa1a800caa652c9a3c5ec4771a3bf33cacfd17 | 0899b1708e33a54ea34197525e4c5bb08cb238dc | /Day 14/Question_8.py | 8acbfa2526e95eda2c390e868183adb53f12bcfb | [] | no_license | anuragmukherjee2001/30-days-of-python | 510fb6511b36c91b11ef8230b6539f959e1b1f72 | 99bdd4b72f0566c9341fc5e58a159e44f0ac90bc | refs/heads/master | 2023-02-07T09:14:35.381064 | 2020-12-30T15:54:48 | 2020-12-30T15:54:48 | 319,247,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
def square(n):
return n * n
res = map(square, numbers)
print(list(res)) | [
"[email protected]"
] | |
a14fe8bbbf3d0059b982760ec9e6f20dc9de0cd6 | 33dba187e9fd855534c466268012f5589eca9e5d | /pleiades/bulkup/__init__.py | fad8d7eed5eb05d290ebc7c90f44c37d024e9084 | [] | no_license | isawnyu/pleiades-bulkup | c21f75b911fbc1864376647b7e56bccef12d87e5 | 9e70382d2f4abd057c9385228796c4cc2f7b931b | refs/heads/master | 2021-01-10T10:24:32.033323 | 2016-04-06T07:26:38 | 2016-04-06T07:26:38 | 51,455,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py |
from AccessControl.SecurityManagement import newSecurityManager
from Products.CMFCore.utils import getToolByName
from Products.CMFUid.interfaces import IUniqueIdGenerator, \
IUniqueIdAnnotationManagement, IUniqueIdHandler
from zope.component import provideUtility
def secure(context, username):
membership = getToolByName(context, 'portal_membership')
user=membership.getMemberById(username).getUser()
newSecurityManager(None, user.__of__(context.acl_users))
def setup_cmfuid(context):
provideUtility(
getToolByName(context, 'portal_uidgenerator'), IUniqueIdGenerator)
provideUtility(
getToolByName(context, 'portal_uidannotation'),
IUniqueIdAnnotationManagement)
provideUtility(
getToolByName(context, 'portal_uidhandler'), IUniqueIdHandler)
| [
"[email protected]"
] | |
bfcdcf7f283e5a3ddfe38976aabedc3679d0df3a | ed7cd7760c708720f5a847a02b0c3a50cca0175e | /examples/placeholder.py | a61739b86f87b4f7733e7bb9f6ea01bac1fb0fd0 | [
"MIT"
] | permissive | jcapriot/aurora | bf98b1236e7dc43e0189df71725f7f862d271984 | 08d5ccc671054a2b646a4effb412a2ed48314646 | refs/heads/main | 2023-09-05T00:07:16.984109 | 2021-10-27T02:49:41 | 2021-10-27T02:49:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | """
Placeholder for example
===========================
This example is a placeholder that uses the sphinx-gallery syntax
for creating examples
"""
import aurora
import numpy as np
import matplotlib.pyplot as plt
###############################################################################
# Step 1
# ------
# Some description of what we are doing in step one
x = np.linspace(0, 4*np.pi, 100)
# take the sin(x)
y = np.sin(x)
###############################################################################
# Step 2
# ------
# Plot it
fig, ax = plt.subplots(1, 1)
ax.plot(x, y)
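# Outside sphinx-gallery an explicit plt.show() would be needed here to
# display the figure.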
| [
"[email protected]"
] | |
fd6330691ccb8216b80605439df638507cbcadea | c92f43835821d8df2b93dfd781f890e56891f849 | /Python3/136. Single Number.py | 08c551b8c58bbccbf71e1502468350056064ee29 | [] | no_license | iamdoublewei/Leetcode | f4ae87ed8c31537098790842a72cafa5747d8588 | e36f343aab109b051a9c3a96956c50b5580c7c15 | refs/heads/master | 2022-11-06T01:31:56.181800 | 2022-11-04T20:07:35 | 2022-11-04T20:07:35 | 71,944,123 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | '''
Given a non-empty array of integers, every element appears twice except for one. Find that single one.
Note:
Your algorithm should have a linear runtime complexity. Could you implement it without using extra memory?
Example 1:
Input: [2,2,1]
Output: 1
Example 2:
Input: [4,1,2,1,2]
Output: 4
'''
class Solution:
def singleNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums.sort()
for i in range(0, len(nums), 2):
if i+1<len(nums) and nums[i] != nums[i+1]:
return nums[i]
return nums[len(nums) - 1] | [
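# An O(n) time / O(1) space alternative that satisfies the follow-up: XOR all
# values together; paired numbers cancel out, leaving the single one.
#   def single_number_xor(nums):
#       acc = 0
#       for x in nums:
#           acc ^= x
#       return acc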
"[email protected]"
] | |
3431b32a3dfd191d918b9c47842bad2a65b9ff7d | ecfa863dd3c5826c1df82df92860439616f4b0f4 | /常见算法.py | 2eb7fff127951601364d6b0d4e870272a7e4c1e5 | [
"MIT"
] | permissive | TinlokLee/Algorithm | 92affce8f20f692b8f1784dbc8d995dec58ede61 | 2230377222209c99929b2e7430798be420c73420 | refs/heads/master | 2020-04-11T23:38:49.122767 | 2018-12-17T18:36:36 | 2018-12-17T18:36:36 | 162,174,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,523 | py | # 冒泡排序
def bubble_sort(arr):
for i in range(len(arr) - 1):
for j in range(len(arr) - i - 1):
if arr[j] > arr[j+1]:
arr[j], arr[j+1] = arr[j+1], arr[j]
return arr
'''
冒泡排序:
重复遍历待排序列,一次比较两个相邻元素(A-Z0-9)按升序排,直到没有需要交换的元素
时间复制度 O(n)
'''
# 选择排序
def selection_sort(nd):
n = len(nd)
for i in range(n-1):
min_index = i
for j in range(i+1, n):
if nd[j] < nd[min_index]:
min_index = j
if min_index != i:
nd[i], nd[min_index] = nd[min_index], nd[i]
'''
选择排序:
待排序列取最大(小)元素,放在序列起始位置,再从序列中选最值,放末尾,依此类推
数据移动排序
时间复制度O(n2)
'''
# 插入排序
def insert_sort(nd):
for i in range(1, len(nd)):
for j in range(i, 0, -1):
if nd[j] < nd[j-1]:
nd[j], nd[j-1] = nd[j-1], nd[j]
'''
插入排序:
有序序列,从后向前扫描,找到相应位置并插入
时间复制度O(n)
'''
# 快速排序
def quick_sort(nd):
if len(nd) == 0: return
tem = nd[0]
for i in range(len(nd)-1):
less = quick_sort([i for i in nd[1:] if tem < i])
gerter = quick_sort([i for i in nd[1:] if tem > i])
return less + [tem] + gerter
| [
"[email protected]"
] | |
3a6b3e8740f8ae94a19ceb717f7a32d089c78bf0 | ec85250addb7357dfe7bb3e0680d53fc7b0fd8fb | /examples/docs_snippets/docs_snippets/integrations/dagstermill/notebook_outputs.py | defefa97d47cec4e5bcd41f8d4299e2f663056b1 | [
"Apache-2.0"
] | permissive | dagster-io/dagster | 6adb5deee8bcf3ea1866a6a64f2ed81e1db5e73a | fe21995e0402878437a828c6a4244025eac8c43b | refs/heads/master | 2023-09-05T20:46:08.203794 | 2023-09-05T19:54:52 | 2023-09-05T19:54:52 | 131,619,646 | 8,565 | 1,154 | Apache-2.0 | 2023-09-14T21:57:37 | 2018-04-30T16:30:04 | Python | UTF-8 | Python | false | false | 751 | py | # start_notebook
# my_notebook.ipynb
import dagstermill
dagstermill.yield_result(3, output_name="my_output")
# end_notebook
# start_py_file
from dagstermill import ConfigurableLocalOutputNotebookIOManager, define_dagstermill_op
from dagster import Out, file_relative_path, job, op
my_notebook_op = define_dagstermill_op(
name="my_notebook",
notebook_path=file_relative_path(__file__, "./notebooks/my_notebook.ipynb"),
output_notebook_name="output_notebook",
outs={"my_output": Out(int)},
)
@op
def add_two(x):
return x + 2
@job(
resource_defs={
"output_notebook_io_manager": ConfigurableLocalOutputNotebookIOManager(),
}
)
def my_job():
three, _ = my_notebook_op()
add_two(three)
# end_py_file
| [
"[email protected]"
] | |
cbf2de85455c666aeb0a091025fe568c2ef5cfcb | 7895cbaced82455230c2bc4d42be5b3e4c67cab8 | /pytransform3d/test/test_batch_rotations.py | 2b1c68edbc001bdbde3ab85558dbfdc813fefbcd | [
"BSD-3-Clause"
] | permissive | dfki-ric/pytransform3d | 0ee7e37c92e7bd328f31813610a9797d296e3cd3 | cc923cd91417a41ab08b32278eeabc2e31ab6a93 | refs/heads/main | 2023-08-18T07:22:09.271385 | 2023-08-10T08:41:15 | 2023-08-10T08:45:10 | 91,809,394 | 168 | 25 | NOASSERTION | 2023-08-22T20:09:27 | 2017-05-19T13:35:08 | Python | UTF-8 | Python | false | false | 16,996 | py | import numpy as np
from pytransform3d import rotations as pr
from pytransform3d import batch_rotations as pbr
from numpy.testing import assert_array_almost_equal
import pytest
def test_norm_vectors_0dims():
rng = np.random.default_rng(8380)
V = rng.standard_normal(size=3)
V_unit = pbr.norm_vectors(V)
assert pytest.approx(np.linalg.norm(V_unit)) == 1.0
def test_norm_vectors_1dim():
rng = np.random.default_rng(8381)
V = rng.standard_normal(size=(100, 3))
V_unit = pbr.norm_vectors(V)
assert_array_almost_equal(
np.linalg.norm(V_unit, axis=1), np.ones(len(V)))
def test_norm_vectors_1dim_output_variable():
rng = np.random.default_rng(8381)
V = rng.standard_normal(size=(100, 3))
pbr.norm_vectors(V, out=V)
assert_array_almost_equal(
np.linalg.norm(V, axis=1), np.ones(len(V)))
def test_norm_vectors_3dims():
rng = np.random.default_rng(8382)
V = rng.standard_normal(size=(8, 2, 8, 3))
V_unit = pbr.norm_vectors(V)
assert_array_almost_equal(
np.linalg.norm(V_unit, axis=-1), np.ones(V_unit.shape[:-1]))
def test_norm_vectors_zero():
V = np.zeros((3, 8, 1, 2))
V_unit = pbr.norm_vectors(V)
assert_array_almost_equal(V_unit, V)
def test_angles_between_vectors_0dims():
rng = np.random.default_rng(228)
A = rng.standard_normal(size=3)
B = rng.standard_normal(size=3)
angles = pbr.angles_between_vectors(A, B)
angles2 = pr.angle_between_vectors(A, B)
assert_array_almost_equal(angles, angles2)
def test_angles_between_vectors_1dim():
rng = np.random.default_rng(229)
A = rng.standard_normal(size=(100, 3))
B = rng.standard_normal(size=(100, 3))
angles = pbr.angles_between_vectors(A, B)
angles2 = [pr.angle_between_vectors(a, b) for a, b in zip(A, B)]
assert_array_almost_equal(angles, angles2)
def test_angles_between_vectors_3dims():
rng = np.random.default_rng(230)
A = rng.standard_normal(size=(2, 4, 3, 4))
B = rng.standard_normal(size=(2, 4, 3, 4))
angles = pbr.angles_between_vectors(A, B).ravel()
angles2 = [pr.angle_between_vectors(a, b)
for a, b in zip(A.reshape(-1, 4), B.reshape(-1, 4))]
assert_array_almost_equal(angles, angles2)
def test_active_matrices_from_angles_0dims():
R = pbr.active_matrices_from_angles(0, 0.4)
assert_array_almost_equal(R, pr.active_matrix_from_angle(0, 0.4))
def test_active_matrices_from_angles_1dim():
Rs = pbr.active_matrices_from_angles(1, [0.4, 0.5, 0.6])
assert_array_almost_equal(Rs[0], pr.active_matrix_from_angle(1, 0.4))
assert_array_almost_equal(Rs[1], pr.active_matrix_from_angle(1, 0.5))
assert_array_almost_equal(Rs[2], pr.active_matrix_from_angle(1, 0.6))
def test_active_matrices_from_angles_3dims():
rng = np.random.default_rng(8383)
angles = rng.standard_normal(size=(2, 3, 4))
Rs = pbr.active_matrices_from_angles(2, angles)
Rs = Rs.reshape(-1, 3, 3)
Rs2 = [pr.active_matrix_from_angle(2, angle)
for angle in angles.reshape(-1)]
assert_array_almost_equal(Rs, Rs2)
def test_active_matrices_from_angles_3dims_output_variable():
rng = np.random.default_rng(8384)
angles = rng.standard_normal(size=(2, 3, 4))
Rs = np.empty((2, 3, 4, 3, 3))
pbr.active_matrices_from_angles(2, angles, out=Rs)
Rs = Rs.reshape(-1, 3, 3)
Rs2 = [pr.active_matrix_from_angle(2, angle)
for angle in angles.reshape(-1)]
assert_array_almost_equal(Rs, Rs2)
def test_active_matrices_from_intrinsic_euler_angles_0dims():
rng = np.random.default_rng(8383)
e = rng.standard_normal(size=3)
R = pbr.active_matrices_from_intrinsic_euler_angles(2, 1, 0, e)
R2 = pr.active_matrix_from_intrinsic_euler_zyx(e)
assert_array_almost_equal(R, R2)
def test_active_matrices_from_intrinsic_euler_angles_1dim():
rng = np.random.default_rng(8384)
e = rng.standard_normal(size=(10, 3))
Rs = pbr.active_matrices_from_intrinsic_euler_angles(2, 1, 0, e)
for i in range(len(e)):
Ri = pr.active_matrix_from_intrinsic_euler_zyx(e[i])
assert_array_almost_equal(Rs[i], Ri)
def test_active_matrices_from_intrinsic_euler_angles_1dim_output_variables():
rng = np.random.default_rng(8384)
e = rng.standard_normal(size=(10, 3))
Rs = np.empty((10, 3, 3))
pbr.active_matrices_from_intrinsic_euler_angles(2, 1, 0, e, out=Rs)
for i in range(len(e)):
Ri = pr.active_matrix_from_intrinsic_euler_zyx(e[i])
assert_array_almost_equal(Rs[i], Ri)
def test_active_matrices_from_intrinsic_euler_angles_3dims():
rng = np.random.default_rng(8385)
e = rng.standard_normal(size=(2, 3, 4, 3))
Rs = pbr.active_matrices_from_intrinsic_euler_angles(
2, 1, 0, e).reshape(-1, 3, 3)
e = e.reshape(-1, 3)
for i in range(len(e)):
Ri = pr.active_matrix_from_intrinsic_euler_zyx(e[i])
assert_array_almost_equal(Rs[i], Ri)
def test_active_matrices_from_extrinsic_euler_angles_0dims():
rng = np.random.default_rng(8383)
e = rng.standard_normal(size=3)
R = pbr.active_matrices_from_extrinsic_euler_angles(2, 1, 0, e)
R2 = pr.active_matrix_from_extrinsic_euler_zyx(e)
assert_array_almost_equal(R, R2)
def test_active_matrices_from_extrinsic_euler_angles_1dim():
rng = np.random.default_rng(8384)
e = rng.standard_normal(size=(10, 3))
Rs = pbr.active_matrices_from_extrinsic_euler_angles(2, 1, 0, e)
for i in range(len(e)):
Ri = pr.active_matrix_from_extrinsic_euler_zyx(e[i])
assert_array_almost_equal(Rs[i], Ri)
def test_active_matrices_from_extrinsic_euler_angles_3dim():
rng = np.random.default_rng(8385)
e = rng.standard_normal(size=(2, 3, 4, 3))
Rs = pbr.active_matrices_from_extrinsic_euler_angles(
2, 1, 0, e).reshape(-1, 3, 3)
e = e.reshape(-1, 3)
for i in range(len(e)):
Ri = pr.active_matrix_from_extrinsic_euler_zyx(e[i])
assert_array_almost_equal(Rs[i], Ri)
def test_active_matrices_from_extrinsic_euler_angles_1dim_output_variable():
rng = np.random.default_rng(8385)
e = rng.standard_normal(size=(10, 3))
Rs = np.empty((10, 3, 3))
pbr.active_matrices_from_extrinsic_euler_angles(2, 1, 0, e, out=Rs)
for i in range(len(e)):
Ri = pr.active_matrix_from_extrinsic_euler_zyx(e[i])
assert_array_almost_equal(Rs[i], Ri)
def test_cross_product_matrix():
rng = np.random.default_rng(3820)
v = rng.standard_normal(size=3)
assert_array_almost_equal(
pbr.cross_product_matrices(v), pr.cross_product_matrix(v))
def test_cross_product_matrices():
rng = np.random.default_rng(3820)
V = rng.standard_normal(size=(2, 2, 3, 3))
V_cpm = pbr.cross_product_matrices(V)
V_cpm = V_cpm.reshape(-1, 3, 3)
V_cpm2 = [pr.cross_product_matrix(v) for v in V.reshape(-1, 3)]
assert_array_almost_equal(V_cpm, V_cpm2)
def test_matrices_from_quaternions():
rng = np.random.default_rng(83)
for _ in range(5):
q = pr.random_quaternion(rng)
R = pbr.matrices_from_quaternions(
q[np.newaxis], normalize_quaternions=False)[0]
q2 = pr.quaternion_from_matrix(R)
pr.assert_quaternion_equal(q, q2)
for _ in range(5):
q = rng.standard_normal(size=4)
R = pbr.matrices_from_quaternions(
q[np.newaxis], normalize_quaternions=True)[0]
q2 = pr.quaternion_from_matrix(R)
pr.assert_quaternion_equal(q / np.linalg.norm(q), q2)
def test_quaternions_from_matrices():
rng = np.random.default_rng(84)
for _ in range(5):
q = pr.random_quaternion(rng)
R = pr.matrix_from_quaternion(q)
q2 = pbr.quaternions_from_matrices(R[np.newaxis])[0]
pr.assert_quaternion_equal(q, q2)
a = np.array([1.0, 0.0, 0.0, np.pi])
q = pr.quaternion_from_axis_angle(a)
R = pr.matrix_from_axis_angle(a)
q_from_R = pbr.quaternions_from_matrices(R[np.newaxis])[0]
assert_array_almost_equal(q, q_from_R)
a = np.array([0.0, 1.0, 0.0, np.pi])
q = pr.quaternion_from_axis_angle(a)
R = pr.matrix_from_axis_angle(a)
q_from_R = pbr.quaternions_from_matrices(R[np.newaxis])[0]
assert_array_almost_equal(q, q_from_R)
a = np.array([0.0, 0.0, 1.0, np.pi])
q = pr.quaternion_from_axis_angle(a)
R = pr.matrix_from_axis_angle(a)
q_from_R = pbr.quaternions_from_matrices(R[np.newaxis])[0]
assert_array_almost_equal(q, q_from_R)
def test_quaternions_from_matrices_no_batch():
rng = np.random.default_rng(85)
for _ in range(5):
q = pr.random_quaternion(rng)
R = pr.matrix_from_quaternion(q)
q2 = pbr.quaternions_from_matrices(R)
pr.assert_quaternion_equal(q, q2)
a = np.array([1.0, 0.0, 0.0, np.pi])
q = pr.quaternion_from_axis_angle(a)
R = pr.matrix_from_axis_angle(a)
q_from_R = pbr.quaternions_from_matrices(R)
assert_array_almost_equal(q, q_from_R)
a = np.array([0.0, 1.0, 0.0, np.pi])
q = pr.quaternion_from_axis_angle(a)
R = pr.matrix_from_axis_angle(a)
q_from_R = pbr.quaternions_from_matrices(R)
assert_array_almost_equal(q, q_from_R)
a = np.array([0.0, 0.0, 1.0, np.pi])
q = pr.quaternion_from_axis_angle(a)
R = pr.matrix_from_axis_angle(a)
q_from_R = pbr.quaternions_from_matrices(R)
assert_array_almost_equal(q, q_from_R)
def test_quaternions_from_matrices_4d():
rng = np.random.default_rng(84)
for _ in range(5):
q = pr.random_quaternion(rng)
R = pr.matrix_from_quaternion(q)
q2 = pbr.quaternions_from_matrices([[R, R], [R, R]])
pr.assert_quaternion_equal(q, q2[0, 0])
pr.assert_quaternion_equal(q, q2[0, 1])
pr.assert_quaternion_equal(q, q2[1, 0])
pr.assert_quaternion_equal(q, q2[1, 1])
def test_axis_angles_from_matrices_0dims():
rng = np.random.default_rng(84)
A = rng.standard_normal(size=3)
A /= np.linalg.norm(A, axis=-1)[..., np.newaxis]
A *= rng.random() * np.pi
Rs = pbr.matrices_from_compact_axis_angles(A)
A2 = pbr.axis_angles_from_matrices(Rs)
A2_compact = A2[:3] * A2[3]
assert_array_almost_equal(A, A2_compact)
def test_axis_angles_from_matrices():
rng = np.random.default_rng(84)
A = rng.standard_normal(size=(2, 3, 3))
A /= np.linalg.norm(A, axis=-1)[..., np.newaxis]
A *= rng.random(size=(2, 3, 1)) * np.pi
A[0, 0, :] = 0.0
Rs = pbr.matrices_from_compact_axis_angles(A)
A2 = pbr.axis_angles_from_matrices(Rs)
A2_compact = A2[..., :3] * A2[..., 3, np.newaxis]
assert_array_almost_equal(A, A2_compact)
def test_axis_angles_from_matrices_output_variable():
rng = np.random.default_rng(84)
A = rng.standard_normal(size=(2, 3, 3))
A /= np.linalg.norm(A, axis=-1)[..., np.newaxis]
A *= rng.random(size=(2, 3, 1)) * np.pi
A[0, 0, :] = 0.0
Rs = np.empty((2, 3, 3, 3))
pbr.matrices_from_compact_axis_angles(A, out=Rs)
A2 = np.empty((2, 3, 4))
pbr.axis_angles_from_matrices(Rs, out=A2)
A2_compact = A2[..., :3] * A2[..., 3, np.newaxis]
assert_array_almost_equal(A, A2_compact)
def test_quaternion_slerp_batch_zero_angle():
rng = np.random.default_rng(228)
q = pr.random_quaternion(rng)
Q = pbr.quaternion_slerp_batch(q, q, [0.5])
pr.assert_quaternion_equal(q, Q[0])
def test_quaternion_slerp_batch():
rng = np.random.default_rng(229)
q_start = pr.random_quaternion(rng)
q_end = pr.random_quaternion(rng)
t = np.linspace(0, 1, 101)
Q = pbr.quaternion_slerp_batch(q_start, q_end, t)
for i in range(len(t)):
qi = pr.quaternion_slerp(q_start, q_end, t[i])
pr.assert_quaternion_equal(Q[i], qi)
def test_quaternion_slerp_batch_sign_ambiguity():
n_steps = 10
rng = np.random.default_rng(2323)
q1 = pr.random_quaternion(rng)
a1 = pr.axis_angle_from_quaternion(q1)
a2 = np.r_[a1[:3], a1[3] * 1.1]
q2 = pr.quaternion_from_axis_angle(a2)
if np.sign(q1[0]) != np.sign(q2[0]):
q2 *= -1.0
traj_q = pbr.quaternion_slerp_batch(
q1, q2, np.linspace(0, 1, n_steps), shortest_path=True)
path_length = np.sum([pr.quaternion_dist(r, s)
for r, s in zip(traj_q[:-1], traj_q[1:])])
q2 *= -1.0
traj_q_opposing = pbr.quaternion_slerp_batch(
q1, q2, np.linspace(0, 1, n_steps), shortest_path=False)
path_length_opposing = np.sum(
[pr.quaternion_dist(r, s)
for r, s in zip(traj_q_opposing[:-1], traj_q_opposing[1:])])
assert path_length_opposing > path_length
traj_q_opposing_corrected = pbr.quaternion_slerp_batch(
q1, q2, np.linspace(0, 1, n_steps), shortest_path=True)
path_length_opposing_corrected = np.sum(
[pr.quaternion_dist(r, s)
for r, s in zip(traj_q_opposing_corrected[:-1],
traj_q_opposing_corrected[1:])])
assert pytest.approx(path_length_opposing_corrected) == path_length
def test_batch_concatenate_quaternions_mismatch():
Q1 = np.zeros((1, 2, 4))
Q2 = np.zeros((1, 2, 3, 4))
with pytest.raises(
ValueError, match="Number of dimensions must be the same."):
pbr.batch_concatenate_quaternions(Q1, Q2)
Q1 = np.zeros((1, 2, 4, 4))
Q2 = np.zeros((1, 2, 3, 4))
with pytest.raises(
ValueError, match="Size of dimension 3 does not match"):
pbr.batch_concatenate_quaternions(Q1, Q2)
Q1 = np.zeros((1, 2, 3, 3))
Q2 = np.zeros((1, 2, 3, 4))
with pytest.raises(
ValueError, match="Last dimension of first argument does not "
"match."):
pbr.batch_concatenate_quaternions(Q1, Q2)
Q1 = np.zeros((1, 2, 3, 4))
Q2 = np.zeros((1, 2, 3, 3))
with pytest.raises(
ValueError, match="Last dimension of second argument does "
"not match."):
pbr.batch_concatenate_quaternions(Q1, Q2)
def test_batch_concatenate_quaternions_1d():
rng = np.random.default_rng(230)
q1 = pr.random_quaternion(rng)
q2 = pr.random_quaternion(rng)
q12 = np.empty(4)
pbr.batch_concatenate_quaternions(q1, q2, out=q12)
assert_array_almost_equal(
q12, pr.concatenate_quaternions(q1, q2))
def test_batch_q_conj_1d():
rng = np.random.default_rng(230)
q = pr.random_quaternion(rng)
assert_array_almost_equal(pr.q_conj(q), pbr.batch_q_conj(q))
def test_batch_concatenate_q_conj():
rng = np.random.default_rng(231)
Q = np.array([pr.random_quaternion(rng)
for _ in range(10)])
Q = Q.reshape(2, 5, 4)
Q_conj = pbr.batch_q_conj(Q)
Q_Q_conj = pbr.batch_concatenate_quaternions(Q, Q_conj)
assert_array_almost_equal(
Q_Q_conj.reshape(-1, 4),
np.array([[1, 0, 0, 0]] * 10))
def test_batch_convert_quaternion_conventions():
q_wxyz = np.array([[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]])
q_xyzw = pbr.batch_quaternion_xyzw_from_wxyz(q_wxyz)
assert_array_almost_equal(
q_xyzw, np.array([[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0]]))
pbr.batch_quaternion_xyzw_from_wxyz(q_wxyz, out=q_xyzw)
assert_array_almost_equal(
q_xyzw, np.array([[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0]]))
q_wxyz2 = pbr.batch_quaternion_wxyz_from_xyzw(q_xyzw)
assert_array_almost_equal(q_wxyz, q_wxyz2)
pbr.batch_quaternion_wxyz_from_xyzw(q_xyzw, out=q_wxyz2)
assert_array_almost_equal(q_wxyz, q_wxyz2)
rng = np.random.default_rng(42)
q_wxyz_random = pr.random_quaternion(rng)
q_xyzw_random = pbr.batch_quaternion_xyzw_from_wxyz(q_wxyz_random)
assert_array_almost_equal(q_xyzw_random[:3], q_wxyz_random[1:])
assert q_xyzw_random[3] == q_wxyz_random[0]
q_wxyz_random2 = pbr.batch_quaternion_wxyz_from_xyzw(q_xyzw_random)
assert_array_almost_equal(q_wxyz_random, q_wxyz_random2)
def test_smooth_quaternion_trajectory():
rng = np.random.default_rng(232)
q_start = pr.random_quaternion(rng)
if q_start[1] < 0.0:
q_start *= -1.0
q_goal = pr.random_quaternion(rng)
n_steps = 101
Q = np.empty((n_steps, 4))
for i, t in enumerate(np.linspace(0, 1, n_steps)):
Q[i] = pr.quaternion_slerp(q_start, q_goal, t)
Q_broken = Q.copy()
Q_broken[20:23, :] *= -1.0
Q_broken[80:, :] *= -1.0
Q_smooth = pbr.smooth_quaternion_trajectory(Q_broken)
assert_array_almost_equal(Q_smooth, Q)
def test_smooth_quaternion_trajectory_start_component_negative():
rng = np.random.default_rng(232)
for index in range(4):
component = "wxyz"[index]
q = pr.random_quaternion(rng)
if q[index] > 0.0:
q *= -1.0
q_corrected = pbr.smooth_quaternion_trajectory(
[q], start_component_positive=component)[0]
assert q_corrected[index] > 0.0
def test_smooth_quaternion_trajectory_empty():
with pytest.raises(
ValueError, match=r"At least one quaternion is expected"):
pbr.smooth_quaternion_trajectory(np.zeros((0, 4)))
| [
"[email protected]"
] | |
a7ca652897c1a319681cfd0644e3acbbff503f72 | f6ff58f0bcc22731f246de979bc4ff98216b7fa9 | /FriendsListHolder.py | a1914e7749cc895bca8be04d4c65fcc90cc11e63 | [] | no_license | stephenhky/fb_analysis | 80e0492a35ea265e1c399ba0039b278513c5edb3 | 93f4ca590b57812f8cb124c1eeb7946cbf24d96b | refs/heads/master | 2020-05-18T04:42:59.069845 | 2013-09-06T20:21:21 | 2013-09-06T20:21:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 25 17:35:46 2013
@author: hok1
"""
import fbtools
class FriendListHolder:
def __init__(self, selfuid, access_token):
self.selfuid = selfuid
self.access_token = access_token
self.friend_uids = fbtools.getFriendUIDList(selfuid, access_token)
def check_iffriend(self, uid):
return (uid in self.friend_uids)
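# Illustrative usage sketch (hypothetical uid/token values; fbtools needs a
# valid Facebook access token to resolve the friend list):
#   holder = FriendListHolder('100000', 'ACCESS_TOKEN')
#   holder.check_iffriend('100001')  # -> True or False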
| [
"[email protected]"
] | |
d161ba21b9554ec5054d115609033279790f3297 | 844e0cd4ffbe1ead05b844508276f66cc20953d5 | /test/utilityfortest.py | a2a80aae6eaf0ece9c2d9766dd6d227194034b68 | [] | no_license | Archanciel/cryptopricer | a256fa793bb1f2d65b5c032dd81a266ee5be79cc | 00c0911fe1c25c1da635dbc9b26d45be608f0cc5 | refs/heads/master | 2022-06-29T13:13:22.435670 | 2022-05-11T20:37:43 | 2022-05-11T20:37:43 | 100,196,449 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,271 | py | import os,sys,inspect
import re
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from datetimeutil import DateTimeUtil
class UtilityForTest:
'''
This class contains static utility methods used by some unit test classes. It avoids code duplication.
'''
@staticmethod
def getFormattedDateTimeComponentsForArrowDateTimeObj(dateTimeObj):
'''
Return dateTimeObjYearStr, dateTimeObjMonthStr, dateTimeObjDayStr, dateTimeObjHourStr,
dateTimeObjMinuteStr corresponding to the passed Arrow date object
:param dateTimeObj: passed Arrow date object
:return:
'''
dateTimeObjDayStr = dateTimeObj.format('DD')
dateTimeObjMonthStr = dateTimeObj.format('MM')
dateTimeObjYearStr = dateTimeObj.format('YY')
dateTimeObjHourStr = dateTimeObj.format('HH')
dateTimeObjMinuteStr = dateTimeObj.format('mm')
return dateTimeObjYearStr, dateTimeObjMonthStr, dateTimeObjDayStr, dateTimeObjHourStr, dateTimeObjMinuteStr
@staticmethod
def removeOneEndPriceFromResult(resultStr):
'''
Used to remove unique price from RT request results or variable date/time price request results
:param resultStr:
:return:
'''
patternNoWarning = r"(.*) ([\d\.]*)"
patternOneWarning = r"(.*) ([\d\.]*)(\n.*)" #in raw string, \ must not be escaped (\\n not working !)
match = re.match(patternOneWarning, resultStr)
if (match):
if len(match.groups()) == 3:
# here, resultStr contains a warning like in
# BTC/USD on CCCAGG: 30/01/18 01:51R 11248.28\nWarning - unsupported command -ebitfinex in request btc usd 0 all -ebitfinex !
return match.group(1) + match.group(3)
match = re.match(patternNoWarning, resultStr)
if (match):
if len(match.groups()) == 2:
# the case for resultStr containing BTC/USD on CCCAGG: 30/01/18 01:49R 11243.72 for example !
return match.group(1)
return ()
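    # Illustrative example (sample string taken from the pattern notes above):
    #   removeOneEndPriceFromResult('BTC/USD on CCCAGG: 30/01/18 01:49R 11243.72')
    #   returns 'BTC/USD on CCCAGG: 30/01/18 01:49R'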
@staticmethod
def removeTwoEndPricesFromResult(resultStr):
'''
Used to remove two prices from RT request results with -f (fiat) option or variable date/time price request
results with -f (fiat) option
:param resultStr:
:return:
'''
patternNoWarning = r"(.*) (?:[\d\.]*) (?:[\d\.]*)"
patternOneWarning = r"(.*) (?:[\d\.]*) (?:[\d\.]*)(\n.*)" #in raw string, \ must not be escaped (\\n not working !)
match = re.match(patternOneWarning, resultStr)
if (match):
if len(match.groups()) == 2:
# here, resultStr contains a warning like in
# BTC/USD on CCCAGG: 30/01/18 01:51R 11248.28\nWarning - unsupported command -ebitfinex in request btc usd 0 all -ebitfinex !
return match.group(1) + match.group(2)
match = re.match(patternNoWarning, resultStr)
if (match):
if len(match.groups()) == 1:
# the case for resultStr containing BTC/USD on CCCAGG: 30/01/18 01:49R 11243.72 for example !
return match.group(1)
return ()
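    # Illustrative example (hypothetical result string, following the documented
    # two-price format):
    #   removeTwoEndPricesFromResult(
    #       'BTC/USD/CHF.Kraken on CCCAGG: 30/01/18 01:49R 11243.72 10925.41')
    #   returns 'BTC/USD/CHF.Kraken on CCCAGG: 30/01/18 01:49R'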
@staticmethod
def removeAllPricesFromCommandValueResult(resultStr):
'''
Used to remove multiple prices from RT request results or variable date/time price request results
:param resultStr:
:return:
'''
# pattern for this kind of output result:
# 0.06180355 ETH/100 USD on AVG: 03/03/21 20:03M 1618.03 which correspond to request
# eth usd 0 binance -v100usd
patternNoWarningValueOption = r"(?:[\d\.]*) (\w*/)(?:[\d\.]*) (.*) (?:[\d\.]*)"
patternOneWarningValueOption = r"(?:[\d\.]*) (\w*/)(?:[\d\.]*) (.*) (?:[\d\.]*(\n.*))"
# pattern for this kind of output result:
# 60851.6949266 CHSB/1.46530881 BTC/1000 USD.Kraken on HitBTC: 06/03/21 20:00R 0.00002408 0.0164334 which correspond to request
# chsb btc 0 hitbtc -v1000usd -fusd.kraken
patternNoWarningValueAndFiatOption = r"(?:[\d\.]*) (\w*/)(?:[\d\.]*) (\w*/)(?:[\d\.]* )(.*) (?:[\d\.]*) (?:[\d\.]*)"
patternOneWarningValueAndFiatOption = r"(?:[\d\.]*) (\w*/)(?:[\d\.]*) (\w*/)(?:[\d\.]* )(.*) (?:[\d\.]*) (?:[\d\.]*(\n.*))"
# pattern for this kind of output result:
# CHSB/BTC/USD.Kraken on HitBTC: 06/03/21 20:00R 0.00002408 0.0164334 which correspond to request
# chsb btc 0 hitbtc -fusd.kraken
        patternNoWarningFiatOption = r'(.*) (?:[\d\.]*) (?:[\d\.]*)'
        patternOneWarningFiatOption = r'(.*) (?:[\d\.]*) (?:[\d\.]*(\n.*))'
match = re.match(patternOneWarningValueAndFiatOption, resultStr)
if match != None and len(match.groups()) == 4:
return match.group(1) + match.group(2) + match.group(3) + match.group(4)
match = re.match(patternNoWarningValueAndFiatOption, resultStr)
if match != None and len(match.groups()) == 3:
return match.group(1) + match.group(2) + match.group(3)
match = re.match(patternOneWarningValueOption, resultStr)
if match != None and len(match.groups()) == 3:
return match.group(1) + match.group(2) + match.group(3)
match = re.match(patternNoWarningValueOption, resultStr)
if match != None and len(match.groups()) == 2:
return match.group(1) + match.group(2)
match = re.match(patternOneWarningFiatOption, resultStr)
if match != None and len(match.groups()) == 2:
return match.group(1) + match.group(2)
match = re.match(patternNoWarningFiatOption, resultStr)
if match != None and len(match.groups()) == 1:
return match.group(1)
else:
return ()
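    # Illustrative example (sample string from the pattern notes above):
    #   removeAllPricesFromCommandValueResult(
    #       '60851.6949266 CHSB/1.46530881 BTC/1000 USD.Kraken on HitBTC: 06/03/21 20:00R 0.00002408 0.0164334')
    #   returns 'CHSB/BTC/USD.Kraken on HitBTC: 06/03/21 20:00R'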
@staticmethod
def extractDateTimeStr(resultStr):
dateTimePattern = r"(\d*/\d*/\d* \d*:\d*)"
s = re.search(dateTimePattern, resultStr)
if s != None:
if len(s.groups()) == 1:
group = s.group(1)
return group
@staticmethod
def doAssertAcceptingOneMinuteDateTimeDifference(unitTest,
nowDayStr,
nowHourStr,
nowMinuteStr,
nowMonthStr,
nowYearStr,
requestResultNoEndPrice,
expectedPrintResultNoDateTimeNoEndPrice):
"""
        This method verifies that the date/time value in the passed real-time request
        result requestResultNoEndPrice corresponds to now +/- 60 seconds. The purpose is to avoid a test
failure due to the fact that the crypto price provider was requested at, say,
11:54:59 (now value) and returns a result with time 11:55.
:param unitTest:
:param nowDayStr:
:param nowHourStr:
:param nowMinuteStr:
:param nowMonthStr:
:param nowYearStr:
:param requestResultNoEndPrice:
:param expectedPrintResultNoDateTimeNoEndPrice:
:return:
"""
actualDateTimeStr = UtilityForTest.extractDateTimeStr(requestResultNoEndPrice)
expectedDateTimeStr = '{}/{}/{} {}:{}'.format(nowDayStr, nowMonthStr, nowYearStr, nowHourStr,
nowMinuteStr)
actualDateTimeStamp = DateTimeUtil.dateTimeStringToTimeStamp(actualDateTimeStr, 'Europe/Zurich',
'DD/MM/YY HH:mm')
expectedDateTimeStamp = DateTimeUtil.dateTimeStringToTimeStamp(expectedDateTimeStr, 'Europe/Zurich',
'DD/MM/YY HH:mm')
unitTest.assertAlmostEqual(actualDateTimeStamp, expectedDateTimeStamp, delta=60)
unitTest.assertEqual(expectedPrintResultNoDateTimeNoEndPrice,
requestResultNoEndPrice.replace(actualDateTimeStr, ''))
if __name__ == '__main__':
now = DateTimeUtil.localNow('Europe/Zurich')
    nowYearStr, nowMonthStr, nowDayStr, nowHourStr, nowMinuteStr = UtilityForTest.getFormattedDateTimeComponentsForArrowDateTimeObj(now)
print("{}/{} {}:{}".format(nowDayStr, nowMonthStr, nowHourStr, nowMinuteStr)) | [
"[email protected]"
] | |
469827f7e14422e8fd4fb57c5dd38341aea37fc8 | 52d9daeefbae6a1c6346e61f22122184b1beef20 | /11134.py | 591ad16026f0e1db114e169e90d12f1ca8200ece | [] | no_license | chiseungii/baekjoon | 4034947f63c515bdebc24c3f48247a7f00319ab3 | 6d0fb79292ef4927ab6343f6b82ea63609f560b5 | refs/heads/master | 2021-06-13T14:50:17.762402 | 2021-05-26T16:00:41 | 2021-05-26T16:00:41 | 188,278,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | for i in range(int(input())):
    N, C = map(int, input().split())
    # Number of size-C batches needed to cover N items: ceil(N / C).
    if N % C: print(N // C + 1)
    else: print(N // C)
| [
"[email protected]"
] | |
afb205b56da7ad95a6986ebaf0243e43e79cb058 | cc2f611a2d837cb81dd6957253388c683c849b0b | /Problems/mergeSort.py | fbe6e1c4228d7a5291e444bd9366d8941e2dc22b | [] | no_license | tylors1/Leetcode | 3474dec224a5376f0c360d3fce9fb8030fe17539 | e78129616468fa02abc1850ad7e7b26ddbdec871 | refs/heads/master | 2021-04-06T01:38:35.337711 | 2018-07-25T00:26:36 | 2018-07-25T00:26:36 | 125,107,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | def merge(A,start,mid,end):
    L = A[start:mid]  # left sorted half
    R = A[mid:end]    # right sorted half
    j = i = 0
    for l in range(start, end):
        # Take from L while R is exhausted or L's head is smaller;
        # ties go to R, so equal elements can swap order (not stable).
        if j >= len(R) or (i < len(L) and L[i] < R[j]):
            A[l] = L[i]
            i = i + 1
        else:
            A[l] = R[j]
            j = j + 1
def mergeSort(A, p, r):
    if r - p > 1:
        mid = (p + r) // 2  # integer midpoint
        mergeSort(A, p, mid)
        mergeSort(A, mid, r)
        merge(A, p, mid, r)
A = [20, 30, 21, 15, 42, 45, 31, 0, 9]
mergeSort(A, 0, len(A))
print(A) | [
"[email protected]"
] | |
715948755c322ba64467c7fcb1b7473bdd0c5ce0 | 8ad5ab7236dcb6717b56b4a494eac3fcc08d2c62 | /redap/services/__init__.py | ef310db23b459f0521c2d1728dcbf59e02b3e05d | [
"MIT"
] | permissive | bgedney/redap | 2335664416660f6420b5ed8c10f2cbff5a8a86d8 | 34d6338c4d8fe330ce3a40d2a694456964f1496d | refs/heads/master | 2020-12-31T21:45:52.353173 | 2018-07-17T09:50:07 | 2018-07-17T09:50:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | # -*- coding: utf-8 -*-
from .user import UserService
from .group import GroupService
users = UserService()
groups = GroupService()
| [
"[email protected]"
] | |
8a5587a23b5e151f75a9ce9fe7fb103ce614d93d | 97966f120fad0b817c4fa573c0964255c40ff88d | /app/schemas/notification.py | 00880fb6063d84e4244108671a007c4c8989ab0e | [] | no_license | sralloza/full-power-backend | 0d0c9a0b7bd3a90e07e14b259b6bb2ae582a9598 | 8c766bf8c44c870a47dec71966ea68d984f214bd | refs/heads/master | 2023-06-01T16:09:32.365632 | 2021-06-23T19:37:53 | 2021-06-23T19:37:53 | 365,820,909 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | from pydantic import BaseModel
class QuestionNotification(BaseModel):
content: str
id: str
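# Illustrative instantiation (hypothetical values):
#   QuestionNotification(content="How many hours did you sleep?", id="q-sleep-1")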
| [
"[email protected]"
] | |
d4e92c7f4952c0665079f06b03c4e86bb6cc9cfa | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_55/168.py | bd9a92e7f38786c5038cbe13c574cf7d59e4685b | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | f = open('1.in','r')
o = open('out.dat','w')
n = int(f.readline())
for i in xrange(n):
	# R rides per day, k seats per ride, N groups waiting in a circular queue.
	R, k, N = f.readline().split()
	R = int(R)
	k = int(k)
	N = int(N)
	g = f.readline().split()
	g = [int(g[j]) for j in xrange(N)]
	y = 0  # total earnings: one unit per rider per ride
	p = 0  # index of the next group to board
	for j in xrange(R):
		m = 0   # riders boarded on this ride
		p0 = p  # starting group, so no group boards twice on one ride
		while (m + g[p]) <= k:
			y += g[p]
			m += g[p]
			p = p + 1 if (p + 1) < N else 0  # advance around the circle
			if p == p0:
				break
	o.write('Case #' + str(i + 1) + ': ' + str(y) + '\n')
| [
"[email protected]"
] | |
7d85d2c7ce76018934193471af9c073a3e71d51b | be8d0f0aadcac53f90a34716153fe56ed1d44b11 | /edx/app/edxapp/venvs/edxapp/lib/python2.7/site-packages/consent/migrations/0001_initial.py | a351521d395faef7c07ec0f37a0b147a1a1c8366 | [] | no_license | AlaaSwedan/edx | 5353e6afa7c75d63b6c28150b6ef54180d3ddc84 | 73fec97eb2850e67e5f57e391641116465424d88 | refs/heads/master | 2021-09-01T14:53:48.342510 | 2017-12-27T09:46:39 | 2017-12-27T09:46:39 | 115,434,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,752 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import consent.mixins
import django.db.models.deletion
import django.utils.timezone
from django.conf import settings
from django.db import migrations, models
import model_utils.fields
class Migration(migrations.Migration):
dependencies = [
('enterprise', '0024_enterprisecustomercatalog_historicalenterprisecustomercatalog'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='DataSharingConsent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('username', models.CharField(help_text='Name of the user whose consent state is stored.', max_length=255)),
('granted', models.NullBooleanField(help_text='Whether consent is granted.')),
('course_id', models.CharField(help_text='Course key for which data sharing consent is granted.', max_length=255)),
('enterprise_customer', models.ForeignKey(related_name='enterprise_customer_consent', to='enterprise.EnterpriseCustomer', on_delete=django.db.models.deletion.CASCADE)),
],
options={
'abstract': False,
'verbose_name': 'Data Sharing Consent Record',
'verbose_name_plural': 'Data Sharing Consent Records',
},
bases=(consent.mixins.ConsentModelMixin, models.Model),
),
migrations.CreateModel(
name='HistoricalDataSharingConsent',
fields=[
('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('username', models.CharField(help_text='Name of the user whose consent state is stored.', max_length=255)),
('granted', models.NullBooleanField(help_text='Whether consent is granted.')),
('course_id', models.CharField(help_text='Course key for which data sharing consent is granted.', max_length=255)),
('history_id', models.AutoField(serialize=False, primary_key=True)),
('history_date', models.DateTimeField()),
('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
('enterprise_customer', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to='enterprise.EnterpriseCustomer', null=True)),
('history_user', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
'verbose_name': 'historical Data Sharing Consent Record',
},
),
migrations.AlterUniqueTogether(
name='datasharingconsent',
unique_together=set([('enterprise_customer', 'username', 'course_id')]),
),
]
| [
"root@tatweer02.cxmgqbjhlc0u3gsvisa212wuxe.fx.internal.cloudapp.net"
] | root@tatweer02.cxmgqbjhlc0u3gsvisa212wuxe.fx.internal.cloudapp.net |
705b4a858fa80c737536a7f601d2cb7a67ae2372 | f5a53f0f2770e4d7b3fdace83486452ddcc996e1 | /netbox/tenancy/api/urls.py | 5762f9a0d52cea35a2c7ded2b71d8c314c3c4906 | [
"Apache-2.0"
] | permissive | fireman0865/PingBox | 35e8fc9966b51320d571b63967e352a134022128 | 0f00eaf88b88e9441fffd5173a1501e56c13db03 | refs/heads/master | 2023-01-20T07:55:59.433046 | 2020-03-15T13:36:31 | 2020-03-15T13:36:31 | 247,466,832 | 1 | 0 | Apache-2.0 | 2022-12-26T21:30:32 | 2020-03-15T12:59:16 | Python | UTF-8 | Python | false | false | 544 | py | from rest_framework import routers
from . import views
class TenancyRootView(routers.APIRootView):
"""
Tenancy API root view
"""
def get_view_name(self):
return 'Tenancy'
router = routers.DefaultRouter()
router.APIRootView = TenancyRootView
# Field choices
router.register('_choices', views.TenancyFieldChoicesViewSet, basename='field-choice')
# Tenants
router.register('tenant-groups', views.TenantGroupViewSet)
router.register('tenants', views.TenantViewSet)
app_name = 'tenancy-api'
urlpatterns = router.urls
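# Note (assumption based on DRF DefaultRouter conventions, not verified against
# this project): the registrations above expose endpoints such as /tenants/ and
# /tenant-groups/, plus the /_choices/ field-choice routes.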
| [
"[email protected]"
] | |
1c105c840d3ca23d00e5560635f2901d287b81b1 | 1dbb9ae42a82a854a0fba3eb8e4d0482e0a08a44 | /util/constraint_applyer.py | 66ef43d9a3a40e8244eedd91f4c448cdedb00fe1 | [] | no_license | roderiklagerweij/PyThings | 1de9b7d13ed6dc999f958493f23dac053c2edcd5 | 1f565dd1a9431ff18de0b3d260d32e0d84874cb6 | refs/heads/master | 2021-08-24T00:53:20.949058 | 2017-10-25T12:26:33 | 2017-10-25T12:26:33 | 106,086,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,173 | py | import settings
from util import view_finder
__author__ = 'Roderik'
def apply_width_constraint(parent, id_list):
views = []
for id in id_list:
views.extend(view_finder.find_views_with_id(id, parent))
    for i in range(10):  # several measure/layout passes so view sizes settle
parent.measure()
parent.post_measure(settings.SCREEN_WIDTH, settings.SCREEN_HEIGHT)
parent.apply_gravity(0, 0, settings.SCREEN_WIDTH, settings.SCREEN_HEIGHT)
max_width = 0
for view in views:
if view.width > max_width:
max_width = view.width
for view in views:
view.width = max_width
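# Illustrative call (hypothetical root view and ids from the loaded layout):
#   apply_width_constraint(root_view, ['title_label', 'subtitle_label'])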
def apply_height_constraint(parent, id_list):
views = []
for id in id_list:
views.extend(view_finder.find_views_with_id(id, parent))
    for i in range(10):  # several measure/layout passes so view sizes settle
parent.measure()
parent.post_measure(settings.SCREEN_WIDTH, settings.SCREEN_HEIGHT)
parent.apply_gravity(0, 0, settings.SCREEN_WIDTH, settings.SCREEN_HEIGHT)
max_height = 0
for view in views:
if view.height > max_height:
max_height = view.height
for view in views:
view.height = max_height | [
"="
] | = |
8106d055672db8dfdbc2cbdec865e90f66edfc0c | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/PearRay/core/render.py | bf8e13532a626d16cc4b1257bd976588d9231315 | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,851 | py | import bpy
import os
import sys
import time
import subprocess
import threading
import re
import importlib
import numpy as np
from collections import deque
from .. import export
pearray_package = __import__(__name__.split('.')[0])
class PearRayRender(bpy.types.RenderEngine):
bl_idname = 'PEARRAY_RENDER'
bl_label = "PearRay"
#bl_use_preview = True
bl_use_exclude_layers = True
@staticmethod
def _setup_package():
addon_prefs = bpy.context.user_preferences.addons[pearray_package.__package__].preferences
if addon_prefs.package_dir:
sys.path.append(bpy.path.resolve_ncase(bpy.path.abspath(addon_prefs.package_dir)))
return importlib.import_module("pypearray")
def _proc_wait(self, renderer):
time.sleep(0.25)
# User interrupts the rendering
if self.test_break():
try:
renderer.stop()
print("<<< PEARRAY INTERRUPTED >>>")
except OSError:
pass
return False
if renderer.finished:
return False
return True
def _handle_render_stat(self, renderer):
stat = renderer.status
line = "Pass %s S %i R %i EH %i BH %i" % (renderer.currentPass+1,
stat['global.pixel_sample_count'],
stat['global.ray_count'],
stat['global.entity_hit_count'],
stat['global.background_hit_count'])
self.update_stats("", "PearRay: Rendering [%s]..." % (line))
self.update_progress(stat.percentage)
def render(self, scene):
addon_prefs = bpy.context.user_preferences.addons[pearray_package.__package__].preferences
pr = PearRayRender._setup_package()
pr.Logger.instance.verbosity = pr.LogLevel.DEBUG if addon_prefs.verbose else pr.LogLevel.INFO
specDesc = pr.SpectrumDescriptor.createStandardSpectral()
import tempfile
render = scene.render
x = int(render.resolution_x * render.resolution_percentage * 0.01)
y = int(render.resolution_y * render.resolution_percentage * 0.01)
print("<<< START PEARRAY >>>")
blendSceneName = bpy.data.filepath.split(os.path.sep)[-1].split(".")[0]
if not blendSceneName:
blendSceneName = "blender_scene"
sceneFile = ""
renderPath = ""
# has to be called to update the frame on exporting animations
scene.frame_set(scene.frame_current)
renderPath = bpy.path.resolve_ncase(bpy.path.abspath(render.filepath))
if not render.filepath:
renderPath = tempfile.gettempdir()
if scene.pearray.keep_prc:
sceneFile = os.path.normpath(renderPath + "/scene.prc")
else:
sceneFile = tempfile.NamedTemporaryFile(suffix=".prc").name
self.update_stats("", "PearRay: Exporting data")
scene_exporter = export.Exporter(sceneFile, scene)
scene_exporter.write_scene(pr)
self.update_stats("", "PearRay: Starting render")
environment = pr.SceneLoader.loadFromFile(sceneFile)
toneMapper = pr.ToneMapper(x, y)
toneMapper.colorMode = pr.ToneColorMode.SRGB
toneMapper.gammaMode = pr.ToneGammaMode.NONE
toneMapper.mapperMode = pr.ToneMapperMode.NONE
colorBuffer = pr.ColorBuffer(x,y,pr.ColorBufferMode.RGBA)
environment.registry.set('/renderer/film/width', x)
environment.registry.set('/renderer/film/height', y)
if addon_prefs.verbose:
print("Registry:")
print(environment.registry.dump())
pr_scene = environment.sceneFactory.create()
if not pr_scene:
self.report({'ERROR'}, "PearRay: could not create pearray scene instance")
print("<<< PEARRAY FAILED >>>")
return
factory = pr.RenderFactory(specDesc, pr_scene, environment.registry, renderPath)
addon_prefs = bpy.context.user_preferences.addons[pearray_package.__package__].preferences
renderer = factory.create()
if not renderer:
self.report({'ERROR'}, "PearRay: could not create pearray render instance")
print("<<< PEARRAY FAILED >>>")
return
environment.setup(renderer)
if not os.path.exists(renderPath):
os.makedirs(renderPath)
threads = 0
if scene.render.threads_mode == 'FIXED':
threads = scene.render.threads
renderer.start(scene.render.tile_x, scene.render.tile_y, threads)
# Update image
result = self.begin_result(0, 0, x, y)
layer = result.layers[0]
        def update_image():
            # Tonemap the spectral framebuffer into RGBA, flip vertically
            # (buffer rows start at the top) and hand Blender a flat
            # (x*y, 4) array for the combined pass.
            colorBuffer.map(toneMapper, renderer.output.spectral)
            arr = np.array(colorBuffer, copy=False)
            arr = np.reshape(np.flip(arr, 0), (x * y, 4), 'C')
            layer.passes["Combined"].rect = arr
            self.update_result(result)
update_image()
prog_start = time.time()
img_start = time.time()
while self._proc_wait(renderer):
prog_end = time.time()
if addon_prefs.show_progress_interval < (prog_end - prog_start):
self._handle_render_stat(renderer)
prog_start = prog_end
if addon_prefs.show_image_interval > 0:
img_end = time.time()
if addon_prefs.show_image_interval < (img_end - img_start):
update_image()
img_start = img_end
update_image()
self.end_result(result)
environment.save(renderer, toneMapper, True)
if not scene.pearray.keep_prc:
os.remove(sceneFile)
self.update_stats("", "")
print("<<< PEARRAY FINISHED >>>") | [
"[email protected]"
] | |
c39630bff4eb7fbbfa71bfb7efe5c90e56a9bed1 | 76fb0a3cfc9d9362ab29174bd1d55e888ea4d7f6 | /tfx/examples/tfjs_next_page_prediction/__init__.py | c000dce99c127e0e55b9386ffcd07b46e7555ae8 | [
"Apache-2.0"
] | permissive | tensorflow/tfx | 0cfc9c55171352ecc98c9dfa8ffe976c689d7073 | 1b328504fa08a70388691e4072df76f143631325 | refs/heads/master | 2023-08-30T11:56:50.894497 | 2023-08-29T22:47:19 | 2023-08-29T22:48:26 | 169,116,405 | 2,116 | 899 | Apache-2.0 | 2023-09-14T21:51:42 | 2019-02-04T17:14:36 | Python | UTF-8 | Python | false | false | 596 | py | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| [
"[email protected]"
] | |
df8527c28632b605a3e07d6553228145bf09d138 | 4bb6a8cbd7ac887ec4abc6abc97f0cb17415e82d | /Chapter 10 Class/die3.py | feecac569c11d71ca755666f71d074ecfcb2c079 | [] | no_license | jbhennes/CSCI-220-Programming-1 | cdc9cab47b4a79dccabf014224a175674e9a7155 | ac9e85582eeb51a205981674ffdebe8a5b93a205 | refs/heads/master | 2021-01-01T03:54:50.723923 | 2016-05-02T16:06:55 | 2016-05-02T16:06:55 | 57,902,553 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | # die3.py
# Class that defines a six-sided die object.
from random import randint  # needed by Die.roll()
class Die:
def __init__(self):
self.faceValue = 1
def getFaceValue(self):
return self.faceValue
def setFaceValue(self, value):
if value > 0 and value < 7:
self.faceValue = value
else:
print "Don't try to cheat!"
def roll(self):
value = randint(1,6)
self.faceValue = value
def __str__(self):
return "Die Value: " + str(self.faceValue)
##
## def roll(self):
## self.faceValue = randrange(1, 7)
##
## def getValue(self):
## return self.faceValue
##
## def setValue(self, value):
## if value >= 1 and value <= 6:
## self.faceValue = int(value)
## else:
## self.faceValue = randrange(1, 7)
##
## def __str__(self):
## return "The die's value is: " + str(self.faceValue)
| [
"[email protected]"
] | |
6fc3612754da59a46ae1c9ecb026451637ba5f7e | 64d7d065c92ba5a4a7f3aab1eabeb786c7864ad0 | /profiles_api/migrations/0002_profilefeeditem.py | 72eefdaa9399c90de5c7811a3f54cb5df3e9e16e | [
"MIT"
] | permissive | alardosa/profiles-rest-api | 1bfa20fbf442ce7487a07b7a531ce84436007326 | d7ee8606b9d0c16ea7b4d8d43ca22f94658c6ee7 | refs/heads/master | 2021-09-27T06:49:37.161651 | 2020-02-20T06:14:56 | 2020-02-20T06:14:56 | 238,153,303 | 1 | 0 | MIT | 2021-09-22T18:32:03 | 2020-02-04T08:01:23 | Python | UTF-8 | Python | false | false | 790 | py | # Generated by Django 3.0.3 on 2020-02-12 01:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('profiles_api', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ProfileFeedItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status_text', models.CharField(max_length=255)),
('created_on', models.DateTimeField(auto_now_add=True)),
('user_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
8a8be3e81dcfb6ac9e3a2c296b49136138a80d34 | ba0cbdae81c171bd4be7b12c0594de72bd6d625a | /MyToontown/py2/toontown/coghq/BattleBlocker.pyc.py | ca6c57e799dcef409f253ece85f07fca491e4c22 | [] | no_license | sweep41/Toontown-2016 | 65985f198fa32a832e762fa9c59e59606d6a40a3 | 7732fb2c27001264e6dd652c057b3dc41f9c8a7d | refs/heads/master | 2021-01-23T16:04:45.264205 | 2017-06-04T02:47:34 | 2017-06-04T02:47:34 | 93,279,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,757 | py | # 2013.08.22 22:18:02 Pacific Daylight Time
# Embedded file name: toontown.coghq.BattleBlocker
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from otp.level import BasicEntities
from toontown.toonbase import ToontownGlobals
from direct.directnotify import DirectNotifyGlobal
class BattleBlocker(BasicEntities.DistributedNodePathEntity):
__module__ = __name__
notify = DirectNotifyGlobal.directNotify.newCategory('BattleBlocker')
def __init__(self, cr):
BasicEntities.DistributedNodePathEntity.__init__(self, cr)
self.suitIds = []
self.battleId = None
return
def setActive(self, active):
self.active = active
def announceGenerate(self):
BasicEntities.DistributedNodePathEntity.announceGenerate(self)
self.initCollisionGeom()
def disable(self):
self.ignoreAll()
self.unloadCollisionGeom()
BasicEntities.DistributedNodePathEntity.disable(self)
def destroy(self):
BasicEntities.DistributedNodePathEntity.destroy(self)
def setSuits(self, suitIds):
self.suitIds = suitIds
def setBattle(self, battleId):
self.battleId = battleId
def setBattleFinished(self):
self.ignoreAll()
def initCollisionGeom(self):
self.cSphere = CollisionSphere(0, 0, 0, self.radius)
self.cSphereNode = CollisionNode('battleBlocker-%s-%s' % (self.level.getLevelId(), self.entId))
self.cSphereNode.addSolid(self.cSphere)
self.cSphereNodePath = self.attachNewNode(self.cSphereNode)
self.cSphereNode.setCollideMask(ToontownGlobals.WallBitmask)
self.cSphere.setTangible(0)
self.enterEvent = 'enter' + self.cSphereNode.getName()
self.accept(self.enterEvent, self.__handleToonEnter)
def unloadCollisionGeom(self):
if hasattr(self, 'cSphereNodePath'):
self.ignore(self.enterEvent)
del self.cSphere
del self.cSphereNode
self.cSphereNodePath.removeNode()
del self.cSphereNodePath
def __handleToonEnter(self, collEntry):
self.notify.debug('__handleToonEnter, %s' % self.entId)
self.startBattle()
def startBattle(self):
if not self.active:
return
callback = None
if self.battleId != None and self.battleId in base.cr.doId2do:
battle = base.cr.doId2do.get(self.battleId)
if battle:
self.notify.debug('act like we collided with battle %d' % self.battleId)
callback = battle.handleBattleBlockerCollision
elif len(self.suitIds) > 0:
for suitId in self.suitIds:
suit = base.cr.doId2do.get(suitId)
if suit:
self.notify.debug('act like we collided with Suit %d ( in state %s )' % (suitId, suit.fsm.getCurrentState().getName()))
callback = suit.handleBattleBlockerCollision
break
self.showReaction(callback)
return
def showReaction(self, callback = None):
if not base.localAvatar.wantBattles:
return
track = Sequence()
if callback:
track.append(Func(callback))
track.start()
if __dev__:
def attribChanged(self, *args):
self.unloadCollisionGeom()
self.initCollisionGeom()
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\toontown\coghq\BattleBlocker.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:18:03 Pacific Daylight Time
| [
"[email protected]"
] | |
91903958737a641020ce96b9d7a046cac600a9f3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03474/s109158950.py | f80c891d3a6c6b61b79f29390528b638d503cc9e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | a, b = map(int, input().split())
s = input()
# Valid iff the first a characters and last b characters are digits, joined by '-'.
if s[:a].isdigit() and s[a] == '-' and s[-b:].isdigit():
print('Yes')
else:
print('No')
| [
"[email protected]"
] | |
6e22a2f32badb987a9519a92b1276290cac285f4 | 1215055fe1f77ee3e4d62f923267781a037cdc74 | /GLSL/Blur/Crok_diffuse_GL/Crok_diffuse_GL.py | c6088733ffc41d7eca05440fcec471ba3192fa6c | [] | no_license | felipebetancur/natron-plugins | f61c3a16477e19f25dc3652f5bdc9aa34200b00d | 7c96e245bab792ecc034b0155f23db8ccb6c604c | refs/heads/master | 2020-03-25T00:54:39.150274 | 2018-08-01T19:36:08 | 2018-08-01T19:36:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,813 | py | # -*- coding: utf-8 -*-
# DO NOT EDIT THIS FILE
# This file was automatically generated by Natron PyPlug exporter version 10.
# Hand-written code should be added in a separate file named Crok_diffuse_GLExt.py
# See http://natron.readthedocs.org/en/master/devel/groups.html#adding-hand-written-code-callbacks-etc
# Note that Viewers are never exported
import NatronEngine
import sys
# Try to import the extensions file where callbacks and hand-written code should be located.
try:
from Crok_diffuse_GLExt import *
except ImportError:
pass
def getPluginID():
return "natron.community.plugins.Crok_diffuse_GL"
def getLabel():
return "Crok_diffuse_GL"
def getVersion():
return 1.0
def getIconPath():
return "Crok_diffuse_GL.png"
def getGrouping():
return "Community/GLSL/Blur"
def getPluginDescription():
return "Creates a noisy blur.\n( http://vimeo.com/110007581 )"
def createInstance(app,group):
# Create all nodes in the group
# Create the parameters of the group node the same way we did for all internal nodes
lastNode = group
lastNode.setColor(1, 0.5686, 0.3333)
# Create the user parameters
lastNode.Controls = lastNode.createPageParam("Controls", "Controls")
param = lastNode.createStringParam("sep01", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep01 = param
del param
param = lastNode.createStringParam("sep02", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep02 = param
del param
param = lastNode.createSeparatorParam("SETUP", "Setup")
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.SETUP = param
del param
param = lastNode.createStringParam("sep03", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep03 = param
del param
param = lastNode.createStringParam("sep04", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep04 = param
del param
param = lastNode.createIntParam("Shadertoy1_2paramValueInt0", "Iterations : ")
param.setMinimum(1, 0)
param.setMaximum(500, 0)
param.setDisplayMinimum(1, 0)
param.setDisplayMaximum(500, 0)
param.setDefaultValue(50, 0)
param.restoreDefaultValue(0)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.Shadertoy1_2paramValueInt0 = param
del param
param = lastNode.createStringParam("sep05", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep05 = param
del param
param = lastNode.createDoubleParam("Shadertoy1_2paramValueFloat1", "Size : ")
param.setMinimum(0, 0)
param.setMaximum(10000, 0)
param.setDisplayMinimum(0, 0)
param.setDisplayMaximum(10000, 0)
param.setDefaultValue(100, 0)
param.restoreDefaultValue(0)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.Shadertoy1_2paramValueFloat1 = param
del param
param = lastNode.createStringParam("sep06", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep06 = param
del param
param = lastNode.createStringParam("sep07", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep07 = param
del param
param = lastNode.createSeparatorParam("LINE01", "")
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.LINE01 = param
del param
param = lastNode.createChoiceParam("Shadertoy1_2wrap0", "Edge extend : ")
entries = [ ("repeat", "WRAP_S/T = GL_REPEAT"),
("clamp", "WRAP_S/T = GL_CLAMP_TO_EDGE"),
("mirror", "WRAP_S/T = GL_MIRRORED_REPEAT")]
param.setOptions(entries)
del entries
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("Texture wrap parameter for this input.")
param.setAddNewLine(True)
param.setAnimationEnabled(False)
lastNode.Shadertoy1_2wrap0 = param
del param
param = lastNode.createStringParam("sep08", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep08 = param
del param
param = lastNode.createStringParam("sep09", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep09 = param
del param
lastNode.Credits = lastNode.createPageParam("Credits", "Credits")
param = lastNode.createStringParam("sep101", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep101 = param
del param
param = lastNode.createStringParam("sep102", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep102 = param
del param
param = lastNode.createSeparatorParam("NAME", "Crok_diffuse_GL v1.01")
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.NAME = param
del param
param = lastNode.createStringParam("sep103", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep103 = param
del param
param = lastNode.createStringParam("sep104", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep104 = param
del param
param = lastNode.createSeparatorParam("LINE101", "")
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.LINE101 = param
del param
param = lastNode.createStringParam("sep105", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep105 = param
del param
param = lastNode.createStringParam("sep106", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep106 = param
del param
param = lastNode.createSeparatorParam("FR", "ShaderToy 0.8.8")
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.FR = param
del param
param = lastNode.createStringParam("sep107", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep107 = param
del param
param = lastNode.createStringParam("sep108", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep108 = param
del param
param = lastNode.createSeparatorParam("CONVERSION", " (Fabrice Fernandez - 2018)")
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.CONVERSION = param
del param
param = lastNode.createStringParam("sep109", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep109 = param
del param
param = lastNode.createStringParam("sep110", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep110 = param
del param
# Refresh the GUI with the newly created parameters
lastNode.setPagesOrder(['Controls', 'Credits', 'Node', 'Settings'])
lastNode.refreshUserParamsGUI()
del lastNode
# Start of node "Output2"
lastNode = app.createNode("fr.inria.built-in.Output", 1, group)
lastNode.setLabel("Output2")
lastNode.setPosition(4139, 3997)
lastNode.setSize(90, 36)
lastNode.setColor(0.7, 0.7, 0.7)
groupOutput2 = lastNode
del lastNode
# End of node "Output2"
# Start of node "Source"
lastNode = app.createNode("fr.inria.built-in.Input", 1, group)
lastNode.setScriptName("Source")
lastNode.setLabel("Source")
lastNode.setPosition(4139, 3697)
lastNode.setSize(90, 36)
lastNode.setColor(0.3, 0.5, 0.2)
groupSource = lastNode
del lastNode
# End of node "Source"
# Start of node "Shadertoy1_2"
lastNode = app.createNode("net.sf.openfx.Shadertoy", 1, group)
lastNode.setScriptName("Shadertoy1_2")
lastNode.setLabel("Shadertoy1_2")
lastNode.setPosition(4139, 3856)
lastNode.setSize(90, 36)
lastNode.setColor(0.3, 0.5, 0.2)
groupShadertoy1_2 = lastNode
param = lastNode.getParam("paramValueInt0")
if param is not None:
param.setValue(50, 0)
del param
param = lastNode.getParam("paramValueFloat1")
if param is not None:
param.setValue(100, 0)
del param
param = lastNode.getParam("imageShaderFileName")
if param is not None:
param.setValue("/users/ffernandez/Natron2-3-6/Plugins/OFX/Natron/Shadertoy.ofx.bundle/Contents/Resources/presets/Shadertoy/Crok_diffuse.frag.glsl")
del param
param = lastNode.getParam("imageShaderSource")
if param is not None:
param.setValue("//\n//\n// MMMMMMMMMMMMMMMMMMMMMMMMMMMM\n// MM. .MM\n// MM. .MMMMMMMMMMMMMMMMMMMMMM. .MM\n// MM. .MMMMMMMMMMMMMMMMMMMMMMMM. .MM\n// MM. .MMMM MMMMMMM MMM. .MM\n// MM. .MMM MMMMMM MMM. .MM\n// MM. .MmM MMMM MMM. .MM\n// MM. .MMM MM MMM. .MM\n// MM. .MMM M MMM. .MM\n// MM. .MMM MMM. .MM\n// MM. .MMM MMM. .MM\n// MM. .MMM M MMM. .MM\n// MM. .MMM MM MMM. .MM\n// MM. .MMM MMM MMM. .MM\n// MM. .MMM MMMM MMM. .MM\n// MM. .MMMMMMMMMMMMMMMMMMMMMMMM. .MM\n// MM. .MMMMMMMMMMMMMMMMMMMMMM. .MM\n// MM. .MM\n// MMMMMMMMMMMMMMMMMMMMMMMMMMMM\n//\n//\n//\n//\n// Adaptation pour Natron par F. Fernandez\n// Code original : crok_diffuse Matchbox pour Autodesk Flame\n\n// Adapted to Natron by F.Fernandez\n// Original code : crok_diffuse Matchbox for Autodesk Flame\n\n// based on https://www.shadertoy.com/view/MdXXWr\n\n\n// iChannel0: Source, filter = linear, wrap = clamp\n// iChannel1: Strength map, filter = linear, wrap = clamp\n\n// BBox: iChannel0\n\n\n\n\n\nuniform int itteration = 50; // Iterations : (iterations), min=1, max=500\nuniform float size = 100; // Size : (size), min=0, max=10000\n\n\n\nfloat time = iTime *.05;\nfloat cent = 0.0;\n\n\n\nfloat rand1(vec2 a, out float r)\n{\n\tvec3 p = vec3(a,1.0);\n\tr = fract(sin(dot(p,vec3(37.1,61.7, 12.4)))*3758.5453123);\n\treturn r;\n}\n\nfloat rand2(inout float b)\n{\n\tb = fract((134.324324) * b);\n\treturn (b-0.5)*2.0;\n}\n\nvoid mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n\tvec2 uv = fragCoord.xy / iResolution.xy;\n\t\n\tfloat strength = texture2D(iChannel1, uv).r;\n\tfloat n = size * strength / iResolution.x;\n\trand1(uv, cent);\n\tvec4 col = vec4(0.0);\n\tfor(int i=0;i<itteration;i++)\n\t{\n\t\tfloat noisex = rand2(cent);\n\t\tfloat noisey = rand2(cent);\n\t\tcol += texture2D(iChannel0, uv - vec2(noisex, noisey) * n) / float(itteration);\n\t}\n\tfragColor = col;\n}")
del param
param = lastNode.getParam("mipmap0")
if param is not None:
param.set("linear")
del param
param = lastNode.getParam("inputLabel0")
if param is not None:
param.setValue("Source")
del param
param = lastNode.getParam("mipmap1")
if param is not None:
param.set("linear")
del param
param = lastNode.getParam("wrap1")
if param is not None:
param.set("clamp")
del param
param = lastNode.getParam("inputLabel1")
if param is not None:
param.setValue("Strength map")
del param
param = lastNode.getParam("inputEnable2")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("inputEnable3")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("bbox")
if param is not None:
param.set("iChannel0")
del param
param = lastNode.getParam("NatronParamFormatChoice")
if param is not None:
param.set("PC_Video")
del param
param = lastNode.getParam("mouseParams")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("paramCount")
if param is not None:
param.setValue(2, 0)
del param
param = lastNode.getParam("paramType0")
if param is not None:
param.set("int")
del param
param = lastNode.getParam("paramName0")
if param is not None:
param.setValue("itteration")
del param
param = lastNode.getParam("paramLabel0")
if param is not None:
param.setValue("Iterations :")
del param
param = lastNode.getParam("paramHint0")
if param is not None:
param.setValue("iterations")
del param
param = lastNode.getParam("paramDefaultInt0")
if param is not None:
param.setValue(50, 0)
del param
param = lastNode.getParam("paramMinInt0")
if param is not None:
param.setValue(1, 0)
del param
param = lastNode.getParam("paramMaxInt0")
if param is not None:
param.setValue(500, 0)
del param
param = lastNode.getParam("paramType1")
if param is not None:
param.set("float")
del param
param = lastNode.getParam("paramName1")
if param is not None:
param.setValue("size")
del param
param = lastNode.getParam("paramLabel1")
if param is not None:
param.setValue("Size :")
del param
param = lastNode.getParam("paramHint1")
if param is not None:
param.setValue("size")
del param
param = lastNode.getParam("paramDefaultFloat1")
if param is not None:
param.setValue(99.99999999999999, 0)
del param
param = lastNode.getParam("paramMinFloat1")
if param is not None:
param.setValue(0, 0)
del param
param = lastNode.getParam("paramMaxFloat1")
if param is not None:
param.setValue(10000, 0)
del param
del lastNode
# End of node "Shadertoy1_2"
# Start of node "Strength_map"
lastNode = app.createNode("fr.inria.built-in.Input", 1, group)
lastNode.setScriptName("Strength_map")
lastNode.setLabel("Strength map")
lastNode.setPosition(4342, 3856)
lastNode.setSize(90, 36)
lastNode.setColor(0.3, 0.5, 0.2)
groupStrength_map = lastNode
del lastNode
# End of node "Strength_map"
# Now that all nodes are created we can connect them together, restore expressions
groupOutput2.connectInput(0, groupShadertoy1_2)
groupShadertoy1_2.connectInput(0, groupSource)
groupShadertoy1_2.connectInput(1, groupStrength_map)
param = groupShadertoy1_2.getParam("paramValueInt0")
group.getParam("Shadertoy1_2paramValueInt0").setAsAlias(param)
del param
param = groupShadertoy1_2.getParam("paramValueFloat1")
group.getParam("Shadertoy1_2paramValueFloat1").setAsAlias(param)
del param
try:
extModule = sys.modules["Crok_diffuse_GLExt"]
except KeyError:
extModule = None
if extModule is not None and hasattr(extModule ,"createInstanceExt") and hasattr(extModule.createInstanceExt,"__call__"):
extModule.createInstanceExt(app,group)
| [
"[email protected]"
] | |
75129cdcd47f48a1bd12a6212201012ffae573e7 | e7fcc1d64cd95805918ab1b5786bf81a92f973ef | /2017/day22/test_day22.py | dce7c09a50212dd957967cad85f56d655a5738d5 | [] | no_license | trolen/advent-of-code | 8145c1e36fea04e53d4b7a885efcc2da71fbfe57 | 0a4e022a6a810d86e044a15036a2f5778f0d38af | refs/heads/master | 2023-02-26T13:11:58.341006 | 2023-02-20T23:22:27 | 2023-02-20T23:22:27 | 54,579,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | #! /usr/bin/env python3
import unittest
import day22
class TestDay22(unittest.TestCase):
def setUp(self):
data = [
'..#',
'#..',
'...'
]
self._cluster = day22.Cluster(data)
def test_part1(self):
self.assertEqual(5, self._cluster.run(7))
self.assertEqual(41, self._cluster.run(70))
self.assertEqual(5587, self._cluster.run(10000))
def test_part2(self):
self.assertEqual(26, self._cluster.run(100, part2=True))
self.assertEqual(2511944, self._cluster.run(10000000, part2=True))
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
891b53bbd77591bc00cdd629ba6223dfc79c55d9 | 1287804d1ff6409f2812366e04652a5d3031d159 | /perf_dashboard/regressions/views.py | 11b1078f17b292b103a8b472f2b0f9be2c57d2e6 | [
"Apache-2.0"
] | permissive | isabella232/tools-2 | 62189c2860cb1f297849229cb83593ec6ca84d7d | e793cb433dd6363367634a62c183148addd45373 | refs/heads/master | 2023-03-09T14:44:50.875280 | 2020-11-13T15:58:28 | 2020-11-13T15:58:28 | 313,067,485 | 0 | 0 | Apache-2.0 | 2021-02-23T15:35:27 | 2020-11-15T15:58:21 | null | UTF-8 | Python | false | false | 9,104 | py | # Copyright Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.shortcuts import render
from helpers import download
import pandas as pd
import os
cwd = os.getcwd()
perf_data_path = cwd + "/perf_data/"
current_release = [os.getenv('CUR_RELEASE')]
# Create your views here.
def monitoring_overview(request):
return render(request, "monitoring_overview.html")
def cur_regression(request):
cur_href_links, _, cur_release_dates, _, _, _ = download.download_benchmark_csv(60)
latency_none_mtls_base_p90 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_none_mtls_baseline', 'p90')
latency_none_mtls_both_p90 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_none_mtls_both', 'p90')
latency_none_plaintext_both_p90 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_none_plaintext_both', 'p90')
latency_v2_stats_nullvm_both_p90 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_v2-stats-nullvm_both', 'p90')
latency_v2_stats_wasm_both_p90 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_v2-stats-wasm_both', 'p90')
latency_v2_sd_nologging_nullvm_both_p90 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_v2-sd-nologging-nullvm_both', 'p90')
latency_v2_sd_full_nullvm_both_p90 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_v2-sd-full-nullvm_both', 'p90')
latency_none_mtls_base_p99 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_none_mtls_baseline', 'p99')
latency_none_mtls_both_p99 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_none_mtls_both', 'p99')
latency_none_plaintext_both_p99 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_none_plaintext_both', 'p99')
latency_v2_stats_nullvm_both_p99 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_v2-stats-nullvm_both', 'p99')
latency_v2_stats_wasm_both_p99 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_v2-stats-wasm_both', 'p99')
latency_v2_sd_nologging_nullvm_both_p99 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_v2-sd-nologging-nullvm_both', 'p99')
latency_v2_sd_full_nullvm_both_p99 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_v2-sd-full-nullvm_both', 'p99')
context = {'current_release': current_release,
'latency_none_mtls_base_p90': latency_none_mtls_base_p90,
'latency_none_mtls_both_p90': latency_none_mtls_both_p90,
'latency_none_plaintext_both_p90': latency_none_plaintext_both_p90,
'latency_v2_stats_nullvm_both_p90': latency_v2_stats_nullvm_both_p90,
'latency_v2_stats_wasm_both_p90': latency_v2_stats_wasm_both_p90,
'latency_v2_sd_nologging_nullvm_both_p90': latency_v2_sd_nologging_nullvm_both_p90,
'latency_v2_sd_full_nullvm_both_p90': latency_v2_sd_full_nullvm_both_p90,
'latency_none_mtls_base_p99': latency_none_mtls_base_p99,
'latency_none_mtls_both_p99': latency_none_mtls_both_p99,
'latency_none_plaintext_both_p99': latency_none_plaintext_both_p99,
'latency_v2_stats_nullvm_both_p99': latency_v2_stats_nullvm_both_p99,
'latency_v2_stats_wasm_both_p99': latency_v2_stats_wasm_both_p99,
'latency_v2_sd_nologging_nullvm_both_p99': latency_v2_sd_nologging_nullvm_both_p99,
'latency_v2_sd_full_nullvm_both_p99': latency_v2_sd_full_nullvm_both_p99,
}
return render(request, "cur_regression.html", context=context)
# Create your views here.
def master_regression(request):
_, _, _, master_href_links, _, master_release_dates = download.download_benchmark_csv(60)
latency_none_mtls_base_p90_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_none_mtls_baseline', 'p90')
latency_none_mtls_both_p90_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_none_mtls_both', 'p90')
latency_none_plaintext_both_p90_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_none_plaintext_both', 'p90')
latency_v2_stats_nullvm_both_p90_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_v2-stats-nullvm_both', 'p90')
latency_v2_stats_wasm_both_p90_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_v2-stats-wasm_both', 'p90')
latency_v2_sd_nologging_nullvm_both_p90_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_v2-sd-nologging-nullvm_both', 'p90')
latency_v2_sd_full_nullvm_both_p90_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_v2-sd-full-nullvm_both', 'p90')
latency_none_mtls_base_p99_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_none_mtls_baseline', 'p99')
latency_none_mtls_both_p99_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_none_mtls_both', 'p99')
latency_none_plaintext_both_p99_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_none_plaintext_both', 'p99')
latency_v2_stats_nullvm_both_p99_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_v2-stats-nullvm_both', 'p99')
latency_v2_stats_wasm_both_p99_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_v2-stats-wasm_both', 'p99')
latency_v2_sd_nologging_nullvm_both_p99_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_v2-sd-nologging-nullvm_both', 'p99')
latency_v2_sd_full_nullvm_both_p99_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_v2-sd-full-nullvm_both', 'p99')
context = {'latency_none_mtls_base_p90_master': latency_none_mtls_base_p90_master,
'latency_none_mtls_both_p90_master': latency_none_mtls_both_p90_master,
'latency_none_plaintext_both_p90_master': latency_none_plaintext_both_p90_master,
'latency_v2_stats_nullvm_both_p90_master': latency_v2_stats_nullvm_both_p90_master,
'latency_v2_stats_wasm_both_p90_master': latency_v2_stats_wasm_both_p90_master,
'latency_v2_sd_nologging_nullvm_both_p90_master': latency_v2_sd_nologging_nullvm_both_p90_master,
'latency_v2_sd_full_nullvm_both_p90_master': latency_v2_sd_full_nullvm_both_p90_master,
'latency_none_mtls_base_p99_master': latency_none_mtls_base_p99_master,
'latency_none_mtls_both_p99_master': latency_none_mtls_both_p99_master,
'latency_none_plaintext_both_p99_master': latency_none_plaintext_both_p99_master,
'latency_v2_stats_nullvm_both_p99_master': latency_v2_stats_nullvm_both_p99_master,
'latency_v2_stats_wasm_both_p99_master': latency_v2_stats_wasm_both_p99_master,
'latency_v2_sd_nologging_nullvm_both_p99_master': latency_v2_sd_nologging_nullvm_both_p99_master,
'latency_v2_sd_full_nullvm_both_p99_master': latency_v2_sd_full_nullvm_both_p99_master,
}
return render(request, "master_regression.html", context=context)
# Helpers
def get_latency_y_data_point(df, telemetry_mode, quantiles):
y_series_data = []
data = df.query('ActualQPS == 1000 and NumThreads == 16 and Labels.str.endswith(@telemetry_mode)')
quantile_data = data.get(quantiles)
if quantile_data is None or len(quantile_data) == 0:
y_series_data.append('null')
else:
y_series_data.append(data[quantiles].head(1).values[0] / 1000)
return y_series_data
def get_telemetry_mode_y_series(release_href_links, release_dates, telemetry_mode, quantiles):
trending_data = [[]] * len(release_href_links)
for i in range(len(release_href_links)):
release_year = release_dates[i][0:4]
release_month = release_dates[i][4:6]
release_date = release_dates[i][6:]
release_list = [release_year, release_month, release_date]
try:
href_parts = release_href_links[i].split("/")
benchmark_test_id = href_parts[4]
df = pd.read_csv(perf_data_path + benchmark_test_id + "_benchmark.csv")
except Exception as e:
print(e)
trending_data[i] = release_list + ["null"]
else:
trending_data[i] = release_list + [get_latency_y_data_point(df, telemetry_mode, quantiles)]
return trending_data
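# Illustrative shape of the rows produced above (dates and values are made up):
# on success a row is ['2020', '06', '15', [1.234]]; on a read failure it is
# ['2020', '06', '16', 'null'].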
| [
"[email protected]"
] | |
e8754bac157825e839ab2223e27997427aa4e5ca | 5831b0293cbb6f9e0660ac4ec952cbdb047d051d | /tests/test_dataset_wikipedia.py | 0a969b520c5fc07dc8a4b52634061969907c9eae | [
"Apache-2.0"
] | permissive | mdlynch37/textacy | 03e3287fd8ee8bd4d06e48b7b87edf8324a987e5 | c1c7376a84a62faeee496e9b8cc2a29edc28c7d1 | refs/heads/master | 2021-01-20T09:29:54.627035 | 2017-12-04T05:31:14 | 2017-12-04T05:31:14 | 101,596,726 | 0 | 0 | null | 2017-08-28T02:36:30 | 2017-08-28T02:36:30 | null | UTF-8 | Python | false | false | 1,831 | py | from __future__ import absolute_import, unicode_literals
import os
import shutil
import tempfile
import unittest
from textacy import data_dir
from textacy.compat import unicode_
from textacy.datasets.wikipedia import Wikipedia
DATASET = Wikipedia(lang='en', version='latest')
@unittest.skipUnless(
DATASET.filename, 'Wikipedia dataset must be downloaded before running tests')
class WikipediaTestCase(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp(
prefix='test_datasets_', dir=os.path.dirname(os.path.abspath(__file__)))
@unittest.skip("No need to download a new dataset every time")
def test_download(self):
dataset = Wikipedia(data_dir=self.tempdir)
dataset.download()
self.assertTrue(os.path.exists(dataset.filename))
def test_ioerror(self):
dataset = Wikipedia(data_dir=self.tempdir)
with self.assertRaises(IOError):
_ = list(dataset.texts())
def test_texts(self):
for text in DATASET.texts(limit=3):
self.assertIsInstance(text, unicode_)
def test_texts_limit(self):
for limit in (1, 5, 10):
self.assertEqual(sum(1 for _ in DATASET.texts(limit=limit)), limit)
def test_texts_min_len(self):
for min_len in (100, 200, 500):
self.assertTrue(
all(len(text) >= min_len
for text in DATASET.texts(min_len=min_len, limit=10)))
def test_records(self):
for record in DATASET.records(limit=3):
self.assertIsInstance(record, dict)
def test_records_fast(self):
for record in DATASET.records(limit=3, fast=True):
self.assertIsInstance(record, dict)
# TODO: test individual parsing functions
def tearDown(self):
shutil.rmtree(self.tempdir)
| [
"[email protected]"
] | |
ecb2ca674db64896f08fcb1569dd375ed4e5fa62 | 0b358a0d64eb03655c030b36c0ae87880b153951 | /mmdet/models/losses/cross_entropy_loss.py | f3aca80f7a4ba24cdcb65e1a5737257e913c123d | [] | permissive | jshilong/DDQ | db05ff309d63316c62faa59b28c66d65eef973d1 | de9331e4579aaafab4d69e3a9a3c6638efc5392c | refs/heads/main | 2023-06-03T15:02:09.949907 | 2023-05-24T03:32:12 | 2023-05-24T03:32:12 | 498,974,099 | 199 | 6 | Apache-2.0 | 2022-06-02T05:01:53 | 2022-06-02T03:10:25 | null | UTF-8 | Python | false | false | 9,696 | py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None,
ignore_index=-100):
"""Calculate the CrossEntropy loss.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str, optional): The method used to reduce the loss.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (int | None): The label index to be ignored.
If None, it will be set to default value. Default: -100.
Returns:
torch.Tensor: The calculated loss
"""
# The default value of ignore_index is the same as F.cross_entropy
ignore_index = -100 if ignore_index is None else ignore_index
# element-wise losses
loss = F.cross_entropy(
pred,
label,
weight=class_weight,
reduction='none',
ignore_index=ignore_index)
# apply weights and do the reduction
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(
loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index):
"""Expand onehot labels to match the size of prediction."""
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
valid_mask = (labels >= 0) & (labels != ignore_index)
inds = torch.nonzero(
valid_mask & (labels < label_channels), as_tuple=False)
if inds.numel() > 0:
bin_labels[inds, labels[inds]] = 1
valid_mask = valid_mask.view(-1, 1).expand(labels.size(0),
label_channels).float()
if label_weights is None:
bin_label_weights = valid_mask
else:
bin_label_weights = label_weights.view(-1, 1).repeat(1, label_channels)
bin_label_weights *= valid_mask
return bin_labels, bin_label_weights
def binary_cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None,
ignore_index=-100):
"""Calculate the binary CrossEntropy loss.
Args:
pred (torch.Tensor): The prediction with shape (N, 1).
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (int | None): The label index to be ignored.
If None, it will be set to default value. Default: -100.
Returns:
torch.Tensor: The calculated loss.
"""
# The default value of ignore_index is the same as F.cross_entropy
ignore_index = -100 if ignore_index is None else ignore_index
if pred.dim() != label.dim():
label, weight = _expand_onehot_labels(label, weight, pred.size(-1),
ignore_index)
# weighted element-wise losses
if weight is not None:
weight = weight.float()
loss = F.binary_cross_entropy_with_logits(
pred, label.float(), pos_weight=class_weight, reduction='none')
# do the reduction for the weighted loss
loss = weight_reduce_loss(
loss, weight, reduction=reduction, avg_factor=avg_factor)
return loss
def mask_cross_entropy(pred,
target,
label,
reduction='mean',
avg_factor=None,
class_weight=None,
ignore_index=None):
"""Calculate the CrossEntropy loss for masks.
Args:
pred (torch.Tensor): The prediction with shape (N, C, *), C is the
number of classes. The trailing * indicates arbitrary shape.
target (torch.Tensor): The learning label of the prediction.
label (torch.Tensor): ``label`` indicates the class label of the mask
corresponding object. This will be used to select the mask in the
of the class which the object belongs to when the mask prediction
if not class-agnostic.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (None): Placeholder, to be consistent with other loss.
Default: None.
Returns:
torch.Tensor: The calculated loss
Example:
>>> N, C = 3, 11
>>> H, W = 2, 2
>>> pred = torch.randn(N, C, H, W) * 1000
>>> target = torch.rand(N, H, W)
>>> label = torch.randint(0, C, size=(N,))
>>> reduction = 'mean'
>>> avg_factor = None
>>> class_weights = None
>>> loss = mask_cross_entropy(pred, target, label, reduction,
>>> avg_factor, class_weights)
>>> assert loss.shape == (1,)
"""
assert ignore_index is None, 'BCE loss does not support ignore_index'
# TODO: handle these two reserved arguments
assert reduction == 'mean' and avg_factor is None
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
return F.binary_cross_entropy_with_logits(
pred_slice, target, weight=class_weight, reduction='mean')[None]
@LOSSES.register_module()
class CrossEntropyLoss(nn.Module):
def __init__(self,
use_sigmoid=False,
use_mask=False,
reduction='mean',
class_weight=None,
ignore_index=None,
loss_weight=1.0):
"""CrossEntropyLoss.
Args:
use_sigmoid (bool, optional): Whether the prediction uses sigmoid
of softmax. Defaults to False.
use_mask (bool, optional): Whether to use mask cross entropy loss.
Defaults to False.
reduction (str, optional): . Defaults to 'mean'.
Options are "none", "mean" and "sum".
class_weight (list[float], optional): Weight of each class.
Defaults to None.
ignore_index (int | None): The label index to be ignored.
Defaults to None.
loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
"""
super(CrossEntropyLoss, self).__init__()
assert (use_sigmoid is False) or (use_mask is False)
self.use_sigmoid = use_sigmoid
self.use_mask = use_mask
self.reduction = reduction
self.loss_weight = loss_weight
self.class_weight = class_weight
self.ignore_index = ignore_index
if self.use_sigmoid:
self.cls_criterion = binary_cross_entropy
elif self.use_mask:
self.cls_criterion = mask_cross_entropy
else:
self.cls_criterion = cross_entropy
def forward(self,
cls_score,
label,
weight=None,
avg_factor=None,
reduction_override=None,
ignore_index=None,
**kwargs):
"""Forward function.
Args:
cls_score (torch.Tensor): The prediction.
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The method used to reduce the
loss. Options are "none", "mean" and "sum".
ignore_index (int | None): The label index to be ignored.
If not None, it will override the default value. Default: None.
Returns:
torch.Tensor: The calculated loss.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if ignore_index is None:
ignore_index = self.ignore_index
if self.class_weight is not None:
class_weight = cls_score.new_tensor(
self.class_weight, device=cls_score.device)
else:
class_weight = None
loss_cls = self.loss_weight * self.cls_criterion(
cls_score,
label,
weight,
class_weight=class_weight,
reduction=reduction,
avg_factor=avg_factor,
ignore_index=ignore_index,
**kwargs)
return loss_cls
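# Usage sketch (illustrative only; shapes and values are made up):
#   loss_fn = CrossEntropyLoss(use_sigmoid=False, loss_weight=1.0)
#   logits = torch.randn(4, 10)           # (N, C)
#   labels = torch.randint(0, 10, (4,))   # (N,)
#   value = loss_fn(logits, labels)       # scalar tensor, mean-reduced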
| [
"[email protected]"
] | |
d011cac7d86757a68f11dfebdb8dd76101cecde5 | 8114909d3ed6ee1e6d1fbe14a37723015ab53af6 | /googleplus_test.py | 3db6be74738968a4cf7a2c5828069a7657cb4865 | [
"LicenseRef-scancode-public-domain"
] | permissive | notenoughneon/activitystreams-unofficial | b0c66d48eb3b43d68b76df069ba237dce9d77489 | 1f45bde45d3d18ef39d69ebd698e248233b94ce9 | refs/heads/master | 2021-01-18T03:01:03.101619 | 2014-08-15T15:00:00 | 2014-08-15T23:48:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,058 | py | """Unit tests for googleplus.py.
See apiclient/http.py for details on using RequestMockBuilder to mock out Google
API calls. (This is the current doc on apiclient mocks, but it doesn't mention
RequestMockBuilder:
https://developers.google.com/api-client-library/python/guide/mocks )
"""
__author__ = ['Ryan Barrett <[email protected]>']
import copy
import json
import appengine_config
import httplib2
from apiclient import discovery
from apiclient import http
appengine_config.GOOGLE_CLIENT_ID = 'my client id'
appengine_config.GOOGLE_CLIENT_SECRET = 'my client secret'
import googleplus
from oauth_dropins import googleplus as oauth_googleplus
from oauth_dropins.webutil import testutil
from oauth_dropins.webutil import util
DISCOVERY_DOC = appengine_config.read('googleplus_api_discovery.json')
def tag_uri(name):
return util.tag_uri('plus.google.com', name)
ACTIVITY_GP = { # Google+
'kind': 'plus#activity',
'verb': 'post',
'id': '001',
'actor': {'id': '444', 'displayName': 'Charles'},
'object': {
'content': 'my post',
'url': 'http://plus.google.com/001',
},
}
ACTIVITY_AS = copy.deepcopy(ACTIVITY_GP) # ActivityStreams
ACTIVITY_AS['id'] = tag_uri('001')
ACTIVITY_AS['object']['author'] = ACTIVITY_GP['actor']
ACTIVITY_AS['object']['to'] = [{'objectType':'group', 'alias':'@public'}]
COMMENT_GP = { # Google+
'kind': 'plus#comment',
'verb': 'post',
'id': '888',
'actor': {'id': '777', 'displayName': 'Eve'},
'object': {'content': 'my content'},
'inReplyTo': [{'url': 'http://post/url'}],
}
COMMENT_AS = copy.deepcopy(COMMENT_GP)
COMMENT_AS.update({ # ActivityStreams
'author': COMMENT_AS.pop('actor'),
'displayName': 'my content',
'content': 'my content',
'id': tag_uri('888'),
'url': 'http://post/url',
'to': [{'objectType':'group', 'alias':'@public'}],
})
PLUSONER = { # Google+
'kind': 'plus#person',
'id': '222',
'displayName': 'Alice',
'url': 'https://profiles.google.com/alice',
'image': {'url': 'https://alice/picture'},
}
LIKE = { # ActivityStreams
'id': tag_uri('001_liked_by_222'),
'url': 'http://plus.google.com/001',
'objectType': 'activity',
'verb': 'like',
'object': {'url': 'http://plus.google.com/001'},
'author': {
'kind': 'plus#person',
'id': tag_uri('222'),
'displayName': 'Alice',
'url': 'https://profiles.google.com/alice',
'image': {'url': 'https://alice/picture'},
},
'displayName': 'Alice +1ed this.',
'content': '+1ed this.',
}
RESHARER = { # Google+
'kind': 'plus#person',
'id': '444',
'displayName': 'Bob',
'url': 'https://plus.google.com/bob',
'image': {'url': 'https://bob/picture'},
}
SHARE = { # ActivityStreams
'id': tag_uri('001_shared_by_444'),
'url': 'http://plus.google.com/001',
'objectType': 'activity',
'verb': 'share',
'object': {'url': 'http://plus.google.com/001'},
'author': {
'kind': 'plus#person',
'id': tag_uri('444'),
'displayName': 'Bob',
'url': 'https://plus.google.com/bob',
'image': {'url': 'https://bob/picture'},
},
'displayName': 'Bob reshared this.',
'content': 'reshared this.',
}
ACTIVITY_GP_EXTRAS = copy.deepcopy(ACTIVITY_GP) # Google+
ACTIVITY_GP_EXTRAS['object'].update({
'replies': {'totalItems': 1},
'plusoners': {'totalItems': 1},
'resharers': {'totalItems': 1},
})
ACTIVITY_AS_EXTRAS = copy.deepcopy(ACTIVITY_GP_EXTRAS) # ActivityStreams
ACTIVITY_AS_EXTRAS['id'] = tag_uri('001')
ACTIVITY_AS_EXTRAS['object'].update({
'author': ACTIVITY_GP_EXTRAS['actor'],
'to': [{'objectType':'group', 'alias':'@public'}],
'replies': {'totalItems': 1, 'items': [COMMENT_AS]},
'tags': [LIKE, SHARE],
})
class GooglePlusTest(testutil.HandlerTest):
def setUp(self):
super(GooglePlusTest, self).setUp()
self.auth_entity = oauth_googleplus.GooglePlusAuth(
id='my_string_id',
user_json=json.dumps({
'displayName': 'Bob',
}),
creds_json=json.dumps({
'access_token': 'my token',
'client_id': appengine_config.GOOGLE_CLIENT_ID,
'client_secret': appengine_config.GOOGLE_CLIENT_SECRET,
'refresh_token': 'my refresh token',
'token_expiry': '',
'token_uri': '',
'user_agent': '',
'invalid': '',
}))
self.googleplus = googleplus.GooglePlus(auth_entity=self.auth_entity)
def tearDown(self):
oauth_googleplus.json_service = None
def init(self, **kwargs):
"""Sets up the API service from googleplus_test_discovery.
Pass a requestBuilder or http kwarg to inject expected HTTP requests and
responses.
"""
oauth_googleplus.json_service = discovery.build_from_document(
DISCOVERY_DOC, **kwargs)
def test_get_comment(self):
self.init(requestBuilder=http.RequestMockBuilder({
'plus.comments.get': (None, json.dumps(COMMENT_GP)) # None means 200 OK
}))
self.assert_equals(COMMENT_AS, self.googleplus.get_comment('234'))
def test_get_activity(self):
self.init(requestBuilder=http.RequestMockBuilder({
'plus.activities.get': (None, json.dumps(ACTIVITY_GP))
}))
self.assert_equals([ACTIVITY_AS],
self.googleplus.get_activities(activity_id='234'))
def test_get_activities_no_extras_to_fetch(self):
self.init(requestBuilder=http.RequestMockBuilder({
'plus.activities.list': (None, json.dumps({
'items': [ACTIVITY_GP, ACTIVITY_GP],
})),
},
# ACTIVITY_GP doesn't say there are any comments, +1s, or shares (via
# totalItems), so we shouldn't ask for them.
check_unexpected=True))
got = self.googleplus.get_activities(fetch_replies=True, fetch_likes=True,
fetch_shares=True)
self.assert_equals([ACTIVITY_AS, ACTIVITY_AS], got)
def test_get_activities_fetch_extras(self):
self.init()
http_seq = http.HttpMockSequence(
[({'status': '200'}, json.dumps({'items': [item]})) for item in
         (ACTIVITY_GP_EXTRAS,
          # should only ask for these the first time, use the cache for the second
          COMMENT_GP, PLUSONER, RESHARER,
          ACTIVITY_GP_EXTRAS)])
self.auth_entity.http = lambda: http_seq
cache = testutil.FakeCache()
self.assert_equals([ACTIVITY_AS_EXTRAS], self.googleplus.get_activities(
fetch_replies=True, fetch_likes=True, fetch_shares=True, cache=cache))
# no new extras, so another request won't fill them in
activity = copy.deepcopy(ACTIVITY_AS)
for field in 'replies', 'plusoners', 'resharers':
activity['object'][field] = {'totalItems': 1}
self.assert_equals([activity], self.googleplus.get_activities(
fetch_replies=True, fetch_likes=True, fetch_shares=True, cache=cache))
# TODO: resurrect?
# def test_get_activities_request_etag(self):
# self.init()
# http_seq = http.HttpMockSequence(
# [({'status': '200'}, json.dumps({'items': [item]}))])
# self.auth_entity.http = lambda: http_seq
# resp = self.googleplus.get_activities_response(
# fetch_replies=True, fetch_likes=True, fetch_shares=True)
# self.assertEquals('"my etag"', resp['etag'])
def test_get_activities_response_etag(self):
self.init(requestBuilder=http.RequestMockBuilder({
'plus.activities.list': (httplib2.Response({'status': 200}),
json.dumps({'etag': '"my etag"'})),
}))
resp = self.googleplus.get_activities_response(
fetch_replies=True, fetch_likes=True, fetch_shares=True)
self.assertEquals('"my etag"', resp['etag'])
def test_get_activities_304_not_modified(self):
"""Requests with matching ETags return 304 Not Modified."""
self.init(requestBuilder=http.RequestMockBuilder({
'plus.activities.list': (httplib2.Response({'status': 304}), '{}'),
}))
self.assert_equals([], self.googleplus.get_activities(
fetch_replies=True, fetch_likes=True, fetch_shares=True))
| [
"[email protected]"
] | |
1c65020969dd11db886967e06b349d0c3a57a32b | 2363cc167aa8bb8f652298025ca183a35f8c655e | /skytools/gzlog.py | 2e601cd64502b172db9f3dd49ebfd1cb673b0558 | [
"ISC"
] | permissive | zzahti/python-skytools | a15d1e0ad6e02ca62d6596bd647766aa6e64eac7 | b4633810efe9f7640fbc2028005be548fbad7ccb | refs/heads/master | 2022-12-22T16:51:10.312223 | 2020-09-29T10:05:15 | 2020-09-29T10:05:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | """Atomic append of gzipped data.
The point is - if several gzip streams are concatenated,
they are read back as one whole stream.
"""
import gzip
import io
__all__ = ('gzip_append',)
def gzip_append(filename, data, level=6):
"""Append a block of data to file with safety checks."""
# compress data
buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, compresslevel=level, mode="w") as g:
g.write(data)
zdata = buf.getvalue()
# append, safely
with open(filename, "ab+", 0) as f:
f.seek(0, 2)
pos = f.tell()
try:
f.write(zdata)
except Exception as ex:
# rollback on error
f.seek(pos, 0)
f.truncate()
raise ex
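# Usage sketch (illustrative, not part of the original module): every call to
# gzip_append() writes one complete gzip member, and Python's gzip module
# reads the concatenated members back as a single stream.
if __name__ == "__main__":
    import os
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        fn = os.path.join(tmp, "demo.gz")
        gzip_append(fn, b"first\n")
        gzip_append(fn, b"second\n")
        with gzip.open(fn, "rb") as f:
            assert f.read() == b"first\nsecond\n"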
| [
"[email protected]"
] | |
cd0066973556580295a757e52687dcc5550a2ef5 | 039f2c747a9524daa1e45501ada5fb19bd5dd28f | /ABC157/ABC157e.py | ff912cc8831fb416bb93d9b7e2507f2861b4dd5d | [
"Unlicense"
] | permissive | yuto-moriizumi/AtCoder | 86dbb4f98fea627c68b5391bf0cc25bcce556b88 | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | refs/heads/master | 2023-03-25T08:10:31.738457 | 2021-03-23T08:48:01 | 2021-03-23T08:48:01 | 242,283,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,522 | py | # ABC157e
#print(bin(toBitSet([1, 2, 3, 2])))
# for i in range(97, 123):
# print(chr(i))
def main():
import sys
sys.setrecursionlimit(10**6)
    # Submit with PyPy unless recursive functions are used
class SegmentTree:
        ##### identity element #####
        ide_ele = 0
        # num: the smallest power of two >= n
def segfunc(self, x, y):
            return x | y  # bitwise OR: merges the sets of letters seen
def __init__(self, n):
super().__init__()
            self.num = 2**(n-1).bit_length()  # n is the length of the original array
self.seg = [self.ide_ele]*(2*self.num+1)
        def init(self, init_val):  # pass the array the tree should represent
# set_val
for i in range(len(init_val)):
self.seg[i+self.num-1] = init_val[i]
# built
for i in range(self.num-2, -1, -1):
self.seg[i] = self.segfunc(self.seg[2*i+1], self.seg[2*i+2])
def update(self, k, x):
k += self.num-1
self.seg[k] = x
while k:
k = (k-1)//2
self.seg[k] = self.segfunc(self.seg[k*2+1], self.seg[k*2+2])
def query(self, p, q):
if q <= p:
                return self.ide_ele
p += self.num-1
q += self.num-2
res = self.ide_ele
while q-p > 1:
if p & 1 == 0:
res = self.segfunc(res, self.seg[p])
if q & 1 == 1:
res = self.segfunc(res, self.seg[q])
q -= 1
p = p//2
q = (q-1)//2
if p == q:
res = self.segfunc(res, self.seg[p])
else:
res = self.segfunc(self.segfunc(res, self.seg[p]), self.seg[q])
return res
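    # Illustrative: with OR as segfunc, query(l, r) over the 1-indexed leaves
    # yields the bitmask of letters in s[l-1:r-1]; bin(...).count('1') below
    # converts that mask into the count of distinct letters.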
n = int(input())
s = list(input())
q = int(input())
def toBitSet(numberSet):
theSet = 0
for i in numberSet:
theSet = theSet | 1 << i
return theSet
def alphabetToZeroIndexed(alphabet):
return ord(alphabet) - 97
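    # e.g. alphabetToZeroIndexed('a') == 0 and alphabetToZeroIndexed('z') == 25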
tree = SegmentTree(n)
for i in range(n):
tree.update(i+1, toBitSet([alphabetToZeroIndexed(s[i])]))
for _ in range(q):
a, b, c = input().split()
b = int(b)
if int(a) == 1:
tree.update(b, toBitSet([alphabetToZeroIndexed(c)]))
else:
c = int(c)
print(bin(tree.query(b, c+1)).count('1'))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
0647f0fd2b71a765527da8668a5b0f8b95257b53 | 603519e0d087967caac72cce854dc7f1dfaa5262 | /bioinformatics stronghold/GC.py | ddc7d57d820b55831cc86a23662f68acb74b711c | [] | no_license | Morpheus2112/Rosalind-exercise | e591570521a12905864cb7e7f72b66816da7ae3a | e1047a5f6725e07c8cbf17594bfe4969cbc5d708 | refs/heads/master | 2022-07-25T00:07:17.316099 | 2020-02-16T07:18:21 | 2020-02-16T07:18:21 | 240,848,262 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,875 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 31 20:48:47 2017
@author: Memphis
"""
"""
Problem
The GC-content of a DNA string is given by the percentage of symbols in the
string that are 'C' or 'G'. For example, the GC-content of "AGCTATAG" is
37.5%. Note that the reverse complement of any DNA string has the same
GC-content.
DNA strings must be labeled when they are consolidated into a database. A
commonly used method of string labeling is called FASTA format. In this format,
the string is introduced by a line that begins with '>', followed by some
labeling information. Subsequent lines contain the string itself; the first line
to begin with '>' indicates the label of the next string.
In Rosalind's implementation, a string in FASTA format will be labeled by the ID
"Rosalind_xxxx", where"xxxx" denotes a four-digit code between 0000 and 9999.
Given: At most 10 DNA strings in FASTA format (of length at most 1 kbp each).
Return: The ID of the string having the highest GC-content, followed by the
GC-content of that string. Rosalind allows for a default error of 0.001 in all
decimal answers unless otherwise stated; please see the note on absolute error
below.
Sample Dataset
>Rosalind_6404
CCTGCGGAAGATCGGCACTAGAATAGCCAGAACCGTTTCTCTGAGGCTTCCGGCCTTCCC
TCCCACTAATAATTCTGAGG
>Rosalind_5959
CCATCGGTAGCGCATCCTTAGTCCAATTAAGTCCCTATCCAGGCGCTCCGCCGAAGGTCT
ATATCCATTTGTCAGCAGACACGC
>Rosalind_0808
CCACCCTCGTGGTATGGCTAGGCATTCAGGAACCGGAGAACGCTTCAGACCAGCCCGGAC
TGGGAACCTGCGGGCAGTAGGTGGAAT
Sample Output
Rosalind_0808
60.919540
"""
import sys
sys.path.append('../../')
import rosalind_utils
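# rosalind_utils is a local helper module (not shown here); gc_content is
# assumed to behave roughly like:
#   def gc_content(seq):
#       return (seq.count('G') + seq.count('C')) / float(len(seq))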
def gc():
records = rosalind_utils.read_fasta("rosalind_gc.txt")
gc_contents = [(desc, rosalind_utils.gc_content(seq)) for desc,seq in records]
max_gc_content = max(gc_contents, key=lambda x: x[1])
print max_gc_content[0]
print max_gc_content[1]*100
gc() | [
"[email protected]"
] | |
b9b884312eaf1f4e5f6dda63fce03a207fa955bf | 60c467d4afc722d284df0bd9d3cb5935fec7148e | /lib/python2.6/site-packages/mx/Stack/stackbench.py | d328678cd23a2f011304080e98664be4e0e62562 | [
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer",
"eGenix"
] | permissive | Nuevosmedios/quicklearn-env | 8ab9a1ba6303c97db5946684ad94c01755d4f482 | 6f777c8d842d42ec8006fc8780b278dc9b35409b | refs/heads/master | 2021-01-01T20:12:19.961777 | 2013-03-12T14:18:35 | 2013-03-12T14:18:35 | 7,424,404 | 0 | 1 | null | 2020-07-25T20:32:38 | 2013-01-03T14:54:02 | Python | UTF-8 | Python | false | false | 2,736 | py | #!/usr/local/bin/python -O
""" stackbench - stack implementation benchmark
Copyright (c) 2000, Marc-Andre Lemburg; mailto:[email protected]
Copyright (c) 2000-2012, eGenix.com Software GmbH; mailto:[email protected]
See the documentation for further information on copyrights,
or contact the author. All Rights Reserved.
"""
import time
from mx import Stack
import UserStack
from sys import argv, exit
try:
numtests, pushes, pops = eval(argv[1]), eval(argv[2]), eval(argv[3])
assert pushes >= pops
except:
print 'usage: stackbench.py <ntests> <pushes> <pops>, where <pushes> >= <pops>'
exit(1)
def test(reps, func):
start_cpu = time.clock()
for i in xrange(reps):
x = func()
return time.clock() - start_cpu
def method1():
x = [] # built-in list
push = x.append
for i in range(pushes): push('spam'+'i')
for i in range(pops): top = x[-1]; del x[-1]
if 0:
def method1a():
x = [] # built-in list
push = x.append
for i in range(pushes): push('spam'+'i')
for i in range(pops): top = x.pop()
def method2():
x = None # built-in tuples
for i in range(pushes): x = ('spam'+'i',x)
for i in range(pops): (top, x) = x
def method3():
s = Stack.Stack() # Stack
push = s.push
pop = s.pop
for i in range(pushes): push('spam'+'i')
for i in range(pops): top = pop()
def method3a():
s = Stack.Stack() # Stack
push = s.push
for i in range(pushes): push('spam'+'i')
t = s.pop_many(pops) # pop all at once
def method3b():
s = Stack.Stack() # Stack
push = s.push
for i in range(pushes): s << ('spam'+'i')
for i in range(pops): top = s >> 1
def method3c():
s = Stack.Stack() # Stack
l = [''] * pushes
for i in range(pushes): l[i] = ('spam'+'i')
s.push_many(l)
s.pop_many(pops)
def method4():
s = UserStack.UserStack() # UserStack
push = s.push
pop = s.pop
for i in range(pushes): push('spam'+'i')
for i in range(pops): top = pop()
print 'list: ', test(numtests, method1) # run each method numtests times
print 'tuples:', test(numtests, method2)
print 'Stack (with push + pop):', test(numtests, method3)
print 'Stack (with push + pop_many):', test(numtests, method3a)
print 'Stack (with << + >>):', test(numtests, method3b)
print 'Stack (with push_many + pop_many):', test(numtests, method3c)
print 'UserStack:', test(numtests, method4)
| [
"[email protected]"
] | |
a6eab24270bd68ff179f3e93105407ecd98842c0 | 19c764171650292706e3f4197530222e3611f3d5 | /legislator/platform/platform.py | 2ba53434c0067b027a728afbd28d712adbdf9f34 | [
"CC0-1.0"
] | permissive | thewayiam/twly_fileHandler | 6eaa3d914796c8484a66fe097c0112054b4105fd | b5757657621710cc7293f3825202e4c0a0f3051d | refs/heads/master | 2020-05-21T20:06:04.634397 | 2018-09-06T09:07:29 | 2018-09-06T09:07:29 | 11,305,913 | 5 | 1 | null | 2014-05-18T06:53:23 | 2013-07-10T07:58:29 | Python | UTF-8 | Python | false | false | 1,538 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.append('../../')
import re
import codecs
import db_settings
import ly_common
def personalPlatform(platform, id):
platform = '\n'.join(platform)
c.execute('''
UPDATE legislator_legislatordetail
SET platform = %s
WHERE id = %s
''', (platform, id))
def partyPlatform(platform, ad, party):
platform = '\n'.join(platform)
c.execute('''
UPDATE legislator_legislatordetail
SET platform = %s
WHERE ad = %s AND party = %s AND constituency = 0
''', (platform, ad, party))
conn = db_settings.con()
c = conn.cursor()
ad = 8
sourcetext = codecs.open(u"%d立委政見.txt" % ad, "r", "utf-8")
lines = []
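# Assumed input format: blocks separated by blank lines, where the first line
# of each block is a legislator's name and the rest is the platform text.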
for line in sourcetext.readlines():
line = line.strip()
lines.append(line)
if not line:
uid = ly_common.GetLegislatorId(c, lines[0])
        if uid:  # lines[0] is expected to be a legislator's name
legislator_id = ly_common.GetLegislatorDetailId(c, uid, ad)
else:
print lines[0]
raw_input()
personalPlatform(lines[1:], legislator_id)
lines = []
conn.commit()
print u'%d立委政見Succeed' % ad
sourcetext = codecs.open(u"%d政黨政見.txt" % ad, "r", "utf-8")
lines = []
for line in sourcetext.readlines():
line = line.strip()
lines.append(line)
if not line:
partyPlatform(lines[1:], ad, lines[0])
lines = []
conn.commit()
print u'%d政黨政見Succeed' % ad
| [
"[email protected]"
] | |
0f4ab7fd73baac9575e00a1c630ead91c45359c4 | 638b207f3c7706cb0cb9dd1d6cf112ab91f69837 | /0x11-python-network_1/10-my_github.py | 6631d3a4c0e87af20fcaa3bd5fc5ecf014e28088 | [] | no_license | NasserAbuchaibe/holbertonschool-higher_level_programming | c30a066dfd4525e936b4121f930c3a63e6d911d6 | 5b0c11423e11bd9201cc057775c099eb0259f305 | refs/heads/master | 2022-12-16T17:15:57.775143 | 2020-09-25T03:00:56 | 2020-09-25T03:00:56 | 259,379,453 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | #!/usr/bin/python3
""" Python script that fetches https://intranet.hbtn.io/status
"""
import requests
from sys import argv
from requests.auth import HTTPBasicAuth
if __name__ == "__main__":
""" ok
"""
url = "https://api.github.com/users/"
usr = argv[1]
passw = argv[2]
r = requests.get("{}{}".format(url, usr), auth=(usr, passw))
print(r.json().get('id'))
| [
"[email protected]"
] | |
e6eba436be2d42a357bff3df3729c704ace538d6 | 3faeae950e361eb818830ad210f30a6232e5d7f1 | /wepppy/nodb/mods/locations/location_mixin.py | f24103800b72ee919308ab976b101894aeab0f1f | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | rogerlew/wepppy | 401e6cee524073209a4445c680b43ea0c6102dfc | 1af4548d725b918b73ee022f2572a63b5194cce0 | refs/heads/master | 2023-07-21T12:56:26.979112 | 2023-07-13T23:26:22 | 2023-07-13T23:26:22 | 125,935,882 | 10 | 6 | NOASSERTION | 2023-03-07T20:42:52 | 2018-03-20T00:01:27 | HTML | UTF-8 | Python | false | false | 4,404 | py | # Copyright (c) 2016-2018, University of Idaho
# All rights reserved.
#
# Roger Lew ([email protected])
#
# The project described was supported by NSF award number IIA-1301792
# from the NSF Idaho EPSCoR Program and by the National Science Foundation.
import os
import json
import csv
from copy import deepcopy
from os.path import join as _join
from os.path import split as _split
from os.path import exists as _exists
import jsonpickle
# from wepppy.all_your_base import RasterDatasetInterpolator
from ...landuse import Landuse
from ...soils import Soils
from ...watershed import Watershed
from ...wepp import Wepp
from wepppy.wepp.soils.utils import read_lc_file, soil_specialization, soil_is_water
from wepppy.wepp.soils.utils import WeppSoilUtil
from ...base import NoDbBase, TriggerEvents
_thisdir = os.path.dirname(__file__)
_data_dir = _join(_thisdir, 'data')
class LocationMixin(object):
@property
def location_doms(self):
data_dir = self.data_dir
lc_dict = read_lc_file(_join(data_dir, self.lc_lookup_fn))
return set([lc_dict[k]['LndcvrID'] for k in lc_dict])
def remap_landuse(self):
data_dir = self.data_dir
with open(_join(data_dir, 'landcover_map.json')) as fp:
lc_map = json.load(fp)
location_doms = self.location_doms
landuse = Landuse.getInstance(self.wd)
landuse.lock()
# noinspection PyBroadException
try:
for topaz_id, dom in landuse.domlc_d.items():
if int(dom) not in location_doms:
landuse.domlc_d[topaz_id] = lc_map[dom]
landuse.dump_and_unlock()
except Exception:
landuse.unlock('-f')
raise
def modify_soils(self, default_wepp_type=None, lc_lookup_fn=None):
data_dir = self.data_dir
wd = self.wd
soils_dir = self.soils_dir
if default_wepp_type is None:
default_wepp_type = self.default_wepp_type
if lc_lookup_fn is None:
lc_lookup_fn = self.lc_lookup_fn
lc_dict = read_lc_file(_join(data_dir, lc_lookup_fn))
with open(_join(data_dir, 'lc_soiltype_map.json')) as fp:
soil_type_map = json.load(fp)
soils = Soils.getInstance(wd)
soils.lock()
# noinspection PyBroadException
try:
domsoil_d = soils.domsoil_d
landuse = Landuse.getInstance(wd)
domlc_d = landuse.domlc_d
_soils = {}
for topaz_id, mukey in domsoil_d.items():
dom = domlc_d[topaz_id]
wepp_type = soil_type_map.get(mukey, default_wepp_type)
replacements = lc_dict[(dom, wepp_type)]
k = '%s-%s-%s' % (mukey, wepp_type, dom)
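                # e.g. mukey '2397701', wepp_type 'granitic' and dom '105'
                # give the specialized key '2397701-granitic-105' (values
                # are illustrative).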
src_fn = _join(soils_dir, '%s.sol' % mukey)
dst_fn = _join(soils_dir, '%s.sol' % k)
is_water = soil_is_water(src_fn)
if is_water:
_soils[mukey] = deepcopy(soils.soils[mukey])
_soils[mukey].area = 0.0
domsoil_d[topaz_id] = mukey
else:
if k not in _soils:
caller = ':'.join(_split(self._nodb)[-1].split('.')[::-1])
soil_u = WeppSoilUtil(src_fn)
mod_soil = soil_u.to_7778disturbed(replacements, hostname='dev.wepp.cloud')
mod_soil.write(dst_fn)
# soil_specialization(src_fn, dst_fn, replacements, caller=caller)
_soils[k] = deepcopy(soils.soils[mukey])
_soils[k].mukey = k
_soils[k].fname = '%s.sol' % k
_soils[k].area = 0.0
domsoil_d[topaz_id] = k
# need to recalculate the pct_coverages
watershed = Watershed.getInstance(self.wd)
for topaz_id, k in domsoil_d.items():
_soils[k].area += watershed.area_of(topaz_id)
for k in _soils:
coverage = 100.0 * _soils[k].area / watershed.wsarea
_soils[k].pct_coverage = coverage
soils.soils = _soils
soils.domsoil_d = domsoil_d
soils.dump_and_unlock()
except Exception:
soils.unlock('-f')
raise
| [
"[email protected]"
] | |
867c6f465a5f4fad71ff4486ff5dc653a0851c06 | 00c9828a8b3b7984cf835f22fca38cf75dce7bae | /umongo/frameworks/mongomock.py | 6a731d95152fe6c01537d4d1c436804f03dd96e6 | [
"MIT"
] | permissive | KeithGao/umongo | d5bc32b8077af3cc409715bfd667af7a5ebfa461 | 04c7fbad4b4e2202d6e2b88c9cafe30fb8f62abe | refs/heads/master | 2021-01-20T10:19:20.213449 | 2017-08-15T06:39:45 | 2017-08-15T06:39:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | from .pymongo import PyMongoBuilder
from mongomock.database import Database
# Mongomock aims at working like pymongo
class MongoMockBuilder(PyMongoBuilder):
@staticmethod
def is_compatible_with(db):
return isinstance(db, Database)
| [
"[email protected]"
] | |
80b1daabb5f26c6cff42c9e3b127effdf3808038 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02715/s161181019.py | 6199ba7618c16ee2f820a448bd7e395cebbab40e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | def modpow(val, n, mod):
ret = 1
while n:
if n & 1:
ret = (ret * val) % mod
val = (val * val) % mod
n = n >> 1
return ret
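# Illustrative check of the square-and-multiply helper above:
# modpow(3, 4, 5) == (3 ** 4) % 5 == 1, with every intermediate kept below mod**2.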
mod = 10 ** 9 + 7
n, k = map(int, input().split())
my_dict = dict()
ret = 0
for i in range(k, 0, -1):
tmp = modpow(k // i, n, mod)
cnt = 2
while True:
val = i * cnt
if val > k:
break
else:
cnt += 1
tmp -= my_dict[val]
my_dict[i] = tmp
ret += tmp * i % mod
print(ret % mod)
| [
"[email protected]"
] | |
b1c6b964d138e9cb8fad01dbbfce6c7646c358d5 | d7e77abfa037a5e220980fdf197cb7b3c3b4cb47 | /calvin/calvinsys/io/display.py | 939f1eca551e16872b1d3d4640f7c9c2a077654f | [
"Apache-2.0"
] | permissive | imriss/calvin-base | e82db61cb815644653b6c5f51f7b4cdb0151a211 | 31e450b80ce0c8fedc3042464de7e405ac714953 | refs/heads/master | 2020-12-25T14:39:04.250727 | 2017-05-03T17:03:38 | 2017-05-03T17:03:38 | 49,008,248 | 0 | 0 | null | 2016-08-22T17:03:04 | 2016-01-04T16:17:07 | Python | UTF-8 | Python | false | false | 1,374 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.runtime.south.plugins.io.display import display
class Display(object):
"""
Control a display
"""
def __init__(self):
self.display = display.Display()
def enable(self, enable):
"""
Enable/disable display
"""
self.display.enable(enable)
def show(self, text, textcolor, bgcolor):
"""
Display text
"""
self.display.show(text, textcolor, bgcolor)
def show_text(self, text):
self.display.show(text, None, None)
def clear(self):
"""
Clear display
"""
self.display.clear()
def register(node=None, actor=None):
"""
Called when the system object is first created.
"""
return Display()
| [
"[email protected]"
] | |
27a57f0967b722bc1d77bbcbc21be306922419ab | 3b9b4049a8e7d38b49e07bb752780b2f1d792851 | /src/ozone/tools/jhbuild/wayland.jhbuildrc | 8e7a0e9b548aa70bdc496bdb8d1a8b6cd47eac28 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | webosce/chromium53 | f8e745e91363586aee9620c609aacf15b3261540 | 9171447efcf0bb393d41d1dc877c7c13c46d8e38 | refs/heads/webosce | 2020-03-26T23:08:14.416858 | 2018-08-23T08:35:17 | 2018-09-20T14:25:18 | 145,513,343 | 0 | 2 | Apache-2.0 | 2019-08-21T22:44:55 | 2018-08-21T05:52:31 | null | UTF-8 | Python | false | false | 295 | jhbuildrc | # -*- mode: python -*-
import os
use_local_modulesets = True
moduleset = os.getcwd() + '/wayland.modules'
modules = ['weston']
checkoutroot = os.getcwd() + '/../../../out/wayland/source'
prefix = os.getcwd() +'/../../../out/wayland/root'
autogenargs=''
os.environ['EGL_PLATFORM'] = 'wayland'
| [
"[email protected]"
] | |
39a8ca9fc07790aa0de27c59f2f09cf9d56cab18 | 3f9d28984403cdd84b984d7a82eb6136018966a4 | /delete_pos_appointment/models/pos_appointment_delete.py | de31796cf079c85dbab76ae12d434ca0e6ec781b | [] | no_license | joevm018/temasq | bee1c69aee4c87de415e66e236b40c28201e2951 | 74f003c6a009716bf26a6438f5ee48e7dfcbb030 | refs/heads/master | 2023-03-18T23:42:13.598853 | 2021-03-11T11:35:16 | 2021-03-11T11:35:16 | 346,671,390 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,896 | py | from odoo import api, fields, models,_
from odoo.exceptions import UserError
class PosAppointmentDelete(models.Model):
_name = "pos.appointment.delete"
date_from = fields.Datetime('From', required=True)
date_to = fields.Datetime('To', required=True)
@api.multi
def cancel_archive_appointment(self):
dom = [('date_order', '<=', self.date_to), ('date_order', '>=', self.date_from)]
pos_orders = self.env['pos.order'].search(dom, order='date_order asc')
for pos_ord in pos_orders:
pos_ord.write({'state': 'cancel','active':False})
@api.multi
def cancel_appointment(self):
dom = [('date_order', '<=', self.date_to), ('date_order', '>=', self.date_from)]
pos_orders = self.env['pos.order'].search(dom, order='date_order asc')
pos_orders.action_cancel_pos_appt()
class PosOrder(models.Model):
_inherit = "pos.order"
active = fields.Boolean(default=True)
@api.multi
def action_cancel_pos_appt(self):
for order in self:
for ord_line in order.lines:
# if order.redeemed_gift_id:
if order.redeemed_package_id:
session_avail = self.env['combo.session'].search(
[('order_line_id', '=',ord_line.id),
('order_id', '=', ord_line.order_id.id)], limit=1)
ord_line.write({
'package_card_id': False,
'combo_session_id': session_avail.id,
})
session_avail.write({
'order_line_id': False,
'order_id': False,
'state': 'draft',
'redeemed_date': False,
})
if order.purchased_gift_card_ids:
disc_gift_card_vals = {
'purchased_date': False,
'partner_id': False,
'gift_order_id': False,
'state': 'new',
'discount_gift_card_amount': 0.0,
'remaining_amount': 0.0,
}
order.purchased_gift_card_ids.write(disc_gift_card_vals)
if order.purchased_package_card_ids:
package_card_vals = {
'purchased_date': False,
'partner_id': False,
'package_order_id': False,
'state': 'new',
'package_card_amount': 0.0,
'combo_session_ids': False,
}
order.purchased_package_card_ids.write(package_card_vals)
if not order.invoice_id and not order.statement_ids and not order.picking_id:
order.write({'state': 'cancel'})
| [
"[email protected]"
] | |
c0166a9998e05c2bd86f74fd787ec8923c9a7f9c | f62fd455e593a7ad203a5c268e23129473d968b6 | /mistral-4.0.2/mistral/engine/default_executor.py | b360cc216ed783679d26b412e11415a5f31d00e3 | [
"Apache-2.0"
] | permissive | MinbinGong/OpenStack-Ocata | 5d17bcd47a46d48ff9e71e2055f667836174242f | 8b7650128cfd2fdf5d6c8bc4613ac2e396fb2fb3 | refs/heads/master | 2021-06-23T05:24:37.799927 | 2017-08-14T04:33:05 | 2017-08-14T04:33:05 | 99,709,985 | 0 | 2 | null | 2020-07-22T22:06:22 | 2017-08-08T15:48:44 | Python | UTF-8 | Python | false | false | 5,258 | py | # Copyright 2013 - Mirantis, Inc.
# Copyright 2016 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from osprofiler import profiler
from mistral.actions import action_factory as a_f
from mistral.engine import base
from mistral.engine.rpc_backend import rpc
from mistral import exceptions as exc
from mistral.utils import inspect_utils as i_u
from mistral.workflow import utils as wf_utils
LOG = logging.getLogger(__name__)
class DefaultExecutor(base.Executor):
def __init__(self):
self._engine_client = rpc.get_engine_client()
@profiler.trace('executor-run-action', hide_args=True)
def run_action(self, action_ex_id, action_class_str, attributes,
action_params, safe_rerun, redelivered=False):
"""Runs action.
:param action_ex_id: Action execution id.
:param action_class_str: Path to action class in dot notation.
:param attributes: Attributes of action class which will be set to.
:param action_params: Action parameters.
:param safe_rerun: Tells if given action can be safely rerun.
:param redelivered: Tells if given action was run before on another
executor.
"""
def send_error_back(error_msg):
error_result = wf_utils.Result(error=error_msg)
if action_ex_id:
self._engine_client.on_action_complete(
action_ex_id,
error_result
)
return None
return error_result
if redelivered and not safe_rerun:
msg = (
"Request to run action %s was redelivered, but action %s"
" cannot be re-run safely. The only safe thing to do is fail"
" action."
% (action_class_str, action_class_str)
)
return send_error_back(msg)
action_cls = a_f.construct_action_class(action_class_str, attributes)
# Instantiate action.
try:
action = action_cls(**action_params)
except Exception as e:
msg = ("Failed to initialize action %s. Action init params = %s."
" Actual init params = %s. More info: %s"
% (action_class_str, i_u.get_arg_list(action_cls.__init__),
action_params.keys(), e))
LOG.warning(msg)
return send_error_back(msg)
# Run action.
try:
result = action.run()
# Note: it's made for backwards compatibility with already
# existing Mistral actions which don't return result as
# instance of workflow.utils.Result.
if not isinstance(result, wf_utils.Result):
result = wf_utils.Result(data=result)
except Exception as e:
msg = ("Failed to run action [action_ex_id=%s, action_cls='%s',"
" attributes='%s', params='%s']\n %s"
% (action_ex_id, action_cls, attributes, action_params, e))
LOG.exception(msg)
return send_error_back(msg)
# Send action result.
try:
if action_ex_id and (action.is_sync() or result.is_error()):
self._engine_client.on_action_complete(
action_ex_id,
result,
async_=True
)
except exc.MistralException as e:
# In case of a Mistral exception we can try to send error info to
# engine because most likely it's not related to the infrastructure
# such as message bus or network. One known case is when the action
# returns a bad result (e.g. invalid unicode) which can't be
# serialized.
msg = ("Failed to call engine's on_action_complete() method due"
" to a Mistral exception"
" [action_ex_id=%s, action_cls='%s',"
" attributes='%s', params='%s']\n %s"
% (action_ex_id, action_cls, attributes, action_params, e))
LOG.exception(msg)
return send_error_back(msg)
except Exception as e:
# If it's not a Mistral exception all we can do is only
# log the error.
msg = ("Failed to call engine's on_action_complete() method due"
" to an unexpected exception"
" [action_ex_id=%s, action_cls='%s',"
" attributes='%s', params='%s']\n %s"
% (action_ex_id, action_cls, attributes, action_params, e))
LOG.exception(msg)
return result
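# Invocation sketch (illustrative only; in production the RPC layer drives this):
#   executor = DefaultExecutor()
#   executor.run_action(action_ex_id=None,
#                       action_class_str='mistral.actions.std_actions.EchoAction',
#                       attributes={}, action_params={'output': 'hi'},
#                       safe_rerun=True)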
| [
"[email protected]"
] | |
6e628f32e0906b40690193765db3d1b9a953f010 | 59b3dce3c770e70b2406cc1dd623a2b1f68b8394 | /python_1/lessons/funcalls.py | 95393a4a7b9cdb9de7b774a53f683151fce9c03d | [] | no_license | patrickbeeson/python-classes | 04ed7b54fc4e1152a191eeb35d42adc214b08e39 | b5041e71badd1ca2c013828e3b2910fb02e9728f | refs/heads/master | 2020-05-20T07:17:36.693960 | 2015-01-23T14:41:46 | 2015-01-23T14:41:46 | 29,736,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | #!/usr/local/bin/python3
""" Take user input, convert to float, and print
out the number to two decimal places, with commas. """
import funcs
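# funcs is a local helper module (not shown); commareal is assumed to insert
# thousands separators, e.g. commareal("1234567.89") -> "1,234,567.89".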
while True:
inval = input("Enter a number: ")
if not inval:
break
number = float(inval)
print(funcs.commareal("{0:.2f}".format(number)))
| [
"[email protected]"
] | |
875425c4fd573a994735fa5764ea42ffe381a71a | 48c3f0860c8c7e117ae1867a7660ff4affcb3d27 | /python/cosmicRays/pexConfig.py | 5b47870e153ccacb46e2c13f74f3df9d93709a51 | [
"NCSA"
] | permissive | DarkEnergySurvey/cosmicRays | 4b54291e99c32c11951c228d519a11113f1bebca | 5c29bd9fc4a9f37e298e897623ec98fff4a8d539 | refs/heads/main | 2023-03-28T10:34:34.282404 | 2021-03-29T20:25:49 | 2021-03-29T20:25:49 | 347,135,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52,409 | py | # This file is part of pex_config.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This software is dual licensed under the GNU General Public License and also
# under a 3-clause BSD license. Recipients may choose which of these licenses
# to use; please see the files gpl-3.0.txt and/or bsd_license.txt,
# respectively. If you choose the GPL option then the following text applies
# (but note that there is still no warranty even if you opt for BSD instead):
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__all__ = ("Config", "ConfigMeta", "Field", "FieldValidationError")
import io
import importlib
import os
import re
import sys
import math
import copy
import tempfile
import shutil
import warnings
# if YAML is not available that's fine and we simply don't register
# the yaml representer since we know it won't be used.
try:
import yaml
except ImportError:
yaml = None
YamlLoaders = ()
doImport = None
from cosmicRays.pexComparison import getComparisonName, compareScalars, compareConfigs
from cosmicRays.pexCallStack import getStackFrame, getCallStack
if yaml:
YamlLoaders = (yaml.Loader, yaml.FullLoader, yaml.SafeLoader, yaml.UnsafeLoader)
try:
# CLoader is not always available
from yaml import CLoader
YamlLoaders += (CLoader,)
except ImportError:
pass
def _joinNamePath(prefix=None, name=None, index=None):
"""Generate nested configuration names.
"""
if not prefix and not name:
raise ValueError("Invalid name: cannot be None")
elif not name:
name = prefix
elif prefix and name:
name = prefix + "." + name
if index is not None:
return "%s[%r]" % (name, index)
else:
return name
def _autocast(x, dtype):
"""Cast a value to a type, if appropriate.
Parameters
----------
x : object
A value.
dtype : tpye
Data type, such as `float`, `int`, or `str`.
Returns
-------
values : object
If appropriate, the returned value is ``x`` cast to the given type
``dtype``. If the cast cannot be performed the original value of
``x`` is returned.
"""
if dtype == float and isinstance(x, int):
return float(x)
return x
def _typeStr(x):
"""Generate a fully-qualified type name.
Returns
-------
`str`
Fully-qualified type name.
Notes
-----
This function is used primarily for writing config files to be executed
later upon with the 'load' function.
"""
if hasattr(x, '__module__') and hasattr(x, '__name__'):
xtype = x
else:
xtype = type(x)
if (sys.version_info.major <= 2 and xtype.__module__ == '__builtin__') or xtype.__module__ == 'builtins':
return xtype.__name__
else:
return "%s.%s" % (xtype.__module__, xtype.__name__)
if yaml:
def _yaml_config_representer(dumper, data):
"""Represent a Config object in a form suitable for YAML.
Stores the serialized stream as a scalar block string.
"""
stream = io.StringIO()
data.saveToStream(stream)
config_py = stream.getvalue()
# Strip multiple newlines from the end of the config
# This simplifies the YAML to use | and not |+
config_py = config_py.rstrip() + "\n"
# Trailing spaces force pyyaml to use non-block form.
# Remove the trailing spaces so it has no choice
config_py = re.sub(r"\s+$", "\n", config_py, flags=re.MULTILINE)
# Store the Python as a simple scalar
return dumper.represent_scalar("lsst.pex.config.Config", config_py, style="|")
def _yaml_config_constructor(loader, node):
"""Construct a config from YAML"""
config_py = loader.construct_scalar(node)
return Config._fromPython(config_py)
# Register a generic constructor for Config and all subclasses
# Need to register for all the loaders we would like to use
for loader in YamlLoaders:
yaml.add_constructor("lsst.pex.config.Config", _yaml_config_constructor, Loader=loader)
class ConfigMeta(type):
"""A metaclass for `lsst.pex.config.Config`.
Notes
-----
``ConfigMeta`` adds a dictionary containing all `~lsst.pex.config.Field`
class attributes as a class attribute called ``_fields``, and adds
the name of each field as an instance variable of the field itself (so you
don't have to pass the name of the field to the field constructor).
"""
def __init__(cls, name, bases, dict_):
type.__init__(cls, name, bases, dict_)
cls._fields = {}
cls._source = getStackFrame()
def getFields(classtype):
fields = {}
bases = list(classtype.__bases__)
bases.reverse()
for b in bases:
fields.update(getFields(b))
for k, v in classtype.__dict__.items():
if isinstance(v, Field):
fields[k] = v
return fields
fields = getFields(cls)
for k, v in fields.items():
setattr(cls, k, copy.deepcopy(v))
def __setattr__(cls, name, value):
if isinstance(value, Field):
value.name = name
cls._fields[name] = value
type.__setattr__(cls, name, value)
class FieldValidationError(ValueError):
"""Raised when a ``~lsst.pex.config.Field`` is not valid in a
particular ``~lsst.pex.config.Config``.
Parameters
----------
field : `lsst.pex.config.Field`
The field that was not valid.
config : `lsst.pex.config.Config`
The config containing the invalid field.
msg : `str`
Text describing why the field was not valid.
"""
def __init__(self, field, config, msg):
self.fieldType = type(field)
"""Type of the `~lsst.pex.config.Field` that incurred the error.
"""
self.fieldName = field.name
"""Name of the `~lsst.pex.config.Field` instance that incurred the
error (`str`).
See also
--------
lsst.pex.config.Field.name
"""
self.fullname = _joinNamePath(config._name, field.name)
"""Fully-qualified name of the `~lsst.pex.config.Field` instance
(`str`).
"""
self.history = config.history.setdefault(field.name, [])
"""Full history of all changes to the `~lsst.pex.config.Field`
instance.
"""
self.fieldSource = field.source
"""File and line number of the `~lsst.pex.config.Field` definition.
"""
self.configSource = config._source
error = "%s '%s' failed validation: %s\n"\
"For more information see the Field definition at:\n%s"\
" and the Config definition at:\n%s" % \
(self.fieldType.__name__, self.fullname, msg,
self.fieldSource.format(), self.configSource.format())
super().__init__(error)
class Field:
"""A field in a `~lsst.pex.config.Config` that supports `int`, `float`,
`complex`, `bool`, and `str` data types.
Parameters
----------
doc : `str`
A description of the field for users.
dtype : type
The field's data type. ``Field`` only supports basic data types:
`int`, `float`, `complex`, `bool`, and `str`. See
`Field.supportedTypes`.
default : object, optional
The field's default value.
check : callable, optional
A callable that is called with the field's value. This callable should
return `False` if the value is invalid. More complex inter-field
validation can be written as part of the
`lsst.pex.config.Config.validate` method.
optional : `bool`, optional
This sets whether the field is considered optional, and therefore
doesn't need to be set by the user. When `False`,
`lsst.pex.config.Config.validate` fails if the field's value is `None`.
deprecated : None or `str`, optional
A description of why this Field is deprecated, including removal date.
If not None, the string is appended to the docstring for this Field.
Raises
------
ValueError
Raised when the ``dtype`` parameter is not one of the supported types
(see `Field.supportedTypes`).
See also
--------
ChoiceField
ConfigChoiceField
ConfigDictField
ConfigField
ConfigurableField
DictField
ListField
RangeField
RegistryField
Notes
-----
``Field`` instances (including those of any subclass of ``Field``) are used
as class attributes of `~lsst.pex.config.Config` subclasses (see the
example, below). ``Field`` attributes work like the `property` attributes
of classes that implement custom setters and getters. `Field` attributes
belong to the class, but operate on the instance. Formally speaking,
`Field` attributes are `descriptors
<https://docs.python.org/3/howto/descriptor.html>`_.
When you access a `Field` attribute on a `Config` instance, you don't
get the `Field` instance itself. Instead, you get the value of that field,
which might be a simple type (`int`, `float`, `str`, `bool`) or a custom
container type (like a `lsst.pex.config.List`) depending on the field's
type. See the example, below.
Examples
--------
Instances of ``Field`` should be used as class attributes of
`lsst.pex.config.Config` subclasses:
>>> from lsst.pex.config import Config, Field
>>> class Example(Config):
... myInt = Field("An integer field.", int, default=0)
    ...
    >>> config = Example()
    >>> print(config.myInt)
0
>>> config.myInt = 5
>>> print(config.myInt)
5
"""
supportedTypes = set((str, bool, float, int, complex))
"""Supported data types for field values (`set` of types).
"""
def __init__(self, doc, dtype, default=None, check=None, optional=False, deprecated=None):
if dtype not in self.supportedTypes:
raise ValueError("Unsupported Field dtype %s" % _typeStr(dtype))
source = getStackFrame()
self._setup(doc=doc, dtype=dtype, default=default, check=check, optional=optional, source=source,
deprecated=deprecated)
def _setup(self, doc, dtype, default, check, optional, source, deprecated):
"""Set attributes, usually during initialization.
"""
self.dtype = dtype
"""Data type for the field.
"""
# append the deprecation message to the docstring.
if deprecated is not None:
doc = f"{doc} Deprecated: {deprecated}"
self.doc = doc
"""A description of the field (`str`).
"""
self.deprecated = deprecated
"""If not None, a description of why this field is deprecated (`str`).
"""
self.__doc__ = f"{doc} (`{dtype.__name__}`"
if optional or default is not None:
self.__doc__ += f", default ``{default!r}``"
self.__doc__ += ")"
self.default = default
"""Default value for this field.
"""
self.check = check
"""A user-defined function that validates the value of the field.
"""
self.optional = optional
"""Flag that determines if the field is required to be set (`bool`).
When `False`, `lsst.pex.config.Config.validate` will fail if the
field's value is `None`.
"""
self.source = source
"""The stack frame where this field is defined (`list` of
`lsst.pex.config.callStack.StackFrame`).
"""
def rename(self, instance):
"""Rename the field in a `~lsst.pex.config.Config` (for internal use
only).
Parameters
----------
instance : `lsst.pex.config.Config`
The config instance that contains this field.
Notes
-----
This method is invoked by the `lsst.pex.config.Config` object that
contains this field and should not be called directly.
Renaming is only relevant for `~lsst.pex.config.Field` instances that
hold subconfigs. `~lsst.pex.config.Fields` that hold subconfigs should
rename each subconfig with the full field name as generated by
`lsst.pex.config.config._joinNamePath`.
"""
pass
def validate(self, instance):
"""Validate the field (for internal use only).
Parameters
----------
instance : `lsst.pex.config.Config`
The config instance that contains this field.
Raises
------
lsst.pex.config.FieldValidationError
Raised if verification fails.
Notes
-----
This method provides basic validation:
- Ensures that the value is not `None` if the field is not optional.
- Ensures type correctness.
- Ensures that the user-provided ``check`` function is valid.
Most `~lsst.pex.config.Field` subclasses should call
`lsst.pex.config.field.Field.validate` if they re-implement
`~lsst.pex.config.field.Field.validate`.
"""
value = self.__get__(instance)
if not self.optional and value is None:
raise FieldValidationError(self, instance, "Required value cannot be None")
def freeze(self, instance):
"""Make this field read-only (for internal use only).
Parameters
----------
instance : `lsst.pex.config.Config`
The config instance that contains this field.
Notes
-----
Freezing is only relevant for fields that hold subconfigs. Fields which
hold subconfigs should freeze each subconfig.
**Subclasses should implement this method.**
"""
pass
def _validateValue(self, value):
"""Validate a value.
Parameters
----------
value : object
The value being validated.
Raises
------
TypeError
Raised if the value's type is incompatible with the field's
``dtype``.
ValueError
Raised if the value is rejected by the ``check`` method.
"""
if value is None:
return
if not isinstance(value, self.dtype):
msg = "Value %s is of incorrect type %s. Expected type %s" % \
(value, _typeStr(value), _typeStr(self.dtype))
raise TypeError(msg)
if self.check is not None and not self.check(value):
msg = "Value %s is not a valid value" % str(value)
raise ValueError(msg)
def _collectImports(self, instance, imports):
"""This function should call the _collectImports method on all config
objects the field may own, and union them with the supplied imports
set.
Parameters
----------
instance : instance or subclass of `lsst.pex.config.Config`
A config object that has this field defined on it
imports : `set`
Set of python modules that need imported after persistence
"""
pass
def save(self, outfile, instance):
"""Save this field to a file (for internal use only).
Parameters
----------
outfile : file-like object
A writeable field handle.
instance : `Config`
The `Config` instance that contains this field.
Notes
-----
This method is invoked by the `~lsst.pex.config.Config` object that
contains this field and should not be called directly.
The output consists of the documentation string
(`lsst.pex.config.Field.doc`) formatted as a Python comment. The second
line is formatted as an assignment: ``{fullname}={value}``.
This output can be executed with Python.
"""
value = self.__get__(instance)
fullname = _joinNamePath(instance._name, self.name)
if self.deprecated and value == self.default:
return
# write full documentation string as comment lines
# (i.e. first character is #)
doc = "# " + str(self.doc).replace("\n", "\n# ")
if isinstance(value, float) and not math.isfinite(value):
# non-finite numbers need special care
outfile.write(u"{}\n{}=float('{!r}')\n\n".format(doc, fullname, value))
else:
outfile.write(u"{}\n{}={!r}\n\n".format(doc, fullname, value))
def toDict(self, instance):
"""Convert the field value so that it can be set as the value of an
item in a `dict` (for internal use only).
Parameters
----------
instance : `Config`
The `Config` that contains this field.
Returns
-------
value : object
The field's value. See *Notes*.
Notes
-----
This method invoked by the owning `~lsst.pex.config.Config` object and
should not be called directly.
Simple values are passed through. Complex data structures must be
manipulated. For example, a `~lsst.pex.config.Field` holding a
subconfig should, instead of the subconfig object, return a `dict`
where the keys are the field names in the subconfig, and the values are
the field values in the subconfig.
"""
return self.__get__(instance)
def __get__(self, instance, owner=None, at=None, label="default"):
"""Define how attribute access should occur on the Config instance
This is invoked by the owning config object and should not be called
directly
When the field attribute is accessed on a Config class object, it
returns the field object itself in order to allow inspection of
Config classes.
        When the field attribute is accessed on a config instance, the actual
value described by the field (and held by the Config instance) is
returned.
"""
if instance is None or not isinstance(instance, Config):
return self
else:
return instance._storage[self.name]
def __set__(self, instance, value, at=None, label='assignment'):
"""Set an attribute on the config instance.
Parameters
----------
instance : `lsst.pex.config.Config`
The config instance that contains this field.
value : obj
Value to set on this field.
at : `list` of `lsst.pex.config.callStack.StackFrame`
The call stack (created by
`lsst.pex.config.callStack.getCallStack`).
label : `str`, optional
Event label for the history.
Notes
-----
This method is invoked by the owning `lsst.pex.config.Config` object
and should not be called directly.
Derived `~lsst.pex.config.Field` classes may need to override the
behavior. When overriding ``__set__``, `~lsst.pex.config.Field` authors
should follow the following rules:
- Do not allow modification of frozen configs.
- Validate the new value **before** modifying the field. Except if the
new value is `None`. `None` is special and no attempt should be made
to validate it until `lsst.pex.config.Config.validate` is called.
- Do not modify the `~lsst.pex.config.Config` instance to contain
invalid values.
- If the field is modified, update the history of the
`lsst.pex.config.field.Field` to reflect the changes.
In order to decrease the need to implement this method in derived
`~lsst.pex.config.Field` types, value validation is performed in the
`lsst.pex.config.Field._validateValue`. If only the validation step
differs in the derived `~lsst.pex.config.Field`, it is simpler to
implement `lsst.pex.config.Field._validateValue` than to reimplement
``__set__``. More complicated behavior, however, may require
reimplementation.
"""
if instance._frozen:
raise FieldValidationError(self, instance, "Cannot modify a frozen Config")
history = instance._history.setdefault(self.name, [])
if value is not None:
value = _autocast(value, self.dtype)
try:
self._validateValue(value)
except BaseException as e:
raise FieldValidationError(self, instance, str(e))
instance._storage[self.name] = value
if at is None:
at = getCallStack()
history.append((value, at, label))
def __delete__(self, instance, at=None, label='deletion'):
"""Delete an attribute from a `lsst.pex.config.Config` instance.
Parameters
----------
instance : `lsst.pex.config.Config`
The config instance that contains this field.
at : `list` of `lsst.pex.config.callStack.StackFrame`
The call stack (created by
`lsst.pex.config.callStack.getCallStack`).
label : `str`, optional
Event label for the history.
Notes
-----
This is invoked by the owning `~lsst.pex.config.Config` object and
should not be called directly.
"""
if at is None:
at = getCallStack()
self.__set__(instance, None, at=at, label=label)
def _compare(self, instance1, instance2, shortcut, rtol, atol, output):
"""Compare a field (named `Field.name`) in two
`~lsst.pex.config.Config` instances for equality.
Parameters
----------
instance1 : `lsst.pex.config.Config`
Left-hand side `Config` instance to compare.
instance2 : `lsst.pex.config.Config`
Right-hand side `Config` instance to compare.
shortcut : `bool`, optional
**Unused.**
rtol : `float`, optional
Relative tolerance for floating point comparisons.
atol : `float`, optional
Absolute tolerance for floating point comparisons.
output : callable, optional
A callable that takes a string, used (possibly repeatedly) to
report inequalities.
Notes
-----
This method must be overridden by more complex `Field` subclasses.
See also
--------
lsst.pex.config.compareScalars
"""
v1 = getattr(instance1, self.name)
v2 = getattr(instance2, self.name)
name = getComparisonName(
_joinNamePath(instance1._name, self.name),
_joinNamePath(instance2._name, self.name)
)
return compareScalars(name, v1, v2, dtype=self.dtype, rtol=rtol, atol=atol, output=output)
class RecordingImporter:
"""Importer (for `sys.meta_path`) that records which modules are being
imported.
*This class does not do any importing itself.*
Examples
--------
Use this class as a context manager to ensure it is properly uninstalled
when done:
>>> with RecordingImporter() as importer:
... # import stuff
... import numpy as np
... print("Imported: " + importer.getModules())
"""
def __init__(self):
self._modules = set()
def __enter__(self):
self.origMetaPath = sys.meta_path
sys.meta_path = [self] + sys.meta_path
return self
def __exit__(self, *args):
self.uninstall()
return False # Don't suppress exceptions
def uninstall(self):
"""Uninstall the importer.
"""
sys.meta_path = self.origMetaPath
def find_module(self, fullname, path=None):
"""Called as part of the ``import`` chain of events.
"""
self._modules.add(fullname)
# Return None because we don't do any importing.
return None
def getModules(self):
"""Get the set of modules that were imported.
Returns
-------
modules : `set` of `str`
Set of imported module names.
"""
return self._modules
class Config(metaclass=ConfigMeta):
"""Base class for configuration (*config*) objects.
Notes
-----
A ``Config`` object will usually have several `~lsst.pex.config.Field`
instances as class attributes. These are used to define most of the base
class behavior.
``Config`` implements a mapping API that provides many `dict`-like methods,
such as `keys`, `values`, `items`, `iteritems`, `iterkeys`, and
`itervalues`. ``Config`` instances also support the ``in`` operator to
test if a field is in the config. Unlike a `dict`, ``Config`` classes are
not subscriptable. Instead, access individual fields as attributes of the
configuration instance.
Examples
--------
Config classes are subclasses of ``Config`` that have
`~lsst.pex.config.Field` instances (or instances of
`~lsst.pex.config.Field` subclasses) as class attributes:
>>> from lsst.pex.config import Config, Field, ListField
>>> class DemoConfig(Config):
... intField = Field(doc="An integer field", dtype=int, default=42)
... listField = ListField(doc="List of favorite beverages.", dtype=str,
... default=['coffee', 'green tea', 'water'])
...
>>> config = DemoConfig()
Configs support many `dict`-like APIs:
>>> config.keys()
['intField', 'listField']
>>> 'intField' in config
True
Individual fields can be accessed as attributes of the configuration:
>>> config.intField
42
>>> config.listField.append('earl grey tea')
>>> print(config.listField)
['coffee', 'green tea', 'water', 'earl grey tea']
"""
def __iter__(self):
"""Iterate over fields.
"""
return self._fields.__iter__()
def keys(self):
"""Get field names.
Returns
-------
names : `list`
List of `lsst.pex.config.Field` names.
See also
--------
lsst.pex.config.Config.iterkeys
"""
return list(self._storage.keys())
def values(self):
"""Get field values.
Returns
-------
values : `list`
List of field values.
See also
--------
lsst.pex.config.Config.itervalues
"""
return list(self._storage.values())
def items(self):
"""Get configurations as ``(field name, field value)`` pairs.
Returns
-------
items : `list`
List of tuples for each configuration. Tuple items are:
0. Field name.
1. Field value.
See also
--------
lsst.pex.config.Config.iteritems
"""
return list(self._storage.items())
def iteritems(self):
"""Iterate over (field name, field value) pairs.
Yields
------
item : `tuple`
Tuple items are:
0. Field name.
1. Field value.
See also
--------
lsst.pex.config.Config.items
"""
return iter(self._storage.items())
def itervalues(self):
"""Iterate over field values.
Yields
------
value : obj
A field value.
See also
--------
lsst.pex.config.Config.values
"""
        return iter(self._storage.values())
def iterkeys(self):
"""Iterate over field names
Yields
------
key : `str`
A field's key (attribute name).
See also
--------
lsst.pex.config.Config.values
"""
        return iter(self._storage.keys())
def __contains__(self, name):
"""!Return True if the specified field exists in this config
@param[in] name field name to test for
"""
return self._storage.__contains__(name)
def __new__(cls, *args, **kw):
"""Allocate a new `lsst.pex.config.Config` object.
        In order to ensure that all Config objects are always in a proper state
when handed to users or to derived `~lsst.pex.config.Config` classes,
some attributes are handled at allocation time rather than at
initialization.
This ensures that even if a derived `~lsst.pex.config.Config` class
        implements ``__init__``, its author does not need to be concerned about
        when, or even whether, the base ``Config.__init__`` should be called.
"""
name = kw.pop("__name", None)
at = kw.pop("__at", getCallStack())
# remove __label and ignore it
kw.pop("__label", "default")
instance = object.__new__(cls)
instance._frozen = False
instance._name = name
instance._storage = {}
instance._history = {}
instance._imports = set()
# load up defaults
for field in instance._fields.values():
instance._history[field.name] = []
field.__set__(instance, field.default, at=at + [field.source], label="default")
        # set custom default overrides
instance.setDefaults()
        # set constructor overrides
instance.update(__at=at, **kw)
return instance
def __reduce__(self):
"""Reduction for pickling (function with arguments to reproduce).
We need to condense and reconstitute the `~lsst.pex.config.Config`,
since it may contain lambdas (as the ``check`` elements) that cannot
be pickled.
"""
# The stream must be in characters to match the API but pickle
# requires bytes
stream = io.StringIO()
self.saveToStream(stream)
return (unreduceConfig, (self.__class__, stream.getvalue().encode()))
def setDefaults(self):
"""Subclass hook for computing defaults.
Notes
-----
Derived `~lsst.pex.config.Config` classes that must compute defaults
rather than using the `~lsst.pex.config.Field` instances's defaults
should do so here. To correctly use inherited defaults,
implementations of ``setDefaults`` must call their base class's
``setDefaults``.
"""
pass
def update(self, **kw):
"""Update values of fields specified by the keyword arguments.
Parameters
----------
kw
Keywords are configuration field names. Values are configuration
field values.
Notes
-----
The ``__at`` and ``__label`` keyword arguments are special internal
keywords. They are used to strip out any internal steps from the
history tracebacks of the config. Do not modify these keywords to
subvert a `~lsst.pex.config.Config` instance's history.
Examples
--------
This is a config with three fields:
>>> from lsst.pex.config import Config, Field
>>> class DemoConfig(Config):
... fieldA = Field(doc='Field A', dtype=int, default=42)
... fieldB = Field(doc='Field B', dtype=bool, default=True)
... fieldC = Field(doc='Field C', dtype=str, default='Hello world')
...
>>> config = DemoConfig()
These are the default values of each field:
>>> for name, value in config.iteritems():
... print(f"{name}: {value}")
...
fieldA: 42
fieldB: True
        fieldC: Hello world
Using this method to update ``fieldA`` and ``fieldC``:
>>> config.update(fieldA=13, fieldC='Updated!')
Now the values of each field are:
>>> for name, value in config.iteritems():
... print(f"{name}: {value}")
...
fieldA: 13
fieldB: True
        fieldC: Updated!
"""
at = kw.pop("__at", getCallStack())
label = kw.pop("__label", "update")
for name, value in kw.items():
try:
field = self._fields[name]
field.__set__(self, value, at=at, label=label)
except KeyError:
raise KeyError("No field of name %s exists in config type %s" % (name, _typeStr(self)))
def load(self, filename, root="config"):
"""Modify this config in place by executing the Python code in a
configuration file.
Parameters
----------
filename : `str`
Name of the configuration file. A configuration file is Python
module.
root : `str`, optional
Name of the variable in file that refers to the config being
overridden.
For example, the value of root is ``"config"`` and the file
contains::
config.myField = 5
Then this config's field ``myField`` is set to ``5``.
**Deprecated:** For backwards compatibility, older config files
that use ``root="root"`` instead of ``root="config"`` will be
loaded with a warning printed to `sys.stderr`. This feature will be
removed at some point.
See also
--------
lsst.pex.config.Config.loadFromStream
lsst.pex.config.Config.save
        lsst.pex.config.Config.saveToStream
"""
with open(filename, "r") as f:
code = compile(f.read(), filename=filename, mode="exec")
self.loadFromStream(stream=code, root=root, filename=filename)
def loadFromStream(self, stream, root="config", filename=None):
"""Modify this Config in place by executing the Python code in the
provided stream.
Parameters
----------
stream : file-like object, `str`, or compiled string
Stream containing configuration override code.
root : `str`, optional
Name of the variable in file that refers to the config being
overridden.
For example, the value of root is ``"config"`` and the file
contains::
config.myField = 5
Then this config's field ``myField`` is set to ``5``.
**Deprecated:** For backwards compatibility, older config files
that use ``root="root"`` instead of ``root="config"`` will be
loaded with a warning printed to `sys.stderr`. This feature will be
removed at some point.
filename : `str`, optional
Name of the configuration file, or `None` if unknown or contained
in the stream. Used for error reporting.
See also
--------
lsst.pex.config.Config.load
lsst.pex.config.Config.save
        lsst.pex.config.Config.saveToStream
"""
with RecordingImporter() as importer:
globals = {"__file__": filename}
try:
local = {root: self}
exec(stream, globals, local)
except NameError as e:
if root == "config" and "root" in e.args[0]:
if filename is None:
# try to determine the file name; a compiled string
# has attribute "co_filename",
# an open file has attribute "name", else give up
filename = getattr(stream, "co_filename", None)
if filename is None:
filename = getattr(stream, "name", "?")
print(f"Config override file {filename!r}"
" appears to use 'root' instead of 'config'; trying with 'root'", file=sys.stderr)
local = {"root": self}
exec(stream, globals, local)
else:
raise
self._imports.update(importer.getModules())
def save(self, filename, root="config"):
"""Save a Python script to the named file, which, when loaded,
reproduces this config.
Parameters
----------
filename : `str`
            Destination filename for this configuration.
root : `str`, optional
Name to use for the root config variable. The same value must be
used when loading (see `lsst.pex.config.Config.load`).
See also
--------
lsst.pex.config.Config.saveToStream
lsst.pex.config.Config.load
lsst.pex.config.Config.loadFromStream
"""
d = os.path.dirname(filename)
with tempfile.NamedTemporaryFile(mode="w", delete=False, dir=d) as outfile:
self.saveToStream(outfile, root)
# tempfile is hardcoded to create files with mode '0600'
            # for an explanation of these antics, see:
# https://stackoverflow.com/questions/10291131/how-to-use-os-umask-in-python
umask = os.umask(0o077)
os.umask(umask)
os.chmod(outfile.name, (~umask & 0o666))
# chmod before the move so we get quasi-atomic behavior if the
# source and dest. are on the same filesystem.
# os.rename may not work across filesystems
shutil.move(outfile.name, filename)
def saveToStream(self, outfile, root="config", skipImports=False):
"""Save a configuration file to a stream, which, when loaded,
reproduces this config.
Parameters
----------
outfile : file-like object
            Destination file object to write the config into. Accepts strings, not
bytes.
root
Name to use for the root config variable. The same value must be
used when loading (see `lsst.pex.config.Config.load`).
skipImports : `bool`, optional
If `True` then do not include ``import`` statements in output,
this is to support human-oriented output from ``pipetask`` where
additional clutter is not useful.
See also
--------
lsst.pex.config.Config.save
lsst.pex.config.Config.load
lsst.pex.config.Config.loadFromStream
"""
tmp = self._name
self._rename(root)
try:
if not skipImports:
self._collectImports()
# Remove self from the set, as it is handled explicitly below
self._imports.remove(self.__module__)
configType = type(self)
typeString = _typeStr(configType)
outfile.write(f"import {configType.__module__}\n")
outfile.write(f"assert type({root})=={typeString}, 'config is of type %s.%s instead of "
f"{typeString}' % (type({root}).__module__, type({root}).__name__)\n")
for imp in self._imports:
if imp in sys.modules and sys.modules[imp] is not None:
outfile.write(u"import {}\n".format(imp))
self._save(outfile)
finally:
self._rename(tmp)
def freeze(self):
"""Make this config, and all subconfigs, read-only.
"""
self._frozen = True
for field in self._fields.values():
field.freeze(self)
def _save(self, outfile):
"""Save this config to an open stream object.
Parameters
----------
outfile : file-like object
            Destination file object to write the config into. Accepts strings, not
bytes.
"""
for field in self._fields.values():
field.save(outfile, self)
def _collectImports(self):
"""Adds module containing self to the list of things to import and
then loops over all the fields in the config calling a corresponding
collect method. The field method will call _collectImports on any
configs it may own and return the set of things to import. This
returned set will be merged with the set of imports for this config
class.
"""
self._imports.add(self.__module__)
for name, field in self._fields.items():
field._collectImports(self, self._imports)
def toDict(self):
"""Make a dictionary of field names and their values.
Returns
-------
dict_ : `dict`
Dictionary with keys that are `~lsst.pex.config.Field` names.
Values are `~lsst.pex.config.Field` values.
See also
--------
lsst.pex.config.Field.toDict
Notes
-----
This method uses the `~lsst.pex.config.Field.toDict` method of
individual fields. Subclasses of `~lsst.pex.config.Field` may need to
implement a ``toDict`` method for *this* method to work.
"""
dict_ = {}
for name, field in self._fields.items():
dict_[name] = field.toDict(self)
return dict_
def names(self):
"""Get all the field names in the config, recursively.
Returns
-------
names : `list` of `str`
Field names.
"""
#
# Rather than sort out the recursion all over again use the
# pre-existing saveToStream()
#
with io.StringIO() as strFd:
self.saveToStream(strFd, "config")
contents = strFd.getvalue()
strFd.close()
#
# Pull the names out of the dumped config
#
keys = []
for line in contents.split("\n"):
if re.search(r"^((assert|import)\s+|\s*$|#)", line):
continue
mat = re.search(r"^(?:config\.)?([^=]+)\s*=\s*.*", line)
if mat:
keys.append(mat.group(1))
return keys
def _rename(self, name):
"""Rename this config object in its parent `~lsst.pex.config.Config`.
Parameters
----------
name : `str`
New name for this config in its parent `~lsst.pex.config.Config`.
Notes
-----
This method uses the `~lsst.pex.config.Field.rename` method of
individual `lsst.pex.config.Field` instances.
`lsst.pex.config.Field` subclasses may need to implement a ``rename``
method for *this* method to work.
See also
--------
lsst.pex.config.Field.rename
"""
self._name = name
for field in self._fields.values():
field.rename(self)
def validate(self):
"""Validate the Config, raising an exception if invalid.
Raises
------
lsst.pex.config.FieldValidationError
Raised if verification fails.
Notes
-----
The base class implementation performs type checks on all fields by
calling their `~lsst.pex.config.Field.validate` methods.
Complex single-field validation can be defined by deriving new Field
types. For convenience, some derived `lsst.pex.config.Field`-types
(`~lsst.pex.config.ConfigField` and
`~lsst.pex.config.ConfigChoiceField`) are defined in `lsst.pex.config`
that handle recursing into subconfigs.
Inter-field relationships should only be checked in derived
        `~lsst.pex.config.Config` classes after calling this method, once base
        validation is complete.
"""
for field in self._fields.values():
field.validate(self)
#def formatHistory(self, name, **kwargs):
# """Format a configuration field's history to a human-readable format.
# Parameters
# ----------
# name : `str`
# Name of a `~lsst.pex.config.Field` in this config.
# kwargs
# Keyword arguments passed to `lsst.pex.config.history.format`.
# Returns
# -------
# history : `str`
# A string containing the formatted history.
# See also
# --------
# lsst.pex.config.history.format
# """
# import lsst.pex.config.history as pexHist
# return pexHist.format(self, name, **kwargs)
history = property(lambda x: x._history)
"""Read-only history.
"""
def __setattr__(self, attr, value, at=None, label="assignment"):
"""Set an attribute (such as a field's value).
Notes
-----
Unlike normal Python objects, `~lsst.pex.config.Config` objects are
        locked such that no additional attributes or properties may be added
to them dynamically.
Although this is not the standard Python behavior, it helps to protect
        users from accidentally misspelling a field name, or trying to set a
non-existent field.
"""
if attr in self._fields:
if self._fields[attr].deprecated is not None:
fullname = _joinNamePath(self._name, self._fields[attr].name)
warnings.warn(f"Config field {fullname} is deprecated: {self._fields[attr].deprecated}",
FutureWarning, stacklevel=2)
if at is None:
at = getCallStack()
# This allows Field descriptors to work.
self._fields[attr].__set__(self, value, at=at, label=label)
elif hasattr(getattr(self.__class__, attr, None), '__set__'):
# This allows properties and other non-Field descriptors to work.
return object.__setattr__(self, attr, value)
elif attr in self.__dict__ or attr in ("_name", "_history", "_storage", "_frozen", "_imports"):
# This allows specific private attributes to work.
self.__dict__[attr] = value
else:
# We throw everything else.
raise AttributeError("%s has no attribute %s" % (_typeStr(self), attr))
def __delattr__(self, attr, at=None, label="deletion"):
if attr in self._fields:
if at is None:
at = getCallStack()
self._fields[attr].__delete__(self, at=at, label=label)
else:
object.__delattr__(self, attr)
def __eq__(self, other):
if type(other) == type(self):
for name in self._fields:
thisValue = getattr(self, name)
otherValue = getattr(other, name)
if isinstance(thisValue, float) and math.isnan(thisValue):
if not math.isnan(otherValue):
return False
elif thisValue != otherValue:
return False
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return str(self.toDict())
def __repr__(self):
return "%s(%s)" % (
_typeStr(self),
", ".join("%s=%r" % (k, v) for k, v in self.toDict().items() if v is not None)
)
def compare(self, other, shortcut=True, rtol=1E-8, atol=1E-8, output=None):
"""Compare this configuration to another `~lsst.pex.config.Config` for
equality.
Parameters
----------
other : `lsst.pex.config.Config`
Other `~lsst.pex.config.Config` object to compare against this
config.
shortcut : `bool`, optional
If `True`, return as soon as an inequality is found. Default is
`True`.
rtol : `float`, optional
Relative tolerance for floating point comparisons.
atol : `float`, optional
Absolute tolerance for floating point comparisons.
output : callable, optional
A callable that takes a string, used (possibly repeatedly) to
report inequalities.
Returns
-------
isEqual : `bool`
`True` when the two `lsst.pex.config.Config` instances are equal.
`False` if there is an inequality.
See also
--------
lsst.pex.config.compareConfigs
Notes
-----
Unselected targets of `~lsst.pex.config.RegistryField` fields and
unselected choices of `~lsst.pex.config.ConfigChoiceField` fields
are not considered by this method.
Floating point comparisons are performed by `numpy.allclose`.
"""
name1 = self._name if self._name is not None else "config"
name2 = other._name if other._name is not None else "config"
name = getComparisonName(name1, name2)
return compareConfigs(name, self, other, shortcut=shortcut,
rtol=rtol, atol=atol, output=output)
@classmethod
def __init_subclass__(cls, **kwargs):
"""Run initialization for every subclass.
Specifically registers the subclass with a YAML representer
and YAML constructor (if pyyaml is available)
"""
super().__init_subclass__(**kwargs)
if not yaml:
return
yaml.add_representer(cls, _yaml_config_representer)
@classmethod
def _fromPython(cls, config_py):
"""Instantiate a `Config`-subclass from serialized Python form.
Parameters
----------
config_py : `str`
A serialized form of the Config as created by
`Config.saveToStream`.
Returns
-------
config : `Config`
            Reconstructed `Config` instance.
"""
cls = _classFromPython(config_py)
return unreduceConfig(cls, config_py)
def _classFromPython(config_py):
"""Return the Config subclass required by this Config serialization.
Parameters
----------
config_py : `str`
A serialized form of the Config as created by
`Config.saveToStream`.
Returns
-------
cls : `type`
The `Config` subclass associated with this config.
"""
# standard serialization has the form:
# import config.class
# assert type(config)==config.class.Config, ...
# We want to parse these two lines so we can get the class itself
# Do a single regex to avoid large string copies when splitting a
# large config into separate lines.
matches = re.search(r"^import ([\w.]+)\nassert .*==(.*?),", config_py)
if not matches:
first_line, second_line, _ = config_py.split("\n", 2)
raise ValueError("First two lines did not match expected form. Got:\n"
f" - {first_line}\n"
f" - {second_line}")
module_name = matches.group(1)
module = importlib.import_module(module_name)
# Second line
full_name = matches.group(2)
# Remove the module name from the full name
if not full_name.startswith(module_name):
raise ValueError(f"Module name ({module_name}) inconsistent with full name ({full_name})")
# if module name is a.b.c and full name is a.b.c.d.E then
# we need to remove a.b.c. and iterate over the remainder
# The +1 is for the extra dot after a.b.c
remainder = full_name[len(module_name)+1:]
components = remainder.split(".")
pytype = module
for component in components:
pytype = getattr(pytype, component)
return pytype
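# Example of the serialized header _classFromPython expects (illustrative;
# the module and class names below are hypothetical):
#
#     import mypackage.configs
#     assert type(config)==mypackage.configs.DemoConfig, '...'
#
# The regex captures module_name="mypackage.configs" and
# full_name="mypackage.configs.DemoConfig"; the remaining "DemoConfig"
# component is then resolved with getattr() after importing the module.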
def unreduceConfig(cls, stream):
"""Create a `~lsst.pex.config.Config` from a stream.
Parameters
----------
cls : `lsst.pex.config.Config`-type
A `lsst.pex.config.Config` type (not an instance) that is instantiated
with configurations in the ``stream``.
stream : file-like object, `str`, or compiled string
Stream containing configuration override code.
Returns
-------
config : `lsst.pex.config.Config`
Config instance.
See also
--------
lsst.pex.config.Config.loadFromStream
"""
config = cls()
config.loadFromStream(stream)
return config
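if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module): define a
    # small config, round-trip it through saveToStream()/loadFromStream(),
    # and compare. The field names here are made up for the demo.
    class _DemoConfig(Config):
        threshold = Field("A float field.", float, default=0.5)
        label = Field("A string field.", str, default="demo")

    original = _DemoConfig()
    original.threshold = 0.75
    stream = io.StringIO()
    original.saveToStream(stream)
    restored = _DemoConfig()
    restored.loadFromStream(stream.getvalue())
    assert original.compare(restored)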
| [
"[email protected]"
] | |
b4a2b40674ce9d708d9bdbb27f367823166b87d9 | 92658cf5178f88e69ae243310425658a8df36c0d | /video_distributor/video_distributor/urls.py | b6f3128cc703215315dd3c1a7253763f8e878149 | [] | no_license | SeedofWind-demo2017/NSQ_Stress_Test | dc3df4126c0381c90f13631b2ba067530ab2e06a | 9cd34588299effcf29a32affb4bc6714db0e26ac | refs/heads/master | 2021-01-19T23:41:43.705136 | 2017-05-08T02:15:28 | 2017-05-08T02:15:28 | 89,020,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
# Examples:
# url(r'^blog/', include('blog.urls')),
url(r'^$', 'video.views.home', name='home'),
url(r'^stats/', 'video.views.stats', name='stats'),
url(r'^update_table/', 'video.views.home', name='update_table'),
url(r'^update_charts/', 'video.views.update_charts', name='update_charts'),
url(r'^update_stats/', 'video.views.update_stats', name='update_stats'),
url(r'^admin/', include(admin.site.urls)),
]
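# Note (illustrative, not part of the original file): the string-based view
# references above are Django <=1.9 style. On Django 2.x+ the equivalent
# would import the view callables and use path(), e.g.:
#
#     from django.urls import path
#     from video import views
#
#     urlpatterns = [
#         path('', views.home, name='home'),
#         path('stats/', views.stats, name='stats'),
#     ]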
| [
"[email protected]"
] | |
c311a00b768166835f67364b125edcaba5999485 | 107e62a03254c9ebe2e1830977a47861633b0d33 | /Meminfo.py | 7d05844c1597727d24d601a90214c8582fe144eb | [] | no_license | prasanna-ranganathan/mypython | bb798c0782cfb79a27b0730e924921b802da2a44 | 25fa93602e2465ec6ccb0c3ff30a2bbf90da96e4 | refs/heads/master | 2021-06-03T18:35:31.129399 | 2016-08-28T14:21:44 | 2016-08-28T14:22:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | #!/usr/bin/python
from __future__ import print_function
from collections import OrderedDict
def Meminfo():
meminfo = OrderedDict()
    with open('/proc/meminfo') as f:
        for line in f:
meminfo[line.split(':')[0].strip()] = line.split(':')[1].strip()
return meminfo
if __name__ == '__main__':
meminfo = Meminfo()
print('Total memory: {0}'.format(meminfo['MemTotal']))
print('Free memory: {0}'.format(meminfo['MemFree']))
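    # Illustrative extension (assumes the usual /proc/meminfo format, where
    # values are strings like "16342356 kB"): strip the unit to do arithmetic.
    total_kb = int(meminfo['MemTotal'].split()[0])
    free_kb = int(meminfo['MemFree'].split()[0])
    print('Used memory: {0} kB ({1:.1f}%)'.format(
        total_kb - free_kb, 100.0 * (total_kb - free_kb) / total_kb))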
| [
"[email protected]"
] | |
5da24ee2ef65a48b724f63e8744263375abd505d | d190750d6cb34e9d86ae96724cf4b56a2f57a74a | /tests/r/test_wage.py | 9c3f558601d3b21c414e2b6f21cf0f7e147870e1 | [
"Apache-2.0"
] | permissive | ROAD2018/observations | a119f61a48213d791de0620804adb8d21c2ad9fb | 2c8b1ac31025938cb17762e540f2f592e302d5de | refs/heads/master | 2021-09-24T04:28:02.725245 | 2018-09-16T23:06:30 | 2018-09-16T23:06:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.wage import wage
def test_wage():
"""Test module wage.py by downloading
  wage.csv and checking that the shape of the
  extracted data is 3000 rows by 12 columns.
"""
test_path = tempfile.mkdtemp()
x_train, metadata = wage(test_path)
try:
assert x_train.shape == (3000, 12)
except:
shutil.rmtree(test_path)
    raise
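if __name__ == '__main__':
  # Illustrative runner (not part of the original module): lets the check
  # run directly without a test framework.
  test_wage()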
| [
"[email protected]"
] | |
27e23cc933a22f4df81f5789dc254419e9789f18 | fdf1e1f4efc51bc024311d44a2fa4524f9b88bce | /girleffect/countries/migrations/0045_auto_20180105_1521.py | 32fccc37e247313d8e0818bba8fc583e52dfaa4d | [] | no_license | girleffect/girleffect.org | 8327ffd6bbd1103033c92fbd4cbe5461aa1c7f03 | 55731b1c024f207211a161fd6d3ca796beea7a61 | refs/heads/master | 2023-04-07T21:40:43.910892 | 2022-06-14T11:50:21 | 2022-06-14T11:50:21 | 112,452,828 | 1 | 2 | null | 2023-04-01T12:05:55 | 2017-11-29T09:13:18 | Python | UTF-8 | Python | false | false | 11,702 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-01-05 15:21
from __future__ import unicode_literals
from django.db import migrations
import girleffect.utils.models
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtaildocs.blocks
import wagtail.wagtailembeds.blocks
import wagtail.wagtailimages.blocks
import wagtail.wagtailsnippets.blocks
class Migration(migrations.Migration):
dependencies = [
('countries', '0044_auto_20171220_1706'),
]
operations = [
migrations.AlterField(
model_name='countrypage',
name='body',
field=wagtail.wagtailcore.fields.StreamField((('heading', wagtail.wagtailcore.blocks.CharBlock(classname='full title')), ('body_text', wagtail.wagtailcore.blocks.StructBlock((('body', wagtail.wagtailcore.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'link', 'ol', 'ul', 'hr'], label='Body Text')), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('body_heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))))), ('large_text', wagtail.wagtailcore.blocks.StructBlock((('body', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'link', 'document-link'], label='Large Text', max_length=350, required=False)), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('body_heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))))), ('extendable_body', wagtail.wagtailcore.blocks.StructBlock((('body_upper', wagtail.wagtailcore.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'link', 'ol', 'ul', 'hr'], label='Body Text')), ('extend_button_text', wagtail.wagtailcore.blocks.CharBlock(help_text='Customise text for the extend button', max_length=80, required=False)), ('collapse_button_text', wagtail.wagtailcore.blocks.CharBlock(help_text='Customise text for the collapse button', max_length=80, required=False)), ('body_lower', wagtail.wagtailcore.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'link', 'ol', 'ul', 'hr'], help_text='This body field is invisible until the user clicks the expand button', label='Extended body text')), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('body_heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))))), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('caption', wagtail.wagtailcore.blocks.CharBlock(required=False))))), ('quote', wagtail.wagtailcore.blocks.StructBlock((('quotes', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock((('title', wagtail.wagtailcore.blocks.CharBlock(max_length=80, required=False)), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('text', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'link', 'document-link'], max_length=255, required=True)), ('citation', wagtail.wagtailcore.blocks.CharBlock(max_length=80, required=False)), ('link_block', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)), ('drop_shadow_is_on', 
wagtail.wagtailcore.blocks.BooleanBlock(help_text='Show or hide drop shadow', label='Drop Shadow Toggle', required=False)), ('text_hex', wagtail.wagtailcore.blocks.CharBlock(label='Quote Text Hex Code', max_length=7, required=False)), ('quote_mark_hex', wagtail.wagtailcore.blocks.CharBlock(label='Quote Mark Hex Code', max_length=7, required=False)))), icon='openquote', template='blocks/quote_block.html')), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))))), ('video', wagtail.wagtailcore.blocks.StructBlock((('heading', wagtail.wagtailcore.blocks.CharBlock(max_length=30, required=False)), ('text', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'link', 'document-link'], max_length=255, required=False)), ('youtube_embed', wagtail.wagtailembeds.blocks.EmbedBlock(help_text="Your YouTube URL goes here. Only YouTube video URLs will be accepted. The custom 'play' button will be created for valid YouTube URLs.", label='YouTube Video URL')), ('link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))), label='Girl Effect YouTube Video')), ('carousel', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('label', wagtail.wagtailcore.blocks.CharBlock(help_text='Carousel item small label, for example Our Reach', max_length=30)), ('title', wagtail.wagtailcore.blocks.CharBlock(help_text='Carousel item large title', max_length=30)), ('text', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'link', 'document-link'], help_text='Carousel item text', max_length=75, required=False)), ('link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)))), icon='image', template='blocks/carousel_block.html')), ('media_text_overlay', wagtail.wagtailcore.blocks.StructBlock((('title', wagtail.wagtailcore.blocks.CharBlock(help_text='Appears above the module.', label='Title Text', max_length=25, required=False)), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('logo', wagtail.wagtailimages.blocks.ImageChooserBlock(label='Title Logo', required=False)), ('text', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 
'italic', 'ol', 'ul', 'link', 'document-link'], max_length=75, required=False)), ('link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))), label='Full Width Media with Text Overlay')), ('list_block', wagtail.wagtailcore.blocks.StructBlock((('list_block', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('title', wagtail.wagtailcore.blocks.CharBlock(max_length=80, required=False)), ('description', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'link', 'document-link'], icon='pilcrow', max_length=250, required=False)), ('link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)))))), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))))), ('link_row', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False)))), icon='link', template='blocks/inline_link_block.html')), ('statistic', wagtail.wagtailcore.blocks.StructBlock((('title', wagtail.wagtailcore.blocks.CharBlock(max_length=80, required=False)), ('statistics', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailsnippets.blocks.SnippetChooserBlock(girleffect.utils.models.Statistic))), ('link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', 
wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))), label='Statistic Block')), ('call_to_action', wagtail.wagtailsnippets.blocks.SnippetChooserBlock(girleffect.utils.models.CallToActionSnippet, template='blocks/call_to_action.html')))),
),
]
| [
"[email protected]"
] | |
7a128e797059ae648a4e283127d383f99c3ebbac | 360e1f69f4c0923c5d79bc82aa33c0fd4e80b71e | /LINKED_LISTALLMETHODS/insertionsortinsinglyll.py | 75fc859bccc7a3af38a5fc3e4f0072b5a2fa8761 | [] | no_license | Vijay1234-coder/data_structure_plmsolving | 04e52fe6c918313e13d39107a2ded8b47645bb12 | d449b266295d1ae55613cdcfd9b22ad9cee3dfbe | refs/heads/master | 2023-08-01T00:55:28.825972 | 2021-09-12T15:20:12 | 2021-09-12T15:20:12 | 387,782,783 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,085 | py | class Node:
def __init__(self,data):
self.data=data
self.next_node=None
class LinkedList:
def __init__(self):
self.head=None
def display(self):
if self.head==None:
print("Linked List is empty!")
else:
n = self.head
            while n != None:  # keep traversing until the last node has been printed
print(n.data,"--->",end="")
n = n.next_node
def insert_atEnd(self,data):
new_node = Node(data)
if self.head is None:
self.head = new_node
else:
n = self.head
while n.next_node!=None:
n = n.next_node
n.next_node = new_node
# def insertionSort(self,head):
# start = Node(0)
# start.next_node = head
# curr = head
# prev = start
# while curr != None:
# if curr.next_node != None and curr.next_node.data <curr.data:
# while prev.next_node != None and prev.next_node.data<curr.next_node.data:
# prev = prev.next_node
# temp = prev.next_node
# prev.next_node = curr.next_node
# curr.next_node = curr.next_node.next_node
# prev.next_node.next_node = temp
# prev = start
# else:
# curr = curr.next_node
# return start.next_node
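    # The method below is an insertion sort: each node is detached from the
    # input list and spliced into its correct position in a sorted list that
    # hangs off a dummy head node (O(n^2) time, O(1) extra space).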
    def sort(self, head):
        dummy = Node(0)
        curr = head
        while curr != None:
            prev = dummy
            nxt = dummy.next_node
            temp = curr.next_node
            while nxt != None:
                if nxt.data > curr.data:
                    break
                prev = nxt
                nxt = nxt.next_node
            curr.next_node = nxt
            prev.next_node = curr
            curr = temp
        return dummy.next_node
l = LinkedList()
l.insert_atEnd(100)
l.insert_atEnd(40)
l.insert_atEnd(50)
l.insert_atEnd(2)
l.display()
print(" ")
l.head = l.sort(l.head)
l.display()
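# Expected output:
# 100 --->40 --->50 --->2 --->
# 2 --->40 --->50 --->100 --->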
| [
"[email protected]"
] | |
4bc0d6e4b5b52679568e0bb0c742c95e4961f9f0 | bd4734d50501e145bc850426c8ed595d1be862fb | /6Kyu - Format a string of names like 'Bart, Lisa & Maggie'.py | c0188e848a5e41d60affcf353ae989beba6063b6 | [] | no_license | OrdinaryCoder00/CODE-WARS-PROBLEMS-SOLUTIONS | f61ff9e5268305519ffeed4964589289f4148cfd | 5711114ddcc6a5f22f143d431b2b2e4e4e8ac9fb | refs/heads/master | 2021-10-23T09:09:45.670850 | 2019-03-16T13:24:17 | 2019-03-16T13:24:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | def namelist(names):
    # setup
    length_of_list = len(names)
    i = 0
    str1 = ""
    f = length_of_list - 1  # index of the last name
    while i < length_of_list:
        # each element is a dict such as {'name': 'Bart'} in this kata;
        # index by key instead of popitem() so the input list is not mutated
        n = names[i]['name']
        if i == 0:
            str1 = n
        elif f == i:
            str1 = str1 + " & " + n
        else:
            str1 = str1 + ", " + n
        i = i + 1
    return str1
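# Example: namelist([{'name': 'Bart'}, {'name': 'Lisa'}, {'name': 'Maggie'}])
# returns 'Bart, Lisa & Maggie'; namelist([]) returns ''.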
| [
"[email protected]"
] | |
187ff3aa82fc1514d839eaa4238c98715236c02a | 08d17ddeb5713d8e7a4ee01054fcce78ed7f5191 | /tensorflow/python/autograph/pyct/static_analysis/liveness_test.py | f14b1a3e79de80d2218366e086d649fa5493be4f | [
"Apache-2.0"
] | permissive | Godsinred/tensorflow | 9cd67e1088ad8893265651ad4a5c45a6640b6c96 | 45100d5f55d7cba15bffcd91bf521ed37daf7bca | refs/heads/master | 2020-04-25T19:44:53.669366 | 2019-02-28T01:54:55 | 2019-02-28T02:59:15 | 173,030,955 | 2 | 0 | Apache-2.0 | 2019-02-28T03:03:41 | 2019-02-28T03:03:41 | null | UTF-8 | Python | false | false | 7,335 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for liveness module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import liveness
from tensorflow.python.platform import test
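# Background: a variable is "live" at a program point if its current value
# may still be read along some execution path before being overwritten.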
class LivenessTest(test.TestCase):
def _parse_and_analyze(self, test_fn):
node, source = parser.parse_entity(test_fn)
entity_info = transformer.EntityInfo(
source_code=source,
source_file=None,
namespace={},
arg_values=None,
arg_types=None,
owner_type=None)
node = qual_names.resolve(node)
ctx = transformer.Context(entity_info)
node = activity.resolve(node, ctx)
graphs = cfg.build(node)
liveness.resolve(node, ctx, graphs)
return node
def assertHasLiveOut(self, node, expected):
live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)
live_out_strs = set(str(v) for v in live_out)
if not expected:
expected = ()
if not isinstance(expected, tuple):
expected = (expected,)
self.assertSetEqual(live_out_strs, set(expected))
def assertHasLiveIn(self, node, expected):
live_in = anno.getanno(node, anno.Static.LIVE_VARS_IN)
live_in_strs = set(str(v) for v in live_in)
if not expected:
expected = ()
if not isinstance(expected, tuple):
expected = (expected,)
self.assertSetEqual(live_in_strs, set(expected))
def test_live_out_stacked_if(self):
def test_fn(x, a):
if a > 0:
x = 0
if a > 1:
x = 1
return x
node = self._parse_and_analyze(test_fn)
fn_body = node.body[0].body
self.assertHasLiveOut(fn_body[0], ('a', 'x'))
self.assertHasLiveOut(fn_body[1], 'x')
def test_live_out_stacked_if_else(self):
def test_fn(x, a):
if a > 0:
x = 0
if a > 1:
x = 1
else:
x = 2
return x
node = self._parse_and_analyze(test_fn)
fn_body = node.body[0].body
self.assertHasLiveOut(fn_body[0], 'a')
self.assertHasLiveOut(fn_body[1], 'x')
def test_live_out_for_basic(self):
def test_fn(x, a):
for i in range(a):
x += i
return x
node = self._parse_and_analyze(test_fn)
fn_body = node.body[0].body
self.assertHasLiveOut(fn_body[0], 'x')
def test_live_out_attributes(self):
def test_fn(x, a):
if a > 0:
x.y = 0
return x.y
node = self._parse_and_analyze(test_fn)
fn_body = node.body[0].body
self.assertHasLiveOut(fn_body[0], ('x.y', 'x'))
def test_live_out_nested_functions(self):
def test_fn(a, b):
if b:
a = []
def foo():
return a
foo()
node = self._parse_and_analyze(test_fn)
fn_body = node.body[0].body
self.assertHasLiveOut(fn_body[0], 'a')
def test_live_out_nested_functions_isolation(self):
def test_fn(b):
if b:
a = 0 # pylint:disable=unused-variable
def child():
max(a) # pylint:disable=used-before-assignment
a = 1
return a
child()
node = self._parse_and_analyze(test_fn)
fn_body = node.body[0].body
self.assertHasLiveOut(fn_body[0], 'max')
def test_live_out_deletion(self):
def test_fn(x, y, a):
for _ in a:
if x:
del y
else:
y = 0
node = self._parse_and_analyze(test_fn)
fn_body = node.body[0].body
self.assertHasLiveOut(fn_body[0], ())
def test_live_in_stacked_if(self):
def test_fn(x, a, b, c):
if a > 0:
x = b
if c > 1:
x = 0
return x
node = self._parse_and_analyze(test_fn)
fn_body = node.body[0].body
self.assertHasLiveIn(fn_body[0], ('a', 'b', 'c', 'x'))
self.assertHasLiveIn(fn_body[1], ('c', 'x'))
def test_live_in_stacked_if_else(self):
def test_fn(x, a, b, c, d):
if a > 1:
x = b
else:
x = c
if d > 0:
x = 0
return x
node = self._parse_and_analyze(test_fn)
fn_body = node.body[0].body
self.assertHasLiveIn(fn_body[0], ('a', 'b', 'c', 'd'))
self.assertHasLiveIn(fn_body[1], ('d', 'x'))
def test_live_in_for_basic(self):
def test_fn(x, y, a):
for i in a:
x = i
y += x
z = 0
return y, z
node = self._parse_and_analyze(test_fn)
fn_body = node.body[0].body
self.assertHasLiveIn(fn_body[0], ('a', 'y', 'z'))
def test_live_in_for_nested(self):
def test_fn(x, y, a):
for i in a:
for j in i:
x = i
y += x
z = j
return y, z
node = self._parse_and_analyze(test_fn)
fn_body = node.body[0].body
self.assertHasLiveIn(fn_body[0], ('a', 'y', 'z'))
def test_live_in_deletion(self):
def test_fn(x, y, a):
for _ in a:
if x:
del y
else:
y = 0
node = self._parse_and_analyze(test_fn)
fn_body = node.body[0].body
self.assertHasLiveIn(fn_body[0], ('a', 'x', 'y'))
def test_live_in_generator_comprehension(self):
def test_fn(y):
if all(x for x in y):
return
node = self._parse_and_analyze(test_fn)
fn_body = node.body[0].body
if six.PY2:
self.assertHasLiveIn(fn_body[0], ('all', 'x', 'y'))
else:
self.assertHasLiveIn(fn_body[0], ('all', 'y'))
def test_live_in_list_comprehension(self):
def test_fn(y):
if [x for x in y]:
return
node = self._parse_and_analyze(test_fn)
fn_body = node.body[0].body
if six.PY2:
self.assertHasLiveIn(fn_body[0], ('x', 'y'))
else:
self.assertHasLiveIn(fn_body[0], ('y',))
def test_live_in_set_comprehension(self):
def test_fn(y):
if {x for x in y}:
return
node = self._parse_and_analyze(test_fn)
fn_body = node.body[0].body
if six.PY2:
self.assertHasLiveIn(fn_body[0], ('x', 'y'))
else:
self.assertHasLiveIn(fn_body[0], ('y',))
def test_live_in_dict_comprehension(self):
def test_fn(y):
if {k: v for k, v in y}:
return
node = self._parse_and_analyze(test_fn)
fn_body = node.body[0].body
if six.PY2:
self.assertHasLiveIn(fn_body[0], ('k', 'v', 'y'))
else:
self.assertHasLiveIn(fn_body[0], ('y',))
if __name__ == '__main__':
test.main()
| [
"[email protected]"
] | |
078058580720b4b789bb263b2edeb244ccf671b5 | 80a3e654cf33e5c86410e207e3a28ed160adbd75 | /list_questions/listodd.py | e300ae82628a87b0fa9ece8ef36a79bbb870762e | [] | no_license | Rinkikumari19/python | f3f6e57ca39d7d6fe4e110264eb5685be2441f66 | 2b98f4bac313725c2716cc8a60440336d28acba4 | refs/heads/master | 2022-11-26T06:47:34.161251 | 2020-08-02T18:37:54 | 2020-08-02T18:37:54 | 284,518,849 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,412 | py | # elements = [23,14,56,12,19,9,15,25,31,42,43]
# i = 0
# odd = 0
# even = 0
# while i < len(elements):
# if elements[i]%2==0:
# even = even + 1
# else:
# odd = odd + 1
# i = i + 1
# print(even)
# print(odd)
# this version prints how many numbers are even and how many are odd
# elements = [23,14,56,12,19,9,15,25,31,42,43]
# i = 0
# odd_sum = 0
# even_sum = 0
# ave = 0
# ave1 = 0
# while i < len(elements):
# if elements[i] % 2 == 0:
# even_sum = even_sum + elements[i]
# ave = ave + 1
# else:
# odd_sum = odd_sum + elements[i]
# ave1 = ave1 + 1
# i = i + 1
# print(even_sum/ave)
# print(odd_sum/ave1)
# this version prints the average of the even numbers and of the odd numbers
elements = [23,14,56,12,19,9,15,25,31,42,43]
i = 0
sum_odd = 0
sum_even = 0
even_count = 0
odd_count = 0
while i < len(elements):
    if elements[i] % 2 == 0:
        sum_even = sum_even + elements[i]
        even_count = even_count + 1
    else:
        sum_odd = sum_odd + elements[i]
        odd_count = odd_count + 1
    i = i + 1
print("count of odd numbers:", odd_count)
print("count of even numbers:", even_count)
print("count of all numbers:", odd_count + even_count)
print("sum of odd numbers:", sum_odd)
print("sum of even numbers:", sum_even)
print("sum of all numbers:", sum_odd + sum_even)
print("average of odd numbers:", sum_odd / odd_count)
print("average of even numbers:", sum_even / even_count)
print("average of all numbers:", (sum_even + sum_odd) / i)
# this version prints everything: counts, sums, and averages
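# Expected output for the list above (checkable by hand):
# 7 odd numbers summing to 165 (average 165/7 ~ 23.571)
# 4 even numbers summing to 124 (average 31.0)
# 11 numbers in total summing to 289 (average 289/11 ~ 26.273)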
| [
"[email protected]"
] | |
0ec110b96e33c6f65e992debda2c5d9e67ecba6d | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/INTERVIEW-PREP-COMPLETE/notes-n-resources/Data-Structures-N-Algo/_DS-n-Algos/arithmetic_analysis/secant_method.py | 45bcb185fc3ec25a7773d736029375983b9a12c6 | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 577 | py | """
Implementing Secant method in Python
Author: dimgrichr
"""
from math import exp
def f(x: float) -> float:
"""
>>> f(5)
39.98652410600183
"""
return 8 * x - 2 * exp(-x)
def secant_method(lower_bound: float, upper_bound: float, repeats: int) -> float:
"""
>>> secant_method(1, 3, 2)
0.2139409276214589
"""
x0 = lower_bound
x1 = upper_bound
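    # secant update: x_{n+1} = x_n - f(x_n) * (x_n - x_{n-1}) / (f(x_n) - f(x_{n-1}))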
    for _ in range(repeats):
x0, x1 = x1, x1 - (f(x1) * (x1 - x0)) / (f(x1) - f(x0))
return x1
if __name__ == "__main__":
print(f"Example: {secant_method(1, 3, 2)}")
| [
"[email protected]"
] | |
366dd30f8a578d45f2b4fdcebc2d560b30e69968 | 1d8611c9d190239f05c3c4a7cee60e7e026e3be5 | /backend/manage.py | 7e3dd143c021ddf64216be593471db66c6fcca96 | [] | no_license | crowdbotics-apps/letsbuy-24656 | dad94f505703eab0a2699a52adaf626b45852ab8 | b350298ad4a4fd3798d0033f7659f31c156330ea | refs/heads/master | 2023-03-05T14:48:23.809435 | 2021-02-22T04:57:44 | 2021-02-22T04:57:44 | 341,083,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "letsbuy_24656.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
701220fce2f2348ed36924cb47be6003d1eb8267 | bb767bfc9db2b0ab7f24d3561b168a829c4eb0bc | /1st_Year/1st_Semestre/Fpro/Python/saved files/rm_letter_rev.py | 6caffafdd7696fa4adee6e2c1bdb8170060842c0 | [] | no_license | Hugomguima/FEUP | 7e6e0faf5408d698a34c3b5aed977b20aa76c067 | f26887e2b8e92e41ae5050515cd0b3cdf94d6476 | refs/heads/master | 2023-06-09T05:21:38.897094 | 2021-06-29T17:00:01 | 2021-06-29T17:00:01 | 272,567,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 23 13:31:14 2018
@author: Hugo
"""
def rm_letter_rev(l,astr):
result = ""
for i in astr:
if i != l:
result += i
result = result[::-1]
return result
| [
"[email protected]"
] | |
b47db36712832d90f04b309c0c0c0541b67d3816 | f211382033cbedd7304ad640f9bb869be61fff34 | /parkstay/migrations/0006_auto_20161114_0840.py | 6867960ef185c545daed8acd144be67f94903915 | [
"Apache-2.0"
] | permissive | dbca-wa/parkstay_bs | 0b2f918f645dfbd1470a40934aae5304f4333942 | ce1b6cd75fb3021863005097c5ce2c0e2dbf3061 | refs/heads/master | 2023-06-27T14:27:01.658254 | 2023-06-16T07:55:14 | 2023-06-16T07:55:14 | 234,250,398 | 2 | 3 | NOASSERTION | 2023-04-28T01:38:48 | 2020-01-16T06:24:41 | Vue | UTF-8 | Python | false | false | 418 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-11-14 00:40
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('parkstay', '0005_auto_20161111_1302'),
]
operations = [
migrations.RenameModel(
old_name='BookingRange',
new_name='CampgroundBookingRange',
),
]
| [
"[email protected]"
] | |
b7b0d94145519dfe8dc76e850870731e3dbc0b0a | 7f92c2fc131ca637d8b7c2a4dbba4b974884e786 | /lab6/plottingScripts/plotting3_b.py | 5a1f42e0d764db963bd81041c6ba111e9de86d38 | [] | no_license | byronwasti/CircuitsLabs | 2c5694f07a59adedddde361d0a85a690a83e096b | be1227c504ed1a2b81b6d670cbaa45d4b8be8e17 | refs/heads/master | 2020-05-23T11:15:14.853587 | 2017-09-03T18:53:50 | 2017-09-03T18:53:50 | 80,369,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import csv
def getData(FILENAME):
x = []
y = []
with open(FILENAME, 'r') as f:
reader = csv.reader(f)
for i, row in enumerate(reader):
            if i == 0: continue   # skip the CSV header row
            if i > 75: continue   # keep only the first 75 data rows
x.append(float(row[0]))
y.append(-float(row[1]))
return np.array(x), np.array(y)
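# Note: getData negates the second CSV column (y = -row[1]), presumably so the
# negative measured output current plots as a positive magnitude.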
def plot():
plt.xlabel("Input Current (A)")
plt.ylabel("Output Current (A)")
plt.title("nMOS Current Divider (b)")
plt.legend()
plt.show()
if __name__ == "__main__":
iin, iout = getData("../data/experiment3_current_divider_b_3.csv")
theoretical = [ i/2 for i in iin ]
plt.plot(iin, iout, '.', label="Experimental Data")
plt.plot(iin, theoretical, '-', label="Theoretical")
fit = np.polyfit(iin, iout, 1)
plt.text(0.006, 0.002, "Experimental Divider Ratio: %e\nTheoretical Divider Ratio: 0.5" % fit[0])
plot()
| [
"[email protected]"
] | |
f811948d04902553e78577fab2df24ffa33c479b | 33febf8b617ef66d7086765f1c0bf6523667a959 | /probpy/learn/conjugate/categorical.py | bb3b32c9b459ebab2743640c68665cdb00c0b78b | [] | no_license | JonasRSV/probpy | 857201c7f122461463b75d63e5c688e011615292 | 5203063db612b2b2bc0434a7f2a02c9d2e27ed6a | refs/heads/master | 2022-07-07T06:17:44.504570 | 2020-04-15T14:52:20 | 2020-04-15T14:52:20 | 245,820,195 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,361 | py | from probpy.core import RandomVariable
from typing import Tuple
from probpy.distributions import categorical, dirichlet
from .identification import _check_no_none_parameters, _check_only_none_is
import numpy as np
import numba  # imported but unused in this module (fast_loop is plain Python)
class CategoricalDirichlet_PPrior:
"""Conjugate prior for categorical likelihood with unknown probability"""
@staticmethod
def is_conjugate(likelihood: RandomVariable, prior: RandomVariable):
if prior.cls is dirichlet \
and _check_no_none_parameters(prior) \
and _check_only_none_is(likelihood, [categorical.probabilities]):
return True
return False
@staticmethod
def fast_loop(data: np.ndarray, categories: int):
result = np.zeros(categories)
for d in data:
result[d] += 1
return result
@staticmethod
def posterior(data: np.ndarray, _: RandomVariable, prior: RandomVariable) -> RandomVariable:
data = np.array(data[0])
if data.ndim == 0: data = data.reshape(-1)
prior_alpha = prior.parameters[dirichlet.alpha].value
if data.ndim == 1:
posterior_alpha = prior_alpha + CategoricalDirichlet_PPrior.fast_loop(data, prior_alpha.size)
else:
posterior_alpha = prior_alpha + data.sum(axis=0)
return dirichlet.med(alpha=posterior_alpha)
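# Example of the conjugate update rule: with prior alpha = [1, 1, 1] and
# observed class indices [0, 2, 2], the per-class counts are [1, 0, 2], so the
# posterior is Dirichlet(alpha = [2, 1, 3]).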
| [
"[email protected]"
] |