Dataset schema (column name, value type, and the observed range of string/sequence lengths or number of distinct classes; ⌀ marks a nullable column):

| Column | Type / range |
|---|---|
| blob_id | string, length 40 |
| directory_id | string, length 40 |
| path | string, length 5-283 |
| content_id | string, length 40 |
| detected_licenses | sequence, length 0-41 |
| license_type | string, 2 classes |
| repo_name | string, length 7-96 |
| snapshot_id | string, length 40 |
| revision_id | string, length 40 |
| branch_name | string, 58 classes |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64, 12.7k-662M, nullable (⌀) |
| star_events_count | int64, 0-35.5k |
| fork_events_count | int64, 0-20.6k |
| gha_license_id | string, 11 classes |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | string, 43 classes |
| src_encoding | string, 9 classes |
| language | string, 1 class |
| is_vendor | bool, 2 classes |
| is_generated | bool, 2 classes |
| length_bytes | int64, 7-5.88M |
| extension | string, 30 classes |
| content | string, length 7-5.88M |
| authors | sequence, length 1 |
| author | string, length 0-73 |

Each record below lists its metadata cells separated by `|`, followed by the full text of the source file in the `content` cell.
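The column statistics above appear to follow the Hugging Face dataset-viewer convention (observed string lengths, number of distinct classes). As a minimal sketch of how such a table could be read, assuming it is published as a Hugging Face dataset and with "user/dataset-name" standing in for the real dataset id:

```python
from datasets import load_dataset

# "user/dataset-name" is a placeholder; substitute the actual dataset identifier.
rows = load_dataset("user/dataset-name", split="train", streaming=True)

for row in rows:
    # Each record carries repository metadata plus the full file text.
    print(row["repo_name"], row["path"], row["length_bytes"])
    print(row["content"][:200])  # first 200 characters of the stored file
    break
```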
f37d0de9c1c026676a9b32a6a3c62e1c94bb05b7 | 26a099ac59c43c0ff04d4d9cde8fc4d5cdb53278 | /note/advanced/classStudy.py | 570bdc97a69bb882f64f695b72dd6ba028937e97 | [] | no_license | lilycoco/Python_study | 1aefb79e44573e89c1f2a39fa878535a76af2528 | 4b3a5313ef126769713e8f528621ffe8c60836ca | refs/heads/master | 2022-12-10T22:17:19.667546 | 2019-07-22T11:25:44 | 2019-07-22T11:25:44 | 192,544,100 | 0 | 0 | null | 2022-12-08T05:48:41 | 2019-06-18T13:19:03 | Python | UTF-8 | Python | false | false | 3,354 | py | # クラスの定義
class MyClass(object):
pass
# Create an instance from the class
me = MyClass()
print(me) # => <__main__.MyClass object at 0x100799b00>
#*************************************************************************
# A class that has no members of its own
class EmptyClass(object):
pass
# Attributes can be defined freely, so the instance works as a container bundling several variables
holder = EmptyClass()
holder.name = "Ryoko"
holder.age = 28
print(holder.name, holder.age) # => Ryoko 28
#*************************************************************************
class MyClass2(object):
    # A variable defined directly under the class is a class variable, not an instance variable
# some_field = "aaa"
    # Values can be passed in when the instance is created
def __init__(self, name, age):
        # Instance variables are read and assigned through self.xxx
self.name = name
self.age = age
    # Definition of an instance method
def introduce(self):
print("My name is %s, %d years old." % (self.name, self.age))
# Instantiation
me = MyClass2('Ryoko', 28)
me.introduce() # => My name is Ryoko, 28 years old.
# Values can also be assigned directly
me.name = "Taro"
me.age = 25
me.introduce() # => My name is Taro, 25 years old.
#*************************************************************************
class MyClass3(object):
    # Defined directly under the class, this becomes a class variable
primary_key = "id"
    # Class methods are defined with the @classmethod decorator
@classmethod
def show_primary_key(cls):
print("PrimayKey is %s" % cls.primary_key)
# Accessing class variables and class methods does not require instantiation
print(MyClass3.primary_key) # => id
MyClass3.show_primary_key() # => PrimayKey is id
#*************************************************************************
class MyClass4(object):
primary_key = "id"
@classmethod
def show1(cls):
print(cls.primary_key)
    # Static methods are defined with the @staticmethod decorator
@staticmethod
def show2():
print(MyClass4.primary_key)
MyClass4.show1() # => id
MyClass4.show2() # => id
class MySubClass(MyClass4):
primary_key = "subid"
MySubClass.show1() # => subid
MySubClass.show2() # => id
#*************************************************************************
# A class representing a person
class Person(object):
def __init__(self, name, age):
self.name = name
self.age = age
def sayHi(self):
print("Hi, my name is %s, %d years old." % (self.name, self.age))
# A class representing a worker
class Worker(Person):
def __init__(self, name, age, skills):
        # Use super() to call functions of the parent class
super().__init__(name, age)
self.skills = skills
def show_skills(self):
        # Attributes of the parent class can also be referenced
print("%s's skills are %s" % (self.name, self.skills))
# Instantiation
w = Worker("Yohei", 30, ["html","js","css"])
# Methods of the parent class can also be called
w.sayHi()
w.show_skills()
#*************************************************************************
| [
"[email protected]"
] | |
6e612fc27d7649f13a714b1b7fa40c43d0d5b896 | 73c05ee0cbc54dd77177b964f3a72867138a1f0f | /interview/CyC2018_Interview-Notebook/剑指offer/51.py | 408a4c13dfda11d4265b29e3406ad5325b656c49 | [] | no_license | tb1over/datastruct_and_algorithms | 8be573953ca1cdcc2c768a7d9d93afa94cb417ae | 2b1c69f28ede16c5b8f2233db359fa4adeaf5021 | refs/heads/master | 2020-04-16T12:32:43.367617 | 2018-11-18T06:52:08 | 2018-11-18T06:52:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,475 | py | # -*- coding: utf-8 -*-
"""题目描述
在数组中的两个数字,如果前面一个数字大于后面的数字,则这两个数字组成一个逆序对。输入一个数组,求出这个数组中的逆序对的总数P。并将P对1000000007取模的结果输出。 即输出P%1000000007
"""
class Solution:
def __init__(self):
self.P = 0
self.tmp = []
def merge(self, data, l, m, h):
i, j, k = l, m + 1, l
while i <= m or j <= h:
if i>m:
self.tmp[k] = data[j]
j += 1
elif j > h:
self.tmp[k] = data[i]
i += 1
            elif data[i] <= data[j]:  # <= so that equal values are not counted as inverse pairs
self.tmp[k] = data[i]
i += 1
else:
self.tmp[k] = data[j]
j += 1
self.P += m-i+1 # data[i] > data[j]
k +=1
for x in range(l, h+1):
data[x] = self.tmp[x]
def merge_sort(self, data, low, high):
if low < high:
mid = low+ (high-low) // 2
self.merge_sort(data, low, mid)
self.merge_sort(data, mid+1, high)
self.merge(data, low, mid, high)
def InversePairs(self, data):
# write code here
self.tmp = [0]*len(data)
self.merge_sort(data, 0, len(data)-1)
return self.P%1000000007
if __name__ == '__main__':
s = Solution()
n = s.InversePairs([7, 5, 6, 4])
print(n)
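As a cross-check on the merge-sort counter, the sample input [7, 5, 6, 4] contains five inverse pairs: (7,5), (7,6), (7,4), (5,4) and (6,4). A brute-force O(n²) sketch (not part of the original solution) confirms the same count:

```python
def brute_force_inverse_pairs(data):
    # Quadratic reference implementation, only suitable for small inputs.
    return sum(
        1
        for i in range(len(data))
        for j in range(i + 1, len(data))
        if data[i] > data[j]
    )

assert brute_force_inverse_pairs([7, 5, 6, 4]) == 5
```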
| [
"[email protected]"
] | |
d863b10f7674734320f67fedb9f82fe932943d47 | 27d3ddb794d4c9c4521bf743ea02c399b2da908f | /create-deploy-instance.py | 7315da6f3f96f3f7fac87d5819515833e68949ab | [
"MIT"
] | permissive | alokshukla1978/HelloShiftLeft | e1b75c5ab17b465fb57e9f6d940fad218e816afc | 33e1433f46538149cc488701e3235cc2b3cd8473 | refs/heads/master | 2021-12-04T09:25:44.966195 | 2021-10-14T02:46:39 | 2021-10-14T02:46:39 | 151,323,552 | 0 | 6 | MIT | 2021-10-14T02:29:18 | 2018-10-02T21:07:16 | Java | UTF-8 | Python | false | false | 5,964 | py | #!/usr/bin/env python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using the Compute Engine API to create and delete instances.
Creates a new compute engine instance and uses it to apply a caption to
an image.
https://cloud.google.com/compute/docs/tutorials/python-guide
For more information, see the README.md under /compute.
"""
import argparse
import os
import time
import googleapiclient.discovery
from six.moves import input
# [START list_instances]
def list_instances(compute, project, zone):
result = compute.instances().list(project=project, zone=zone).execute()
return result['items'] if 'items' in result else None
# [END list_instances]
# [START create_instance]
def create_instance(compute, project, zone, name, bucket):
# Get the latest Debian Jessie image.
image_response = compute.images().getFromFamily(
project='ubuntu-os-cloud', family='ubuntu-1604-lts').execute()
source_disk_image = image_response['selfLink']
# Configure the machine
machine_type = "zones/%s/machineTypes/n1-standard-1" % zone
startup_script = open(
os.path.join(
os.path.dirname(__file__), 'startup-script.sh'), 'r').read()
image_url = "http://storage.googleapis.com/gce-demo-input/photo.jpg"
image_caption = "Ready for dessert?"
config = {
'name': name,
'machineType': machine_type,
# Specify the boot disk and the image to use as a source.
'disks': [
{
'boot': True,
'autoDelete': True,
'initializeParams': {
'sourceImage': source_disk_image,
}
}
],
# Specify a network interface with NAT to access the public
# internet.
'networkInterfaces': [{
'network': 'global/networks/default',
'accessConfigs': [
{'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}
]
}],
# Specify Properties
'tags': {
'items' : [
'http-server',
'https-server'
]
},
# Allow the instance to access cloud storage and logging.
'serviceAccounts': [{
'email': 'default',
'scopes': [
'https://www.googleapis.com/auth/devstorage.read_write',
'https://www.googleapis.com/auth/logging.write'
]
}],
# Metadata is readable from the instance and allows you to
# pass configuration from deployment scripts to instances.
'metadata': {
'items': [{
# Startup script is automatically executed by the
# instance upon startup.
'key': 'startup-script',
'value': startup_script
}, {
'key': 'url',
'value': image_url
}, {
'key': 'text',
'value': image_caption
}, {
'key': 'bucket',
'value': bucket
}]
}
}
return compute.instances().insert(
project=project,
zone=zone,
body=config).execute()
# [END create_instance]
# [START delete_instance]
def delete_instance(compute, project, zone, name):
return compute.instances().delete(
project=project,
zone=zone,
instance=name).execute()
# [END delete_instance]
# [START wait_for_operation]
def wait_for_operation(compute, project, zone, operation):
print('Waiting for operation to finish...')
while True:
result = compute.zoneOperations().get(
project=project,
zone=zone,
operation=operation).execute()
if result['status'] == 'DONE':
print("done.")
if 'error' in result:
raise Exception(result['error'])
return result
time.sleep(1)
# [END wait_for_operation]
# [START run]
def main(project, bucket, zone, instance_name, wait=True):
compute = googleapiclient.discovery.build('compute', 'v1')
print('Creating instance.')
operation = create_instance(compute, project, zone, instance_name, bucket)
wait_for_operation(compute, project, zone, operation['name'])
instances = list_instances(compute, project, zone)
print('Instances in project %s and zone %s:' % (project, zone))
for instance in instances:
print(' - ' + instance['name'])
print("""
Instance created.
It will take a minute or two for the instance to complete work.
Check this URL: http://storage.googleapis.com/{}/output.png
Once the image is uploaded press enter to delete the instance.
""".format(bucket))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('project_id', help='Your Google Cloud project ID.')
parser.add_argument(
'bucket_name', help='Your Google Cloud Storage bucket name.')
parser.add_argument(
'--zone',
default='us-central1-f',
help='Compute Engine zone to deploy to.')
parser.add_argument(
'--name', default='demo-instance', help='New instance name.')
args = parser.parse_args()
main(args.project_id, args.bucket_name, args.zone, args.name)
# [END run]
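For reference, a hypothetical invocation; the project id and bucket name below are placeholders, while the zone and instance name repeat the argparse defaults above:

```python
# From a shell:
#   python create-deploy-instance.py my-gcp-project my-output-bucket \
#       --zone us-central1-f --name demo-instance
# Or, equivalently, calling the entry point directly:
main('my-gcp-project', 'my-output-bucket', 'us-central1-f', 'demo-instance')
```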
| [
"[email protected]"
] | |
124b683dd0ebabc388ab990d5d1ed0d0b0288f5e | 052b4b7bd91de594776adb181ba7d7de3294e749 | /xanax/admin.py | 9fad29d745d2ddd0151f4b2750f94fb6b5542611 | [
"BSD-3-Clause"
] | permissive | rthtrmnsfbsd/django-xanax | cb5cff743e3306ab9507dfbc063edf9a5d8b74be | b2755efb35fdd007b29b08ec1e98d3c73bbacefb | refs/heads/master | 2021-05-28T02:07:05.398590 | 2013-04-26T06:49:35 | 2013-04-26T06:49:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,019 | py | # -*- coding: utf-8 -*-
import logging
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from functools import update_wrapper
from django.forms.formsets import all_valid
from django.contrib import admin
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.contrib.contenttypes.models import ContentType
from django.contrib.admin.util import unquote
from django.views.decorators.csrf import csrf_protect
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import Http404
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from django.utils.html import escape
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import force_unicode
from django.utils.crypto import get_random_string
from django.contrib.admin import widgets, helpers
from xanax.settings import GET_SETTING
LOGGER = logging.getLogger(__name__)
csrf_protect_m = method_decorator(csrf_protect)
def fine_setattr(preview_object, attr, value):
if object.__dict__.get(attr):
setattr(preview_object, attr, value)
else:
setattr(preview_object.__class__, attr, value)
def prepare_M2M_field(preview_object, field, m2m_related_list=None):
if m2m_related_list == None:
m2m_related_list = []
fine_setattr(preview_object, field, m2m_related_list)
def prepeare_object(preview_object, preview_token):
proxy_model = type(
str(preview_object.__class__.__name__)
+ 'Preview%s' % preview_token,
(preview_object.__class__, ),
{'__module__': __name__}
)
pk_attname = preview_object._meta.pk.attname
preview_object.__class__ = proxy_model
preview_object._meta.pk.attname = pk_attname
#preview_object.pk = 0
return preview_object
class PikleFile(object):
def __init__(self, stringIO_file):
self.file_string = ''.join([i for i in stringIO_file.chunks()])
self.field_name = getattr(stringIO_file, 'field_name', None)
self.name = getattr(stringIO_file, 'name', None)
self.content_type = getattr(stringIO_file, 'content_type', None)
self.size = getattr(stringIO_file, 'size', None)
self.charset = getattr(stringIO_file, 'charset', None)
def unpickle(self):
return InMemoryUploadedFile(
StringIO(self.file_string),
self.field_name,
self.name,
self.content_type,
self.size,
self.charset
)
def pickle_files(files):
result = {}
for key, value in files.items():
result.update({
key: PikleFile(value)
})
return result
def unpickle_files(files):
result = {}
if files:
for key, value in files.items():
result.update({
key: value.unpickle()
})
return result
# TODO: NAGOVNOKOZENO? (roughly: "is this hacked together too crudely?")
def get_inline_objects(formsets):
result = {}
for formset in formsets:
deleted_objects_id = []
for form in formset.initial_forms:
if formset.can_delete and formset._should_delete_form(form):
pk_name = formset._pk_field.name
raw_pk_value = form._raw_value(pk_name)
pk_value = form.fields[pk_name].clean(raw_pk_value)
deleted_objects_id.append(getattr(pk_value, 'pk', pk_value))
        # TODO: ZATYCHKA? (i.e. a temporary stub?)
formset._should_delete_form = lambda x: []
changed_objects = formset.save(commit=False)
changed_objects_id = [i.id for i in changed_objects]
inline_objects = [i for i in list(formset.get_queryset())
if i.id not in changed_objects_id] +\
[i for i in changed_objects
if i.id not in deleted_objects_id]
result.update({formset.model.__name__: inline_objects})
return result
class InlineList(list):
def all(self):
return self
def count(self):
return len(self)
def prepare_M2M_set(preview_object, inline_objects):
# TODO: check if trought setted
for attr_name in [i for i in dir(preview_object) if '_set' == i[-4:]]:
attr = getattr(preview_object, attr_name, None)
if attr.__class__.__name__ == 'RelatedManager':
for key in inline_objects.keys():
if key.lower() == attr_name[:-4]:
fine_setattr(
preview_object,
attr_name,
InlineList(inline_objects[key])
)
return preview_object
class XanaxAdmin(admin.ModelAdmin):
object_preview_template = None
def get_list_display(self, request):
result = super(XanaxAdmin, self).get_list_display(request)
if not 'preview_link' in result:
result += ('preview_link',)
return result
def preview_link(self, obj):
info = obj._meta.app_label, obj._meta.module_name
url = reverse('admin:%s_%s_preview' % info, args=(obj.id,))
return _(u'<a href="%s">preview</a>') % url
preview_link.allow_tags = True
preview_link.short_description = _(u'Preview')
def has_preview_permission(self, request, obj=None):
LOGGER.debug('has_preview_permission True')
opts = self.opts
#return request.user.has_perm(opts.app_label + '.' + opts.get_change_permission())
return True
def preview_context_handler(self, context):
''' Customise your preview context here.'''
return context
def get_urls(self):
from django.conf.urls import patterns, url
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
info = self.model._meta.app_label, self.model._meta.module_name
urlpatterns = super(XanaxAdmin, self).get_urls()
admin_preview_url = patterns(
'',
url(r'^(.+)/preview/$',
wrap(self.preview_view),
name='%s_%s_preview' % info),
)
return admin_preview_url + urlpatterns
#TODO: add security decorators
def add_view(self, request, form_url='', extra_context=None):
if "_popup" in request.REQUEST:
return super(XanaxAdmin, self)\
.add_view(request, form_url, extra_context)
if request.method == 'POST':
if request.session.get('admin_preview', False):
obj, inline_objects = self.get_add_view_object(request)
if obj:
preview_token = get_random_string(
GET_SETTING('XANAX_PREVIEW_TOKEN_LENGTH')
)
request.session['preview_POST_%s' % preview_token] = request.POST.copy()
request.session['preview_FILES_%s' % preview_token] = pickle_files(request.FILES)
request.session['admin_preview'] = False
return self.preview_view(
request,
None,
preview_token=preview_token,
preview_object=obj,
inline_objects=inline_objects
)
else:
preview_token = request.POST.get('preview_token')
preview_POST = request.session.get('preview_POST_%s' % preview_token)
preview_FILES = unpickle_files(request.session.get('preview_FILES_%s' % preview_token))
if preview_POST:
preview_POST.update(request.POST)
request.POST = preview_POST
request.FILES.update(preview_FILES)
del request.session['preview_POST_%s' % preview_token]
del request.session['preview_FILES_%s' % preview_token]
if request.POST.get('_back', None):
request.session['admin_preview'] = True
return self.add_preview_back(request,
form_url, extra_context)
del request.session['admin_preview']
else:
request.session['admin_preview'] = True
return super(XanaxAdmin, self).add_view(request, form_url, extra_context)
def get_add_view_object(self, request):
formsets = []
inline_objects = new_object = None
ModelForm = self.get_form(request)
form = ModelForm(request.POST, request.FILES)
inline_instances = self.get_inline_instances(request)
if form.is_valid():
new_object = prepeare_object(
self.save_form(request, form, change=False),
get_random_string(GET_SETTING('XANAX_PREVIEW_TOKEN_LENGTH'))
)
cleaned_data = form.cleaned_data
for f in new_object._meta.many_to_many:
if f.name in cleaned_data:
prepare_M2M_field(
new_object,
f.name,
m2m_related_list=cleaned_data[f.name]
)
else:
return None, None
prefixes = {}
for FormSet, inline in zip(self.get_formsets(request), inline_instances):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1 or not prefix:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset = FormSet(data=request.POST, files=request.FILES,
instance=new_object,
save_as_new="_saveasnew" in request.POST,
prefix=prefix, queryset=inline.queryset(request))
formsets.append(formset)
if all_valid(formsets):
inline_objects = get_inline_objects(formsets)
else:
return None, None
return new_object, inline_objects
def add_preview_back(self, request, form_url='', extra_context=None):
"The 'add' admin view for this model."
model = self.model
opts = model._meta
if not self.has_add_permission(request):
raise PermissionDenied
ModelForm = self.get_form(request)
formsets = []
inline_instances = self.get_inline_instances(request)
form = ModelForm(request.POST, request.FILES)
if form.is_valid():
new_object = self.save_form(request, form, change=False)
else:
new_object = self.model()
prefixes = {}
for FormSet, inline in zip(self.get_formsets(request), inline_instances):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1 or not prefix:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset = FormSet(data=request.POST, files=request.FILES,
instance=new_object,
save_as_new="_saveasnew" in request.POST,
prefix=prefix, queryset=inline.queryset(request))
formsets.append(formset)
adminForm = helpers.AdminForm(form, list(self.get_fieldsets(request)),
self.get_prepopulated_fields(request),
self.get_readonly_fields(request),
model_admin=self)
media = self.media + adminForm.media
inline_admin_formsets = []
for inline, formset in zip(inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request))
readonly = list(inline.get_readonly_fields(request))
prepopulated = dict(inline.get_prepopulated_fields(request))
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,
fieldsets, prepopulated, readonly, model_admin=self)
inline_admin_formsets.append(inline_admin_formset)
media = media + inline_admin_formset.media
context = {
'title': _('Add %s') % force_unicode(opts.verbose_name),
'adminform': adminForm,
'is_popup': "_popup" in request.REQUEST,
'show_delete': False,
'media': media,
'inline_admin_formsets': inline_admin_formsets,
'errors': helpers.AdminErrorList(form, formsets),
'app_label': opts.app_label,
}
context.update(extra_context or {})
return self.render_change_form(request, context, form_url=form_url, add=True)
# TODO: add security decorators
def change_view(self, request, object_id, form_url='', extra_context=None):
if "_popup" in request.REQUEST:
return super(XanaxAdmin, self).change_view(request, object_id,
form_url, extra_context)
if request.method == 'POST':
if request.session.get('admin_preview', False):
obj, inline_objects = self.get_change_view_object(request, object_id)
if obj:
preview_token = get_random_string(
GET_SETTING('XANAX_PREVIEW_TOKEN_LENGTH')
)
request.session['preview_POST_%s' % preview_token] = request.POST.copy()
request.session['preview_FILES_%s' % preview_token] = pickle_files(request.FILES)
request.session['admin_preview'] = False
return self.preview_view(
request,
None,
preview_token=preview_token,
preview_object=obj,
inline_objects=inline_objects
)
else:
preview_token = request.POST.get('preview_token')
preview_POST = request.session.get('preview_POST_%s' % preview_token)
preview_FILES = unpickle_files(request.session.get('preview_FILES_%s' % preview_token))
if preview_POST:
preview_POST.update(request.POST)
request.POST = preview_POST
request.FILES.update(preview_FILES)
del request.session['preview_POST_%s' % preview_token]
del request.session['preview_FILES_%s' % preview_token]
if request.POST.get('_back', None):
request.session['admin_preview'] = True
return self.change_preview_back(request, object_id,
form_url, extra_context)
del request.session['admin_preview']
else:
request.session['admin_preview'] = True
return super(XanaxAdmin, self).change_view(request, object_id,
form_url, extra_context)
def get_change_view_object(self, request, object_id=None):
model = self.model
opts = model._meta
obj = self.get_object(request, unquote(object_id))
inline_objects = new_object = None
if not self.has_change_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.')
% {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
#FIXME is it possible to use _saveasnew?
if request.method == 'POST' and "_saveasnew" in request.POST:
return self.add_view(request, form_url=reverse('admin:%s_%s_add' %
(opts.app_label, opts.module_name),
current_app=self.admin_site.name))
ModelForm = self.get_form(request, obj)
formsets = []
inline_instances = self.get_inline_instances(request)
form = ModelForm(request.POST, request.FILES, instance=obj)
if form.is_valid():
new_object = prepeare_object(
self.save_form(request, form, change=False),
get_random_string(GET_SETTING('XANAX_PREVIEW_TOKEN_LENGTH'))
)
cleaned_data = form.cleaned_data
for f in new_object._meta.many_to_many:
if f.name in cleaned_data:
prepare_M2M_field(
new_object,
f.name,
m2m_related_list=cleaned_data[f.name]
)
else:
return None, None
prefixes = {}
for FormSet, inline in zip(self.get_formsets(request, new_object), inline_instances):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1 or not prefix:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset = FormSet(request.POST, request.FILES,
instance=new_object, prefix=prefix,
queryset=inline.queryset(request))
formsets.append(formset)
if all_valid(formsets):
inline_objects = get_inline_objects(formsets)
else:
return None, None
new_object = prepare_M2M_set(new_object, inline_objects)
return new_object, inline_objects
@csrf_protect_m
def change_preview_back(self, request, object_id, form_url='', extra_context=None):
"The 'change' admin view for this model."
model = self.model
opts = model._meta
obj = self.get_object(request, unquote(object_id))
if not self.has_change_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.')
% {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
if request.method == 'POST' and "_saveasnew" in request.POST:
return self.add_view(request, form_url=reverse('admin:%s_%s_add' %
(opts.app_label, opts.module_name),
current_app=self.admin_site.name))
ModelForm = self.get_form(request, obj)
formsets = []
inline_instances = self.get_inline_instances(request)
form = ModelForm(request.POST, request.FILES, instance=obj)
prefixes = {}
for FormSet, inline in zip(self.get_formsets(request, obj), inline_instances):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1 or not prefix:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset = FormSet(request.POST, request.FILES,
instance=obj, prefix=prefix,
queryset=inline.queryset(request))
formsets.append(formset)
adminForm = helpers.AdminForm(form, self.get_fieldsets(request, obj),
self.get_prepopulated_fields(request, obj),
self.get_readonly_fields(request, obj),
model_admin=self)
media = self.media + adminForm.media
inline_admin_formsets = []
for inline, formset in zip(inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request, obj))
readonly = list(inline.get_readonly_fields(request, obj))
prepopulated = dict(inline.get_prepopulated_fields(request, obj))
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,
fieldsets, prepopulated, readonly, model_admin=self)
inline_admin_formsets.append(inline_admin_formset)
media = media + inline_admin_formset.media
context = {
'title': _('Change %s') % force_unicode(opts.verbose_name),
'adminform': adminForm,
'object_id': object_id,
'original': obj,
'is_popup': "_popup" in request.REQUEST,
'media': media,
'inline_admin_formsets': inline_admin_formsets,
'errors': helpers.AdminErrorList(form, formsets),
'app_label': opts.app_label,
}
context.update(extra_context or {})
return self.render_change_form(request, context, change=True, obj=obj, form_url=form_url)
# TODO: add security decorators
# TODO: add preview form and submit row
# TODO: add preview content
def preview_view(self, request, object_id=None,
extra_context=None, preview_token=None,
preview_object=None, inline_objects=None):
model = self.model
opts = model._meta
if request.method == 'GET':
preview_object = self.get_object(request, unquote(object_id))
#TODO: inline_objects check
if preview_object is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
        #TODO: MINIMIZE GOVNOKOD (clean up this messy code)
context = {
'inline_objects': inline_objects,
'is_post': bool(request.method == 'POST'),
'action_list': [],
'module_name': capfirst(force_unicode(opts.verbose_name_plural)),
'object': preview_object,
'app_label': opts.app_label,
'opts': opts,
'title': _('Change %s') % force_unicode(opts.verbose_name),
'adminform': '',
'object_id': object_id,
'original': preview_object,
'is_popup': "_popup" in request.REQUEST,
'media': '',
'inline_admin_formsets': '',
'errors':[],
'add': False,
'change': True,
'has_add_permission': self.has_add_permission(request),
'has_change_permission': self.has_change_permission(request, preview_object),
'has_delete_permission': self.has_delete_permission(request, preview_object),
'has_file_field': True, # FIXME - this should check if form or formsets have a FileField,
'has_absolute_url': hasattr(self.model, 'get_absolute_url'),
'content_type_id': ContentType.objects.get_for_model(self.model).id,
'save_as': self.save_as,
'save_on_top': self.save_on_top,
'preview_token': preview_token,
'is_admin_preview': True,
'object_publish': False,
}
#TODO remove jQuery
context = self.preview_context_handler(context)
return TemplateResponse(request, self.object_preview_template or [
"%s/%s_object_preview.html" % (opts.app_label, opts.object_name.lower()),
"admin/%s/%s/object_preview.html" % (opts.app_label, opts.object_name.lower()),
"admin/%s/object_preview.html" % opts.app_label,
"admin/object_preview.html"
], context, current_app=self.admin_site.name)
| [
"[email protected]"
] | |
11b918d657294bca29a6ed6f976674caef840634 | dabace16dbdfffbd0f68c77c3d2489c4d7dc279c | /models/stock_transfer.py | d972331a048409956958344468170e6234505338 | [] | no_license | senthilnathang/stock_transfer | 632ec064e4d0b97f8b1a36e49517dd4ba124576a | f2f73ac70a2cc19f10853b7039383ca404686072 | refs/heads/master | 2020-04-27T17:13:45.739102 | 2019-03-12T11:25:15 | 2019-03-12T11:25:15 | 174,509,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,710 | py | # -*- coding: utf-8 -*-
from datetime import datetime
from openerp import api, fields, models, _, SUPERUSER_ID
from openerp.osv import osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
from openerp.tools import float_compare, float_is_zero
from openerp.tools.translate import _
from openerp import tools, SUPERUSER_ID
from openerp.exceptions import UserError, AccessError
import openerp.addons.decimal_precision as dp
class StockPicking(models.Model):
_inherit = "stock.picking"
stock_transfer_done = fields.Boolean(string='Transfer Done', default=False)
class StockTransfer(models.Model):
"""
Stock Transfer
"""
_name = 'stock.transfer'
_description = 'Stock Transfer'
_order = 'name desc, date'
name = fields.Char('Name', required=True, copy=False, readonly=True, default="New")
company_id = fields.Many2one('res.company', string="Company", ondelete="restrict", default=lambda self: self.env.user.company_id, required=True)
operating_unit_id = fields.Many2one('operating.unit', string="Operating Unit", ondelete="restrict")
location_id = fields.Many2one('stock.location', 'Stock Location', ondelete="restrict", required=True, readonly=True, states={'draft': [('readonly', False)]}, help="Location where the system will look for components.")
    date = fields.Date(string="Date", default=fields.Datetime.now, copy=False)
note = fields.Text(string="Description")
state = fields.Selection([('draft', 'New'), ('partially_available', 'Partially Available'),('available', 'Ready to Produce'),
('done', 'Done'), ('cancel', 'Cancelled')],
string="Status", default="draft")
stock_transfer_done = fields.Boolean(string="Invoice Btn Control", default=False)
input_product_lines = fields.One2many('stock.transfer.product.line', 'input_transfer_id', 'Input Products', copy=True)
output_product_lines = fields.One2many('stock.transfer.product.line', 'output_transfer_id', 'Output Products', copy=True)
input_move_lines = fields.One2many('stock.move', 'input_transfer_id', 'Input Products', copy=False)
output_move_lines = fields.One2many('stock.move', 'output_transfer_id', 'Output Products', copy=False)
# ~ @api.multi
# ~ @api.constrains('operating_unit_id','location_id')
# ~ def _check_location_org(self):
# ~ for rec in self:
# ~ if rec.operating_unit_id and rec.location_id:
# ~ if rec.location_id.company_id != rec.operating_unit_id:
# ~ print (rec.location_id.company_id,rec.operating_unit_id)
# ~ raise UserError(_('Organization is Mismatch with location Organization'))
# ~ @api.multi
# ~ @api.constrains('company_id','location_id')
# ~ def _check_location_company(self):
# ~ for rec in self:
# ~ if rec.company_id and rec.location_id:
# ~ if rec.location_id.company_id != rec.company_id:
# ~ print (rec.location_id.company_id,rec.operating_unit_id,rec.company_id,)
# ~ raise UserError(_('Company is Mismatch with location Company'))
@api.multi
@api.onchange('operating_unit_id')
def onchange_operating_unit_id(self):
if self.operating_unit_id:
loc_ids = self.env['stock.location'].search([('company_id','=',self.operating_unit_id.company_id.id),('usage','=','internal')])
if not loc_ids:
raise UserError(_('Please map at least one location to selected organization\nLocation and Organization mapping is missing'))
self.location_id = min(loc_ids) and min(loc_ids).id or False
def _prepare_input_line(self, line):
data = {}
if line:
data['product_id'] = line.product_id and line.product_id.id or False
data['product_qty'] = line.product_qty
data['product_uom'] = line.product_uom and line.product_uom.id or False
data['price_unit'] = line.price_unit
data['name'] = line.name or '/'
data['currency_id'] = line.company_id.currency_id.id or False
data['picking_id'] = line.picking_id and line.picking_id.id or False
return data
@api.model
def create(self, vals):
if vals.get('name', 'New') == 'New':
vals['name'] = self.env['ir.sequence'].next_by_code('stock.transfer') or 'New'
return super(StockTransfer, self).create(vals)
@api.multi
def get_property_stock_inventory(self):
loc = False
locs = self.env['stock.location'].search([('usage','=','inventory'),('scrap_location','=',False)], order="id desc", limit=1)
if locs:
loc = locs.id
return loc
@api.multi
def prepare_move_vals(self, line):
input_transfer_id = output_transfer_id = location_id = location_dest_id = picking_type_id = False
loc_id = line.product_id.property_stock_inventory and line.product_id.property_stock_inventory.id or False
if not loc_id:
loc_id = self.get_property_stock_inventory()
if line.input_transfer_id:
input_transfer_id = self.id
location_id = self.location_id and self.location_id.id or False
location_dest_id = loc_id
elif line.output_transfer_id:
output_transfer_id = self.id
location_dest_id = self.location_id and self.location_id.id or False
location_id = loc_id
rec_date = str(self.date)+" "+str(datetime.now())[11:]
move_vals = {
'product_id' : line.product_id.id,
'product_uom_qty' : line.product_qty or 0,
'product_uom' : line.product_uom and line.product_uom.id or False,
'location_id' : location_id,
'location_dest_id' : location_dest_id,
'name' : line.product_id.name,
'picking_id' : False,
'picking_type_id' : picking_type_id,
'company_id': self.company_id and self.company_id.id or False,
'operating_unit_id': self.operating_unit_id and self.operating_unit_id.id or False,
'price_unit': line.price_unit,
'input_transfer_id': input_transfer_id,
'output_transfer_id': output_transfer_id,
'date': rec_date,
'date_expected': rec_date,
'origin': self.name,
}
return move_vals
@api.multi
def check_validations(self):
rec = self
consume_total_value = produce_total_value = consume_total_qty = produce_total_qty = 0
for inline in rec.input_product_lines:
consume_total_value += inline.price_subtotal
consume_total_qty += inline.product_qty
for outline in rec.output_product_lines:
produce_total_value += outline.price_subtotal
produce_total_qty += outline.product_qty
if not consume_total_value or not produce_total_value:
raise UserError(_('The Total value should not 0 (Zero)'))
@api.multi
def action_confirm(self):
for rec in self:
for ln in rec.input_move_lines+rec.output_move_lines:
ln.sudo().unlink()
if not rec.input_product_lines or not rec.output_product_lines:
raise UserError(_('Some Consumable or Producible product lines are mandatory'))
if rec.state != 'draft':
raise UserError(_('You can confirm only New State Records'))
rec.check_validations()
for input_line in rec.input_product_lines+rec.output_product_lines:
move_vals = rec.prepare_move_vals(input_line)
move_id = self.env['stock.move'].create(move_vals)
move_id._action_confirm()
move_id._action_assign()
partial_available_states = []
available_states = []
for mvline in rec.input_move_lines:
if mvline.state == 'partially_available':
partial_available_states.append(mvline)
elif mvline.state == 'assigned':
available_states.append(mvline)
if any(partial_available_states) and len(partial_available_states) > 0:
rec.state = 'partially_available'
elif any(available_states) and len(available_states) == len(rec.input_product_lines):
rec.state = 'available'
rec.write({'state':'available'})
# ~ if rec.state == 'draft':
# ~ raise UserError(_('Stock Not available for selected products'))
return True
@api.multi
def action_done(self):
for rec in self:
if rec.state != 'available':
raise UserError(_('You can Transfer only Available State Records'))
for mv,inp in zip((rec.input_move_lines+rec.output_move_lines),(rec.input_product_lines+rec.output_product_lines)):
for move_line in mv.move_line_ids:
# ~ lot = self.env['stock.production.lot'].create(
# ~ {'name': move_line.lot_name, 'product_id': move_line.product_id.id}
# ~ )
move_line.write({'lot_id': inp.lot_id.id,'qty_done':mv.product_uom_qty})
mv.move_line_ids._action_done()
mv.write({'state': 'done', 'date': fields.Datetime.now()})
# ~ rec.input_move_lines._action_done()
# ~ rec.output_move_lines._action_done()
rec.state = 'done'
return True
@api.multi
def action_update_pickings_done(self):
for rec in self:
if rec.picking_ids:
for pick in rec.picking_ids:
pick.sudo().write({'stock_transfer_done':True})
rec.stock_transfer_done = True
@api.multi
def action_update_pickings_available(self):
for rec in self:
if rec.picking_ids:
for pick in rec.picking_ids:
pick.sudo().write({'stock_transfer_done':False})
rec.stock_transfer_done = False
@api.multi
def action_cancel(self):
for rec in self:
for line in rec.output_move_lines+rec.input_move_lines:
# ~ line.update_product_average_cost(cancel=True)
# ~ line.quant_cancel_from_move()
line.state = 'assigned'
line._action_cancel()
rec.state = 'cancel'
return True
@api.multi
def action_draft(self):
for rec in self:
if rec.state != 'cancel':
raise UserError(_('You can Set as New only Cancel State Records'))
for mvl in rec.input_move_lines+rec.output_move_lines:
mvl._action_set_draft()
rec.state = 'draft'
return True
@api.multi
def unlink(self):
for rec in self:
if rec.state != 'draft':
raise UserError(_('You can delete only New State Records'))
return super(StockTransfer, self).unlink()
class StockMove(models.Model):
_inherit = 'stock.move'
input_transfer_id = fields.Many2one('stock.transfer', string='Input Stock Transfer', ondelete="set null")
output_transfer_id = fields.Many2one('stock.transfer', string='Output Stock Transfer', ondelete="set null")
class StockTransferProductLine(models.Model):
_name = 'stock.transfer.product.line'
_description = 'Transfer Product Lines'
_order = 'id desc'
@api.depends('product_qty','price_unit')
def _compute_amount(self):
for line in self:
price_subtotal = line.product_qty * line.price_unit
line.update({
'price_subtotal': price_subtotal
})
input_transfer_id = fields.Many2one('stock.transfer', string="Input Stock Transfer", ondelete="cascade", copy=False)
output_transfer_id = fields.Many2one('stock.transfer', string="Output Stock Transfer", ondelete="cascade", copy=False)
product_id = fields.Many2one('product.product', string='Product', required=True, ondelete="restrict")
product_qty = fields.Float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'))
product_uom = fields.Many2one('product.uom', 'Unit of Measure')
qty_available = fields.Float(string="QTY Available", related="product_id.qty_available", readonly=True)
uom_id = fields.Many2one('product.uom', 'Unit of Measure')
    name = fields.Char(string='Description', required=True, copy=False, default="New")
price_unit = fields.Float('Rate')
currency_id = fields.Many2one('res.currency', string="Currency", default=lambda self: self.env.user.company_id.currency_id)
price_subtotal = fields.Monetary(compute='_compute_amount', string='Total', readonly=True)
lot_id = fields.Many2one('stock.production.lot', string="Lot")
picking_id = fields.Many2one('stock.picking', string="Picking", copy=False, ondelete="restrict")
@api.multi
@api.constrains('price_unit')
def _check_price_unit(self):
for line in self:
if line.price_unit < 0:
raise UserError(_('Negative Unit Price not allowed'))
@api.multi
@api.constrains('product_qty')
def _check_product_qty(self):
for line in self:
if line.product_qty < 0:
raise UserError(_('Negative Quantity not allowed'))
@api.multi
def get_validation_message(self):
if self.product_id.type == 'product' and self.product_id.qty_available < 0.1:
warning_mess = {
'title': _('Quantity Not Available!'),
'message' : _('The stock not available for the selected product .'),
}
return {'warning': warning_mess}
if self.product_id.type == 'product' and self.product_id.qty_available < self.product_qty:
warning_mess = {
'title': _('Not Enough Quantity!'),
'message' : _('You Entered more than available Quantity.'),
}
return {'warning': warning_mess}
return {}
@api.onchange('product_qty')
def _onchange_product_qty_check_availability(self):
if not self.product_id or not self.product_qty or not self.product_uom:
return {}
if self.input_transfer_id:
return self.get_validation_message()
@api.multi
@api.onchange('product_id')
def _onchange_product_id(self):
for line in self:
if line.product_id:
line.name = line.product_id.name
line.product_uom = line.product_id.uom_id and line.product_id.uom_id.id or False
line.uom_id = line.product_id.uom_id and line.product_id.uom_id.id or False
if line.input_transfer_id:
return self.get_validation_message()
@api.model
def create(self, vals):
prod = self.env['product.product'].browse(vals['product_id'])
vals['uom_id'] = prod.uom_id.id
vals['product_uom'] = vals['uom_id']
return super(StockTransferProductLine, self).create(vals)
@api.multi
def write(self, vals):
if 'uom_id' in vals:
vals['product_uom'] = vals['uom_id']
return super(StockTransferProductLine, self).write(vals)
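A rough sketch of the intended workflow from an Odoo shell; the location reference is illustrative, and product_in / product_out are assumed to be existing product.product records:

```python
# Minimal values only; defaults fill in name, company and dates.
transfer = env['stock.transfer'].create({
    'location_id': env.ref('stock.stock_location_stock').id,
    'input_product_lines': [(0, 0, {'product_id': product_in.id, 'product_qty': 5, 'price_unit': 10.0})],
    'output_product_lines': [(0, 0, {'product_id': product_out.id, 'product_qty': 1, 'price_unit': 50.0})],
})
transfer.action_confirm()   # creates and reserves the moves, state -> 'available'
transfer.action_done()      # posts the moves, state -> 'done'
```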
| [
"[email protected]"
] | |
e490dd36619f5338792d064841f183cc96a4394f | af1d9fcd4790d55013a47704eef1ce550c51a8a6 | /Distributed_vector_computation/vector_addition.py | d3dce63d25979ecd051fcb4415887f3ec7d86890 | [] | no_license | SaikiranGS/Distributed-programming | 9906fd7ec9b7c87eab548975b177494b7391992d | 3313bccf80fc7ca95055e52376ce0cf16c5c78d9 | refs/heads/master | 2023-02-21T09:46:28.454623 | 2021-01-25T17:44:57 | 2021-01-25T17:44:57 | 134,693,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,346 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 16 19:16:50 2018
@author: saikiran
"""
from mpi4py import MPI
import numpy as np
def divide_data(data,n):
split_data = np.split(data,n)
return split_data
comm = MPI.COMM_WORLD
rank = comm.rank
print("my rank is:", rank)
start_time = MPI.Wtime()
print("start time is:",start_time)
n=4
np.random.seed(0)
vector1 = np.random.rand(1,16)
vector1=np.ravel(vector1)
vector2 = np.random.rand(1,16)
vector2=np.ravel(vector2)
for i in range(n):
v1 = divide_data(vector1,n)
v2 = divide_data(vector2,n)
if rank==0:
if i==0:
data=np.add(v1[i],v2[i])
print("my vector sum is:",data)
end_time = MPI.Wtime()
print("end time is:",end_time)
print("total execution time is :",end_time-start_time)
destination_process= i+1
if destination_process==n:
print("Data has been sent to all processes succesfully")
else:
comm.send(v1[i+1],dest=destination_process, tag=8)
comm.send(v2[i+1],dest=destination_process, tag=9)
print("sending vector1 data {} data to process{}" .format(v1[i+1],destination_process))
print("sending vector2 data {} data to process{}" .format(v2[i+1],destination_process))
final_vector=comm.recv(source = i+1,tag=4)
print("received vector_sum data is",final_vector)
append_data = np.append(data,final_vector)
data = append_data
print("my final_vector is :",data)
if rank==i+1:
vector3 = comm.recv(source=0,tag=8)
print("received vector1 data is",vector3)
vector4 = comm.recv(source=0,tag=9)
print("received vector2 data is",vector4)
data2 = np.add(vector3,vector4)
destination_process = 0
comm.send(data2, dest=destination_process,tag=4)
print("sending vector sum data {} data to process{}" .format(data2,destination_process))
print("my vector sum is:", data2)
end_time = MPI.Wtime()
print("end time is:",end_time)
print("total execution time is :",end_time-start_time)
if rank==n-1:
print("vector sum using parallel processes is completed successfully")
| [
"[email protected]"
] | |
5a35f394e5ddaa9f9de976cd8e59f404f02a220e | 7bd5d09c716267de9f9bced383481d30611deee8 | /DC06.py | 85080fc62bdd422c3fec3b713683cbd65ff0e34c | [] | no_license | dinnguyen1495/daily-coding | 86642ef43b2d3835c2d01a4d270d3d0a799b3c57 | e51e7f54ef4ca175909f959c3b7803e5800d4ff6 | refs/heads/master | 2020-06-02T05:20:38.019247 | 2019-06-26T13:21:47 | 2019-06-26T13:21:47 | 191,051,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,863 | py | # Daily Coding 6:
# An XOR linked list is a more memory efficient doubly linked list.
# Instead of each node holding next and prev fields, it holds a field named both,
# which is an XOR of the next node and the previous node. Implement an XOR linked list;
# it has an add(element) which adds the element to the end, and a get(index) which returns the node at index.
# If using a language that has no pointerss (such as Python), you can assume you have access
# to get_pointer and dereference_pointer functions that converts between nodes and memory addresses.
class Node:
def __init__(self, value, next, prev):
self.value = value
if next and prev:
print("Wrong input for a linked list!\n")
return None
if not prev:
self.both = next
else:
self.both = prev
class LinkedList:
def __init__(self, node):
self.head = node
def add(self, element):
prev = None
current = self.head
if current is not None:
self.head = Node(element, None, None)
return
next = current.both
while prev != next:
prev = current
current = next
next = current.both
current.both = Node(element, None, current)
def get(self, index):
if self.head is None:
print("Can't get element at index: ", index)
return None
if (index == 0):
return self.head
prev = None
current = self.head
next = current.both
i = 0
while prev != next:
i += 1
prev = current
current = next
next = current.both
if i == index:
return current
print("Index out of bound: ", index)
return None
def list_of_values(self):
if self.head is None:
return []
prev = None
current = self.head
next = current.both
list = [current.value]
while prev != next:
prev = current
current = next
next = current.both
list.append(current.value)
return list
def main():
linked_list = LinkedList(None)
linked_list.add(1)
linked_list.add(3)
linked_list.add(9)
linked_list.add(2)
linked_list.add(5)
print("XOR Linked list at index 0:", linked_list.get(0).value)
print("XOR Linked list at index 1:", linked_list.get(1).value)
print("XOR Linked list at index 2:", linked_list.get(2).value)
print("XOR Linked list at index 3:", linked_list.get(3).value)
print("XOR Linked list at index 4:", linked_list.get(4).value)
#print("XOR Linked list at index 5:", linked_list.get(5).value)
print("List of values:", linked_list.list_of_values())
if __name__ == "__main__":
main() | [
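The LinkedList above keeps a single reference per node rather than the XOR of both neighbour addresses. A compact sketch of the actual XOR trick, emulating the get_pointer / dereference_pointer helpers mentioned in the prompt with id() and a lookup dict, could look like this:

```python
class XORNode:
    def __init__(self, value):
        self.value = value
        self.both = 0  # XOR of the "addresses" (ids) of prev and next


class XORLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None
        self._nodes = {}  # id -> node; keeps nodes alive and emulates dereference_pointer

    def add(self, value):
        node = XORNode(value)
        self._nodes[id(node)] = node
        if self.head is None:
            self.head = self.tail = node
        else:
            node.both = id(self.tail)      # prev XOR 0 (no next yet)
            self.tail.both ^= id(node)     # fold the new neighbour into the old tail
            self.tail = node

    def get(self, index):
        prev_id, current = 0, self.head
        for _ in range(index):
            next_id = current.both ^ prev_id
            prev_id, current = id(current), self._nodes[next_id]
        return current


xs = XORLinkedList()
for v in (1, 3, 9, 2, 5):
    xs.add(v)
print([xs.get(i).value for i in range(5)])  # [1, 3, 9, 2, 5]
```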
"[email protected]"
] | |
075e477b07c1b2ddf18f2af3b5d246d52a188490 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/1/usersdata/112/527/submittedfiles/formula.py | 8e216f832ce072ddd634a32c8b3c48556dc72c08 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | # -*- coding: utf-8 -*-
from __future__ import division
p=input('Digite o valor de P')
i=input('Digite o valor de i')
n=input('Digite o valor de n')
V=p*(((1+i)**n)-1)/i
print('Resultado: %.2f'%V) | [
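The value computed above is the standard future value of an ordinary annuity with payment P, periodic rate i and n periods:

$$V = P \cdot \frac{(1+i)^{n} - 1}{i}$$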
"[email protected]"
] | |
b2d05d5a04bf022bad5cca80c78c994edcf82561 | 6207c74ad38b15e0de1c58443df6052422b24dae | /Crawling/Crolling.py | bbdd17bc7fd4a09a4fc35863be1e2b8cd9199255 | [] | no_license | jeongeunjii/Database-B10 | 669c284b4a1714d97c3e66e51b949426c77a1718 | 440592555cbd795778ce35a96245aab1e459a752 | refs/heads/master | 2022-12-17T13:42:14.051526 | 2019-12-11T07:58:23 | 2019-12-11T07:58:23 | 220,905,676 | 1 | 1 | null | 2022-12-04T21:47:44 | 2019-11-11T05:11:48 | CSS | UTF-8 | Python | false | false | 2,956 | py | import requests
import copy
from bs4 import BeautifulSoup
date = '20191212'
url = 'http://www.cgv.co.kr/common/showtimes/iframeTheater.aspx?areacode=02&theatercode=0211&date='+date
respose = requests.get(url)
html = respose.text
soup = BeautifulSoup(html, 'html.parser')
movies = soup.find_all('div', class_='col-times')
def makecode(str):
if (str=='1'):
return 'A'
elif (str=='2'):
return 'B'
elif (str=='3'):
return 'C'
elif (str=='4'):
return 'D'
elif (str=='5'):
return 'E'
elif (str=='6'):
return 'F'
elif (str=='7'):
return 'G'
elif (str=='8'):
return 'H'
moviecode = []
for movie in movies:
array = []
code = movie.find('a').attrs['href']
idxequal = code.find('=')
code = code[idxequal+1:]
array.append(code)
title = movie.find('strong')
# print(title.get_text().lstrip(), end=' ')
array.append(title.get_text().lstrip())
infoes = movie.find_all('i')
i=0
for info in infoes:
info = info.get_text().lstrip()
info = info.replace(" ", "")
if (info.find('\n')!=-1):
index = info.find('\n')
info = info[:index-1] + info[index+1:]
if (info.find(',')!=-1):
index = info.find(',')
info = info[:index]
# print (info, end=' ')
if i!=2:
array.append(info)
i = i+1
tables = movie.find_all('div', class_="type-hall")
temp = copy.copy(array)
    # moviecode.append(temp[0]) # to add movie info, uncomment this and append the movie code to movieinfo
# print(moviecode)
for table in tables:
temp = copy.copy(array)
infoes = table.find('div', class_="info-hall")
infoes = infoes.find_all('li')
i=0
for info in infoes:
info = info.get_text().lstrip()
info = info.replace(" ", "")
if (info.find('\n')!=-1):
index = info.find('\n')
info = info[:index-1] + info[index+1:]
# print (info, end=' ')
if i==1:
temp.append(info)
i = i+1
infoes = table.find('div', class_="info-timetable")
infoes = infoes.find_all('li')
for info in infoes:
link = info.find('a')
# theatername = link.get('data-theatername')
# remainseat = link.get('data-seatremaincnt') + '석'
starttime = info.find('em')
starttime = starttime.get_text()
# print(theatername, end=' ')
# print(remainseat, end=' ')
# print(starttime, end=' ')
temp.append(starttime)
# print(temp)
for i in range(1,len(temp)-4) :
print("INSERT INTO 영화상영정보 VALUES (NULL, '0211','"+makecode(temp[4][0])+"',"+temp[0]+",'"+date+"','"+temp[4+i]+"','"+temp[3]+"',8000,6000);")
| [
"[email protected]"
] | |
977fb1b14c31d6daeab3d7cfa65ad85dfd2a7a32 | 08e6cfb5c60c350bad670d3d269395a3996f4483 | /Project-孟铃翔/PreProcessing/OJSpider2.py | b59384e49c6cfcaf965e2afe586f146e7ea9c3d6 | [] | no_license | Ice-Jeffrey/ProductionPractice | 92a9bc11aa09b1cb7ee3ce2f36107625f1e122f0 | ebb5c9a63154a53706c2e8afaee18d7a02043f0c | refs/heads/master | 2022-12-11T00:23:17.331349 | 2020-08-30T04:25:36 | 2020-08-30T04:25:36 | 276,920,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,212 | py | # 对buctoj中的重要信息进行爬取
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup # BeautifulSoup is needed to parse the pages
import requests
import json, time
# Helper function that fetches the HTML of a page
def getUrlText(url):
while True:
try:
html = requests.get(url)
html = html.text
break
except requests.exceptions.ConnectionError:
print('ConnectionError -- please wait 3 seconds')
time.sleep(3)
except requests.exceptions.ChunkedEncodingError:
print('ChunkedEncodingError -- please wait 3 seconds')
time.sleep(3)
except:
print('Unfortunitely -- An Unknow Error Happened, Please wait 3 seconds')
time.sleep(3)
return html
def SubmitCrawler(StuNo, Language, Year, Result=None):
    # Crawl the user's submission count and accepted count
url = 'http://39.106.31.26/status.php?'
nextUrl = ''
    # Filter by student number
if StuNo:
url += '&user_id=' + StuNo
    # Filter by programming language
if Language:
url += '&language=' + Language
    # Filter by judge result
if Result != None:
url += '&jresult=4'
    # Count all matching submission records
num = 0
    tempList = [] # holds the submission ids already counted
while True:
# print(url)
        # Map the programming language of the submissions
html = getUrlText(url)
        # Parse the page
soup = BeautifulSoup(html, features='html.parser')
        # CSS selection
table = soup.select('tbody')
        # Guard against pages with no result table
if len(table) > 0:
t = table[0]
else:
continue
for index, tr in enumerate(t.select('tr')):
result = tr.select('td')
time = result[8].text[0:4]
            # Because of the page layout, different pages may repeat the same submission id, so a record is counted only if its id has not been seen yet and it matches the filters
if time <= Year and result[0] not in tempList:
num += 1
tempList.append(result[0])
        # Get the URL of the next page
nextUrl = soup.select('a')
nextUrl = nextUrl[-1]['href']
nextUrl = 'http://39.106.31.26/' + nextUrl
if url == nextUrl:
break
else:
url = nextUrl
return num
# main function
def main():
DefaultData = pd.read_csv('F:\Codes\ProductionPractice\OutputData\ExcelData3.csv')
    # Select the rows that are needed
data = DefaultData[DefaultData['OJ提交数'] != 0]
    # Determine the lower bound of the submission count
temp = data['OJ提交数'].describe()
minimum = temp['25%']
    # Update the filter for the rows that need re-crawling
data = DefaultData[DefaultData['OJ提交数'] < minimum]
falseList = []
for index, item in enumerate(data.loc[:, ['学号', '年份', '科目名称']].values):
Submit, AC = 0, 0
print('爬取第', data.index[index], '条数据中......')
# print(item[2], type(item[2]))
        # Use submissions in all languages instead of a single specific language
Submit = SubmitCrawler(str(item[0]), '-1', str(item[1]))
AC = SubmitCrawler(str(item[0]), '-1', str(item[1]), 'AC')
DefaultData.loc[data.index[index], 'OJ提交数'] = Submit
DefaultData.loc[data.index[index], 'OJ正确数'] = AC
if Submit != 0:
DefaultData.loc[data.index[index], 'OJ准确率'] = AC / Submit
else:
# print(index)
falseList.append(index)
print('爬取结束')
# print(DefaultData)
# print(falseList)
DefaultData.to_csv(
'OutputData/ExcelData4.csv',
index=False,
columns = [
'学号', '考生姓名', '科目名称', '性别', '专业', '年份', '编程年份',
'国家级一等奖', '国家级二等奖', '国家级三等奖', '国家级优秀奖',
'省部级一等奖', '省部级二等奖', '省部级三等奖', '省部级优秀奖',
'OJ提交数', 'OJ正确数', 'OJ准确率',
'获奖类别'
]
)
# Default entry point
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
538a456c5574a279e96d187281fccc04d446d12f | 7c15f211adc9e9eb9f66ccdd570c9f38dff7ea8d | /packages/autorest.python/test/vanilla/legacy/Expected/AcceptanceTests/XmsErrorResponse/xmserrorresponse/aio/_xms_error_response_extensions.py | 7900fd57faf05a8991d659e93f9645a9c76ca103 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/autorest.python | cc4bfbf91ae11535731cad37cedd6b733edf1ebd | a00d7aaa3753ef05cb5a0d38c664a90869478d44 | refs/heads/main | 2023-09-03T06:58:44.246200 | 2023-08-31T20:11:51 | 2023-08-31T20:11:51 | 100,315,955 | 47 | 40 | MIT | 2023-09-14T21:00:21 | 2017-08-14T22:58:33 | Python | UTF-8 | Python | false | false | 3,294 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable
from azure.core import AsyncPipelineClient
from azure.core.rest import AsyncHttpResponse, HttpRequest
from .. import models as _models
from .._serialization import Deserializer, Serializer
from ._configuration import XMSErrorResponseExtensionsConfiguration
from .operations import PetOperations
class XMSErrorResponseExtensions: # pylint: disable=client-accepts-api-version-keyword
"""XMS Error Response Extensions.
:ivar pet: PetOperations operations
:vartype pet: xmserrorresponse.aio.operations.PetOperations
:param base_url: Service URL. Default value is "http://localhost:3000".
:type base_url: str
"""
def __init__( # pylint: disable=missing-client-constructor-parameter-credential
self, base_url: str = "http://localhost:3000", **kwargs: Any
) -> None:
self._config = XMSErrorResponseExtensionsConfiguration(**kwargs)
self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.pet = PetOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "XMSErrorResponseExtensions":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details: Any) -> None:
await self._client.__aexit__(*exc_details)
| [
"[email protected]"
] | |
0c2b5f0a6a48df85b4ad9b6c2e22eebc494d44fa | 003c0dcde0ccc544b9164eba41af08a696ae9316 | /pfg/wsgi.py | d1c05f3c758fefe19b32db152c9b4754a4205314 | [] | no_license | yugle7/pfg | 11b176b99e77d0d415e631e15f75f8f4512a383d | 8c49dc3efe188d2bfc290f05b16c06d5d3fd9fc3 | refs/heads/master | 2020-03-23T00:47:49.721371 | 2018-07-13T19:36:06 | 2018-07-13T19:36:06 | 140,886,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | """
WSGI config for pfg project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pfg.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
c1f93f4d0365b611e681984fda84478a04b50e99 | 6008fee3e9311e34659ca1d8a82caf4cb1a1a782 | /lorenzo/write.py | 3d337b74a6602f9d9c29c39499ec1aafe7fd7d28 | [] | no_license | ssh-impatici/reply-2021-online | cd20eb08afe99f861b49eb7289474e5d4ef12107 | 182417fea57cfd577d71a8572c0378f2ab0f082a | refs/heads/main | 2023-03-20T18:38:37.934135 | 2021-03-11T19:39:01 | 2021-03-11T19:39:01 | 346,288,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | def write(path, output):
with open(path, "w") as file:
file.write(str(len(output)) + '\n')
for antenna in output:
file.write(str(antenna.antenna_id) + ' ' + str(antenna.x) + ' ' + str(antenna.y) + '\n')
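# Illustrative usage (added for clarity; the real antenna objects come from the
# rest of this project and are only assumed to expose antenna_id, x and y):
if __name__ == "__main__":
    from collections import namedtuple
    Antenna = namedtuple("Antenna", ["antenna_id", "x", "y"])
    # writes the antenna count on the first line, then one "id x y" line per antenna
    write("antennas.sample.out", [Antenna(0, 1, 2), Antenna(1, 3, 4)])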
| [
"[email protected]"
] | |
99bba36d619be2ba8e8858671a4db086dd8ef12a | 4d49450b3c7a0daf35022012ec7906ca556dd9e8 | /cn/4 - interpolarea lagrange/metoda_newton_polinom.py | 45f45b360013cbbbb15685c66dcc248b23ef7ca2 | [] | no_license | rebecadiaconu/fmi | 3d3c7d977b8f61ee02a52b81c0cdc7ae0ccd31ae | 1ccee33441f6a1843bfe599eec6219b37c533c95 | refs/heads/master | 2023-04-14T08:39:11.996296 | 2021-04-22T08:49:55 | 2021-04-22T08:49:55 | 238,420,720 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,666 | py | import numpy as np
import matplotlib.pyplot as plt
def chebyshev(k):
x = np.cos(np.pi * ((N - k) / N))
return (left + right) / 2 + ((right - left) / 2) * x
def newton_pol():
coef = [y[0]]
def sum_before_k(k):
sum = coef[0]
for i in range(1, k):
sum += coef[i] * (np.prod([xs[k] - xs[j] for j in range(i)]))
return sum
for k in range(1, N + 1):
term = np.prod([xs[k] - xs[j] for j in range(k)])
c = (y[k] - sum_before_k(k)) / term
coef.append(c)
def get_value(x):
pol = coef[0]
for k in range(1, N + 1):
pol += coef[k] * (np.prod([x - xs[j] for j in range(k)]))
return pol
return get_value
f = lambda x: -2 * np.sin(3 * x) - 9 * np.cos(3 * x) + 1.07 * x
left = -np.pi
right = np.pi
N = 21
xs = np.array([chebyshev(k) for k in range(N + 1)])
y = f(xs)
x_grafic = np.linspace(left, right, 500)
y_grafic = f(x_grafic)
# Plot the function together with the computed approximation
plt.plot(x_grafic, y_grafic, linestyle='-', label='funcția f')
plt.scatter(xs, y, c='red')
pol = newton_pol()
y_approx = np.array([pol(x) for x in x_grafic])
plt.plot(x_grafic, y_approx, c='orange', linestyle='--', label=f'aproximare cu polinom de gradul {N}')
plt.legend()
plt.show()
# Plot the truncation error
errors = np.abs(y_approx - y_grafic)
max_error = np.max(errors)
print('Eroarea de trunchiere maxima este: ', max_error)
plt.plot(x_grafic, errors, linestyle='--', label=f'eroare pentru N={N}')
plt.hlines(1e-5, xmin=left, xmax=right, color='red')
plt.hlines(max_error, label='Eroarea maximă', xmin=left, xmax=right, color='orange')
plt.legend()
plt.show()
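# Illustrative check (added; not part of the original script): a Newton-form
# interpolant must reproduce the sampled values at the interpolation nodes,
# so the nodal residual printed below should be close to machine precision.
nodal_residual = max(abs(pol(xk) - yk) for xk, yk in zip(xs, y))
print('Maximum residual at the interpolation nodes:', nodal_residual)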
| [
"[email protected]"
] | |
d48bac0106ca936ec4eb19d102a03f04334de551 | 9cda2257468d0ef2f7706d4d07099a7b6d897f02 | /letecode/361-480/421-440/432.py | b9f9287e1319f6bb2ed97402466773fa1dc5ffb0 | [] | no_license | hshrimp/letecode_for_me | 4ba4031803687d7a309da9af4f003a328b48e53e | 6dc5b8968b6bef0186d3806e4aa35ee7b5d75ff2 | refs/heads/master | 2021-12-11T07:37:22.323713 | 2021-08-23T08:16:00 | 2021-08-23T08:16:00 | 204,437,378 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,170 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: wushaohong
@time: 2020/12/25 11:23 AM
"""
"""432. 全 O(1) 的数据结构
请你实现一个数据结构支持以下操作:
Inc(key) - 插入一个新的值为 1 的 key。或者使一个存在的 key 增加一,保证 key 不为空字符串。
Dec(key) - 如果这个 key 的值是 1,那么把他从数据结构中移除掉。否则使一个存在的 key 值减一。如果这个 key 不存在,这个函数不做任何事情。key 保证不为空字符串。
GetMaxKey() - 返回 key 中值最大的任意一个。如果没有元素存在,返回一个空字符串"" 。
GetMinKey() - 返回 key 中值最小的任意一个。如果没有元素存在,返回一个空字符串""。
挑战:
你能够以 O(1) 的时间复杂度实现所有操作吗?"""
from collections import defaultdict
class AllOne:
def __init__(self):
"""
Initialize your data structure here.
"""
self.d = defaultdict(int)
def inc(self, key: str) -> None:
"""
Inserts a new key <Key> with value 1. Or increments an existing key by 1.
"""
self.d[key] += 1
def dec(self, key: str) -> None:
"""
Decrements an existing key by 1. If Key's value is 1, remove it from the data structure.
"""
if key not in self.d:
return
self.d[key] -= 1
if not self.d[key]:
self.d.pop(key)
def getMaxKey(self) -> str:
"""
Returns one of the keys with maximal value.
"""
key = ''
m = -1
for k, v in self.d.items():
if v > m:
m = v
key = k
return key
def getMinKey(self) -> str:
"""
Returns one of the keys with Minimal value.
"""
key = ''
m = float('inf')
for k, v in self.d.items():
if v < m:
m = v
key = k
return key
# Your AllOne object will be instantiated and called as such:
# obj = AllOne()
# obj.inc(key)
# obj.dec(key)
# param_3 = obj.getMaxKey()
# param_4 = obj.getMinKey()
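# Illustrative demo (added; LeetCode drives the class itself, so this only runs
# when the file is executed directly):
if __name__ == '__main__':
    counter = AllOne()
    counter.inc('hello')
    counter.inc('hello')
    counter.inc('world')
    print(counter.getMaxKey())  # 'hello' (count 2)
    print(counter.getMinKey())  # 'world' (count 1)
    counter.dec('hello')
    counter.dec('hello')        # 'hello' reaches 0 and is removed
    print(counter.getMaxKey())  # 'world' is now the only key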
| [
"[email protected]"
] | |
8b1a96d0bf001748de966e5c045d1c1868f22f3d | bda53a5d99e36c37b6342b271274b7bcd9345a07 | /ML for identifying galaxy-type using classification/RandomForestClassifier.py | 0f4691ad200235c52aaf1158f76d4f7123bca076 | [] | no_license | jaiisrani/Data-driven-Astronomy | f795948253fae8582f1592912b60fcaf34e7670e | 1595871f1919929a513d9e16eda5f8dca163f53f | refs/heads/main | 2023-03-20T20:23:15.316439 | 2021-03-15T07:12:41 | 2021-03-15T07:12:41 | 325,173,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,463 | py | import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_predict
from sklearn.ensemble import RandomForestClassifier
from support_functions import generate_features_targets, plot_confusion_matrix, calculate_accuracy
# Note: this local definition overrides the generate_features_targets imported
# from support_functions above
def generate_features_targets(data):
# complete the function by calculating the concentrations
targets = data['class']
features = np.empty(shape=(len(data), 13))
features[:, 0] = data['u-g']
features[:, 1] = data['g-r']
features[:, 2] = data['r-i']
features[:, 3] = data['i-z']
features[:, 4] = data['ecc']
features[:, 5] = data['m4_u']
features[:, 6] = data['m4_g']
features[:, 7] = data['m4_r']
features[:, 8] = data['m4_i']
features[:, 9] = data['m4_z']
# fill the remaining 3 columns with concentrations in the u, r and z filters
# concentration in u filter
features[:, 10] = data["petroR50_u"]/data["petroR90_u"]
# concentration in r filter
features[:, 11] = data["petroR50_r"]/data["petroR90_r"]
# concentration in z filter
features[:, 12] = data["petroR50_z"]/data["petroR90_z"]
return features, targets
# complete this function to get predictions from a random forest classifier
def rf_predict_actual(data, n_estimators):
# generate the features and targets
features, targets = generate_features_targets(data)
# instantiate a random forest classifier using n estimators
rfc = RandomForestClassifier(n_estimators= n_estimators)
# get predictions using 10-fold cross validation with cross_val_predict
predictions = cross_val_predict(rfc, features, targets, cv=10)
# return the predictions and their actual classes
return predictions, targets
if __name__ == "__main__":
data = np.load('galaxy_catalogue.npy')
# get the predicted and actual classes
number_estimators = 50 # Number of trees
predicted, actual = rf_predict_actual(data, number_estimators)
# calculate the model score using your function
accuracy = calculate_accuracy(predicted, actual)
print("Accuracy score:", accuracy)
# calculate the models confusion matrix using sklearns confusion_matrix function
class_labels = list(set(actual))
model_cm = confusion_matrix(y_true=actual, y_pred=predicted, labels=class_labels)
# plot the confusion matrix using the provided functions.
plt.figure()
plot_confusion_matrix(model_cm, classes=class_labels, normalize=False)
plt.show()
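# For reference, a minimal stand-in for the calculate_accuracy helper imported from
# support_functions, assuming it simply returns the fraction of matching predictions
# (the real course-provided implementation may differ):
# def calculate_accuracy(predicted, actual):
#     return sum(predicted == actual) / len(actual)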
| [
"[email protected]"
] | |
47ece760b639cb445165e59afb0b7e178fec18c7 | b7c2d4c49fae0db5d50bfa856ff35e3683f05b6f | /node_modules/react-native/node_modules/ws/build/config.gypi | e8b9324b1aceb8f9f92cc01c013de10ddc31c471 | [
"MIT",
"BSD-3-Clause"
] | permissive | ee0pdt/GridIron | 450b9f49cdaa51d850b8a0c67866d5aed76f3643 | 3499d929898d475105e90a5ded06eaec36b589ec | refs/heads/master | 2020-04-06T06:40:45.308907 | 2015-04-20T08:16:49 | 2015-04-20T08:16:49 | 33,592,394 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,188 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 1,
"host_arch": "x64",
"node_install_npm": "false",
"node_prefix": "/usr/local/Cellar/node/0.10.29",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"python": "/usr/local/opt/python/bin/python2.7",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "true",
"nodedir": "/Users/peterthorne/.node-gyp/0.10.29",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"shell": "/bin/zsh",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"npat": "",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"spin": "true",
"cache_lock_retries": "10",
"cafile": "",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/peterthorne/.npm-init.js",
"userconfig": "/Users/peterthorne/.npmrc",
"node_version": "0.10.29",
"user": "",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"optional": "true",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"strict_ssl": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/peterthorne/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "npm/2.0.0-alpha-5 node/v0.10.29 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "0.0.0",
"umask": "18",
"git": "git",
"init_author_name": "",
"scope": "",
"onload_script": "",
"tmp": "/var/folders/lc/jmplcx6n1wvfsvqngw_hr9jm0000gn/T",
"unsafe_perm": "true",
"link": "",
"prefix": "/usr/local"
}
}
| [
"[email protected]"
] | |
1612e3b1e218e768a20d537399c11768067ecb85 | e95fc8c562c050f47ecb6fb2639ce3024271a06d | /easy/374.猜数字大小.py | e89e381ee759492436ff5c4bd2ed2a1901c02859 | [] | no_license | w940853815/my_leetcode | 3fb56745b95fbcb4086465ff42ea377c1d9fc764 | 6d39fa76c0def4f1d57840c40ffb360678caa96e | refs/heads/master | 2023-05-25T03:39:32.304242 | 2023-05-22T01:46:43 | 2023-05-22T01:46:43 | 179,017,338 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,481 | py | #
# @lc app=leetcode.cn id=374 lang=python3
#
# [374] Guess Number Higher or Lower
#
# https://leetcode-cn.com/problems/guess-number-higher-or-lower/description/
#
# algorithms
# Easy (44.49%)
# Likes: 78
# Dislikes: 0
# Total Accepted: 29.1K
# Total Submissions: 64.1K
# Testcase Example: '10\n6'
#
# The rules of the guessing game are as follows:
#
# In each round, the system randomly selects a number from 1 to n, and you have to guess which number was picked.
# If you guess wrong, the system tells you whether your guess is higher or lower than the picked number.
#
#
# You can call a predefined API guess(int num) to get the result of a guess; there are 3 possible return values (-1, 1 or 0):
#
# -1 : your guess is higher than the number the system picked
# 1 : your guess is lower than the number the system picked
# 0 : congratulations, you guessed it!
#
#
#
#
# Example:
#
# Input: n = 10, pick = 6
# Output: 6
# 输出: 6
#
#
# @lc code=start
# The guess API is already defined for you.
# @param num, your guess
# @return -1 if my number is lower, 1 if my number is higher, otherwise return 0
# def guess(num: int) -> int:
class Solution:
def guessNumber(self, n: int) -> int:
left = 1
right = n
while left <= right:
mid = (left+right)//2
flag = guess(mid)
if flag == 0:
return mid
if flag == -1:
right = mid-1
else:
left = mid+1
# @lc code=end
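# Illustrative local harness (added; on LeetCode the guess API is predefined, so
# this stays commented out and pick is a made-up value):
# pick = 6
# def guess(num: int) -> int:
#     return 0 if num == pick else (-1 if num > pick else 1)
# print(Solution().guessNumber(10))  # expected output: 6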
| [
"[email protected]"
] | |
1bc396752d5b197b1750cccc14d43be85471283e | 6d9e462cc6dbcfc1c7f0a5a79c89024c31e0dff0 | /app/__init__.py | 4f6d668ffeb849db0536da1950092c011ff6b57e | [] | no_license | GuillermoLB/Raspi | 4dc7f444c28ce38ed7916663b1635c5d7a4522b5 | db8f42d261f24fe94689a7c326665b671815e6c2 | refs/heads/master | 2022-12-05T22:08:02.773407 | 2020-08-27T10:01:11 | 2020-08-27T10:01:11 | 290,740,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | from flask import Flask
from app.settings.config import Ajustes, ConexionMail
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
import logging
from logging.handlers import SMTPHandler
from flask_moment import Moment
app = Flask(__name__)
app.config.from_object(Ajustes)
bdd = SQLAlchemy(app)
migrar = Migrate(app,bdd)
moment = Moment(app)
login = LoginManager(app)
login.login_view = 'login' # 1. the 'login' view function is the one that handles sign-ins
login.login_message = 'Por favor inicia sesión para acceder a esta página.'
from app import rutas, modelos, errores
if app.debug == False:
    if ConexionMail.MAIL_SERVER: # a MAIL_SERVER configuration exists
autenticacion = None
if ConexionMail.MAIL_USERNAME or ConexionMail.MAIL_PASSWORD:
autenticacion = (ConexionMail.MAIL_USERNAME, ConexionMail.MAIL_PASSWORD)
seguridad = None
        if ConexionMail.MAIL_USE_TLS: # there is no need to pass anything for the security parameter (an empty tuple enables TLS)
seguridad = ()
enviar_email = SMTPHandler(
mailhost = (ConexionMail.MAIL_SERVER, ConexionMail.MAIL_PORT),
fromaddr = 'no-reply@' + ConexionMail.MAIL_SERVER,
toaddrs = ConexionMail.ADMINS, subject='Fallo encontrado en nuestro Blog',
credentials= autenticacion, secure=seguridad
)
        enviar_email.setLevel(logging.ERROR) # only errors in the forms are emailed
app.logger.addHandler(enviar_email)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80)
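# For reference, an assumed sketch of the imported settings classes -- the real
# app/settings/config.py is not shown here, so every name and value below is a placeholder:
# class Ajustes:
#     SECRET_KEY = 'change-me'
#     SQLALCHEMY_DATABASE_URI = 'sqlite:///app.db'
#     SQLALCHEMY_TRACK_MODIFICATIONS = False
# class ConexionMail:
#     MAIL_SERVER = 'smtp.example.com'
#     MAIL_PORT = 587
#     MAIL_USE_TLS = True
#     MAIL_USERNAME = ''
#     MAIL_PASSWORD = ''
#     ADMINS = ['admin@example.com']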
| [
"[email protected]"
] | |
65fe0f152757f98862164d4919e75b86972b1dd6 | 2dc9ee4a8c39d00c255f52e8af2486e7c2891a98 | /sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_09_30/aio/operations/_snapshots_operations.py | f26eb247d43f078753805fcc3cc88fec91d537f6 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | JoshuaLai/azure-sdk-for-python | fd780c2ab145a35ec0bf9519c4d08c928081e79c | 07614796a332bcfeed35dddee9dbfc2f5487a39f | refs/heads/master | 2023-04-04T17:49:58.177790 | 2021-04-06T21:31:48 | 2021-04-06T21:31:48 | 348,842,434 | 0 | 0 | MIT | 2021-03-17T20:24:55 | 2021-03-17T20:24:54 | null | UTF-8 | Python | false | false | 41,734 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SnapshotsOperations:
"""SnapshotsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_09_30.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
snapshot_name: str,
snapshot: "_models.Snapshot",
**kwargs
) -> "_models.Snapshot":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Snapshot"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-30"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(snapshot, 'Snapshot')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Snapshot', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('Snapshot', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
snapshot_name: str,
snapshot: "_models.Snapshot",
**kwargs
) -> AsyncLROPoller["_models.Snapshot"]:
"""Creates or updates a snapshot.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param snapshot_name: The name of the snapshot that is being created. The name can't be changed
after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
max name length is 80 characters.
:type snapshot_name: str
:param snapshot: Snapshot object supplied in the body of the Put disk operation.
:type snapshot: ~azure.mgmt.compute.v2020_09_30.models.Snapshot
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Snapshot or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2020_09_30.models.Snapshot]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Snapshot"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
snapshot_name=snapshot_name,
snapshot=snapshot,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Snapshot', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
snapshot_name: str,
snapshot: "_models.SnapshotUpdate",
**kwargs
) -> "_models.Snapshot":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Snapshot"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-30"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(snapshot, 'SnapshotUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Snapshot', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('Snapshot', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
snapshot_name: str,
snapshot: "_models.SnapshotUpdate",
**kwargs
) -> AsyncLROPoller["_models.Snapshot"]:
"""Updates (patches) a snapshot.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param snapshot_name: The name of the snapshot that is being created. The name can't be changed
after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
max name length is 80 characters.
:type snapshot_name: str
:param snapshot: Snapshot object supplied in the body of the Patch snapshot operation.
:type snapshot: ~azure.mgmt.compute.v2020_09_30.models.SnapshotUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Snapshot or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2020_09_30.models.Snapshot]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Snapshot"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
snapshot_name=snapshot_name,
snapshot=snapshot,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Snapshot', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'} # type: ignore
async def get(
self,
resource_group_name: str,
snapshot_name: str,
**kwargs
) -> "_models.Snapshot":
"""Gets information about a snapshot.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param snapshot_name: The name of the snapshot that is being created. The name can't be changed
after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
max name length is 80 characters.
:type snapshot_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Snapshot, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_09_30.models.Snapshot
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Snapshot"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-30"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Snapshot', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
snapshot_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-30"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
snapshot_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes a snapshot.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param snapshot_name: The name of the snapshot that is being created. The name can't be changed
after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
max name length is 80 characters.
:type snapshot_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
snapshot_name=snapshot_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.SnapshotList"]:
"""Lists snapshots under a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SnapshotList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_09_30.models.SnapshotList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SnapshotList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-30"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('SnapshotList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots'} # type: ignore
def list(
self,
**kwargs
) -> AsyncIterable["_models.SnapshotList"]:
"""Lists snapshots under a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SnapshotList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_09_30.models.SnapshotList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SnapshotList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-30"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('SnapshotList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/snapshots'} # type: ignore
async def _grant_access_initial(
self,
resource_group_name: str,
snapshot_name: str,
grant_access_data: "_models.GrantAccessData",
**kwargs
) -> Optional["_models.AccessUri"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.AccessUri"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-30"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._grant_access_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(grant_access_data, 'GrantAccessData')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AccessUri', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_grant_access_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess'} # type: ignore
async def begin_grant_access(
self,
resource_group_name: str,
snapshot_name: str,
grant_access_data: "_models.GrantAccessData",
**kwargs
) -> AsyncLROPoller["_models.AccessUri"]:
"""Grants access to a snapshot.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param snapshot_name: The name of the snapshot that is being created. The name can't be changed
after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
max name length is 80 characters.
:type snapshot_name: str
:param grant_access_data: Access data object supplied in the body of the get snapshot access
operation.
:type grant_access_data: ~azure.mgmt.compute.v2020_09_30.models.GrantAccessData
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AccessUri or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2020_09_30.models.AccessUri]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AccessUri"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._grant_access_initial(
resource_group_name=resource_group_name,
snapshot_name=snapshot_name,
grant_access_data=grant_access_data,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AccessUri', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_grant_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess'} # type: ignore
async def _revoke_access_initial(
self,
resource_group_name: str,
snapshot_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-30"
# Construct URL
url = self._revoke_access_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_revoke_access_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess'} # type: ignore
async def begin_revoke_access(
self,
resource_group_name: str,
snapshot_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Revokes access to a snapshot.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param snapshot_name: The name of the snapshot that is being created. The name can't be changed
after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
max name length is 80 characters.
:type snapshot_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._revoke_access_initial(
resource_group_name=resource_group_name,
snapshot_name=snapshot_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_revoke_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess'} # type: ignore
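# Illustrative usage (an assumption; this generated operations class is normally
# reached through the service client rather than instantiated directly):
# from azure.identity.aio import DefaultAzureCredential
# from azure.mgmt.compute.aio import ComputeManagementClient
# async with ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#     snapshot = await client.snapshots.get("<resource-group>", "<snapshot-name>")
#     print(snapshot.name, snapshot.provisioning_state)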
| [
"[email protected]"
] | |
0800085027434f0359df2c64edc4b158777e79e1 | 09d02f10fd186f5034d37cc2cff8c54613948e83 | /nlp054.py | f285d0e9e4559abff43ab1ffa6acd54df86d6a1f | [] | no_license | mathhun/nlp100 | fe7ac954372ea6dd521d3242864f7b7ff65de2fd | 33c2e8ec3fdd1a40b5591dd7b6b299a1eb5695a0 | refs/heads/master | 2021-01-01T17:52:00.382221 | 2015-12-21T14:19:12 | 2015-12-21T14:19:12 | 32,793,185 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | #!/usr/bin/env python
"""
54. Part-of-speech tagging
Read the XML analysis output produced by Stanford Core NLP and print the word, lemma, and part-of-speech in tab-separated format
"""
import sys
import xml.etree.ElementTree as etree
def main():
tree = etree.parse(sys.argv[1])
    for token in tree.findall('.//token'):  # './/' is the ElementPath form that searches the whole tree without a FutureWarning
print('\t'.join([token.find('word').text, token.find('lemma').text, token.find('POS').text]))
if __name__ == '__main__':
main()
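# Expected input shape (an assumption based on Stanford CoreNLP's XML output), e.g.:
# <token id="1">
#   <word>Obama</word>
#   <lemma>Obama</lemma>
#   <POS>NNP</POS>
# </token>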
| [
"[email protected]"
] | |
35278f3517de2a05388a384cfcb2871b0a5daae8 | 9a10d8e420abef9e7c5e6e78d84d19ecc4029895 | /tests/unit_tests/test_solution.py | 4ac27221d6b9e4d769f6f1d158f4c02c25239ca5 | [
"ISC"
] | permissive | zixzeus/Job_Shop_Schedule_Problem | d46d0178bf1215863789076e4914f087d933fe8d | d2b61b42bcf618533f2086a1f67fd743e3cf2946 | refs/heads/master | 2022-06-18T11:59:11.778312 | 2020-05-02T21:29:54 | 2020-05-02T21:29:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,009 | py | import pickle
import unittest
import numpy as np
from JSSP import solution
from JSSP.exception import IncompleteSolutionException, InfeasibleSolutionException
from tests.util import csv_data, tmp_dir, rm_tree
class TestSolution(unittest.TestCase):
def test_solution_equality(self):
solution_obj1 = solution.SolutionFactory(csv_data).get_solution()
solution_obj2 = solution.Solution(csv_data, solution_obj1.operation_2d_array)
self.assertEqual(solution_obj1, solution_obj2, "These two solution.Solutions should be equal")
def test_solution_inequality(self):
solution_obj1 = solution.SolutionFactory(csv_data).get_solution()
solution_obj2 = solution.SolutionFactory(csv_data).get_solution()
self.assertNotEqual(solution_obj1, solution_obj2, "These two solution.Solutions should not be equal")
def test_solution_less_than(self):
solution_obj1 = solution.SolutionFactory(csv_data).get_solution()
solution_obj2 = solution.Solution(csv_data, solution_obj1.operation_2d_array)
solution_obj2.makespan -= 1
self.assertLess(solution_obj2, solution_obj1, "solution_obj2 should be less than solution_obj1")
solution_obj2.makespan += 1
solution_obj2.machine_makespans[0] -= 1
self.assertLess(solution_obj2, solution_obj1, "solution_obj2 should be less than solution_obj1")
def test_solution_greater_than(self):
solution_obj1 = solution.SolutionFactory(csv_data).get_solution()
solution_obj2 = solution.Solution(csv_data, solution_obj1.operation_2d_array)
solution_obj2.makespan -= 1
self.assertGreater(solution_obj1, solution_obj2, "solution_obj2 should be greater than solution_obj1")
solution_obj2.makespan += 1
solution_obj2.machine_makespans[0] -= 1
self.assertGreater(solution_obj1, solution_obj2, "solution_obj2 should be greater than solution_obj1")
def test_sorting_solutions(self):
lst = sorted(solution.SolutionFactory(csv_data).get_n_solutions(50))
for i in range(1, len(lst)):
self.assertLess(lst[i - 1], lst[i], "lst should be in sorted order")
def test_solution_in_list(self):
sol1 = solution.SolutionFactory(csv_data).get_solution()
sol2 = solution.Solution(csv_data, sol1.operation_2d_array)
lst = [sol1]
self.assertIn(sol2, lst)
def test_infeasible_solution(self):
try:
solution_obj = solution.SolutionFactory(csv_data).get_solution()
solution_obj.operation_2d_array[[0, 200]] = solution_obj.operation_2d_array[[200, 0]]
solution.Solution(csv_data, solution_obj.operation_2d_array)
self.fail("Failed to raise solution.InfeasibleSolutionException")
except InfeasibleSolutionException:
pass
def test_incomplete_solution(self):
try:
solution_obj = solution.SolutionFactory(csv_data).get_solution()
solution.Solution(csv_data, np.delete(solution_obj.operation_2d_array, 0, axis=0))
self.fail("Failed to raise solution.IncompleteSolutionException")
except IncompleteSolutionException:
pass
class TestPicklingSolution(unittest.TestCase):
def setUp(self) -> None:
if not tmp_dir.exists():
tmp_dir.mkdir()
def tearDown(self) -> None:
rm_tree(tmp_dir)
def test_pickle_to_file(self):
solution_obj = solution.SolutionFactory(csv_data).get_solution()
with open(tmp_dir / 'test_solution.pkl', 'wb') as fout:
pickle.dump(solution_obj, fout)
self.assertTrue((tmp_dir / 'test_solution.pkl').exists(), "The pickled solution does not exist")
with open(tmp_dir / 'test_solution.pkl', 'rb') as fin:
solution_obj_pickled = pickle.load(fin)
self.assertEqual(solution_obj, solution_obj_pickled, "The pickled solution should be equal to solution_obj")
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
7af6233f73032ed008e7c3514e5a80b8ab1f4af6 | 2dd26e031162e75f37ecb1f7dd7f675eeb634c63 | /nemo/collections/nlp/data/dialogue/input_example/mellon_qa_input_example.py | e6576d40460bdc698bd7fbe8eede2c0a743fe6ed | [
"Apache-2.0"
] | permissive | NVIDIA/NeMo | 1b001fa2ae5d14defbfd02f3fe750c5a09e89dd1 | c20a16ea8aa2a9d8e31a98eb22178ddb9d5935e7 | refs/heads/main | 2023-08-21T15:28:04.447838 | 2023-08-21T00:49:36 | 2023-08-21T00:49:36 | 200,722,670 | 7,957 | 1,986 | Apache-2.0 | 2023-09-14T18:49:54 | 2019-08-05T20:16:42 | Python | UTF-8 | Python | false | false | 1,329 | py | # Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.dialogue.input_example.input_example import DialogueInputExample
class MellonQAInputExample(DialogueInputExample):
"""
Template for MellonQAInputExample
Meant as a descriptor rather than to be instantiated
Please instantiate using the base class 'DialogueInputExample'
{
"utterance": <utterance>,
"labels": {
"example_id": <example_id>,
"response": <response>,
"fluent_response": <fluent_response>, # written version of the response that is more fluent
"passage": <passage>, # passage which supports generating the response (answer) to the utterance (question)
}
}
"""
| [
"[email protected]"
] | |
55285d9fbb561a79de632bdc2da537402af92122 | 818ddd12e08a852f1bf814479647ef958803315e | /sync/migrations/0002_step.py | 7a223d65817cb842e0b538f936b232147f7c68c2 | [] | no_license | allandereal/rapidpro_api_intro | 599a560a2c89ec909d420133433d396433f56382 | 8f9136f6e0f113c494f7c6df1a258c32af8b08de | refs/heads/master | 2021-01-20T12:42:15.910476 | 2017-02-23T14:58:46 | 2017-02-23T14:58:46 | 82,667,014 | 0 | 0 | null | 2017-02-21T10:27:32 | 2017-02-21T10:27:31 | null | UTF-8 | Python | false | false | 613 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-21 13:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sync', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Step',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('node', models.CharField(max_length=200)),
('time', models.DateTimeField()),
],
),
]
| [
"[email protected]"
] | |
199715b0f42276287762339e011a083d6e514ea9 | 3e7e501c6ddce2339a04dfe740af7ca4c561d7e5 | /src/Python/Lecture1/Exercise.DecipherTheText/decipher.py | 420cf58e48110dcd6586a9cbec2880f5df855ae0 | [] | no_license | eddigavell/Data-IT-security | b05ce56885390a28e8cb2422fc0e7d443351e9f3 | aeb0012d32be626ff1fa705217c2eb54b2ab6110 | refs/heads/main | 2023-02-20T02:08:54.509436 | 2021-01-22T13:02:28 | 2021-01-22T13:02:28 | 328,910,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,753 | py | from collections import Counter
def wordcount():
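    # Frequency-analysis helper: counts individual letters and whole words in the ciphertext,
    # the kind of statistics the hand-built substitution key in main() is based on.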
message = open('TextToDecipher.txt').read()
count = Counter(message)
words = message.split()
words = [word.replace(',', '') for word in words]
words = [word.replace('.', '') for word in words]
word_count = Counter(words)
#one_letter_words = [word for word in words if len(word) == 1]
#two_letter_words = [word for word in words if len(word) == 2]
#three_letter_words = [word for word in words if len(word) == 3]
print(count)
print(word_count)
#print(one_letter_words)
#print(two_letter_words)
#print(three_letter_words)
def main():
wordcount()
print()
message = open('TextToDecipher.txt').read()
    # substitution key: cipher letter -> plaintext letter
    keys = {}
#ABCDEFGHIJKLMNOPQRSTUVWXYZ
keys.update({'A': 'Y'})
keys.update({'B': 'P'})
keys.update({'C': 'B'})
keys.update({'D': 'G'})
keys.update({'E': 'I'})
keys.update({'F': 'A'})
keys.update({'G': 'D'})
keys.update({'H': 'V'})
keys.update({'I': 'S'})
keys.update({'J': 'N'})
keys.update({'K': 'L'})
keys.update({'L': 'J'})
keys.update({'M': 'C'})
keys.update({'N': 'E'})
keys.update({'O': 'F'})
keys.update({'P': 'U'})
keys.update({'Q': 'R'})
keys.update({'R': 'Z'})
keys.update({'S': 'K'})
keys.update({'T': 'O'})
keys.update({'U': 'T'})
keys.update({'V': 'X'})
keys.update({'W': 'Q'})
keys.update({'X': 'W'})
keys.update({'Y': 'H'})
keys.update({'Z': 'M'})
for letter in message:
if letter.upper() in keys:
print(keys[letter.upper()], end='')
else:
print(letter, end='')
print()
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
3204194f55154ad67ef0e0b9c7f9c9b18b6dc89a | 53acff31eef0faa61045e6fb1c866d54207dfaf4 | /Recursion/convertBase.py | da53215a1a3f8dd59606fcde1a45006426c80fd4 | [] | no_license | nikspatel03/DataStructure | c71b5b44ced1667d74e10c4ab782c47be94b6d6b | c747011170f8b4acaac863a2a198378504ce2f03 | refs/heads/master | 2020-05-06T20:20:37.633204 | 2019-07-24T21:22:54 | 2019-07-24T21:22:54 | 180,233,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | """
convert base function using recursion
"""
def convertBase(num, base):
digits = "0123456789ABCDEF"
if num < base:
return digits[num]
else:
        return convertBase(num // base, base) + digits[num % base]  # digits lookup keeps bases above 10 (A-F) correct
print(convertBase(25,2),"11001")
print(convertBase(10,2),"1010")
print(convertBase(25,16),"19")
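# Worked trace of the recursion for convertBase(25, 2):
# convertBase(12, 2) + "1" -> (convertBase(6, 2) + "0") + "1" -> ... -> "11001"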
| [
"[email protected]"
] | |
ec71567e9dde7a3d3f4c1bc86b7aed097e7ea20d | 20add5362207286794b67e62caad3b866394ff9d | /Controller.py | ccadf5c889b2274cb28b584e4c7afd18a05005db | [] | no_license | cappatar/LinkedInCrawler | 1c4ed517d4839991565ffea8a7626025c8ddf871 | fdd4ae7aceba21258e4dda0fef81e8e62422917c | refs/heads/master | 2016-08-06T14:14:16.973345 | 2015-03-01T09:55:06 | 2015-03-01T09:55:06 | 31,491,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,137 | py | from LinkedInCrawler.Crawler import Crawler
from LinkedInCrawler import Settings
from LinkedInCrawler.Writer import Writer
__author__ = 'Michelle'
class Controller:
def __init__(self):
self.crawler = Crawler()
self.people = []
self.Writer = Writer()
def write_people_to_file(self):
self.Writer.write_people_to_file(self.people)
def find_name(self, name):
url = Settings.base_url + Settings.directory_link + name[0].lower()
self.find_page_by_name(name.lower(), url)
# Some of the links received from Linkedin have no prefix. This fixes that.
def validate_link(self, url):
new_url = url.encode('ascii', errors='ignore')
if new_url[0] != 'h':
return Settings.base_url[:-1] + url
return url
def save_person(self, link):
fixed_link = self.validate_link(link)
person = self.crawler.crawl_profile_page(fixed_link)
if person is not None:
self.people.append(person)
'''
If url param is a directory page, zoom in the ranges (recursion).
If it's a search page, go through profiles in page.
If it's a profile page, save it to list.
'''
def find_page_by_name(self, name, url):
fixed_url = self.validate_link(url)
parts = fixed_url.split('/')
if parts[3] == 'directory':
name_ranges = self.crawler.crawl_directory_page(fixed_url)
next_range = self.get_range(name, name_ranges.keys())
if next_range is None:
                if name in name_ranges:
if 'dir' in name_ranges[name]:
self.iterate_profiles(name_ranges[name])
else:
self.save_person(name_ranges[name])
elif '-' not in str(name_ranges.keys()[0]):
search_link = name_ranges[name]
if parts[4] == 'dir':
self.iterate_profiles(search_link)
else:
self.save_person(search_link)
else:
return self.find_page_by_name(name, name_ranges[next_range])
elif parts[4] == 'dir':
self.iterate_profiles(fixed_url)
else:
self.save_person(fixed_url)
# being called when needed to crawl search pages (https://www.linkedin.com/pub/dir/William/Gates)
def iterate_profiles(self, url):
profiles = self.crawler.crawl_profile_search_page(self.validate_link(url))
for profile in profiles:
self.save_person(profile)
'''
Zooming in directory pages (by recursion in binary search of name in ranges array),
returning new range.
name_array = array of current page ranges.
'''
def search_range(self, name, name_array):
try:
size = len(name_array)
_range = name_array[size/2]
last_name = _range.split('-')[1][1:]
for index in range(0, min(len(name), len(last_name))):
if name[index] > last_name[index]:
return self.end_case_larger_name(name, name_array, size)
elif name[index] < last_name[index]:
return self.end_cases_larger_end_range(name, name_array, size)
if len(name) > len(last_name):
return self.end_case_larger_name(name, name_array, size)
elif len(name) < len(last_name):
return self.end_cases_larger_end_range(name, name_array, size)
return _range
except:
return None
def get_range(self, name, name_array):
return self.search_range(name, sorted(name_array, key=str.lower))
def end_cases_larger_end_range(self, name, name_array, size):
if size == 2:
return name_array[1]
if size == 3:
return self.search_range(name, name_array[:-(size/2)])
return self.search_range(name, name_array[:-(size/2 - 1)])
def end_case_larger_name(self, name, name_array, size):
if size == 2:
return name_array[0]
return self.search_range(name, name_array[(size/2):]) | [
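
# Hypothetical usage sketch (not part of the original project) showing how the methods above
# fit together; the name and the Writer output are placeholders.
#
# controller = Controller()
# controller.find_name("John Smith")    # walks LinkedIn directory pages down to profile pages
# controller.write_people_to_file()     # delegates to Writer to persist the collected profiles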
"[email protected]"
] | |
8e9fa4e4f360c221ad8e353d4414d33924b0afcb | 3a1cd959017f6e80030d90ff19a09e8857400184 | /preprocessing/preprocess.py | fa680db1cf3cb94dcac6b46c8a277171845d4d3d | [] | no_license | beatboxerish/Delhi-Air-Pollution-PM-Characterization | 34cf530f681373809de284b1b831c5276cfbc2d2 | 53fd0902c06d28d306fa33aa30018e128e3b9e3d | refs/heads/master | 2023-05-07T17:27:47.801777 | 2021-05-14T15:46:55 | 2021-05-14T15:46:55 | 367,397,147 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,666 | py | # -*- coding: utf-8 -*-
import os
import sys
from dateutil import tz
import pytz
from datetime import datetime
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def read_file(date, data_dir = "./data/"):
"""
Reads in the data
"""
df = pd.read_csv(data_dir + date + ".csv", index_col = 0, \
parse_dates = ["dateTime"])
return df
def tester(df, test_plots_dir = "./plots_for_preprocessing/"):
"""
Makes no changes. Graphs different test outputs. This is to check the
dataset for any inconsistencies.
"""
### make a new folder for test_plots if the folder isn't there
# Number of data points with lat = 0 or long = 0
idx = df[(df.lat == 0) | (df.long == 0)].index
try:
df.loc[idx, "deviceId"].value_counts().sort_index().plot(kind = "bar")
plt.title("Number of entries where either reported lat == 0 or long\
== 0 : "+ str(len(idx)))
plt.savefig(test_plots_dir + "lat_long_0.png")
except:
pass
    # Number of data points with PM values equal to 0
    try:
        idx = df[(df.pm1_0 == 0) | (df.pm2_5 == 0) | (df.pm10 == 0)].index
        df.loc[idx, "deviceId"].value_counts().sort_index().plot(kind = "bar")
        plt.title("Number of entries where any of the recorded PM values are 0 : "+ str(len(idx)))
        plt.savefig(test_plots_dir + "pm_0.png")
    except:
        pass
    # Checking for high outliers
    try:
        idx = df[(df.pm1_0>1500) | (df.pm2_5>1500) | (df.pm10>1500)].index
        df.loc[idx, "deviceId"].value_counts().sort_index().plot(kind = "bar")
        plt.title("Number of entries where any recorded PM value is above 1500 : "+ str(len(idx)))
        plt.savefig(test_plots_dir + "outliers.png")
    except:
        pass
def clean(df):
"""
Performs some standard cleaning steps
"""
## Values to be dropped
idx_to_drop = []
# 1. where either lat or long is less than 1
idx_to_drop.extend(df[(df.lat <= 1) | (df.long <= 1)].index.tolist())
# # 2. where malfunctioning device is the recording instrument. Was there at the start (11-20 Oct)
# bad_id = '00000000d5ddcf9f'
# idx_to_drop.extend(df[df.deviceId == bad_id].index.tolist())
# 3. where pm values are above 2000
idx_to_drop.extend(df[(df.pm1_0 > 2000) | (df.pm2_5 > 2000) | (df.pm10 > 2000) ].index.tolist())
# 4. where pm values are less than 0
idx_to_drop.extend(df[(df.pm1_0 <= 0) | (df.pm2_5<= 0) | (df.pm10<= 0) ].index.tolist())
idx_to_drop = list(set(idx_to_drop))
df_dropped = df.loc[idx_to_drop, :]
df = df.drop(idx_to_drop, axis = 0)
df = df.reset_index(drop = True)
return df, df_dropped
def handle_outliers(df, nbd=10, plot=False):
"""
Handles high and low PM outliers using moving average smoothing with median
"""
for col in ["pm1_0", "pm2_5", "pm10"]:
df = df.sort_values(["deviceId", "dateTime"]).copy()
df["rmed"] = df[col].rolling(nbd, center = True).median()
df["pm_new"] = df[col]
idx = df[(df[col]>1000) | (df[col]<20)].index
df.loc[idx, "pm_new"] = df.loc[idx, "rmed"]
if plot:
fig, ax = plt.subplots(1, 2, figsize = (15,6))
df[col].plot(style = "s", ax = ax[0])
df.pm_new.plot(style = "s", ax = ax[1])
ylims = ax[0].get_ylim()
ax[1].set_ylim(ylims)
ax[0].set_title("Original " + col)
ax[1].set_title("Outlier Handled "+ col)
ax[0].set_xlabel("Index")
ax[1].set_xlabel("Index")
ax[0].set_ylabel(col)
ax[1].set_ylabel(col)
plt.show()
df.loc[:, col] = df.loc[:, "pm_new"]
df = df.drop(["rmed", "pm_new"], axis=1)
return df
def preprocess(df_tuple, test = True, car = False):
"""
Main function. Combines all other functions.
"""
if not car:
df_bme, df_gps, df_pol = df_tuple
# drop duplicates
df_bme = df_bme.drop_duplicates(subset = "uid")
df_gps = df_gps.drop_duplicates(subset = "uid")
df_pol = df_pol.drop_duplicates(subset = "uid")
start = datetime.now()
# merge on key columns
key_cols = ["uid", "dateTime", "deviceId"]
df_all = pd.merge(df_bme, df_gps, on = key_cols)
df_all = pd.merge(df_all, df_pol , on = key_cols)
else:
df_all = df_tuple
start = datetime.now()
df_all["deviceId"] = "0"
df_all["dateTime"] = pd.to_datetime(df_all.createdAt, unit = "ms")
df_all = df_all.drop(["temperature", "humidity", "id", "session_Id", "createdAt"], axis = 1)
# renaming columns
df_all = df_all.rename(columns = {"lng":"long"})
if car:
df_all = df_all.rename(columns = {"pm_2_5":"pm2_5", "pm_1":"pm1_0", "pm_10":"pm10"})
# test for potential problems
if test:
tester(df_all)
# handle dateTime
df_all = handle_dateTime(df_all)
# use clean()
df_all, df_dropped = clean(df_all)
# handling outliers
df_all = handle_outliers(df_all, nbd = 10, plot = False)
# some final stuff
print("Null values:"+str(df_all.isnull().sum().sum()))
df_all = df_all.dropna()
df_all = df_all.sort_values("dateTime")
df_all = df_all.reset_index(drop = True)
return df_all, df_dropped, datetime.now() - start
def save(df_all, date, car=False):
if car:
df_all.to_csv("../data/" + date + "_car.csv")
else:
df_all.to_csv("../data/" + date + "_all.csv")
| [
"[email protected]"
] | |
a9b0bc1d4d4b90ba023df5a7c36186edb416a17e | 35bab6d7a5b88632b1a70a2d6bbe7459b320b527 | /algorithms/eightQueens.py | 3634bf0285a1ea5370f6d0a4a0c0402517668928 | [
"MIT"
] | permissive | asarenski/python-algos-examples | 975745119a46d5c8c2d7dad8eadaf48c1350d0ef | 992b76690500dcf792a43072cdf820d3e3085a72 | refs/heads/main | 2023-02-24T13:56:28.999449 | 2021-01-31T19:19:11 | 2021-01-31T19:19:11 | 330,254,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,746 | py | QUEEN_MARKER = 'X'
def printBoard(board):
for i in range(len(board)):
print(board[i])
def depthOneCopy(board):
nextboard = []
for i in range(len(board)):
row = board[i]
nextboard.append(row.copy())
return nextboard
def checkDiagLeft(board, row, col):
# check up and left
while row > 0 and col > 0:
if board[row-1][col-1] == QUEEN_MARKER:
return False
row -= 1
col -= 1
return True
def checkDiagRight(board, row, col):
# check up and right
while row > 0 and col < len(board) - 1:
if board[row-1][col+1] == QUEEN_MARKER:
return False
row -= 1
col += 1
return True
def checkVert(board, row, col):
# for each row from i to row
for i in range(row+1):
if board[i][col] == QUEEN_MARKER:
return False
return True
def canPlaceQueen(board, row, col):
return checkVert(board, row, col) and checkDiagLeft(board, row, col) and checkDiagRight(board, row, col)
def queens(board = [], row = 0, depth = 0):
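    # Backtracking: one queen per row; for each column that is safe with respect to the rows
    # already filled, copy the board, place a queen there, and recurse into the next row.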
if not board:
for i in range(8):
board.append([])
for j in range(8):
board[i].append('o')
if row == 7:
for col in range(len(board)):
if canPlaceQueen(board, row, col):
print('')
printBoard(board)
return 1
return 0
total = 0
# for each col recursive operation
for col in range(0, len(board)):
if canPlaceQueen(board, row, col):
nextboard = depthOneCopy(board)
nextboard[row][col] = QUEEN_MARKER
total += queens(nextboard, row+1, depth+1)
return total
print(f'total is: {queens()}') | [
"[email protected]"
] | |
f183c16f2e7c658e1db690f0ef26b14e0369164b | dbffff0e215fdcd91cda130dd97dc9240af3a896 | /utils/permissions.py | 2967b333bebbb8788b8bad5b34e801d18e9fa8be | [] | no_license | Hovhanes/to_do_list | 00885dc6378b73500293c302e31a5df203bac8ff | 90ac56835e070f24694927a269a4111b734c9d8c | refs/heads/master | 2022-12-02T19:18:49.469126 | 2020-08-27T13:20:09 | 2020-08-27T13:20:09 | 290,326,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | from rest_framework import permissions
class IsAdminOrOwner(permissions.BasePermission):
message = 'Permission denied.'
def has_object_permission(self, request, view, obj):
if request.user.is_superuser:
return True
elif request.user.id == obj.user.id:
return True
return False
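
# Hedged usage sketch (assumption, not part of this repo): a DRF view would typically attach
# the permission like this; the viewset name is hypothetical.
#
# from rest_framework import viewsets
#
# class TaskViewSet(viewsets.ModelViewSet):
#     permission_classes = [IsAdminOrOwner]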
| [
"[email protected]"
] | |
b689f1680d00844834565d0b05d916c70dd6faeb | 9ba6a1180382a102f3b9036537530faccddd16d4 | /wykop/posts/migrations/0006_auto_20190321_1924.py | 2407b6f7f4203f0e42ef7e45351d1ac3088c9ad5 | [] | no_license | jakubste/codebrainers_django_project | 5e320b93617ce947915baf7a9d382f370649ecdf | 20ef4283e9ab82c620e979b47cac9d68e2c1f2b6 | refs/heads/master | 2020-04-27T20:57:41.747889 | 2019-03-30T13:55:44 | 2019-03-30T13:55:44 | 174,677,904 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | # Generated by Django 2.1.7 on 2019-03-21 19:24
from django.db import migrations, models
import embed_video.fields
class Migration(migrations.Migration):
dependencies = [
('posts', '0005_auto_20190321_1730'),
]
operations = [
migrations.AddField(
model_name='post',
name='video',
field=embed_video.fields.EmbedVideoField(blank=True, null=True),
),
migrations.AlterField(
model_name='post',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='posts_images/'),
),
]
| [
"[email protected]"
] | |
04401bfb5f1828c62df484ab6987ab5db2325a51 | a92791af063a998c49ac9f007ffb03392424ec2f | /exercise_python_koni/chapter_4/4_logic.py | d2bdef196fd724ae392a8ecf4b67784c05b6b00f | [] | no_license | kabaksh0507/exercise_python_koni | 0008197a5ba0dfbbf4d804cfb8fc98e31c86091c | 45e164d5c9ab10271500ceecaa5f41dd1bebe10a | refs/heads/main | 2023-02-25T06:12:40.608527 | 2021-02-09T00:45:23 | 2021-02-09T00:45:23 | 337,250,354 | 0 | 0 | null | 2021-02-09T00:39:24 | 2021-02-09T00:39:24 | null | UTF-8 | Python | false | false | 7,025 | py | #ex
# disaster = True
# if disaster:
# print("Woe!")
# else:
# print("Whee!")
#ex2
# color = "yellow"
# if color == "red":
# print("red")
# elif color == "blue":
# print("blue")
# elif color == "green":
# print("green")
# else:
# print("No just color:",color)
#while
# count = 1
# while count <= 5:
# print(count)
# count += 1
#break
# while True:
# stuff = input("String to capitalize[type q to quit]")
# if stuff == "q":
# break
# print(stuff.capitalize())
#continue
# while True:
# value = input("Integer, please [q to quit]")
# if value == 'q': # quit
# break
# number = int(value)
# if number % 2 == 0: # even number
# continue
# print(number, "squared is",number*number)
#else
# numbers = [1,3,4,5,6]
# position = 0
# while position < len (numbers):
# number = numbers[position]
# if number % 2 == 0:
# print('Found even number', number)
# break
# position += 1
# else: # break was not called
# print('No even number found')
#for#1
# rabbits = ['usagi','oisi','kanoyama']
# cpy = []
# for rabbit in rabbits:
# print(rabbit)
#for#2
# word = 'cat'
# for letter in word:
# print(letter)
#for#3
# word_dict = {'hello': 'hello is こんにちは',
# 'dinner': 'dinner is 晩御飯',
# 'mike': 'mike is マイケル'}
# for dictkey in word_dict: # or: for dictkey in word_dict.keys():
# print(dictkey)
#for#4
# word_dict = {'hello': 'hello is こんにちは',
# 'dinner': 'dinner is 晩御飯',
# 'mike': 'mike is マイケル'}
# for dictvalue in word_dict.values():
# print(dictvalue)
#for#5
# word_dict = {'hello': 'hello is こんにちは',
# 'dinner': 'dinner is 晩御飯',
# 'mike': 'mike is マイケル'}
# for dictitem in word_dict.items():
# print(dictitem)
#for#6 assigning to individual variables
# word_dict = {'hello': 'hello is こんにちは',
# 'dinner': 'dinner is 晩御飯',
# 'mike': 'mike is マイケル'}
# for dictkey, dictvalue in word_dict.items():
# print('dictkey : ', dictkey,',' 'dictvalue : ', dictvalue)
#zip#1
# days = ['Monday','Tuesday','Wednesday']
# fruits = ['banana','orange']
# drinks = ['beer','coffee','tea','soda']
# for day, fruit, drink in zip(days, fruits, drinks):
# print(day, ': drink', drink ,"fruit", fruit)
#zip#2 list
# days = ('Monday','Tuesday','Wednesday')
# nums = ('one', 'two', 'three','four')
# days_list = list(zip(nums, days))
# print(days_list)
#zip#3 dict
# days = ('Monday','Tuesday','Wednesday')
# nums = ('one', 'two', 'three','four')
# days_dict = dict(zip(nums, days))
# print(days_dict)
#range#1
# for x in range(100,0,-1):
# print(x)
#range#2
# tmp = list(range(0,100,1))
# print(tmp)
#range#3
# tmp = list(range(0,101,2))
# print(tmp)
#list内包表記#1
# num_list = list(range(1,6,1))
# print(num_list)
#list内包表記#2 https://hibiki-press.tech/python/list-comprehensions/588
# num_list = [number for number in range(1,6)]
# print(num_list)
#list内包表記#3
# num_list = [number-1 for number in range(1,6)]
# print(num_list)
#list内包表記#4
# num_list = [number for number in range(1,7) if number % 2 == 0]
# print(num_list)
#list内包表記#5
# lows = range(0,100)
# highs = range(50,100)
# num_list = [(low, high) for low in lows for high in highs]
# for num in num_list:
# print(num)
#list内包表記#5
# lows = range(0,100)
# highs = range(50,100)
# num_list = [(low, high) for low in lows for high in highs]
# for low, high in num_list:
# print(low, high)
#ジェネレータ内包表記
# num_thing = (num for num in range(1,6))
# print(num_thing)
# for num in num_thing
# print(num)
#function#1
# def DoNothing():
# pass
# DoNothing()
#function#2
# def MakeASounds():
# print("quack")
# MakeASounds()
#function#3
# def ReturnTrue():
# return True
# def checkReturn_True():
# if ReturnTrue():
# printString("OK")
# else:
# printString("OMG")
# def printString(String):
# print(String)
# checkReturn_True()
#function#4
# def echoAnything(Anything):
# return Anything + ' ' + Anything
# print(echoAnything('hello'))
#positional arguments: values are matched to parameters by position
# def menu(wine, entree, dessert):
# return {'wine':wine, 'entree': entree, 'dessert': dessert}
# print(menu('chardonnay','chicken', 'cake'))
#keyword arguments
# def menu(wine, entree, dessert):
# return {'wine':wine, 'entree': entree, 'dessert': dessert}
# print(menu(entree='beef',wine='bordeaux',dessert='bage1'))
#default argument values
# def menu(drink1, drink2, entree, dessert='suger'):
# return {'drink1':drink1,'drink2':drink2, 'entree': entree, 'dessert': dessert}
# print(menu(drink2='beer', entree='chicken',drink1='beer'))
#packing positional arguments into a tuple #1
# def print_args(*args):
# print('input values:',args)
# print_args()
#packing positional arguments into a tuple #2
# def print_args(*args):
# print('input values:',args)
# print_args(3,2,1,'wait!','uh...')
#packing positional arguments into a tuple #3 (required parameters plus *args)
# def print_args(includeNum1,includeNum2, *args):
# print('required value 1:',includeNum1)
# print('required value 2:',includeNum2)
# print('input values:',args)
# print_args(3,2,1,'wait!','uh...')
#packing keyword arguments into a dict
# def printKeyWords(**KeyWords):
# print('keyword include:', KeyWords)
# printKeyWords(drink='beer',dessert='cake',human='human')
#docstrung
# def echo(thing, check):
# '''
# これは説明
# トリプルクォーテーションで囲うこと
# で複数行をコメントアウトできる
# '''
# if check:
# print(thing)
# help(echo)
# print(echo.__doc__)
#object function#1
# def runSomething(func):
# func()
# def answer():
# print(42)
# runSomething(answer)
# print(type(runSomething))
#object function#2
# def add_args(arg1, arg2):
# print(arg1 + arg2)
# print(type(add_args))
#object function#3
# def add_args(arg1, arg2):
# print(arg1 + arg2)
# def runFunc_Twoinclude(func, arg1, arg2):
# func(arg1,arg2)
# runFunc_Twoinclude(add_args,2,100)
#object function#4
# def sum_args(*args):
# return sum(args)
# def runFunc_anyinclude(func, *args):
# return func(*args)
# print(runFunc_anyinclude(sum_args,1,2,3,4,5,6,7,8,9,10))
#generators
# print(sum(range(1,100)))
#custom generator
# def my_range(first=0,last=10,step=1):
# num = first
# while num < last:
# yield num
# num += step
# ranger = my_range(1,15)
# for x in ranger:
# print(x) | [
"[email protected]"
] | |
71e768aa9a2df2aa54807a4e47f8418d31ccfeb1 | 152916446f8c6e76fce9d465f7ef1a3477bac028 | /src/services/exceptions/unauthorized_user_error.py | d21a63d0d7b2adb3a2dcaee390ca51b9a1db01ef | [] | no_license | crpistillo/taller2-app-server | 08d6b43925e1ffaf40105ff503d86c35ccd7f693 | a04d56d5091545f7605fd1fa17bae516ed9c920a | refs/heads/master | 2022-11-26T01:29:09.067603 | 2020-07-30T15:20:56 | 2020-07-30T15:20:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | class UnauthorizedUserError(AttributeError):
pass | [
"[email protected]"
] | |
47a5a84966e45f9d37bbb2a644fe993e521e52d8 | 28ef126452bf2c0c60d1095b91aff9db9e41c495 | /vacations/models.py | 6714df531c7932df3f4de7c3219c8602d6fcb2f2 | [] | no_license | Shravya9506/Assignment3_BonVoyage | 66d76c849fb6ce22700472032dac2aa6098e7aa6 | 7d4883d41965c5dc2657b67231a5ee04005c13f6 | refs/heads/master | 2023-01-13T05:01:19.537913 | 2020-11-16T04:08:52 | 2020-11-16T04:08:52 | 300,146,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,035 | py | from django.db import models, IntegrityError
from django.urls import reverse
from django.template.defaultfilters import slugify
class AutoSlugifyOnSaveModel(models.Model):
"""
Models that inherit from this class get an auto filled slug property based on the models name property.
Correctly handles duplicate values (slugs are unique), and truncates slug if value too long.
The following attributes can be overridden on a per model basis:
* value_field_name - the value to slugify, default 'name'
* slug_field_name - the field to store the slugified value in, default 'slug'
* max_interations - how many iterations to search for an open slug before raising IntegrityError, default 1000
* slug_separator - the character to put in place of spaces and other non url friendly characters, default '-'
"""
def save(self, *args, **kwargs):
pk_field_name = self._meta.pk.name
value_field_name = getattr(self, 'value_field_name', 'name')
slug_field_name = getattr(self, 'slug_field_name', 'slug')
max_interations = getattr(self, 'slug_max_iterations', 1000)
slug_separator = getattr(self, 'slug_separator', '-')
# fields, query set, other setup variables
slug_field = self._meta.get_field(slug_field_name)
slug_len = slug_field.max_length
queryset = self.__class__.objects.all()
# if the pk of the record is set, exclude it from the slug search
current_pk = getattr(self, pk_field_name)
if current_pk:
queryset = queryset.exclude(**{pk_field_name: current_pk})
# setup the original slug, and make sure it is within the allowed length
slug = slugify(getattr(self, value_field_name))
if slug_len:
slug = slug[:slug_len]
original_slug = slug
# iterate until a unique slug is found, or max_iterations
counter = 2
while queryset.filter(**{slug_field_name: slug}).count() > 0 and counter < max_interations:
slug = original_slug
suffix = '%s%s' % (slug_separator, counter)
if slug_len and len(slug) + len(suffix) > slug_len:
slug = slug[:slug_len - len(suffix)]
slug = '%s%s' % (slug, suffix)
counter += 1
if counter == max_interations:
raise IntegrityError('Unable to locate unique slug')
setattr(self, slug_field.attname, slug)
super(AutoSlugifyOnSaveModel, self).save(*args, **kwargs)
class Meta:
abstract = True
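
# The Vacation and Trip models below are the real users of AutoSlugifyOnSaveModel. As a hedged
# extra illustration of the per-model overrides described in the docstring, a model could point
# the slug at a different source field (field names here are made up):
#
# class Tag(AutoSlugifyOnSaveModel):
#     value_field_name = 'title'   # slugify 'title' instead of the default 'name'
#     title = models.CharField(max_length=60)
#     slug = models.SlugField(max_length=200, unique=True)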
class Vacation(AutoSlugifyOnSaveModel):
name = models.CharField(max_length=60, verbose_name='Vacation name')
destination = models.CharField(max_length=60)
description = models.TextField()
destination_image = models.ImageField(upload_to='photos')
slug = models.SlugField(max_length=200,
unique=True)
class Meta:
ordering = ('name',)
index_together = (('id', 'slug'),)
def __str__(self):
return self.destination
class Trip(AutoSlugifyOnSaveModel):
vacation = models.ForeignKey(
Vacation,
on_delete=models.CASCADE,
related_name='trips'
)
name = models.CharField(max_length=60, verbose_name='Trip name')
source = models.CharField(max_length=60, verbose_name='Starting point')
transportation_choices = (
('FL','Flight'),
('TR','Train'),
('CR','Cruise'),
('BU','Bus')
)
slug = models.SlugField(max_length=200,
unique=True)
mode_of_transport = models.CharField(max_length=2, choices=transportation_choices)
start_date = models.DateField()
end_date = models.DateField()
price = models.DecimalField(max_digits=6, decimal_places=2, verbose_name='Price ($)')
additional_benefits = models.TextField()
trip_description = models.TextField()
class Meta:
ordering = ('name',)
index_together = (('id', 'slug'),)
def __str__(self):
return self.name | [
"[email protected]"
] | |
46197c4916106629fcc181d5dc86122be096b222 | fef7d3b7d72c05701f06c7a4a1699b01f576ea4d | /widgets/Pong.py | 9dd8f5285d4c0a5e0881e3fceb842bb8a13f0070 | [] | no_license | airtonchagas/ping-pong | 606773e04bdaaa47d049845ee3971a3c4f03254f | f0392ed07700efc0e6854bd615836b62a2988eb0 | refs/heads/master | 2022-12-04T08:19:20.137856 | 2020-08-28T19:05:27 | 2020-08-28T19:05:27 | 272,957,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,240 | py | from kivy.properties import ObjectProperty
from kivy.uix.widget import Widget
from kivy.clock import Clock
from kivy.core.window import Window
# Definition of the Pong game widget
class Pong(Widget):
bola = ObjectProperty(None)
raquete_1 = ObjectProperty(None)
raquete_2 = ObjectProperty(None)
def __init__(self, screen_manager=None):
super(Pong, self).__init__()
self.screen_manager = screen_manager
self._keyboard = Window.request_keyboard(self._keyboard_closed, self)
self._keyboard.bind(on_key_down=self._on_keyboard_down)
def _keyboard_closed(self):
self._keyboard.unbind(on_key_down=self._on_keyboard_down)
self._keyboard = None
def servico(self, vel=(4, 0)):
self.bola.center = self.center
self.bola.velocidade = vel
def atualiza(self, dt):
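        # One update step: move the ball, let both paddles try to bounce it, bounce off the
        # top/bottom walls, then handle scoring (and a win at 3 points) when a side is missed.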
self.bola.movimenta()
self.raquete_1.rebate_bola(self.bola)
self.raquete_2.rebate_bola(self.bola)
if (self.bola.y < 0) or (self.bola.top > self.height):
self.bola.velocidade_y *= -1
if self.bola.x < self.x:
self.raquete_2.placar += 1
if self.raquete_2.placar >= 3:
self.servico(vel=(0, 0))
self.raquete_1.placar = 0
self.raquete_2.placar = 0
self.screen_manager.current = "vencedor_2"
return
self.servico(vel=(4, 0))
if self.bola.x > self.width:
self.raquete_1.placar += 1
if self.raquete_1.placar >= 3:
self.servico(vel=(0, 0))
self.raquete_1.placar = 0
self.raquete_2.placar = 0
self.screen_manager.current = "vencedor_1"
return
self.servico(vel=(-4, 0))
    # # Capture the on_touch_move event (finger dragging across the screen)
    # def on_touch_move(self, touch):
    #     # Check whether the touch was on the left side of the screen
    #     if touch.x < self.width / 2:
    #         # Update the height of the left paddle
    #         self.raquete_1.center_y = touch.y
    #     # Check whether the touch was on the right side of the screen
    #     if touch.x > self.width - self.width / 2:
    #         # Update the height of the right paddle
    #         self.raquete_2.center_y = touch.y
def _on_keyboard_down(self, keyboard, keycode, text, modifiers):
if keycode[1] == 'w':
if(self.raquete_1.center_y <= 617.5 ):
self.raquete_1.center_y += 10
elif keycode[1] == 's':
if(self.raquete_1.center_y >= 97.5 ):
self.raquete_1.center_y -= 10
elif keycode[1] == 'up':
if(self.raquete_2.center_y <= 617.5 ):
self.raquete_2.center_y += 10
elif keycode[1] == 'down':
if(self.raquete_2.center_y >= 97.5 ):
self.raquete_2.center_y -= 10
return True
def remove_btn(self, btn):
self.remove_widget(btn)
def comeca_jogo(self):
self.servico()
Clock.schedule_interval(self.atualiza, 1.0/120.0)
def reinicia_jogo(self):
self.servico(vel=(4,0))
self.raquete_1.placar = 0
self.raquete_2.placar = 0
| [
"[email protected]"
] | |
79f42bc80ce442200cc1f108d2ecc383500f1906 | 2894c7f7c58290651c0fac48a8084371def3d7ba | /groups/models.py | 8badb586a6605eefb58626ef9fc3e0ae112f8b59 | [] | no_license | SanjayPJ/sg-update01 | 4bd08b62ee3e684f50173a96bb712f5512719fde | dc2d2db21ad58040c4ad10485872d33a457bf2a8 | refs/heads/master | 2020-03-30T09:49:43.043591 | 2018-10-02T11:43:29 | 2018-10-02T11:43:29 | 151,094,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from django.template.defaultfilters import slugify
# Create your models here.
class Group(models.Model):
name = models.CharField(max_length=50)
description = models.TextField(blank=True, null=True)
user = models.ManyToManyField(User)
def __str__(self):
return self.name
class Meta:
ordering = ['-pk', ]
def add_user(self, add_u):
self.user.add(add_u)
self.save()
def remove_user(self, add_u):
self.user.remove(add_u)
self.save()
def get_absolute_url(self):
return reverse('group_detail', kwargs={'pk': self.pk})
| [
"[email protected]"
] | |
a2cc050787256f59c75122d1f84766ba93159863 | 9bdfee02be0c22f1455ad56b5c1a060b76d46a95 | /dc8_pandas_foundations.py | 22aa4943fa49e20e38fdb8b82f267dca28caeaa2 | [] | no_license | elixias/kaggle-python | cfa30ed23123b98a56e6b0c9a9a91ec51ee39fa5 | 2aacce03d4dba7b2d0df8c1a88d80abe1a7d3fe2 | refs/heads/master | 2021-06-15T04:47:40.835039 | 2021-05-31T01:38:57 | 2021-05-31T01:38:57 | 201,199,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,833 | py | import numpy as np
#AAPL.iloc[::3,-1] = np.nan #Here, :: means every 3 rows
"""getting numerical entries of a series in a dataframe"""
#AAPL['low'].values
"""using dicts as df"""
data = {'weekday':['sun','sun','mon'],
'city':['austin','dallas','austin']
}
print(pd.DataFrame(data)) #keys are used as columns
"""building from series instead"""
days = ['sun','sun','mon']
cities = ['austin','dallas','austin']
list_labels = ['days','cities']
list_cols = [days,cities]
zipped = list(zip(list_labels,list_cols))
data = dict(zipped)
print(pd.DataFrame(data))
"""changing column labels"""
#data.columns = ['a','b']
"""no headers, set column names, set na values, parse columns as dates"""
#data = pd.read_csv(filepath, header=None, names=[...,...], na_values={'column':['-1']},
#parse_dates=[[0,1,2]], index_col='')
"""set index"""
#data.index=data['yearmthday']
#data.index.name='date'
"""write to csv"""
#data.to_csv("filename.csv")
#data.to_excel("filename.xls")
"""different ways to plot"""
#data.column.plot()
#plt.plot(data.column)
#plt.plot(data.column)
#plt.plot(data)
#data.plot()
#data.plot(x=..,y=..,kind='scatter')
#data.plot(y=...,kind='box')
#data.plot(y=...,kind='hist',bins=..,range=(..,..),normed=..,cumulative=..)
#data.plot(kind='hist')
#data.plt.hist()
#data.hist()
"""VERY USEUFL"""
#data.plot(subplots=True)
#data.column.plot(color='b',style='.-', legend=True)
#plt.axis(('2001','2002',0,100))
#plt.yscale('log')
"""saving figure"""
plt.savefig("___.png/jpg/pdf")
"""other configs"""
plt.title("_")
plt.xlabel('')
plt.ylabel('')
"""subplots: """
#fig, axes = plt.subplots(nrows=2, ncols=1)
#df.fraction.plot(ax=axes[0], kind='hist', normed=True, bins=30, range=(0,.3)) #specifying ax=axes[0] to use the subplots
"""data exploration beyond describe() and info()"""
#data.column.count() #applied to series
#data.count() #applied to df, returns a series of the counts
#count(),mean(),median(),std(),quantile(0.X) or quantile([0.X,0.X]), min(), max()
#data.column.unique()
#indices = data['species'] == 'setosa'
#data[indices,:]
"""computing errors"""
error_setosa = 100 * np.abs(setosa.describe()-data.describe())
error_setosa = error_setosa/setosa.describe()
#using datetime as indices are very common techniques
#you can provide the date and a subset of those rows will be returned
#data.loc['2015-2-5'] #partial datetime string selection
#data.loc['2015-2':'2016-2'] #partial selection by month range
#pd.to_datetime([...,...])
#data.reindex([...,...],method='ffill') #forward fill using preceding entries, or use bfill
#manual way without date index
#res = df1['Date'].apply(lambda x:x[0:12]=='2010-Aug-01')
#df1[df1['Date'].apply(lambda x:x[0:8]=='20100801')]
#df3.loc['2010-Aug-01']
#runtime converting datetime
#pd.to_datetime(series, format='%Y-%m-%d %H:%M')
"""reindexing"""
#ts3 = ts2.reindex(ts1.index)
"""resampling, downsampling (daily to weekly), upsampling (daily to hourly) """
#daily_mean = sales.resample('D').mean()/.sum()/.max()/.count()/.var()/.std()/.ffill() #D stands for daily
#min/T, H, D, B, W, M, Q, A
"""rolling mean"""
#df['Temperature']['2010-Aug-01':'2010-Aug-15'].rolling(window=24).mean()
#if you use resampling and reindex to do it, looks 'digital'
#test = unsmoothed.resample('D').mean()
#test = test.reindex(unsmoothed.index, method='ffill')
#another way is to use interpolate
df.resample('A').first().interpolate('linear')
"""use str/contains to do string based search/etc"""
#df.str.upper()/contains().sum()/strip()
#df.dt.hour #this extracts the hour from the time
#df.dt.tz_localize('US/Central') #localize time ones is not the same as tz_convert()
#df.columns = df.columns.str.strip()
"""plotting time series data"""
#correcting data
#pd.to_numeric(errors='coerce')/to_date/to_str
"""pearson's correlation"""
#df.corr() #on a df with 2 columns
| [
"[email protected]"
] | |
d3411fd0e54750291d4cbe45685cebda153daab4 | 6413fe58b04ac2a7efe1e56050ad42d0e688adc6 | /tempenv/lib/python3.7/site-packages/plotly/validators/scatter/_dx.py | 423b842fba4b12f2a02d34b9e3ba1d92b140d16f | [
"MIT"
] | permissive | tytechortz/Denver_temperature | 7f91e0ac649f9584147d59193568f6ec7efe3a77 | 9d9ea31cd7ec003e8431dcbb10a3320be272996d | refs/heads/master | 2022-12-09T06:22:14.963463 | 2019-10-09T16:30:52 | 2019-10-09T16:30:52 | 170,581,559 | 1 | 0 | MIT | 2022-06-21T23:04:21 | 2019-02-13T21:22:53 | Python | UTF-8 | Python | false | false | 470 | py | import _plotly_utils.basevalidators
class DxValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name='dx', parent_name='scatter', **kwargs):
super(DxValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
anim=kwargs.pop('anim', True),
edit_type=kwargs.pop('edit_type', 'calc'),
role=kwargs.pop('role', 'info'),
**kwargs
)
| [
"[email protected]"
] | |
49559eab3904384ce934183fff7c4a0028ce89a2 | 4df1be2bca52241da7131c77acd737b7b84cd1d1 | /learn/urls.py | 9fe30c676e348613e30071eeac8618292d59740d | [] | no_license | codeGoogler/DgUserForLoginManager | 38353b3918dc9debe95990a54410f53da389a111 | 595569a2ef9f1f3f8e5efe5fcb670d42a42ec9d3 | refs/heads/master | 2021-10-27T03:00:42.210689 | 2019-04-15T15:33:23 | 2019-04-15T15:33:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | from django.urls import path
from learn import views as i_view
urlpatterns =[
path('test/',i_view.index),
path('test2/',i_view.index),
] | [
"[email protected]"
] | |
bc898b2c2d078be2e2dfa31a9a5fb574b345a1b9 | d28a8156629f617fee81bb4532f43ee0de7a417c | /untitled folder/search1.py | 548f6b3be365a9684112c51a9cfc1cd97d44f6b7 | [] | no_license | karandeepbhardwaj/Pacman-Game | 733c661c8abd280ec694a05d5a705369e7b6734f | 5e0e43bc7c649a7d5b43ba4bc1c9c260cd94366b | refs/heads/main | 2022-12-29T06:39:16.474459 | 2020-10-16T17:00:32 | 2020-10-16T17:00:32 | 303,215,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,613 | py | # search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# ([email protected]) and Dan Klein ([email protected]).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel ([email protected]).
"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
import util
class SearchProblem:
"""
This class outlines the structure of a search problem, but doesn't implement
any of the methods (in object-oriented terminology: an abstract class).
You do not need to change anything in this class, ever.
"""
def getStartState(self):
"""
Returns the start state for the search problem.
"""
util.raiseNotDefined()
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state.
"""
util.raiseNotDefined()
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples, (successor,
action, stepCost), where 'successor' is a successor to the current
state, 'action' is the action required to get there, and 'stepCost' is
the incremental cost of expanding to that successor.
"""
util.raiseNotDefined()
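        # Illustration with hypothetical maze coordinates: getSuccessors((5, 4)) might return
        # [((5, 3), 'South', 1), ((4, 4), 'West', 1)] - (successor state, action, step cost).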
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions.
The sequence must be composed of legal moves.
"""
util.raiseNotDefined()
def tinyMazeSearch(problem):
"""
Returns a sequence of moves that solves tinyMaze. For any other maze, the
sequence of moves will be incorrect, so only use this for tinyMaze.
"""
from game import Directions
s = Directions.SOUTH
w = Directions.WEST
return [s, s, w, s, w, w, s, w]
def depthFirstSearch(problem):
"""
Search the deepest nodes in the search tree first.
Your search algorithm needs to return a list of actions that reaches the
goal. Make sure to implement a graph search algorithm.
To get started, you might want to try some of these simple commands to
understand the search problem that is being passed in:
"""
#
# print("Start:", problem.getStartState())
# print("Is the start a goal?", problem.isGoalState(problem.getStartState()))
# print("Start's successors:", problem.getSuccessors(problem.getStartState()))
#
#
"*** YOUR CODE HERE ***"
startingNode = problem.getStartState()
if problem.isGoalState(startingNode):
return []
stack = util.Stack() # Create a stack for DFS.
visited = [] # Create a list to keep track of explored/visited nodes.
# Beginning node of the graph.
start = (startingNode, [])
stack.push(start)
"""
visit current node if is not explored before and find its
children (push those into the stack)
"""
while not stack.isEmpty():
element = stack.pop()
location = element[0]
path = element[1]
if location not in visited:
visited.append(location)
if problem.isGoalState(location):
return path
for nextElement in problem.getSuccessors(location):
stack.push((nextElement[0], path + [nextElement[1]]))
util.raiseNotDefined()
def breadthFirstSearch(problem):
"""Search the shallowest nodes in the search tree first."""
# print("Start:", problem.getStartState())
# print("Is the start a goal?", problem.isGoalState(problem.getStartState()))
# print("Start's successors:", problem.getSuccessors(problem.getStartState()))
"*** YOUR CODE HERE ***"
queue = util.Queue()
visited = []
start = (problem.getStartState(), [])
queue.push(start)
while not queue.isEmpty():
location, path = queue.pop()
if location not in visited:
visited.append(location)
if problem.isGoalState(location):
return path
for nextNode, action, cost in list(problem.getSuccessors(location)):
if nextNode not in visited:
queue.push((nextNode, path + [action]))
return []
def uniformCostSearch(problem):
'''*** YOUR CODE HERE ***'''
pQueue = util.PriorityQueue()
pQueue.push((problem.getStartState(), [], 0), 0)
closed = {}
goal = False
while not goal:
if pQueue.isEmpty():
return False
node = pQueue.pop()
closed[node[0]] = node[2]
if problem.isGoalState(node[0]):
return node[1]
for i in problem.getSuccessors(node[0]):
if i[0] not in closed or (i[0] in closed and closed[i[0]] > node[2] + i[2]):
temp = list(node[1])
temp.append(i[1])
cost = node[2] + i[2]
closed[i[0]] = cost
pQueue.push((i[0], temp, cost), cost)
util.raiseNotDefined()
def nullHeuristic(state, problem=None):
"""
A heuristic function estimates the cost from the current state to the nearest
goal in the provided SearchProblem. This heuristic is trivial.
"""
return 0
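
# Hedged illustration (not part of the assignment stub): for position-search problems an
# admissible heuristic is often Manhattan distance to the goal; the goal coordinates below
# are assumed purely for the example.
#
# def manhattanHeuristic(state, problem=None):
#     goal = (1, 1)
#     return abs(state[0] - goal[0]) + abs(state[1] - goal[1])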
def aStarSearch(problem, heuristic=nullHeuristic):
"""Search the node that has the lowest combined cost and heuristic first."""
startingNode = problem.getStartState()
if problem.isGoalState(startingNode):
return []
visitedNodes = []
pQueue = util.PriorityQueue()
# ((coordinate/node , action to current node , cost to current node),priority)
pQueue.push((startingNode, [], 0), 0)
while not pQueue.isEmpty():
currentNode, actions, prevCost = pQueue.pop()
if currentNode not in visitedNodes:
visitedNodes.append(currentNode)
if problem.isGoalState(currentNode):
return actions
for nextNode, action, cost in problem.getSuccessors(currentNode):
newAction = actions + [action]
newCostToNode = prevCost + cost
heuristicCost = newCostToNode + heuristic(nextNode,problem)
pQueue.push((nextNode, newAction, newCostToNode),heuristicCost)
def randomyu(problem, heuristic=nullHeuristic):
pQueue = util.PriorityQueue()
start = problem.getStartState()
pQueue.push((start, []), heuristic(start, problem))
visited = []
while True:
if pQueue.isEmpty():
return False
state, path = pQueue.pop()
if problem.isGoalState(state):
return path
if state not in visited:
visited.append(state)
for successor, direction, cost in problem.getSuccessors(state):
if successor not in visited:
neighborCost = path + [direction]
pathCost = problem.getCostOfActions(neighborCost) + heuristic(successor, problem)
pQueue.push((successor, neighborCost), pathCost)
return path
# Abbreviations
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
| [
"[email protected]"
] | |
8509e9faa4cf07c55342bdcb6c48066658494cca | e9afb95f54c25fb70e53fb544864515bd16d75a0 | /app/requests.py | 39907688861cea5dcdb7d8d6c6fa5413da6c0d35 | [
"MIT"
] | permissive | Irene-nandy/News-API | ffc82740af7399a374c28c35d27dd941e4c1b407 | 4ae96ef43bcb3fe380dd5c31aef1b8de73594de3 | refs/heads/master | 2022-12-17T13:31:53.567962 | 2020-09-15T13:50:55 | 2020-09-15T13:50:55 | 294,651,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,975 | py | import urllib.request
import json
from .models import Sources, Articles
from datetime import datetime
# Getting api key
api_key = None
# Getting the news base url
base_url = None
# Getting the articlces url
articles_url = None
def configure_request(app):
global api_key, base_url, articles_url
api_key = app.config['NEWS_API_KEY']
base_url = app.config['NEWS_SOURCES_BASE_URL']
articles_url = app.config['ARTICLES_BASE_URL']
def get_sources(category):
'''
Function that gets the json response to our url request
'''
get_sources_url = base_url.format(category, api_key)
print(get_sources_url)
with urllib.request.urlopen(get_sources_url) as url:
get_sources_data = url.read()
get_sources_response = json.loads(get_sources_data)
sources_results = None
if get_sources_response['sources']:
sources_results_list = get_sources_response['sources']
sources_results = process_sources(sources_results_list)
return sources_results
def process_sources(sources_list):
'''
Function that processes the news sources results and turns them into a list of objects
Args:
sources_list: A list of dictionaries that contain sources details
Returns:
sources_results: A list of sources objects
'''
sources_results = []
for source_item in sources_list:
id = source_item.get('id')
name = source_item.get('name')
description = source_item.get('description')
url = source_item.get('url')
category = source_item.get('category')
language = source_item.get('language')
country = source_item.get('country')
sources_object = Sources(
id, name, description, url, category, country, language)
sources_results.append(sources_object)
return sources_results
def get_articles(id):
'''
Function that processes the articles and returns a list of articles objects
'''
get_articles_url = articles_url.format(id, api_key)
with urllib.request.urlopen(get_articles_url) as url:
articles_results = json.loads(url.read())
articles_object = None
if articles_results['articles']:
articles_object = process_articles(articles_results['articles'])
return articles_object
def process_articles(articles_list):
'''
'''
articles_object = []
for article_item in articles_list:
id = article_item.get('id')
author = article_item.get('author')
title = article_item.get('title')
description = article_item.get('description')
url = article_item.get('url')
image = article_item.get('urlToImage')
date = article_item.get('publishedAt')
if image:
articles_result = Articles(
id, author, title, description, url, image, date)
articles_object.append(articles_result)
return articles_object | [
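
# Hedged usage sketch (assumes configure_request(app) has already been called and that the
# Sources model exposes an 'id' attribute, as used in process_sources above):
#
# sources = get_sources('technology')
# if sources:
#     articles = get_articles(sources[0].id)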
"[email protected]"
] | |
c4a69dbe00a8af252761190ce0db8e24624afb89 | 63d4ac8c703524cd46c50ce4b685321520f41302 | /saleor/accounts/expenses_pdf.py | e7824ecd2de1c2a6147c306c5cb8d15270e131ab | [
"BSD-2-Clause"
] | permissive | glosoftgroup/glosoftgroup-django-pos | a377939380261bec4a2e1471e10655c384c46c13 | b489c402939b9ebabd164c449e7da38fe849d550 | refs/heads/master | 2023-01-23T21:59:01.342184 | 2018-12-01T15:17:06 | 2018-12-01T15:17:06 | 94,298,377 | 2 | 1 | NOASSERTION | 2023-01-11T21:52:18 | 2017-06-14T06:41:16 | JavaScript | UTF-8 | Python | false | false | 2,084 | py | from django.db.models import Q
from django.http import HttpResponse
from .views import staff_member_required
from ..utils import render_to_pdf, default_logo
from datetime import date
from .models import ExpenseType, Expenses, PersonalExpenses
@staff_member_required
def pdf(request):
if request.is_ajax():
q = request.GET.get( 'q' )
gid = request.GET.get('gid')
type = None
if q is not None:
expenses = Expenses.objects.filter(
Q(expense_type__icontains=q) |
Q(paid_to__icontains=q) | Q(authorized_by__icontains=q)).order_by('id')
if gid:
type = ExpenseType.objects.get(pk=request.GET.get('gid'))
expenses = expenses.filter(expense_type=type.name)
elif gid:
type = ExpenseType.objects.get(pk=request.GET.get('gid'))
expenses = Expenses.objects.filter(expense_type=type.name)
else:
expenses = Expenses.objects.all()
img = default_logo()
data = {
'today': date.today(),
'expenses': expenses,
'puller': request.user,
'image': img,
'type':type
}
pdf = render_to_pdf('dashboard/accounts/expenses/pdf/expenses.html', data)
return HttpResponse(pdf, content_type='application/pdf')
@staff_member_required
def bpdf(request):
if request.is_ajax():
q = request.GET.get( 'q' )
gid = request.GET.get('gid')
type = None
if q is not None:
expenses = PersonalExpenses.objects.filter(
Q(expense_type__icontains=q) |
Q(paid_to__icontains=q) | Q(authorized_by__icontains=q)).order_by('id')
if gid:
type = ExpenseType.objects.get(pk=request.GET.get('gid'))
expenses = expenses.filter(expense_type=type.name)
elif gid:
type = ExpenseType.objects.get(pk=request.GET.get('gid'))
expenses = PersonalExpenses.objects.filter(expense_type=type.name)
else:
expenses = PersonalExpenses.objects.all()
img = default_logo()
data = {
'today': date.today(),
'expenses': expenses,
'puller': request.user,
'image': img,
'type':type
}
pdf = render_to_pdf('dashboard/accounts/personal_expenses/pdf/pdf.html', data)
return HttpResponse(pdf, content_type='application/pdf')
| [
"[email protected]"
] | |
ae2e5f7aa5365b8b00bdd95a11baef644feece61 | 248cd2229334518abb951860cfbd77a65d2a840d | /allcounter.py | 952280187cc950af734d3ac2008737ee4281047f | [] | no_license | sotolab/dataanalysis | b7edc59495b602fb9c15552535b573b215ad5694 | 798c4ff1fa81cc0b7d95de44eceaca8e640e9136 | refs/heads/main | 2022-12-31T18:50:20.611273 | 2020-10-21T13:04:55 | 2020-10-21T13:04:55 | 306,019,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,783 | py | # -*- coding: utf-8 -*-
# Run >> $ python allcounter.py
import sys
from wordcloud import WordCloud
import openpyxl
import pandas as pd
from pandas import DataFrame as df
import re
from konlpy.tag import Okt
from collections import Counter
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import font_manager, rc
from konlpy.tag import Hannanum
import allfname as fn
# filename = sys.argv[1]
# MALL_SIZE = 8
# MEDIUM_SIZE = 10
# BIGGER_SIZE = 12
#
# plt.rc('font', size=SMALL_SIZE) # controls default text sizes
# plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
# plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
# plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
# plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
# plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
# plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
# fig = plt.figure()
# fig.suptitle('test title', fontsize=20)
# plt.xlabel('xlabel', fontsize=18)
# plt.ylabel('ylabel', fontsize=16)
# params = {'legend.fontsize': 'x-large',
# 'figure.figsize': (15, 5),
# 'axes.labelsize': 'x-large',
# 'axes.titlesize':'x-large',
# 'xtick.labelsize':'x-large',
# 'ytick.labelsize':'x-large'}
# plt.rcParams.update(params)
font_location = "c:/Windows/fonts/HMFMMUEX.TTC"
font_name = font_manager.FontProperties(fname=font_location).get_name()
font = {'family' : font_name,
'weight' : 'bold',
'size' : 15}
# matplotlib.rc('font', **font)
def showGraph(wordInfo, filename ):
matplotlib.rc('font', **font) #family=font_name, size=15)
plt.figure(figsize=(20,10))
    plt.xlabel('Hashtag')
    plt.ylabel('Frequency')
plt.grid(True)
Sorted_Dict_Values = sorted(wordInfo.values(), reverse=True)
Sorted_Dict_Keys = sorted(wordInfo, key=wordInfo.get, reverse=True)
plt.bar(range(len(wordInfo)), Sorted_Dict_Values, align='center')
plt.xticks(range(len(wordInfo)), list(Sorted_Dict_Keys), rotation='70')
plt.savefig(filename+"_counter"+'.png', dpi=300)
# plt.show()
def main():
filename = fn.allfname()
# print("filename:", filename)
for i in filename:
wb = openpyxl.load_workbook(i+".xlsx")
ws = wb.active
list = []
for r in ws.rows:
index = r[0].row
txt = r[0].value
# print("txt:", txt)
list.append(str(txt))
result = ",".join(list)
# print("result:", result)
#-------------------------
result = re.sub('[0-9]+', '', result)
result = re.sub('[A-Za-z]+', '', result)
result = re.sub('[-_]', '', result)
# result = re.sub('[-=+,#/\?:^$.@*\"※~&%ㆍ·!』\\‘’|\(\)\[\]\<\>`\'…》]', '', result)
result = result.replace(',', ' ')
# ----------------------------
nlp = Okt()
nouns = nlp.nouns(result)
count = Counter(nouns)
# print(text_list)
names = []
values = []
wordInfo = dict()
for tags, counts in count.most_common(50):
if (len(str(tags)) > 2):
wordInfo[tags] = counts
names.append(str(tags))
values.append(counts)
# print ("%s : %d" % (tags, counts))
showGraph(wordInfo, i)
df1 = df(data = {'Tag': names, 'Value': values })
# print (" df1 %s" % df1)
df1.to_excel(i+"DataFrame"+'.xlsx')
print( i + " done")
if __name__ == "__main__":
main()
print("All done")
| [
"[email protected]"
] | |
5120fcaa1e2540dc1e068017b0f9962000a77019 | 640b2e13232bf94f182888357dba9b46dcb21520 | /czl_personal_site/personal_app/__init__.py | 72d22b8d76c0114f2a6f079fcf68cdf7cf7b0edf | [] | no_license | Clee2691/PersonalSite2020 | 281c48f88757ac5691518dab93ff0cffb3b5f8d8 | 94364d67546f03eefab6154e39ee9fb6a08579c3 | refs/heads/master | 2021-03-31T11:37:49.975590 | 2020-03-27T00:58:28 | 2020-03-27T00:58:28 | 248,104,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | from flask import Flask
site_app = Flask(__name__)
from personal_app import routes | [
"[email protected]"
] | |
78485a505f1192c5c4ab32b7b883cb5620eea81a | 317dd88e5ec34edd22d7fed4a0238dcb7eef385d | /videoFrameRead.py | 5aa3454e818c1eff4ee3d36c444b19ee5306f60f | [] | no_license | hank-w/Driver-State-Facial-Recognition | 9822a1b47287290b6706c16f9b1a55933366c25c | 3225ad1525ac3d4294bba13b7487791572c38078 | refs/heads/master | 2022-11-17T06:04:21.062832 | 2020-07-14T15:34:43 | 2020-07-14T15:34:43 | 279,623,249 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,266 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: @swiftbeagle (hank w)
"""
from scipy.spatial import distance as dist
import cv2
import numpy as np
from fastai import *
from fastai.vision import *
import pandas as pd
import argparse
import imutils
from imutils.video import FileVideoStream
import time
from imutils import face_utils
import dlib
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video-file", required=True, help="video file in current directory")
ap.add_argument("--frame-step", type=int, default = 10, help="framecount which video frames are predicted")
ap.add_argument("--save", dest="save", action = "store_true")
ap.add_argument("--no-save", dest="save", action = "store_false")
ap.add_argument("--savedata", dest="savedata", action = "store_true")
ap.add_argument("--no-savedata", dest="savedata", action = "store_false")
ap.set_defaults(savedata = False)
ap.set_defaults(save = False)
args = vars(ap.parse_args())
path = "/Users/hankw/FacialRecognitionVideo/"
vidcap = FileVideoStream(args["video_file"]).start()
count = 0
framecount = 0
learn = load_learner(path, 'export.pkl')
data = []
prediction = ""  # default label so the overlay text below is defined before the first prediction frame
EYE_AR_THRESH = 0.3
EYE_AR_CONSEC_FRAMES = 40
COUNTER = 0
ALARM_ON = False
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
def eye_aspect_ratio(eye):
A = dist.euclidean(eye[1], eye[5])
B = dist.euclidean(eye[2], eye[4])
C = dist.euclidean(eye[0], eye[3])
ear = (A + B) / (2.0 * C)
return ear
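# Rough sanity check for eye_aspect_ratio() above: the EAR is the mean of the two
# vertical eye-landmark distances divided by the horizontal distance, so an open
# eye sits well above EYE_AR_THRESH while a closed eye drops toward zero.  The
# six (x, y) points below are made-up illustrative coordinates, not real dlib
# output, and this helper is never called by the main loop.
def _ear_example():
    open_eye = np.array([(0, 3), (2, 5), (4, 5), (6, 3), (4, 1), (2, 1)])
    closed_eye = np.array([(0, 3), (2, 3.2), (4, 3.2), (6, 3), (4, 2.8), (2, 2.8)])
    print("open eye EAR:", eye_aspect_ratio(open_eye))      # ~0.67
    print("closed eye EAR:", eye_aspect_ratio(closed_eye))  # ~0.07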
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
if args["save"]:
out = cv2.VideoWriter(path + "output.avi", cv2.VideoWriter_fourcc('M','J','P','G'), 10, (450,300))
while vidcap.more():
frame = vidcap.read()
if frame is None:
break
frame = imutils.resize(frame, width=450)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
face_coord = face_cascade.detectMultiScale(gray, 1.1, 20, minSize=(30, 30))
for coords in face_coord:
X, Y, w, h = coords
H, W, _ = frame.shape
X_1, X_2 = (max(0, X - int(w * 0.3)), min(X + int(1.3 * w), W))
Y_1, Y_2 = (max(0, Y - int(0.3 * h)), min(Y + int(1.3 * h), H))
img_cp = gray[Y_1:Y_2, X_1:X_2].copy()
rect = dlib.rectangle(X, Y, X+w, Y+h)
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
leftEye = shape[lStart:lEnd]
rightEye = shape[rStart:rEnd]
leftEAR = eye_aspect_ratio(leftEye)
rightEAR = eye_aspect_ratio(rightEye)
ear = (leftEAR + rightEAR) / 2.0
leftEyeHull = cv2.convexHull(leftEye)
rightEyeHull = cv2.convexHull(rightEye)
cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
if ear < EYE_AR_THRESH:
COUNTER += 1
if COUNTER >= EYE_AR_CONSEC_FRAMES:
cv2.putText(frame, "Distracted Driving", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
else:
COUNTER = 0
ALARM_ON = False
cv2.putText(frame, "Eye Ratio: {:.2f}".format(ear), (250, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
if framecount % args["frame_step"] == 0:
            prediction, idx, probability = learn.predict(Image(pil2tensor(img_cp, np.float32).div_(255)))  # scale pixel values to [0, 1] (255, not 225)
data.append([framecount, prediction, probability, ear])
cv2.rectangle(
img=frame,
pt1=(X_1, Y_1),
pt2=(X_2, Y_2),
color=(128, 128, 0),
thickness=2,
)
cv2.putText(frame, str(prediction), (10, frame.shape[0] - 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (225, 255, 255), 2)
cv2.imshow("frame", frame)
framecount += 1
if args["save"]:
out.write(frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if args["savedata"]:
df = pd.DataFrame(data, columns = ['Framecount', 'Expression', 'Probability', 'EAR'])
df.to_csv(path+'/export.csv')
print("data saved to export.csv")
vidcap.stop()
if args["save"]:
print("done saving")
out.release()
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
bb43b40a1b82724a706860580a4a2a5fa0e0c136 | 2337351b228818e41be3002bd38f68f77c2aa074 | /sa/profiles/AlliedTelesis/AT9900/get_interface_properties.py | f819e1f9efb83b9a09d5dd0c591ad7528e93a9cd | [
"BSD-3-Clause"
] | permissive | nocproject/noc | 57d40c680a1499374463e472434f9595ed6d1374 | 6e6d71574e9b9d822bec572cc629a0ea73604a59 | refs/heads/master | 2023-08-31T01:11:33.544573 | 2023-08-30T17:31:11 | 2023-08-30T17:31:11 | 107,815,776 | 105 | 33 | BSD-3-Clause | 2023-07-31T07:57:45 | 2017-10-21T21:04:33 | Python | UTF-8 | Python | false | false | 566 | py | # ----------------------------------------------------------------------
# AlliedTelesis.AT9900.get_interface_properties script
# ----------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from noc.sa.profiles.Generic.get_interface_properties import Script as BaseScript
class Script(BaseScript):
name = "AlliedTelesis.AT9900.get_interface_properties"
SNMP_NAME_TABLE = "IF-MIB::ifName"
| [
"[email protected]"
] | |
0fa1e912d997a7e05629982474f909492f14c165 | fa8cd09f749d132a0130a0fe270891d8539c0d0f | /accounts/forms.py | 28c1f9083ed1dc7131e03137a9da5812b27958d4 | [
"Apache-2.0"
] | permissive | dhirajshah04/lstfnd | 717db47a13d562d8e592c53a57d03921cf5a05c8 | 25745d04753b4bbaedcbed915ca64b57eb780c54 | refs/heads/master | 2021-09-06T18:07:48.912751 | 2018-02-09T13:02:09 | 2018-02-09T13:02:09 | 111,217,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,629 | py | from django.contrib.auth.models import User
from django import forms
from django.contrib.auth import (
authenticate,
get_user_model,
login,
logout,
)
from .models import Profile
class UserRegistrationForm(forms.ModelForm):
password = forms.CharField(label='Password',
widget=forms.PasswordInput(
attrs={
'class':'form-control', 'placeholder':'password',
}
))
password2 = forms.CharField(label='Confirm password',
widget=forms.PasswordInput(
attrs={
'class': 'form-control', 'placeholder': 'Confirm Password',
}
))
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email')
widgets = {
'username': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Enter your Username'}),
'first_name': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Enter your Firstname'}),
'last_name': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Enter your Surname'}),
'email': forms.EmailInput(attrs={'class': 'form-control', 'placeholder': 'Enter Email Addess '}),
}
def clean_password2(self):
cd = self.cleaned_data
if cd['password'] != cd['password2']:
raise forms.ValidationError('Passwords don\'t match.')
return cd['password2']
class UserLoginForm(forms.Form):
username = forms.CharField(widget=forms.TextInput(
attrs={
'class':'form-control', 'placeholder':'Username',
}
))
password = forms.CharField(widget=forms.PasswordInput(
attrs={
'class':'form-control', 'placeholder':'Password',
}
))
def clean(self, *args, **kwargs):
username = self.cleaned_data.get("username")
password = self.cleaned_data.get("password")
if username and password:
user = authenticate(username=username, password=password)
if not user:
raise forms.ValidationError("This User does not Exist")
if not user.check_password(password):
raise forms.ValidationError("Incorrect Password")
if not user.is_active:
raise forms.ValidationError("This user is not active")
return super(UserLoginForm, self).clean(*args, **kwargs)
class UserEditForm(forms.ModelForm):
class Meta:
model = User
fields = ('first_name', 'last_name', 'email')
widgets = {
'first_name': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'First Name'}),
'last_name': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Last Name'}),
'email': forms.EmailInput(attrs={'class': 'form-control', 'placeholder': 'Email'}),
}
class ProfileEditForm(forms.ModelForm):
class Meta:
model = Profile
fields = ('gender', 'date_of_birth', 'phone', 'country', 'city', 'photo')
widgets = {
'date_of_birth': forms.DateInput(attrs={'type': 'date', 'placeholder': 'YYYY-MM-DD'}),
'country': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Country'}),
'city': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'City'}),
} | [
"[email protected]"
] | |
6741d8c5e8d7dd1d5835f5002303e1d8702c8043 | 3d013c57d3031a3701de65d929277f8a62509207 | /ArgumentsTypeDemo.py | 1f7ff763d3900931ab748bc1916eaaba614ff877 | [] | no_license | Krushnarajsinh/MyPrograms | 576e98e03ce9fdc102ea649fac400362ab0aebb4 | 002244e34d02212630c12153b9e12110e29370df | refs/heads/master | 2023-01-09T10:15:01.751590 | 2020-11-07T07:51:00 | 2020-11-07T07:51:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,186 | py | def person(name,age): #Formal Arguments
print(name)
print(age+5)
person("Krushnarajsinh",15) #Actual Arguments
#person(15,"krushnarajsinh") #Exchange positions
#There are 4 types of Actual Argumnets: (1)Position (2)Keyword (3)Default (4)Variable Length
#(1)Position:-When We pass the values during function calling posion metters above is the example
#(2)Keyword:-During Function calling we can pass values in any order using keywords
person(age=20,name="Rahul")
#person(a=20,b="navin") keywords name should be match with formal arguments
def add(x,y=12): #(3)Default:-We can pass default value in formal argument
c=x+y
print("sum is:",c)
add(5)
add(2,5) #When we pass value as actual Argument then the default value is not considered
def add1(x,y,z=1): #always default parameter is last right parameter
c=x+y+z
print("Sum of 3 number is:",c)
add1(2,2)
#(4)variable length :-when we not know howmany parameter need to pass in actual argument then we can use this
def person_details(name,*data): #here *data is tuple
print(name)
print(data)
for i in data:
print(i)
person_details("krushnarajsinh","20","Ahemadabad","8511418177")
| [
"[email protected]"
] | |
06e069af4c1024ba7667ab2e723dd89d8100ba82 | cac090af84fae158a9e4c62a384578ba30b93c15 | /Week8/miniproject/bike_store_project/rent/models.py | 8557213dcdae8c2ce459e4f2f162cd21f2636a02 | [] | no_license | micnem/developers_institute | ed316a3754dd48ed54741387430ff1dd436ae1d9 | aea6154a896407336c665d4ad531f124078bc001 | refs/heads/main | 2023-02-06T00:53:41.078983 | 2020-12-31T14:04:07 | 2020-12-31T14:04:07 | 305,331,313 | 0 | 0 | null | 2020-10-22T11:18:21 | 2020-10-19T09:29:00 | CSS | UTF-8 | Python | false | false | 2,086 | py | from django.db import models
# Create your models here.
class Customer(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
email = models.EmailField(null= False, unique=True)
phone_number = models.CharField(max_length=50)
address = models.CharField(max_length=200)
city = models.CharField(max_length=30)
country = models.CharField(max_length=30)
def __str__(self):
return self.first_name
class Vehicle(models.Model):
vehicle_type = models.ForeignKey(
'VehicleType',
on_delete=models.CASCADE,
)
date_created = models.DateField(auto_now_add=True)
real_cost = models.DecimalField(max_digits=6, decimal_places=2)
size = models.ForeignKey(
'VehicleSize',
on_delete=models.CASCADE,
)
def __str__(self):
return f"Vehicle: {self.id}, type: {self.vehicle_type.name}"
class VehicleType(models.Model):
name = models.CharField(max_length=30)
def __str__(self):
return self.name
class VehicleSize(models.Model):
name = models.CharField(max_length=30)
def __str__(self):
return self.name
class RentalRate(models.Model):
daily_rate = models.DecimalField(max_digits=6, decimal_places=2)
vehicle_type = models.ForeignKey(
'VehicleType',
on_delete=models.CASCADE,
)
vehicle_size = models.ForeignKey(
'VehicleSize',
on_delete=models.CASCADE,
)
class Rental(models.Model):
rental_date = models.DateField(auto_now_add=True)
return_date = models.DateField(blank=True, null=True)
customer = models.ForeignKey(
'Customer',
on_delete=models.CASCADE,
)
vehicle = models.ForeignKey(
'Vehicle',
on_delete=models.CASCADE,
)
def rate(self):
rr = RentalRate.objects.get(vehicle_type=self.vehicle.vehicle_type, vehicle_size = self.vehicle.size)
if self.return_date:
td = self.return_date - self.rental_date
return rr.daily_rate * td.days
return
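# A minimal sketch of how Rental.rate() is meant to be used (hypothetical helper,
# not part of the original app; assumes migrations are applied and at least one
# Rental row with a matching RentalRate exists, e.g. inside `manage.py shell`).
def _example_rental_cost():
    import datetime
    rental = Rental.objects.select_related('vehicle').first()
    # Pretend the vehicle came back three days after it went out:
    rental.return_date = rental.rental_date + datetime.timedelta(days=3)
    return rental.rate()  # daily_rate * 3; rate() returns None while return_date is unset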
| [
"[email protected]"
] | |
b820e183293542175e3ef54ff3a6205a679ae10a | 155a385fc01b4bf1e4f1fb5fd6dc982b2cac0964 | /model.py | 65229f15db11d7a31a9d936089ef56c1d9f8bc6c | [] | no_license | tylerwatkins101/DDPG_Reinforcement_Learning_Project | 87249034c2dbb95fc302cfd65a4f1450d018f9da | 6bdb3a4a021e555f6909ce8241cbe2edcb3e8548 | refs/heads/master | 2020-06-30T16:18:24.129662 | 2019-08-08T15:04:04 | 2019-08-08T15:04:04 | 200,881,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,835 | py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1. / np.sqrt(fan_in)
return (-lim, lim)
class Actor(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=400, fc2_units=300):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super(Actor, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.bn1 = nn.BatchNorm1d(fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, action_size)
self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-3e-3, 3e-3)
def forward(self, state):
"""Build an actor (policy) network that maps states -> actions."""
x = F.relu(self.fc1(state))
x = self.bn1(x)
x = F.relu(self.fc2(x))
        return torch.tanh(self.fc3(x))  # torch.tanh; F.tanh is deprecated in recent PyTorch
class Critic(nn.Module):
"""Critic (Value) Model."""
def __init__(self, state_size, action_size, seed, fcs1_units=400, fc2_units=300):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fcs1_units (int): Number of nodes in the first hidden layer
fc2_units (int): Number of nodes in the second hidden layer
"""
super(Critic, self).__init__()
self.seed = torch.manual_seed(seed)
self.fcs1 = nn.Linear(state_size, fcs1_units)
self.bn1 = nn.BatchNorm1d(fcs1_units)
self.fc2 = nn.Linear(fcs1_units+action_size, fc2_units)
self.fc3 = nn.Linear(fc2_units, 1)
self.reset_parameters()
def reset_parameters(self):
self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-3e-3, 3e-3)
def forward(self, state, action):
"""Build a critic (value) network that maps (state, action) pairs -> Q-values."""
xs = F.relu(self.fcs1(state))
xs = self.bn1(xs)
x = torch.cat((xs, action), dim=1)
x = F.relu(self.fc2(x))
return self.fc3(x)
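# A minimal shape check for the two networks above (a sketch, not part of the
# original training code): the Actor maps states to actions in [-1, 1] and the
# Critic maps (state, action) pairs to scalar Q-values.  The sizes below are
# made up, and BatchNorm1d needs a batch dimension > 1 while in training mode.
def _sanity_check(state_size=33, action_size=4, batch_size=8, seed=0):
    actor = Actor(state_size, action_size, seed)
    critic = Critic(state_size, action_size, seed)
    states = torch.randn(batch_size, state_size)
    actions = actor(states)             # -> (batch_size, action_size), values in [-1, 1]
    q_values = critic(states, actions)  # -> (batch_size, 1)
    return actions.shape, q_values.shape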
| [
"[email protected]"
] | |
94cde8b36d97b83c312ec559fc00bfeff62a9959 | 059db785d835d15fb0102678b0c028c335d5bec5 | /nlp/library.py | 6d8919beb4af72543bdc85b6547e88412442b367 | [
"MIT"
] | permissive | xflows/cf_nlp | 9c64e727742d61a0feec748e0aed21cd92c0be9a | 3bb52f3715aa168a8b3b082a23fd3b22e27c65ab | refs/heads/master | 2021-01-21T14:19:44.798252 | 2019-09-05T11:40:46 | 2019-09-05T11:40:46 | 95,266,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59,345 | py | # -*- coding: utf-8 -*-
import nlp
import os
import base64
from services.webservice import WebService
from workflows.security import safeOpen
from requests import post
import json
import re
import itertools
import subprocess
from tweetcat import *
from time import sleep,time
import pandas as pd
import multiprocessing
from functools import partial
from itertools import repeat
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import FeatureUnion
from sklearn import pipeline
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.svm import SVC
from sklearn import preprocessing
from nltk.corpus import stopwords
import nltk
from nltk.tag import PerceptronTagger
from nltk.corpus import floresta
from nltk.corpus import cess_esp as cess
import numpy as np
from sklearn.externals import joblib
import sys
import gender_classification as genclass
import language_variety as varclass
import sentiment_analysis as sentclass
import lemmagen.lemmatizer
from lemmagen.lemmatizer import Lemmatizer
webservices_totrtale_url = "http://172.20.0.154/totrtale"
webservice_def_ex_url = "http://172.20.0.154/definition"
def merge_sentences(input_dict):
"""
Merges the input sentences in XML according to the specified method.
"""
method = input_dict['method']
merged_sen, id_to_sent = set(), {}
ids_list = []
for i, sentsXML in enumerate(input_dict['sentences']):
sents = nlp.parse_def_sentences(sentsXML)
ids = set(map(lambda x: x['id'], sents))
ids_list.append(ids)
# Save the map from id to sentence
for sent in sents:
id_to_sent[sent['id']] = sent
if i == 0 and method != 'intersection_two':
merged_sen = ids
if method == 'union':
merged_sen = merged_sen | ids
elif method == 'intersection':
merged_sen = merged_sen & ids
elif method == 'intersection_two':
# Skip the current set of sentences
# and intersect it with the others.
for ids_alt in ids_list[:i] + ids_list[i+1:]:
# As long as (at least) two sets agree with a sentence it
# will be in the resulting set.
merged_sen = merged_sen | (ids_alt & ids)
return {'merged_sentences': nlp.sentences_to_xml([id_to_sent[sid] for sid in merged_sen])}
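# A tiny illustration of the three merge methods above (hypothetical sentence ids,
# not real output): 'union' keeps every id, 'intersection' keeps ids present in all
# inputs, and 'intersection_two' keeps ids on which at least two inputs agree.
def _merge_methods_demo():
    ids_list = [set([1, 2, 3]), set([2, 3, 4]), set([3, 4, 5])]
    union = set().union(*ids_list)                          # {1, 2, 3, 4, 5}
    intersection = ids_list[0] & ids_list[1] & ids_list[2]  # {3}
    intersection_two = set()
    for i, ids in enumerate(ids_list):
        for ids_alt in ids_list[:i] + ids_list[i + 1:]:
            intersection_two |= (ids & ids_alt)             # ends up as {2, 3, 4}
    return union, intersection, intersection_two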
def merge_sentences2(input_dict):
"""
Merges the input sentences in XML according to the specified method.
"""
method = input_dict['method']
merged_sen, id_to_sent = set(), {}
ids_list = []
for i, sentsXML in enumerate(input_dict['sentences']):
sents = nlp.parse_def_sentences2(sentsXML)
ids = set(map(lambda x: x['id'], sents))
ids_list.append(ids)
# Save the map from id to sentence
for sent in sents:
id_to_sent[sent['id']] = sent
if i == 0 and method != 'intersection_two':
merged_sen = ids
if method == 'union':
merged_sen = merged_sen | ids
elif method == 'intersection':
merged_sen = merged_sen & ids
elif method == 'intersection_two':
# Skip the current set of sentences
# and intersect it with the others.
for ids_alt in ids_list[:i] + ids_list[i+1:]:
# As long as (at least) two sets agree with a sentence it
# will be in the resulting set.
merged_sen = merged_sen | (ids_alt & ids)
return {'merged_sentences': nlp.sentences_to_xml2([id_to_sent[sid] for sid in merged_sen])}
def load_corpus(input_dict):
'''
Parses an input file and encodes it in base 64.
'''
f = safeOpen(input_dict['file'])
fname = os.path.basename(input_dict['file'])
wsdl = input_dict.get('wsdl', 'http://vihar.ijs.si:8095/totale?wsdl')
data = base64.b64encode(f.read())
ws = WebService(wsdl, 60000)
response = ws.client.parseFile(fileName=fname, inFile=data)
return {'corpus': response['parsedFile']}
def load_corpus2(input_dict):
'''
Parses an input file and encodes it in base 64.
'''
use_text = input_dict["use_text"] == "true"
if use_text: #checkbox is checked
fname = "input_string.txt"
text = input_dict[u"text"].strip()
if len(text) == 0:
raise Exception("Please input text or uncheck the Use text checkbox.")
data = base64.b64encode(text)
else: #checkbox is not checked
f = safeOpen(input_dict['file'])
fname = os.path.basename(input_dict['file'])
data = base64.b64encode(f.read())
#define web service
webservice_url = webservices_totrtale_url + "/parseFile"
params = {"filename": fname, "text": data} #set params
#call web service
#print webservice_url
resp = post(webservice_url, data=params)
#print resp.content
content = json.loads(resp.content)[u'parseFileResponse'][u'parseFileResult']
"""
if content[u"error"] != "":
raise Exception(content[u"error"])
else:
"""
return {'corpus': content[u"resp"]}
def parse_tei(path, lemma_name = "lemma", pos_name = "ana", word_tag = "w", sentence_tag = "s"):
"""
    Helper function for load_tagged_corpus. Parses the TEI format.
"""
from xml.dom import minidom
fname = os.path.basename(path)
xmldoc = minidom.parse(path)
sentences = xmldoc.getElementsByTagName(sentence_tag)
tab_separated_output = []
head = "<TEXT title="+fname+">\t\n"
foot = "</TEXT>\t\n"
tab_separated_output.append(head)
sentence_id = 0
for sentece in sentences:
line = "\t<S id=\"0_" +str(sentence_id) + "\">\t\n"
tab_separated_output.append(line)
for s in sentece.getElementsByTagName(word_tag):
line = s.childNodes[0].nodeValue + "\tTOK\t" + s.attributes[lemma_name].value + "\t" + s.attributes[pos_name].value + "\t\n"
tab_separated_output.append(line)
line = "\t</S>\t\n"
tab_separated_output.append(line)
sentence_id +=1
tab_separated_output.append(foot)
return "".join(tab_separated_output).encode("utf8", "ignore")
def parse_tab_separated(path, word_index, token_index, lemma_index, pos_index, start_tag, end_tag, separator):
"""
    Helper function for load_tagged_corpus. Parses the tab-separated format.
"""
fname = os.path.basename(path)
f = safeOpen(path)
data = []
head = "<TEXT title="+fname+">\t\n"
foot = "</TEXT>\t\n"
data.append(head)
sentence_counter = 0
for line in f:
splitted_line = re.split(separator, line.strip())
if len(splitted_line) >= 4:
new_line = splitted_line[word_index] + "\t" + splitted_line[token_index] + "\t" + splitted_line[lemma_index] + "\t" + splitted_line[pos_index] + "\t\n"
data.append(new_line)
else:
added = False
for el in splitted_line:
if re.match(start_tag, el.strip()):
data.append("\t<S id=\"0_" + str(sentence_counter)+"\">\t\n")
added = True
break
elif re.match(end_tag, el.strip()):
data.append("\t</S>\t\n")
sentence_counter+=1
added = True
break
if not added:
data.append("\t".join(splitted_line + ["\t\n"]))
data.append(foot)
return "".join(data)
def load_tagged_corpus(input_dict):
"""
Loads a file in TEI or XML format.
"""
data = ""
if input_dict["input_format"] == "tab_format":
try:
word_index = int(input_dict["word_index"]) - 1
lemma_index = int(input_dict["lemma_index"]) - 1
token_index = int(input_dict["token_index"]) - 1
pos_index = int(input_dict["pos_index"]) - 1
except ValueError:
raise Exception("Please specify a number in index fields.")
start_tag = input_dict["start_tag"]
end_tag = input_dict["end_tag"]
separator = input_dict["separator"]
if len(start_tag) < 1 or len(end_tag) < 1 or len(separator) < 1:
raise Exception("Please review start, end tag and separator parameters.")
if word_index+1 == 1 and token_index+1 == 2 and lemma_index+1 == 3 and pos_index+1 == 4 and start_tag == u'<S>' and end_tag == '</S>':
f = safeOpen(input_dict['file'])
data = f.read()
else:
if len(set([word_index, lemma_index, token_index, pos_index])) != 4:
raise Exception("Field indices should be distinct.")
data = parse_tab_separated(input_dict['file'], word_index=word_index, token_index=token_index, lemma_index=lemma_index, pos_index=pos_index, start_tag=start_tag, end_tag=end_tag, separator=separator)
else:
lemma_name = input_dict["lemma_name"]
pos_name = input_dict["pos_name"]
sentence_tag = input_dict["sentence_tag"]
word_tag = input_dict["word_tag"]
if len(lemma_name) < 1 or len(pos_name) < 1 or len(sentence_tag) < 1 or len(word_tag) < 1:
raise Exception("Please review parameters for TEI format.")
data = parse_tei(input_dict['file'], lemma_name = lemma_name, pos_name = pos_name, word_tag = word_tag, sentence_tag = sentence_tag)
return {'annotations': data}
def totrtale_request(params):
webservice_url = webservices_totrtale_url + "/runToTrTaLe"
return post(webservice_url, data=params)
def nlp_totrtale2(input_dict, widget):
'''
Calls the totrtale web service.
    Splits huge documents into smaller pieces and sends them separately to the totrtale web service. If there are multiple smaller documents, this function groups them and sends them together.
'''
import multiprocessing
from xml.dom.minidom import parseString
import time
import math
import copy
from xml.dom.minidom import getDOMImplementation
progress_accumulator = 0 #progress for progress bar
widget.progress= progress_accumulator
widget.save()
processes = 4 #number of processes for multiprocessing
DOCUMENTS_SIZE = 3 * int(1e6) #size of a group of documents in MB per process
SINGLE_DOC_SIZE = 1 * int(1e6) #size of a single document per process
corpus = input_dict['corpus']
if type(corpus) is list:
fname = "input_list.txt"
text = "\n".join(input_dict['corpus']).strip()
data = base64.b64encode(text)
#define web service
webservice_url = webservices_totrtale_url + "/parseFile"
params = {"filename": fname, "text": data} #set params
#call web service
#print webservice_url
resp = post(webservice_url, data=params)
#print resp.content
content = json.loads(resp.content)[u'parseFileResponse'][u'parseFileResult']
corpus = parseString(content[u"resp"])
else:
corpus = parseString(input_dict['corpus'])
language = input_dict['lang'],
postprocess = input_dict['postprocess'] == "true"
xml = input_dict['xml'] == "true"
params = {"language": language,
"postprocess": postprocess,
"xml":xml}
tei_corpus = corpus.getElementsByTagName('teiCorpus')
if tei_corpus:
tei_head = '<?xml version="1.0" encoding="utf-8"?>\n' + \
'<teiCorpus xmlns="http://www.tei-c.org/ns/1.0">\n'
tei_header = corpus.getElementsByTagName('teiHeader')[0].toxml() + "\n"
tei_tail = '</teiCorpus>'
pool = multiprocessing.Pool(processes=processes)
documents = corpus.getElementsByTagName('TEI')
documents_size, document_num, process_num = 0, 0, 1
results, docs, single_docs = [], [], []
for i, document in enumerate(documents):
doc_len = len(document.getElementsByTagName('body')[0].getElementsByTagName('p')[0].childNodes[0].nodeValue)
doc_title = document.getElementsByTagName('title')[0].firstChild.nodeValue
print doc_title
if doc_len > SINGLE_DOC_SIZE:
#split single huge document
predhead = '<TEI xmlns="http://www.tei-c.org/ns/1.0">\n'
title = '<title>' + doc_title + '</title>\n'
head = '<text>\n<body>\n<p>\n'
header = document.getElementsByTagName('teiHeader')[0].toxml() + "\n"
tail = '\n</p>\n</body>\n</text>\n</TEI>'
document_text = document.getElementsByTagName('body')[0].getElementsByTagName('p')[0].childNodes[0].nodeValue.strip().replace("&","&").replace("<","<").replace(">",">").replace("\"",""")
prev_j, curr_j = 0, SINGLE_DOC_SIZE
while (curr_j+2) < len(document_text):
while (curr_j+2) < len(document_text) and document_text[curr_j:curr_j+2] != ". ":
curr_j+=1
sub_params = copy.deepcopy(params)
if prev_j == 0:
sub_params["text"] = predhead +title + head + document_text[prev_j: curr_j+2] +tail
else:
sub_params["text"] = predhead + head + document_text[prev_j: curr_j+2] + tail
sub_params["doc_id"] = str(len(results))
results.append(pool.apply_async(totrtale_request, args=[sub_params]))
if prev_j == 0:
single_docs.append(0)
else:
single_docs.append(1)
prev_j = curr_j+2
curr_j += SINGLE_DOC_SIZE
document_num+=1
process_num += 1
if curr_j > doc_len:
sub_params = copy.deepcopy(params)
sub_params["text"] = predhead + head + document_text[prev_j:] + tail
sub_params["doc_id"] = str(len(results))
results.append(pool.apply_async(totrtale_request, args=[sub_params]))
document_num += 1
process_num += 1
single_docs.append(2)
print "document was split",doc_title, len(single_docs)
else:
#group multiple smaller documents.
docs.append(document.toxml())
document_num+=1
documents_size += doc_len
if documents_size > DOCUMENTS_SIZE or (document_num) % 10==0 or i == len(documents)-1:
documents_size = 0
document_num = 0
sub_params = copy.deepcopy(params)
sub_params["text"] = "\n".join(docs)
sub_params["doc_id"] = str(len(results))
print "whole document was added", len(docs)
results.append(pool.apply_async(totrtale_request, args=[sub_params]))
process_num += 1
docs = []
single_docs.append(-1)
pool.close()
    #we need to join the results of totrtale processing back together; this also updates the progress bar.
response = ["" for i in results]
progress = [True]
while any(progress):
time.sleep(1)
progress = [not result.ready() for result in results]
print progress
for i, prog in enumerate(progress):
if not prog and response[i] == "":
try:
resp=json.loads(results[i].get().content)[u'runToTrTaLeResponse'][u'runToTrTaLeResult']
except Exception as e:
raise Exception("There was a problem processing your file.")
if resp["error"] != "":
progress = [False]
raise Exception(resp["error"])
if xml:
#results are in xml
if single_docs[i] == 0:
print "remove back", i
pos1 = resp["resp"].find("<s>")
pos2 = resp["resp"].find("</p>")
response[i] = predhead + header + head + resp["resp"][pos1:pos2]
elif single_docs[i] == 2:
print "remove front", i
pos1 = resp["resp"].find("<s>")
response[i] = resp["resp"][pos1:]
elif single_docs[i] == 1:
print "remove both", i
pos1 = resp["resp"].find("<s>")
pos2 = resp["resp"].find("</p>")
response[i] = resp["resp"][pos1:pos2]
else:
print "nothing to remove"
response[i] = resp["resp"]
else:
#results are tab separated
if single_docs[i] in [0,1]:
pos2 = resp["resp"].find("</TEXT>")
response[i] = resp["resp"][:pos2]
else:
print "nothing to remove"
response[i] = resp["resp"]
progress_accumulator += 1/float(len(results))*100
print progress_accumulator
widget.progress = math.floor(progress_accumulator)
widget.save()
pool.join()
#return output only if all processes are completed.
if not any(progress):
widget.progress=100
widget.save()
response = "".join(response)
if tei_corpus and xml:
response = tei_head + tei_header + response + tei_tail
return {'annotations': response}
def nlp_totrtale(input_dict):
'''
Calls the totrtale web service.
'''
corpus = input_dict['corpus']
lang = input_dict['lang']
wsdl = input_dict.get('wsdl', 'http://vihar.ijs.si:8095/totale?wsdl')
xml = input_dict['xml'] == 'true'
postprocess = input_dict['postprocess'] == 'true'
bohoricica = input_dict['bohoricica'] == 'true'
antique = input_dict['antique'] == 'true'
ws = WebService(wsdl, 60000)
response = ws.client.runTotale(inFile=corpus, language=lang,
postProcessing=postprocess,
bohoricica=bohoricica,
antiqueSlovenian=antique,
outputAsXML=xml)
errors = response['error']
if errors:
print errors
return {'annotations': response['annotatedFile']}
def nlp_term_extraction(input_dict):
'''
Term extraction from totrtale annotations.
'''
annotations = input_dict['annotations']
lang = input_dict['lang']
wsdl = input_dict.get('wsdl', 'http://vihar.ijs.si:8095/totale?wsdl')
if '<TEI xmlns="http://www.tei-c.org/ns/1.0">' in annotations:
annotations = TEItoTab(annotations)
ws = WebService(wsdl, 60000)
response = ws.client.TermExtraction(corpus=annotations, lang=lang,
threshold=0)
return {'candidates': response['candidates']}
def get_default_stop_word_list(lang):
if lang == "en":
return ["et al", "example", "use", "source", "method", "approach", "table", "figure", "percentage"]
elif lang == "sl":
return ["itd", "primer", "uporaba", "vir", "metoda", "pristop", "tabela", "slika", "odstotek"]
def nlp_term_extraction2(input_dict):
'''
Term extraction from totrtale annotations.
'''
ws_url = webservice_def_ex_url + "/call"
annotations = input_dict['annotations']
lang = input_dict['lang']
stop_list_checkbox = input_dict["stop_list"] == "true"
user_stop_words = []
try:
max_terms = input_dict['max_terms']
except:
max_terms = 'all'
if input_dict['stop_words_file'] != "":
user_stop_words = safeOpen(input_dict['stop_words_file']).read()
try:
user_stop_words.decode("utf-8")
except Exception:
raise Exception("Please make sure that your stop words list is encoded in UTF-8.")
user_stop_words = [word.strip() for word in user_stop_words.split("\n")]
if '<TEI xmlns="http://www.tei-c.org/ns/1.0">' in annotations:
annotations = TEItoTab(annotations)
if lang == "sl":
reference_corpus = input_dict["slovene_reference_corpus"]
elif lang == "en":
reference_corpus = input_dict["english_reference_corpus"]
params = {"corpus":annotations,
"lang": lang,
"reference_corpus":reference_corpus}
response = post(ws_url, data=params)
resp = json.loads(response.content)[u'callResponse'][u'callResult']
if max_terms != 'all':
resp = "\n".join(resp.split("\n")[:int(max_terms)])
stop_list = []
if stop_list_checkbox:
stop_list = get_default_stop_word_list(lang)
stop_list = set(stop_list + user_stop_words)
if len(stop_list) > 0:
resp = resp.split("\n")
i=0
while i < len(resp):
increase = True
line = resp[i]
if len(line) > 0:
term = line.split("\t")[1][1:-1]
for word in term.split(" "):
if word.lower() in stop_list:
increase = False
resp.pop(i)
break
if increase:
i+=1
resp = "\n".join(resp)
return {'candidates': resp}
def nlp_def_extraction_patterns(input_dict):
'''
Definition extraction using pre-defined patterns.
'''
annotations = input_dict['annotations']
lang = input_dict['lang']
wsdl = input_dict.get('wsdl', 'http://vihar.ijs.si:8099')
if '<TEI xmlns="http://www.tei-c.org/ns/1.0">' in annotations:
annotations = TEItoTab(annotations)
ws = WebService(wsdl, 60000)
pattern = input_dict['pattern']
response = ws.client.GlossaryExtractionByPatterns(corpus=annotations,
lang=lang, pattern=pattern)
return {'sentences': response['candidates']}
def nlp_def_extraction_patterns2(input_dict):
'''
Definition extraction using pre-defined patterns.
'''
annotations = input_dict['annotations']
lang = input_dict['lang']
pattern = input_dict['pattern']
if lang == "sl" and pattern == "begin_allvar":
raise Exception("Pattern begin_allvar is not supported for slovene language.")
if '<TEI xmlns="http://www.tei-c.org/ns/1.0">' in annotations:
annotations = TEItoTab(annotations)
ws_url = webservice_def_ex_url + "/patDefSent"
params = {"corpus":annotations,
"pattern":pattern,
"lang":lang}
response = post(ws_url, data=params)
response = json.loads(response.content)[u'patDefSentResponse'][u'patDefSentResult']
return {'sentences': response}
def nlp_def_extraction_terms(input_dict):
'''
Definition extraction using terms.
'''
annotations = input_dict['annotations']
term_candidates = input_dict['term_candidates']
lang = input_dict['lang']
wsdl = input_dict.get('wsdl', 'http://vihar.ijs.si:8099')
terms_per_sentence = input_dict['terms_per_sentence']
nominatives = input_dict['nominatives']
threshold = input_dict['threshold']
verb_two_terms = input_dict['verb_two_terms']
multiword_term = input_dict['multiword_term']
num_multiterms = input_dict['num_multiterms']
term_beginning = input_dict['term_beginning']
if '<TEI xmlns="http://www.tei-c.org/ns/1.0">' in annotations:
annotations = TEItoTab(annotations)
ws = WebService(wsdl, 60000)
response = ws.client.GlossaryExtractionByTerms(corpus=annotations,
candidates=term_candidates, lang=lang, nominatives=nominatives,
termsPerSent=terms_per_sentence, select=threshold,
verb_two_terms=verb_two_terms, multiword_term=multiword_term,
num_multiterms=num_multiterms, term_beginning=term_beginning)
return {'sentences': response['candidates']}
def nlp_def_extraction_terms2(input_dict):
'''
Definition extraction using terms.
'''
annotations = input_dict['annotations']
term_candidates = input_dict['term_candidates']
lang = input_dict['lang']
terms_per_sentence = input_dict['terms_per_sentence']
nominatives = input_dict['nominatives']
threshold = input_dict['threshold']
verb_two_terms = input_dict['verb_two_terms']
multiword_term = input_dict['multiword_term']
num_multiterms = input_dict['num_multiterms']
term_beginning = input_dict['term_beginning']
if '<TEI xmlns="http://www.tei-c.org/ns/1.0">' in annotations:
annotations = TEItoTab(annotations)
ws_url = webservice_def_ex_url + "/termDefSent"
params = {"corpus":annotations,
"candidates":term_candidates,
"lang":lang,
"nominatives":nominatives,
"terms_per_sentence":terms_per_sentence,
"select": threshold,
"verb_two_terms":verb_two_terms,
"multiword_term":multiword_term,
"num_multiterms":num_multiterms,
"term_beginning":term_beginning}
response = post(ws_url, data=params)
response = json.loads(response.content)[u'termDefSentResponse'][u'termDefSentResult']
return {'sentences': response}
def nlp_def_extraction_wnet(input_dict):
'''
Definition extraction using WordNet.
'''
annotations = input_dict['annotations']
lang = input_dict['lang']
wsdl = input_dict.get('wsdl', 'http://vihar.ijs.si:8099')
if '<TEI xmlns="http://www.tei-c.org/ns/1.0">' in annotations:
annotations = TEItoTab(annotations)
ws = WebService(wsdl, 60000)
response = ws.client.GlossaryExtractionByWnet(corpus=annotations, lang=lang)
return {'sentences': response['candidates']}
def nlp_def_extraction_wnet2(input_dict):
'''
Definition extraction using WordNet.
'''
annotations = input_dict['annotations']
lang = input_dict['lang']
if '<TEI xmlns="http://www.tei-c.org/ns/1.0">' in annotations:
annotations = TEItoTab(annotations)
ws_url = webservice_def_ex_url + "/wnetDefSent"
params = {"corpus":annotations,
"lang":lang}
response = post(ws_url, data=params)
response = json.loads(response.content)[u'wnetDefSentResponse'][u'wnetDefSentResult']
return {'sentences': response}
def export_terms(input_dict):
terms = input_dict['candidates']
name = input_dict['name']
regex = r"\<\<([^>>]+)\>\>"
terms = re.findall(regex, terms)
terms = [[term] for term in terms]
df_terms = pd.DataFrame(terms, columns = [name])
return {'df': df_terms}
def TEItoTab(text, doc_id=0):
mask1 = ["\tTOK\t", "\t", "\t\n"]
pattern1 = "<w (type=\"unknown\")| lemma=\"(?P<lemma>.*?)\" ana=\"(?P<ana>.*?)\">(?P<value>.*?)</w>"
pattern2 = "<title>(.*?)</title>"
pattern3 = "<pc>(.*?)</pc>"
pattern4 = "(.*?)\t(TOK)\t(.*?)\t(Y)"
pattern5 = "(.*?)\t(TOK)\t(.*?)\t(Mdo|Mdc)"
pattern6 = "<w>(.*)</w>"
newText=[]
print "TEItoTab started"
sentence_id = 0
choice_found=False #if lang in ["gaji", "boho"]
local_s=""
for l in text.splitlines():
print l
if "<choice>" in l:
choice_found=True
first = True
continue
elif choice_found and "<w" in l:
local_s = re.findall(pattern6, l)[0]
choice_found=False
continue
if "<w" in l:
match = [m.group("value", "lemma", "ana") for m in re.finditer(pattern1, l)]
if len(match) == 0:
local_s += " " + re.findall(pattern6, l)[0]
elif len(match) == 1:
match = match[0]
elif len(match) == 2:
match = match[1]
l = ''.join(itertools.chain.from_iterable(zip(match, mask1)))
if len(l) < 100:
value = re.findall(pattern4, l)
if len(value) > 0:
l = "\t".join(value[0]).replace("TOK", "TOK_ABBR") + "\t\n"
value = re.findall(pattern5, l)
if len(value) > 0:
l = "\t".join(value[0]).replace("TOK", "TOK_DIG") + "\t\n"
if len(local_s) > 0:
l = local_s + "|" + l
local_s = ""
newText.append(l)
elif "<s>" in l:
newText.append("\t\t<S id=\"" + str(doc_id) + "_" + str(sentence_id) + "\">\t\n")
elif "</s>" in l:
newText.append("\t\t</S>\t\n")
sentence_id+=1
elif "<pc>" in l:
value = re.findall(pattern3, l)[0]
if value == ".":
newText.append(value+"\t\tPUN_TERM\t\n")
else:
value = value.replace("&","&").replace("<","<").replace(">", ">").replace(""","\"")
newText.append(value+"\t\tPUN\t\n")
elif "<title>" in l:
title = re.findall(pattern2, l)[0]
title = title.replace("&","&").replace("<","<").replace(">", ">").replace(""","\"")
newText.append("<TEXT title=" + title + ">\t\n")
elif "</body>" in l:
newText.append("</TEXT>\t\n")
return "".join(newText)
def definition_sentences2(input_dict):
return {}
from reldi.parser import Parser
from redi import restore_diacritic
from reldi_tokenizer import generate_tokenizer, sentence_split, sentence_split_nonstd, tokenize
from reldi_tagger import tag_main, load_models
import json
def nlp_reldi_tokenizer(input_dict):
lang = input_dict['lang']
not_standard = input_dict['standard']
corpus = input_dict['corpus']
flatten = input_dict['flatten']
mode = 'nonstandard' if not_standard else 'standard'
all_tokenized_docs = []
tokenizer = generate_tokenizer(lang)
process = {'standard':lambda x,y,z:sentence_split(tokenize(x,y),z),'nonstandard':lambda x,y,z:sentence_split_nonstd(tokenize(x,y),z)}
if type(corpus) is list:
for doc in corpus:
tokens = process[mode](tokenizer,doc.decode('utf8'),lang)
if flatten:
tokens = " ".join([token for sentence in tokens for token, begin, end in sentence if ' ' not in token])
all_tokenized_docs.append(tokens)
else:
tokens = process[mode](tokenizer,corpus.decode('utf8'),lang)
if flatten:
tokens = " ".join([token for sentence in tokens for token, begin, end in sentence if ' ' not in token])
all_tokenized_docs.append(tokens)
return {'tokens': all_tokenized_docs}
def split_list(seq, size):
newseq = []
splitsize = 1.0 / size * len(seq)
for i in range(size):
newseq.append(seq[int(round(i * splitsize)):int(round((i + 1) * splitsize))])
return newseq
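# For example, under this module's Python 2 rounding, split_list([1, 2, 3, 4, 5], 2)
# yields [[1, 2, 3], [4, 5]]: each of the `size` chunks covers roughly len(seq)/size items.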
def nlp_reldi_tagger(input_dict):
folder_path = os.path.dirname(os.path.realpath(__file__))
reldir = os.path.join(folder_path, 'models', 'reldi_tagger')
tokens = input_dict['tokens']
lang = input_dict['lang']
lemmatize = False
trie, tagger, lemmatiser = load_models(lang)
results = tag_main((tokens, lang, lemmatize), trie, tagger, lemmatiser)
return {'pos_tags': results}
def nlp_reldi_lemmatizer(input_dict):
folder_path = os.path.dirname(os.path.realpath(__file__))
reldir = os.path.join(folder_path, 'models', 'reldi_tagger')
tokens = input_dict['tokens']
lang = input_dict['lang']
lemmatize = True
trie, tagger, lemmatiser = load_models(lang)
results = tag_main((tokens, lang, lemmatize), trie, tagger, lemmatiser)
return {'lemmas': results}
def nlp_diacritic_restoration(input_dict):
tokens = input_dict['tokens']
lang = input_dict['lang']
flatten = input_dict['flatten']
folder_path = os.path.dirname(os.path.realpath(__file__))
lexicon=pickle.load(open(os.path.join(folder_path, 'models', 'redi', 'wikitweetweb.'+lang+'.tm'), 'rb'))
all_docs = []
for doc in tokens:
restored_tokens = restore_diacritic(doc, lexicon)
if flatten:
restored_tokens = " ".join([token for sentence in restored_tokens for token, begin, end in sentence if ' ' not in token])
all_docs.append(restored_tokens)
return {'tokens': all_docs}
def nlp_reldi_parser(input_dict):
user = 'user'
passwd = 'user'
coding = 'utf8'
corpus = input_dict['corpus']
lang = input_dict['lang']
parser = Parser(lang)
parser.authorize(user, passwd)
if type(corpus) is list:
corpus = "\n".join(input_dict['corpus']).strip()
result = json.loads(parser.tagLemmatiseParse(corpus.decode(coding).encode('utf8')))
final = set()
for sentence in result['sentences']['sentence']:
final.add(sentence['tokenIDs'].split(' ')[-1])
sent_offset = 0
token_num = 0
parses = []
final_text = ''
for tree in result['depparsing']['parse']:
parses.extend(tree['dependency'])
for token, lemma, tag, parse in zip(result['tokens']['token'], result['lemmas']['lemma'], result['POStags']['tag'], parses):
if 'govIDs' not in parse:
head = '0'
else:
head = int(parse['govIDs'].split('_')[-1]) + 1 - sent_offset
text = (token['text'] + '\t' + lemma['text'] + '\t' + tag['text'] + '\t' + str(head) + ':' + parse[
'func'] + '\n').encode(coding)
token_num += 1
if token['ID'] in final:
text += '\n'
sent_offset += token_num
token_num = 0
final_text += text
return {'annotations': final_text.encode('utf8')}
def streaming_tweetcat(input_dict, widget, stream=None):
from streams.models import StreamWidgetData
from streams.models import StreamWidgetState
from streams.models import HaltStream
from streams.models import Stream
# you can obtain the four values required below by registering your app at https://apps.twitter.com
if input_dict['cfauth'] == "true":
consumer_key="zmK41mqxU3ZNJTFQpYwTdg"
consumer_secret="9StnKNAe20ebDOREQjsVjAjBEiz5R9feZJTGUYWqLo"
access_token="45210078-VydgdJMwhWYjZRvlNbrKj6jfqicUIsdMnRbnaPElL"
access_token_secret="uLvIN3MMxFSxdK4M8P5RYojjUkbc2reqNydYtpT7Ks"
else:
consumer_key = input_dict['ck']
consumer_secret = input_dict['cs']
access_token = input_dict['at']
access_token_secret = input_dict['as']
langid_lang= [code.strip() for code in input_dict['lc'].split(',')]
MODE = input_dict['mod']
# define if MODE is GEO, ignore if MODE is LANG
# lower left corner, can be obtained from http://www.latlong.net
coordinates = (int(cor.strip()) for cor in input_dict['geo'].split(','))
MINLAT, MINLON, MAXLAT, MAXLON = coordinates
# authorization
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
if stream is not None:
try:
swd = StreamWidgetState.objects.get(stream=stream,widget=widget)
data = swd.state
except Exception as e:
swd = StreamWidgetState()
swd.stream = stream
swd.widget = widget
data = {}
swd.state = data
swd.save()
if MODE=='LANG':
seedw = [e.decode('utf8').strip() for e in input_dict['sd'].split(',')]
user_index = {}
user_lang = {}
state = {'seeds': seedw, 'hits': None, 'followers': None, 'friends': None}
try:
ltw = tweepy.API(auth_handler=auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, retry_count=3, retry_delay=10)
except Exception as e:
raise HaltStream("The Twitter API returned an error: " + str(e))
if data.has_key('authors'):
user_index = data['authors']
if data.has_key('state'):
state = data['state']
if data.has_key('user_lang'):
user_lang = data['user_lang']
if state['seeds'] == None:
tweets, user_index, user_lang, state = lang_mode(state, user_index, ltw, langid_lang, user_lang, True)
state['seeds'] = seedw
elif not set(state['seeds']) <= set(seedw):
state['seeds'] = seedw
tweets, user_index, user_lang, state = lang_mode(state, user_index, ltw, langid_lang, user_lang)
else:
tweets, user_index, user_lang, state = lang_mode(state, user_index, ltw, langid_lang, user_lang)
swd.state = {'authors': user_index, 'state': state, 'user_lang': user_lang}
swd.save()
elif MODE=='GEO':
l=StdOutListener()
tweepy_stream=tweepy.Stream(auth,l)
tweepy_stream.filter(locations=[MINLON,MINLAT,MAXLON,MAXLAT])
tweets = l.tweetList
new_tweets = []
for tweet in tweets:
new_tweets.append(StreamWidgetData(stream=stream,widget=widget,value=tweet))
StreamWidgetData.objects.bulk_create(new_tweets)
return {}
else:
try:
stream = Stream.objects.filter(workflow__widgets=widget)[0]
except:
            raise Exception('It appears no data was collected yet. Try again in a couple of minutes. Also, make sure the stream is activated - if not, go to "Your workflows" and activate it.')
tweet_data = StreamWidgetData.objects.filter(widget=widget,stream=stream)
tweets = []
if len(tweet_data) == 0:
            raise Exception('It appears no data was collected yet. Try again in a couple of minutes. Also, make sure your Twitter API credentials are correct.')
for tweet in tweet_data:
tweet = tweet.value
tweet['text'] = unicode(tweet['text'], 'utf-8')
tweets.append(tweet)
df = pd.DataFrame(tweets)
return {'df': df}
def load_corpus_from_csv(input_dict):
import gc
separator = str(input_dict['separator'])
if separator.startswith('\\'):
separator = '\t'
try:
data_iterator = pd.read_csv(input_dict['file'], delimiter=separator, chunksize=1000, index_col=None, encoding = 'utf8')
df_data = pd.DataFrame()
for sub_data in data_iterator:
df_data = pd.concat([df_data, sub_data], axis=0)
gc.collect()
except:
        raise Exception("Oops, we are having a problem uploading your corpus. Please make sure it's encoded in UTF-8.")
df_data = df_data.dropna()
#print(df_data.columns.tolist())
#print("Data shape:", df_data.shape)
return {'dataframe': df_data}
def select_corpus_attribute(input_dict):
df = input_dict['dataframe']
attribute = input_dict['attribute']
column = df[attribute].tolist()
return {'attribute': column}
def tfidf_tokenizer(text):
    #hopefully this sequence is not used in any document more than 3 times or tokenization will go horribly wrong :).
if text.count('###') > 3:
return text.split('###')
return text.split()
def tfidf_vectorizer(input_dict):
corpus = input_dict['corpus']
lowercase = True if input_dict['low'] == 'true' else False
max_df = input_dict['max_df']
min_df = input_dict['min_df']
max_df = float(max_df) if '.' in max_df else int(max_df)
min_df = float(min_df) if '.' in min_df else int(min_df)
try:
max_features = int(input_dict['max_features'])
except:
max_features = None
smooth_idf = True if input_dict['smooth_idf'] == 'true' else False
sublinear_tf = True if input_dict['sublinear_tf'] == 'true' else False
min_ngram = int(input_dict['min_ngram'])
max_ngram = int(input_dict['max_ngram'])
analyzer = input_dict['analyzer'].encode('utf8')
tfidf_vec = TfidfVectorizer(tokenizer=tfidf_tokenizer, min_df=min_df, max_df=max_df, lowercase=lowercase, max_features=max_features, smooth_idf=smooth_idf, sublinear_tf=sublinear_tf, ngram_range=(min_ngram, max_ngram), analyzer=analyzer)
return {'tfidf': {'vectorizer': tfidf_vec, 'data': corpus}}
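# A minimal sketch of how the output above is meant to be consumed downstream
# (toy corpus and made-up widget parameters): the vectorizer is returned unfitted
# and is normally fitted later, e.g. inside the pipelines built by feature_union.
def _tfidf_vectorizer_demo():
    toy_corpus = ["the cat sat", "the dog sat", "the cat ran"]
    out = tfidf_vectorizer({'corpus': toy_corpus, 'low': 'true',
                            'max_df': '1.0', 'min_df': '1', 'max_features': '',
                            'smooth_idf': 'true', 'sublinear_tf': 'false',
                            'min_ngram': '1', 'max_ngram': '1',
                            'analyzer': 'word'})['tfidf']
    matrix = out['vectorizer'].fit_transform(out['data'])
    return matrix.shape  # (3 documents, 5 distinct tokens)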
class Transformer(BaseEstimator, TransformerMixin):
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_list):
return data_list[self.key]
class DigitTransformer(BaseEstimator, TransformerMixin):
def __init__(self, drop):
self.drop = drop
def fit(self, x, y=None):
return self
def transform(self, hd_searches):
d_col_drops= self.drop
#print(hd_searches.columns)
hd_searches = hd_searches.drop(d_col_drops,axis=1).values
scaler = preprocessing.MinMaxScaler().fit(hd_searches)
return scaler.transform(hd_searches)
def feature_union(input_dict):
y = input_dict['y']
weights = input_dict['weights'].split(',')
weights = [float(weight) for weight in weights if len(weights) > 1]
vec_and_data = input_dict['features']
features = []
dataset = []
columns = []
digit_col = False
drop = []
for i, instance in enumerate(vec_and_data):
column = 'feature' + str(i)
columns.append(column)
try:
vectorizer = instance['vectorizer']
data = instance['data']
#print(len(data))
dataset.append(data)
feature = (column, pipeline.Pipeline([('t' + str(i), Transformer(key=column)), ('f' + str(i), vectorizer)]))
features.append(feature)
drop.append(column)
except:
digit_col = True
dataset.append(list(np.transpose(np.array([instance]))))
if digit_col:
feature = ('cst', DigitTransformer(drop=drop))
features.append(feature)
weights_dict = {}
if len(weights) > 1 and len(weights) == len(features):
for i in range(len(weights)):
weights_dict[features[i][0]] = weights[i]
else:
for i in range(len(features)):
weights_dict[features[i][0]] = 1.0
df = pd.DataFrame(dataset)
df = df.transpose()
df.columns = columns
#print(df.columns)
#print("Shape: ", df.shape)
featureUnion = FeatureUnion(transformer_list = features, transformer_weights = weights_dict)
#featureUnion = featureUnion.fit(dataset).transform(dataset)
return {'matrix': {'featureUnion': featureUnion, 'data': df, 'target':y}}
def affix_extractor(input_dict):
corpus = input_dict['corpus']
affixes_tokens = []
affix_type = input_dict['affix_type']
affix_length = int(input_dict['affix_length'])
punct = '#@!"$%&()*+,-./:;<=>?[\]^_`{|}~' + "'"
for text in corpus:
if affix_type == 'suffix':
affixes = " ".join([word[-affix_length:] for word in text.split() if len(word) >= affix_length])
elif affix_type == 'prefix':
affixes = " ".join([word[0:affix_length] for word in text.split() if len(word) >= affix_length])
else:
ngrams = []
for i, character in enumerate(text[0:-affix_length - 1]):
ngram = text[i:i+affix_length]
if ngram[0] in punct:
for p in punct:
if p in ngram[1:]:
break
else:
ngrams.append(ngram)
affixes = "###".join(ngrams)
affixes_tokens.append(affixes)
return {'affixes': affixes_tokens}
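# A small illustration of the affix features produced above (made-up input):
# 'suffix'/'prefix' keep the last/first N characters of each sufficiently long
# word, while the char-ngram branch emits '###'-joined character n-grams that
# tfidf_tokenizer later splits back apart.
def _affix_extractor_demo():
    toy = ["barking dogs seldom bite"]
    suffixes = affix_extractor({'corpus': toy, 'affix_type': 'suffix',
                                'affix_length': '3'})['affixes']
    prefixes = affix_extractor({'corpus': toy, 'affix_type': 'prefix',
                                'affix_length': '3'})['affixes']
    return suffixes, prefixes  # (['ing ogs dom ite'], ['bar dog sel bit'])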
def tweet_clean(input_dict):
mode = input_dict['mode']
if mode == 'remove':
mention_replace_token, hashtag_replace_token, url_replace_token = '', '', ''
else:
mention_replace_token, hashtag_replace_token, url_replace_token = 'TWEETMENTION', 'HASHTAG', 'HTTPURL'
corpus = input_dict['corpus']
cleaned_docs = []
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
for doc in corpus:
doc = re.sub(r'(?:@[\w_]+)', mention_replace_token, doc)
doc = re.sub(r"(?:\#+[\w_]+[\w\'_\-]*[\w_]+)", hashtag_replace_token, doc)
doc = re.sub(url_regex, url_replace_token, doc)
cleaned_docs.append(doc)
return {'corpus': cleaned_docs}
def remove_stopwords(input_dict):
lang = input_dict['lang']
corpus = input_dict['corpus']
cleaned_docs = []
if lang == 'es':
stops = set(stopwords.words("spanish"))
elif lang == 'en':
stops = set(stopwords.words("english"))
elif lang == 'pt':
stops = set(stopwords.words("portuguese"))
elif lang == 'sl':
folder_path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(folder_path, 'models', 'stopwords_slo.txt')
with open(path) as f:
stops = set([unicode(line.strip().lower(), 'utf-8') for line in f])
for doc in corpus:
doc = [x.lower() for x in doc.split() if x.lower() not in stops]
cleaned_docs.append(" ".join(doc))
return {'corpus': cleaned_docs}
def nltk_tokenizer(input_dict):
corpus = input_dict['corpus']
tokenized_docs = []
for doc in corpus:
words = [word for sent in nltk.sent_tokenize(doc) for word in nltk.word_tokenize(sent)]
tokenized_docs.append(" ".join(words))
return {'corpus': tokenized_docs}
def perceptron_pos_tagger(input_dict):
corpus = input_dict['corpus']
tagger = PerceptronTagger()
pos_tagged_docs = []
for doc in corpus:
tokens = nltk.sent_tokenize(doc)
#use average perceptron tagger
tokens = [nltk.word_tokenize(token) for token in tokens]
text = tagger.tag_sents(tokens)
tagged_doc = " ".join(tag for sent in text for word, tag in sent)
pos_tagged_docs.append(tagged_doc)
return {'corpus': pos_tagged_docs}
def lemmagen_lemmatizer(input_dict):
corpus = input_dict['corpus']
lemmatizer = Lemmatizer(dictionary=lemmagen.DICTIONARY_ENGLISH)
lemmatized_docs = []
for doc in corpus:
lemmas = [lemmatizer.lemmatize(word) for sent in nltk.sent_tokenize(doc) for word in nltk.word_tokenize(sent)]
lemmatized_docs.append(" ".join(lemmas))
return {'corpus': lemmatized_docs}
def remove_punctuation(input_dict):
corpus = input_dict['corpus']
punctuation = '#@!"$%&()*+,-./:;<=>?[\]^_`{|}~' + "'"
cleaned_docs = []
for doc in corpus:
for p in punctuation:
doc = doc.replace(p, "")
cleaned_docs.append(doc)
return {'corpus': cleaned_docs}
def count(text, l):
cnt = 0
for pattern in l:
cnt += text.count(pattern)
return cnt
def count_patterns(input_dict):
from itertools import groupby
corpus = input_dict['corpus']
mode = input_dict['mode']
wordlist = input_dict['custom'].split(',')
wordlist = [word.strip() for word in wordlist]
sum_all = input_dict['sum_all']
raw_frequency = input_dict['raw_frequency']
if mode == 'emojis':
folder_path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(folder_path, 'models', 'emoji_dataset.csv')
df_emojis = pd.read_csv(path, encoding="utf-8", delimiter=",")
emoji_list = set(df_emojis['Emoji'].tolist())
counts = []
whole_length = 0
for doc in corpus:
doc_length = len(doc)
if doc_length == 0:
counts.append(0)
continue
cnt = 0
if mode == 'floods':
text = ''.join(doc.split())
groups = groupby(text)
for label, group in groups:
char_cnt = sum(1 for _ in group)
if char_cnt > 2:
cnt += 1
elif mode == 'emojis':
cnt = count(doc, emoji_list)
else:
cnt = count(doc, wordlist)
counts.append(float(cnt)/doc_length) if not raw_frequency and not sum_all else counts.append(cnt)
whole_length += doc_length
if not sum_all:
return {'counts': counts}
else:
if raw_frequency:
return {'counts': sum(counts)}
return {'counts': float(sum(counts))/whole_length}
def emoji_sentiment(input_dict):
corpus = input_dict['corpus']
emoji_dict = {}
folder_path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(folder_path, 'models', 'emoji_dataset.csv')
df_emojis = pd.read_csv(path, delimiter=",", encoding="utf-8")
for index, row in df_emojis.iterrows():
occurrences = float(row['Occurrences'])
pos = (float(row['Positive']) + 1) / (occurrences + 3)
neg = (float(row['Negative']) + 1) / (occurrences + 3)
sent = pos - neg
emoji_dict[row['Emoji']] = sent
sentiments = []
for doc in corpus:
sentiment = 0
l = emoji_dict.keys()
for pattern in l:
text_cnt = doc.count(pattern)
sentiment += float(emoji_dict[pattern]) * text_cnt
sentiments.append(sentiment)
return {'sentiments': sentiments}
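# The per-emoji score used above is an additively smoothed positive-vs-negative
# difference: sent = (pos + 1)/(n + 3) - (neg + 1)/(n + 3).  For example, an emoji
# seen n=100 times with 70 positive and 10 negative labels scores
# (71 - 11) / 103 ~= 0.58; a document's sentiment is then the sum of these scores
# weighted by how many times each emoji occurs in the text.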
def display_corpus_statistic(input_dict):
#implemented in visualization_views.py
return {}
def filter_corpus(input_dict):
corpus = input_dict['dataframe']
query = input_dict['query']
if '>' in query:
query = query.split('>')
column_name, value = query[0].strip(), float(query[1].strip())
corpus = corpus[corpus[column_name] > value]
elif '<' in query:
query = query.split('<')
column_name, value = query[0].strip(), float(query[1].strip())
corpus = corpus[corpus[column_name] < value]
elif '==' in query:
query = query.split('==')
column_name, value = query[0].strip(), query[1].strip()
corpus = corpus[corpus[column_name] == value]
elif '!=' in query:
query = query.split('!=')
column_name, value = query[0].strip(), query[1].strip()
corpus = corpus[corpus[column_name] != value]
elif 'in' in query:
query = query.split(' in ', 1)
value, column_name = query[0].strip(), query[1].strip()
corpus = corpus[corpus[column_name].str.contains(value)]
return {'dataframe': corpus}
def corpus_to_csv(input_dict):
return {}
def display_result(input_dict):
return {}
def build_dataframe_from_columns(input_dict):
columns = input_dict['corpus']
names = [str(name).strip() for name in input_dict['names'].split(',')]
if len(names) != len(columns):
names = ['Column_' + str(i+1) for i in range(len(columns))]
df = pd.DataFrame(columns)
df = df.transpose()
df.columns = names
return {'df':df}
def group_by_column(input_dict):
chosen_column = input_dict['column']
df = input_dict['df']
columns = df.columns.tolist()
#print(columns)
columns.remove(chosen_column)
group_dict = {}
for index, row in df.iterrows():
if row[chosen_column] not in group_dict:
chosen_column_dict = {}
for column in columns:
chosen_column_dict[column] = [row[column]]
else:
chosen_column_dict = group_dict[row[chosen_column]]
for column in columns:
chosen_column_dict[column].append(row[column])
group_dict[row[chosen_column]] = chosen_column_dict
df_list = []
for key, value in group_dict.items():
end_dict = {}
end_dict[chosen_column] = key
for column in columns:
end_dict[column] = " ".join([unicode(str(x), 'utf8') for x in value[column]]).replace('\n', ' ')
df_list.append(end_dict)
df_grouped = pd.DataFrame(df_list)
return {'df':df_grouped}
def concatenate_corpora(input_dict):
dfs = input_dict['dfs']
return {'df': pd.concat(dfs)}
def gender_classification(input_dict):
from gender_classification import preprocess, createFeatures, simplify_tag
lang = input_dict['lang']
df = input_dict['dataframe']
column = input_dict['column']
output_name = input_dict['output_name']
corpus = df[column].tolist()
folder_path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(folder_path, 'models', 'gender_classification', 'lr_clf_' + lang + '_gender_python2.pkl')
sys.modules['gender_classification'] = genclass
#get pos tags
if lang == 'en':
sent_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
pos_tags = PerceptronTagger()
else:
pos_tags = PerceptronTagger(load=False)
if lang == 'es':
sent_tokenizer = nltk.data.load('tokenizers/punkt/spanish.pickle')
pos_tags.train(list(cess.tagged_sents()))
elif lang == 'pt':
sent_tokenizer = nltk.data.load('tokenizers/punkt/portuguese.pickle')
tsents = floresta.tagged_sents()
tsents = [[(w.lower(), simplify_tag(t)) for (w, t) in sent] for sent in tsents if sent]
pos_tags.train(tsents)
else:
sent_tokenizer = None
df_data = pd.DataFrame({'text': corpus})
df_prep = preprocess(df_data, lang, pos_tags, sent_tokenizer)
df_data = createFeatures(df_prep)
X = df_data
clf = joblib.load(path)
y_pred_gender = clf.predict(X)
df_results = pd.DataFrame({output_name: y_pred_gender})
return {'df': pd.concat([df, df_results], axis=1)}
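# language_variety_classification follows the same pattern but delegates to the
# language_variety.predict helper: it loads per-language weights (.hdf5) and auxiliary data
# (.pk) from models/language_variety (the .hdf5 weights suggest a neural model) and maps the
# predicted indices back to variety tags via tags_to_idx.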
def language_variety_classification(input_dict):
from language_variety import predict
lang = input_dict['lang']
df_old = input_dict['dataframe']
column = input_dict['column']
output_name = input_dict['output_name']
df_new = df_old[[column]]
folder_path = os.path.dirname(os.path.realpath(__file__))
weights_path = os.path.join(folder_path, 'models', 'language_variety', 'model_' + lang + '_weights.hdf5')
data_path = os.path.join(folder_path, 'models', 'language_variety', 'model_' + lang + '_data.pk')
sys.modules['language_variety'] = varclass
y_pred, tags_to_idx = predict(df_new, column, lang, weights_path, data_path)
y_pred = [tags_to_idx[pred] for pred in y_pred]
df_old.reset_index(drop=True, inplace=True)
df_results = pd.DataFrame({output_name: y_pred})
return {'df': pd.concat([df_old, df_results], axis=1)}
def italian_sentiment_analysis(input_dict):
from sentiment_analysis import preprocess, createFeatures
df = input_dict['dataframe']
column = input_dict['column']
output_name = input_dict['output_name']
password = '7pKtPguy'
token = input_dict['password'].strip()
if password != token:
raise ValueError('Wrong password!')
corpus = df[column].tolist()
folder_path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(folder_path, 'models', 'sentiment_analysis', 'lr_clf_sentiment_python2.pkl')
sys.modules['sentiment_analysis'] = sentclass
df_data = pd.DataFrame({'text': corpus})
df_prep = preprocess(df_data)
df_data = createFeatures(df_prep)
X = df_data
clf = joblib.load(path)
y_pred_gender = clf.predict(X)
df_results = pd.DataFrame({output_name: y_pred_gender})
return {'df': pd.concat([df, df_results], axis=1)}
def extract_true_and_predicted_labels(input_dict):
df = input_dict['dataframe']
true_values = input_dict['true_values']
predicted_values = input_dict['predicted_values']
true_values = df[true_values].tolist()
predicted_values = df[predicted_values].tolist()
return {'labels': [true_values, predicted_values]}
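# terminology_alignment pairs source- and target-language terms. Candidate pairs from
# build_manual_eval_set are described with features derived from GIZA++ word-translation
# dictionaries (giza_dict_*), a per-language training set is filtered and used to fit an SVM
# (LinearSVC, or sklearn's svm.SVC when the cognate-based setup is selected), and only pairs
# predicted as matches (prediction == 1) are returned under the original column names.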
def terminology_alignment(input_dict):
from terminology import *
lang = input_dict['language']
src = input_dict['src']
tar = input_dict['tar']
source_column = input_dict['source_name']
target_column = input_dict['target_name']
cognates = input_dict['cognates']
    cognates = bool(cognates)
source_name = source_column
target_name = target_column
if source_name == target_name:
source_name += " 1"
target_name += " 2"
try:
src = list(src[source_column].values)
tar = list(tar[target_column].values)
except:
raise ValueError('Wrong column name! Either column "' + source_column + '" or column "' + target_column + '" does not exist in input dataframe.' )
if max(len(src), len(tar)) > 2000:
raise ValueError('Input source and target terminologies are too big. The maximum number of terms per language is 2000.' )
src = [term.strip() for term in src if len(term.strip()) > 0]
tar = [term.strip() for term in tar if len(term.strip()) > 0]
df_test = build_manual_eval_set(src, tar)
folder_path = os.path.dirname(os.path.realpath(__file__))
giza = os.path.join(folder_path, 'models', 'terminology', 'giza_dict_en_' + lang + '.txt')
giza_reverse = os.path.join(folder_path, 'models', 'terminology', 'giza_dict_' + lang + '_en.txt')
if cognates:
train_path = os.path.join(folder_path, 'models', 'terminology', 'cognates_train_' + lang + '.csv')
else:
train_path = os.path.join(folder_path, 'models', 'terminology', 'train_' + lang + '.csv')
df_train = pd.read_csv(train_path, encoding="utf8")
df_train = filterTrainSet(df_train, 200, cognates=cognates)
if lang == 'sl':
dd = arrangeLemmatizedData(giza)
dd_reversed = arrangeLemmatizedData(giza_reverse)
df_test = createLemmatizedFeatures(df_test, dd, dd_reversed, cognates=cognates)
else:
dd = arrangeData(giza)
dd_reversed = arrangeData(giza_reverse)
df_test = createFeatures(df_test, dd, dd_reversed, lang, cognates=cognates)
y = df_train['label'].values
X = df_train.drop(['label'], axis=1)
    if not cognates:
        clf_svm = LinearSVC(C=10, fit_intercept=True)
    else:
        # use a separate name so the imported sklearn `svm` module is not shadowed
        clf_svm = svm.SVC(C=10)
    features = [('cst', digit_col())]
    clf = pipeline.Pipeline([
        ('union', FeatureUnion(
            transformer_list=features,
            n_jobs=1
        )),
        ('scale', Normalizer()),
        ('svm', clf_svm)])
clf.fit(X, y)
y_pred = clf.predict(df_test)
result = pd.concat([df_test, pd.DataFrame(y_pred, columns=['prediction'])], axis=1)
result = result.loc[result['prediction'] == 1]
result = result[['src_term', 'tar_term']]
result = result.rename(index=str, columns={"src_term": source_name, "tar_term": target_name})
return {'df': result}
def terminology_alignment_evaluation(input_dict):
gold_df = input_dict['true']
predicted_df = input_dict['predicted']
try:
true_source_name = input_dict['true_source_name']
true_target_name = input_dict['true_target_name']
predicted_source_name = input_dict['predicted_source_name']
predicted_target_name = input_dict['predicted_target_name']
gold_df['alignment'] = gold_df[true_source_name] + " " + gold_df[true_target_name]
predicted_df['alignment'] = predicted_df[predicted_source_name] + " " + predicted_df[predicted_target_name]
except:
raise ValueError('Wrong column names! Make sure column names in gold standard and predicted terminology lists are correctly defined as parameters.')
return {}
| [
"[email protected]"
] | |
30b3069a243643753361aef6c3cf91415baa3ecd | 5e6d44e8b068cea9783a429b5ef5149cb021a3f4 | /bin/suspend.py | d44a636da566df24b83d3467b3e7ef457b73e2f6 | [
"Apache-2.0"
] | permissive | polyvios/twAwler | a8644f836dfef82550ca9207cee7fd88f5d74f47 | 8e9f2064cad846177ed6547b9f56f053226a2d5e | refs/heads/master | 2021-06-24T10:17:20.221190 | 2020-11-11T17:21:36 | 2020-11-11T17:21:36 | 161,616,004 | 8 | 3 | Apache-2.0 | 2020-11-11T17:21:37 | 2018-12-13T09:34:03 | Python | UTF-8 | Python | false | false | 27 | py | ../twkit/crawler/suspend.py | [
"[email protected]"
] | |
e0c2653407f8e1902e8a34cf7b31b66c2a6119cc | 1025f996a8521a4fa890addd5ede8d5e40905819 | /04_false_death.py | 547cca8a3bcf00261c8e6ac52b358c6c71d66b45 | [] | no_license | nicholustintzaw/sp_mis_stateregion | 2ae1e884773b659d9cc69f6cb9c50edc6fd61cfa | 5dcf3141e0a1fea120dc68d458f2412f24e0af98 | refs/heads/master | 2021-01-14T20:54:04.094706 | 2020-02-24T14:25:26 | 2020-02-24T14:25:26 | 242,756,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,193 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 4 07:55:42 2019
@author: nicholustintzaw
"""
####################################################################################################
'''
project tite : social pension database - national level
purpose : quaterly data combine and check - false death
developed by : Nicholus Tint Zaw
modified date : 7th Dec 2019
follow-up action:
'''
####################################################################################################
####################################################################################################
print('Now, the spyder is working on False Death in Sheet, please wait for a few minutes')
## STEP 2: APPLICATION PACKAGE SETTING ##
## package setting ##
import pandas as pd
import numpy as np
import os  # needed for os.listdir / directory handling below
####################################################################################################
# columns name assignment
col_na = ['benef_id']
col_names = ['No.', 'benef_id', 'Benef: Name']
col_person = ['Benef: Name']
#sheet = ['01_new_register', '02_moved_in', '03_false_death_in', '04_death', '05_moved_out', '06_false_register']
####################################################################################################
####################################################################################################
## STEP 3: COMBINED ALL COMPLETED DATA MIGRATION FIELS ##
## Combined data from each office
df = pd.DataFrame()
i = 1
files = os.listdir(raw)
for xlsx in files :
if xlsx.endswith(".xlsx"):
print(i)
print("now working in " + xlsx)
dta = pd.read_excel(raw + xlsx, sheet_name = '03_false_death_in', \
skiprows = 3, header = None, index_col = False, usecols="A:C", names = col_names)
# drop na from selected main variables
dta = dta.dropna(how = 'all', subset = col_na)
#dta['geo_township'] = geo_township
dta.sort_values('benef_id')
source = xlsx
dta['source'] = source
df = df.append(dta)
i = 1 + i
df_falsed = df
####################################################################################################
####################################################################################################
## STEP 4: SUMMARY STATISTIC FOR DATA MIGRATION FILES ##
# use as different dataset name for summary stat figures
df_test = df
obs = len(df_test.index)
if obs > 0 :
    # Myanmar font zero and wa lone replacement
df_test['benef_id'] = df_test['benef_id'].astype(str)
df_test['benef_id'] = df_test['benef_id'].str.replace('ဝ', '၀')
# english numeric to Myanmar numeric convertion
df_test['benef_id'] = df_test['benef_id'].str.replace('0', '၀')
df_test['benef_id'] = df_test['benef_id'].str.replace('1', '၁')
df_test['benef_id'] = df_test['benef_id'].str.replace('2', '၂')
df_test['benef_id'] = df_test['benef_id'].str.replace('3', '၃')
df_test['benef_id'] = df_test['benef_id'].str.replace('4', '၄')
df_test['benef_id'] = df_test['benef_id'].str.replace('5', '၅')
df_test['benef_id'] = df_test['benef_id'].str.replace('6', '၆')
df_test['benef_id'] = df_test['benef_id'].str.replace('7', '၇')
df_test['benef_id'] = df_test['benef_id'].str.replace('8', '၈')
df_test['benef_id'] = df_test['benef_id'].str.replace('9', '၉')
# keep one state/region
df_state = df_test
# count the number of obs
tot = len(df_state.index)
d_i = {'Total False Death In': [tot]}
dta_i = pd.DataFrame(d_i)
sum_state = dta_i
## STEP 5: DATA QUALITY CHECK ##
## Duplicated Observation
# duplicate by beneficiares info - booleen var + ID
dup_resp = df_test.duplicated(subset = col_person, keep = False)
dup_id = df_test.duplicated(subset = 'benef_id', keep = False)
# duplciate by id and beneficiares info dataset + ID duplicate
dup_resp = df_test.loc[dup_resp == True]
dup_id = df_test.loc[dup_id == True]
# dup respondent info
obs = len(dup_resp)
if obs > 0 :
i = 1
dup_state = pd.DataFrame()
for state in states :
# count the number of obs
tot = len(dup_resp.index)
d_i = {'Total Person Duplicate': [tot]}
dta_i = pd.DataFrame(d_i)
dup_state = dta_i
# dup benef id
obsid = len(dup_id)
if obsid > 0 :
i = 1
dupid_state = pd.DataFrame()
for state in states :
# count the number of obs
tot = len(dup_id.index)
d_i = {'Total ID Duplicate': [tot]}
dta_i = pd.DataFrame(d_i)
dupid_state = dta_i
# export as summary statistic figures for all combined data migration files
#dup_resp.to_excel(output + qrt + '_dup_person.xlsx', index = False)
writer = pd.ExcelWriter(output + region + '_' + qrt + '_falsedeath_check.xlsx', engine = 'xlsxwriter')
sum_state.to_excel(writer, sheet_name = 'District')
obs = len(dup_resp)
if obs > 0 :
dup_state.to_excel(writer, sheet_name = 'dupli_person_stateregion')
dup_resp.to_excel(writer, sheet_name = 'dupli_person_list')
obsid = len(dup_id)
if obsid > 0 :
dupid_state.to_excel(writer, sheet_name = 'dupli_id_stateregion')
dup_id.to_excel(writer, sheet_name = 'dupli_id_list')
writer.save()
writer.close()
####################################################################################################
####################################################################################################
print('Woow, just finished the False Death in checking, please check your outputs folder for result excel files')
| [
"[email protected]"
] | |
eb2f553caf7f0012318af8e16f51e01be76194d2 | 08283bc49ce42b3694035ba5087a006594181753 | /ICA/Ch8Pt1.py | 16526d0229b78dc29a662a903e2397a7c2a02ca5 | [] | no_license | amriikk/CS31-Python | ed9afef49293e556c01c7a734d3fb1259321d6e2 | 6bddddb09dd6ae30fe62814c4307ed95e6e8a1e8 | refs/heads/master | 2023-05-31T20:16:57.874143 | 2021-07-05T06:59:26 | 2021-07-05T06:59:26 | 343,651,979 | 0 | 1 | null | 2021-05-06T05:00:21 | 2021-03-02T05:04:55 | Python | UTF-8 | Python | false | false | 862 | py | # Week 13, ICA 1
def main():
# Part 1
sum = 0
again = 'y'
while again == 'y' or again == 'Y':
nums = input('Enter sequence of numbers: ')
for ch in nums:
sum += int(ch)
print('Sum of digits in', nums, '=', sum)
sum = 0
again = input('Care to try again? (y/n) ')
    # Part 2
print()
again = 'y'
while again == 'y' or again == 'Y':
title = input('Enter the movie title: ')
if len(title) <= 4:
codept1 = title
elif len(title) == 5:
codept1 = title[:len(title):2]
else:
codept1 = title[:5:2] + title[-1]
year = input('Enter the 4-digit year movie was released: ')
codept2 = year[2:]
print('Code for', title, 'is', codept1 + codept2)
again = input('Try again? (y/n) ')
main() | [
"[email protected]"
] | |
175550e1de4d72064321eb9409756618ef056bc7 | e54b88cf4ec9c1a0106f3e69f1cb49f594217f81 | /envi/bin/django-admin.py | bd4f252b4ab91e3141bffaf2dba9d5b61dbbddca | [] | no_license | tejayerraballe/Recruitment | f30dfe57b5206e747d8e7aa9ebbe88e902875efe | 198cc8bd77d1fb6e7566c42de6806113b891137e | refs/heads/master | 2020-03-24T07:42:12.588108 | 2018-07-27T12:31:52 | 2018-07-27T12:31:52 | 142,572,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | #!/home/aspire/Desktop/recruitment/envi/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"[email protected]"
] | |
b1f7e245488f33794ab36743104ff1ae45b668b8 | 197ee9bb6d626992fec5dbd0d8de17d98a8fe4c7 | /config.py | 752b306f14af2d00aee96f527388dd7f3e5619c8 | [] | no_license | Aquanewbie/Local-Guacamole | 28faab2143f699b8e6db44c9ec8d0ac112af1e7e | a511161cf3e257af7245a3bb166770101be36f0e | refs/heads/master | 2020-12-05T21:59:56.475010 | 2020-03-31T07:29:05 | 2020-03-31T07:29:05 | 232,258,741 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | # Set Up Postgres Database by importing Postgres Data/Import_Postgres/GuacamoleCountries-Edited.csv into a Postgress Database
# Postgres Credentials
host='localhost'
dbname='Avocado'
user='postgres'
password='postgres'
| [
"[email protected]"
] | |
8b2e5e7319aef71aacb32a255f41accd9d8694bb | 8f3cb53f495c22873a50d3e22e0ae26ef5a389b4 | /TestCase/testcase01.py | 3778b9255078434958af92de2e2ac5fa319b5247 | [] | no_license | yangwei211/AppiumPython | 4029e08668b296ef5ab9760bf25273e064cbc49b | ed89969f9e4776e452a67f002e3b56fcd2aa453e | refs/heads/master | 2020-03-26T11:50:34.169387 | 2018-08-15T14:19:13 | 2018-08-15T14:19:13 | 144,862,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,920 | py | # coding=utf-8
from common.base import BaseCommon
from Public.login import Mylogin
from selenium import webdriver
import unittest
import os
import time
from report import HTMLTestRunner
class Testlogin(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.get('http://www.baidu.com')
self.basecommon = BaseCommon(self.driver)
def tearDown(self):
print time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
filedir = "D:/webtest/screenshot/"
if not os.path.exists(filedir):
os.makedirs(os.path.join('D:/', 'webtest', 'screenshot'))
screen_name = filedir + time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time())) + ".png"
self.basecommon.screenshot(screen_name)
self.driver.quit()
def testBaidu01_01(self):
self.driver.find_element_by_link_text(u'新闻').click()
time.sleep(2)
self.basecommon.back()
time.sleep(2)
self.basecommon.forward()
time.sleep(2)
print "111111"
self.assertEqual("https://www.baidu.com", self.driver.current_url)
def testBaidu01_02(self):
time.sleep(2)
self.basecommon.fresh()
ele = self.basecommon.untilTime("ID", 'kw')
ele.send_keys("123")
print "test结束"
if __name__ == "__main__":
# suite = unittest.TestLoader().loadTestsFromTestCase(Testlogin)
# unittest.TextTestRunner(verbosity=2).run(suite)
suiteTest = unittest.TestSuite()
suiteTest.addTest(Testlogin("testBaidu01_01"))
suiteTest.addTest(Testlogin("testBaidu01_02"))
filePath = "../report/Result.html"
fp = file(filePath, 'wb')
    # set the report title and description
runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title='Test Report', description='This is Report')
runner.run(suiteTest)
| [
"[email protected]"
] | |
963d3d60ad8cb4f594388d7db85a07a68e0b5861 | 7f5e39adc33b71e226a425c8e05012f9c449d834 | /bots/corona-charts-risklayer/risklayer_dashboard/risklayer_dashboard.py | c727379f6d5ee05071b2ab7af8639417414550e4 | [] | no_license | nzzdev/st-methods | a87eb4967b64bd1079a1a42e280fcf7679b6eb7d | fa12656afa043303ec99374b51c5e12762f20a35 | refs/heads/master | 2023-08-17T03:32:33.553268 | 2023-08-16T19:22:08 | 2023-08-16T19:22:08 | 107,977,306 | 52 | 8 | null | 2022-11-24T16:20:14 | 2017-10-23T12:12:52 | Jupyter Notebook | UTF-8 | Python | false | false | 10,918 | py | import os
import pandas as pd
import gspread
import sys
import json
from datetime import datetime, timedelta
from time import sleep
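# Overall flow: pull the "Curve" sheet from the Risklayer Google spreadsheet, derive three
# series (ICU patients, 7-day averages of new cases and new deaths) plus their trends,
# translate the trend percentages into the labels steigend/fallend/gleichbleibend
# (rising/falling/flat, threshold +/-5%), and write the result to data/dashboard_de.json,
# which is then registered with the chart id passed to update_chart().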
if __name__ == '__main__':
try:
# add parent directory to path so helpers file can be referenced
sys.path.append(os.path.dirname((os.path.dirname(__file__))))
from helpers import *
# set working directory, change if necessary
os.chdir(os.path.dirname(__file__))
# create dict with Google API keys instead of using JSON file
google_secrets = {
'type': 'service_account',
'project_id': 'nzz-risklayer-sheet',
'private_key_id': os.environ['GSPREAD_PRIVATE_KEY_ID'],
'private_key': os.environ['GSPREAD_PRIVATE_KEY'].replace('\\n', '\n'),
'client_email': '[email protected]',
'client_id': '117834230046379590580',
'auth_uri': 'https://accounts.google.com/o/oauth2/auth',
'token_uri': 'https://oauth2.googleapis.com/token',
'auth_provider_x509_cert_url': 'https://www.googleapis.com/oauth2/v1/certs',
'client_x509_cert_url': 'https://www.googleapis.com/robot/v1/metadata/x509/view-risklayer-sheet%40nzz-risklayer-sheet.iam.gserviceaccount.com'
}
# authenticate with dict
gc = gspread.service_account_from_dict(google_secrets)
# key from risklayer spreadsheet
sh = gc.open_by_key('1wg-s4_Lz2Stil6spQEYFdZaBEp8nWW26gVyfHqvcl8s')
# open spreadsheet and merge dataframes
wsh = download_sheet(sh, 'Curve')
# B:B date column
dt = get_sheet(wsh, 'B:B')
df1 = pd.DataFrame(data=dt)
# Z:Z ICU patients
icu = get_sheet(wsh, 'Z:Z')
df2 = pd.DataFrame(data=icu)
# F:F new cases 7-day mvg avg
cases = get_sheet(wsh, 'F:F')
df3 = pd.DataFrame(data=cases)
# S:S new deaths 7-day mvg avg
deaths = get_sheet(wsh, 'T:T')
df4 = pd.DataFrame(data=deaths)
df = pd.concat([df1, df2, df3, df4], axis=1)
# AA:AA ICU patients trend
trend_icu = get_sheet(wsh, 'AA:AA')
df2 = pd.DataFrame(data=trend_icu)
# H:H new cases 7-day mvg avg trend
trend_cases = get_sheet(wsh, 'H:H')
df3 = pd.DataFrame(data=trend_cases)
# U:U new deaths 7-day mvg avg trend
trend_deaths = get_sheet(wsh, 'U:U')
df4 = pd.DataFrame(data=trend_deaths)
# AC:AC new ICU patients
# new_patients = get_sheet(wsh, 'AC:AC')
# df5 = pd.DataFrame(data=new_patients)
# Z:Z ICU patients
diff_patients = get_sheet(wsh, 'Z:Z')
df5 = pd.DataFrame(data=diff_patients)
# F:F new deaths 7-day mvg avg
new_cases = get_sheet(wsh, 'F:F')
df6 = pd.DataFrame(data=new_cases)
# T:T new deaths 7-day mvg avg
new_deaths = get_sheet(wsh, 'T:T')
df7 = pd.DataFrame(data=new_deaths)
df_meta = pd.concat([df1, df2, df3, df4, df5, df6, df7], axis=1)
# drop some rows + column header and reindex
df = df.drop(df.index[0:212]).reset_index(drop=True)
df_meta = df_meta.drop(df_meta.index[0:688]).reset_index(drop=True)
# create column header names
cols = list(df.columns)
cols[0] = 'date'
cols[1] = 'Intensivpatienten'
cols[2] = 'Neuinfektionen'
cols[3] = 'Neue Todesfälle'
df.columns = cols
cols = list(df_meta.columns)
cols[0] = 'date'
cols[1] = 'Trend ICU'
cols[2] = 'Trend Fälle'
cols[3] = 'Trend Tote'
cols[4] = 'Diff ICU'
cols[5] = 'Neu Fälle'
cols[6] = 'Neu Tote'
df_meta.columns = cols
# clean numeric values and remove rows with empty values
df['Intensivpatienten'] = df['Intensivpatienten'].str.replace(
'.', '', regex=False)
df['Neuinfektionen'] = df['Neuinfektionen'].str.replace(
'.', '', regex=False).str.replace(',', '.', regex=False)
df['Neue Todesfälle'] = df['Neue Todesfälle'].str.replace(
'.', '', regex=False).str.replace(',', '.', regex=False)
df_meta['Trend ICU'] = df_meta['Trend ICU'].str.replace(
',', '.', regex=False).str.replace('%', '', regex=False)
df_meta['Trend Fälle'] = df_meta['Trend Fälle'].str.replace(
',', '.', regex=False).str.replace('%', '', regex=False)
df_meta['Trend Tote'] = df_meta['Trend Tote'].str.replace(
',', '.', regex=False).str.replace('%', '', regex=False)
df_meta['Diff ICU'] = df_meta['Diff ICU'].str.replace(
'.', '', regex=False)
df_meta['Neu Fälle'] = df_meta['Neu Fälle'].str.replace(
'.', '', regex=False).str.replace(',', '.', regex=False)
df_meta['Neu Tote'] = df_meta['Neu Tote'].str.replace(
'.', '', regex=False).str.replace(',', '.', regex=False)
df.dropna(subset=['Neuinfektionen'], inplace=True)
df.dropna(subset=['Neue Todesfälle'], inplace=True)
# df.dropna(subset=['Intensivpatienten'], inplace=True)
# df_meta.dropna(subset=['Trend ICU'], inplace=True)
df_meta.dropna(subset=['Trend Fälle'], inplace=True)
df_meta.dropna(subset=['Trend Tote'], inplace=True)
# df_meta.dropna(subset=['Diff ICU'], inplace=True)
df_meta.dropna(subset=['Neu Fälle'], inplace=True)
df_meta.dropna(subset=['Neu Tote'], inplace=True)
# convert numeric strings to int/float
df['Intensivpatienten'] = df['Intensivpatienten'].astype(float) # nan
df['Neuinfektionen'] = df['Neuinfektionen'].astype(
float).round().astype(int)
df['Neue Todesfälle'] = df['Neue Todesfälle'].astype(
float).round().astype(int)
df_meta['Trend ICU'] = df_meta['Trend ICU'].astype(float) # nan
df_meta['Trend Fälle'] = df_meta['Trend Fälle'].astype(float)
df_meta['Trend Tote'] = df_meta['Trend Tote'].astype(float)
df_meta['Diff ICU'] = df_meta['Diff ICU'].astype(float) # nan
df_meta['Neu Fälle'] = df_meta['Neu Fälle'].astype(
float).round().astype(int)
df_meta['Neu Tote'] = df_meta['Neu Tote'].astype(
float).round().astype(int)
# check if last row in ICU column is not nan, then shift cases and deaths
        if pd.notna(df['Intensivpatienten'].iloc[-1]):
df = df.append(pd.DataFrame(df[-1:].values, columns=df.columns))
df_meta = df_meta.append(pd.DataFrame(
df_meta[-1:].values, columns=df_meta.columns))
df['Neuinfektionen'] = df['Neuinfektionen'].shift(1)
df['Neue Todesfälle'] = df['Neue Todesfälle'].shift(1)
df_meta['Trend Fälle'] = df_meta['Trend Fälle'].shift(1)
df_meta['Trend Tote'] = df_meta['Trend Tote'].shift(1)
df_meta['Neu Fälle'] = df_meta['Neu Fälle'].shift(1)
df_meta['Neu Tote'] = df_meta['Neu Tote'].shift(1)
# drop rows (last row with cases and deaths is temporary)
df = df.iloc[1:-1].reset_index(drop=True)
df_meta = df_meta.iloc[1:-1].reset_index(drop=True)
df['Intensivpatienten'] = df['Intensivpatienten'].astype(int)
# replace percentages with strings
cols = ('Trend ICU', 'Trend Fälle', 'Trend Tote')
def replace_vals(df_meta):
for col in cols:
if df_meta[col] >= 5:
df_meta[col] = 'steigend'
elif df_meta[col] <= -5:
df_meta[col] = 'fallend'
else:
df_meta[col] = 'gleichbleibend'
return df_meta
df_meta = df_meta.apply(replace_vals, axis=1)
# calculate decrease/increase in ICU patients
df_meta.loc[df_meta.index[-1], 'Diff ICU'] = df_meta.loc[df_meta.index[-1],
'Diff ICU'] - df_meta.loc[df_meta.index[-2], 'Diff ICU']
# get last values of df_meta as objects
df_meta = df_meta.iloc[-1]
trend_icu = df_meta['Trend ICU']
trend_cases = df_meta['Trend Fälle']
trend_deaths = df_meta['Trend Tote']
diff_icu = df_meta['Diff ICU']
diff_cases = df_meta['Neu Fälle']
diff_deaths = df_meta['Neu Tote']
# convert dates to ISO standard with European-style date parsing
df['date'] = pd.to_datetime(
df['date'], dayfirst=True).dt.strftime('%Y-%m-%d')
# get date for chart notes and add one day
timestamp_str = df['date'].iloc[-1]
timestamp_dt = datetime.strptime(
timestamp_str, '%Y-%m-%d') + timedelta(days=1)
# timestamp_str_notes = timestamp_dt.strftime('%-d. %-m. %Y')
# create dictionaries for JSON file
dict_icu = df.drop(['Neuinfektionen', 'Neue Todesfälle'], axis=1).rename(
columns={'Intensivpatienten': 'value'}).to_dict(orient='records')
dict_cases = df.drop(['Intensivpatienten', 'Neue Todesfälle'], axis=1).rename(
columns={'Neuinfektionen': 'value'}).to_dict(orient='records')
dict_deaths = df.drop(['Intensivpatienten', 'Neuinfektionen'], axis=1).rename(
columns={'Neue Todesfälle': 'value'}).to_dict(orient='records')
# additional data for JSON file
meta_icu = {'indicatorTitle': 'Intensivpatienten', 'date': timestamp_str, 'indicatorSubtitle': 'Belegte Betten',
'value': int(diff_icu), 'color': '#24b39c', 'trend': trend_icu, 'chartType': 'area'}
meta_cases = {'indicatorTitle': 'Neuinfektionen', 'date': timestamp_str, 'indicatorSubtitle': '7-Tage-Schnitt',
'value': int(diff_cases), 'color': '#e66e4a', 'trend': trend_cases, 'chartType': 'area'}
meta_deaths = {'indicatorTitle': 'Neue Todesfälle', 'date': timestamp_str, 'indicatorSubtitle': '7-Tage-Schnitt',
'value': int(diff_deaths), 'color': '#05032d', 'trend': trend_deaths, 'chartType': 'area'}
# merge dictionaries
meta_icu['chartData'] = dict_icu
meta_cases['chartData'] = dict_cases
meta_deaths['chartData'] = dict_deaths
dicts = []
dicts.append(meta_icu)
dicts.append(meta_cases)
dicts.append(meta_deaths)
# save data
if not os.path.exists('data'):
os.makedirs('data')
with open('./data/dashboard_de.json', 'w') as fp:
json.dump(dicts, fp, indent=4)
fp.close()
file = [{
"loadSyncBeforeInit": True,
"file": {
"path": "./risklayer_dashboard/data/dashboard_de.json"
}
}]
# run function
update_chart(id='499935fb791197fd126bda721f15884a', files=file)
except:
raise
| [
"[email protected]"
] | |
dabf323db7db542c0af109320c3ef82625420025 | f81099738d3ab7d4a4773a04ed9e36e493632590 | /angelos-server/scripts/scriptlets.py | 8e67a21cb5f52e1b4c729063fe0a22272073f0b0 | [
"MIT"
] | permissive | kristoffer-paulsson/angelos | eff35753e4d7e4465d2aadac39265f206b09fcf9 | d789f47766fe3a63a6752b92e4ea955f420dbaf9 | refs/heads/master | 2022-05-05T15:16:59.340527 | 2022-03-27T16:05:51 | 2022-03-27T16:05:51 | 142,691,235 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,614 | py | #
# Copyright (c) 2018-2020 by Kristoffer Paulsson <[email protected]>.
#
# This software is available under the terms of the MIT license. Parts are licensed under
# different terms if stated. The legal terms are attached to the LICENSE file and are
# made available on:
#
# https://opensource.org/licenses/MIT
#
# SPDX-License-Identifier: MIT
#
# Contributors:
# Kristoffer Paulsson - initial implementation
#
"""Scriptlets for packaging under pre-install, post-install, pre-uninstall and post-uninstall conditions."""
NAME_NIX = "angelos"
NAME_SERVICE = "{}.service".format(NAME_NIX)
USERNAME = "{}".format(NAME_NIX)
GROUPNAME = "{}".format(NAME_NIX)
DIR_ANGELOS = "/opt/{}".format(NAME_NIX)
DIR_VAR = "/var/lib/{}".format(NAME_NIX)
DIR_LOG = "/var/log/{}".format(NAME_NIX)
DIR_ETC = "/etc/{}".format(NAME_NIX)
LINK_EXE = "/usr/local/bin/{}".format(NAME_NIX)
FILE_EXE = "{0}/bin/{1}".format(DIR_ANGELOS, NAME_NIX)
FILE_ADMINS = "{0}/admins.pub".format(DIR_VAR)
FILE_ENV = "{}/env.json".format(DIR_ETC)
FILE_CONF = "{}/config.json".format(DIR_ETC)
FILE_SERVICE = "/etc/systemd/system/{}".format(NAME_SERVICE)
DATA_ENV_JSON = """
{}
"""
DATA_CONFIG_JSON = """
{}
"""
DATA_SYSTEMD_SERVICE = """
[Unit]
Description = Run the Angelos server
After = network.target
[Service]
Type = forking
AmbientCapabilities = CAP_NET_BIND_SERVICE
ExecStart = {0} -d start
ExecStop = {0} -d stop
ExecReload = {0} -d restart
User = {1}
Group = {2}
# RootDirectory = {3}
RuntimeDirectory = {0}
StateDirectory = {4}
LogsDirectory = {5}
ConfigurationDirectory = {6}
KeyringMode = private
[Install]
WantedBy=default.target
""".format(NAME_NIX, USERNAME, GROUPNAME, DIR_ANGELOS, DIR_VAR, DIR_LOG, DIR_ETC)
SCRIPTLET_PRE_INSTALL = """
grep {0} /etc/group 2>&1>/dev/null
if [ $? != 0 ]
then
groupadd {0}
else
printf "Group {0} already exists.\n"
fi
if id {1} >/dev/null 2>&1; then
printf "User {1} already exists.\n"
else
useradd {1} --system -g {0}
fi
""".format(GROUPNAME, USERNAME)
SCRIPTLET_POST_INSTALL = """
DATA_ENV_JSON=$(cat <<EOF
{{}}
EOF
)
DATA_CONFIG_JSON=$(cat <<EOF
{{}}
EOF
)
DATA_SYSTEMD_SERVICE=$(cat <<EOF
[Unit]
Description = Run the Angelos server
After = network.target
[Service]
Type = forking
AmbientCapabilities = CAP_NET_BIND_SERVICE
ExecStart = {namenix} -d start
ExecStop = {namenix} -d stop
ExecReload = {namenix} -d restart
User = {username}
Group = {groupname}
# RootDirectory = {dirangelos}
RuntimeDirectory = {namenix}
StateDirectory = {dirvar}
LogsDirectory = {dirlog}
ConfigurationDirectory = {diretc}
KeyringMode = private
[Install]
WantedBy=default.target
EOF
)
# Create directories for angelos
mkdir {dirvar} -p
mkdir {dirlog} -p
mkdir {diretc} -p
# Create admin public keys file
if [ -s "{fileadmins}" ]
then
echo "{fileadmins} already exists, left untouched."
else
echo "" > {fileadmins}
fi
# Create configuration
if [ -s "{fileenv}" ]
then
echo "{fileenv} already exists, left untouched."
else
echo $DATA_ENV_JSON > {fileenv}
fi
if [ -s "{fileconf}" ]
then
echo "{fileconf} already exists, left untouched."
else
echo $DATA_CONFIG_JSON > {fileconf}
fi
# Setup systemd service
if [ -s "{fileservice}" ]
then
echo "{fileservice} already exists, left untouched."
else
echo "$DATA_SYSTEMD_SERVICE" > {fileservice}
chmod 644 {fileservice}
systemctl daemon-reload
systemctl enable {nameservice}
echo "Run '>sudo systemctl start {nameservice}' in order to start angelos."
fi
# Set angelos:angelos ownership
chown -R {username}:{groupname} {dirangelos}
chown -R {username}:{groupname} {dirvar}
chown -R {username}:{groupname} {dirlog}
chown -R {username}:{groupname} {diretc}
# Make angelos binary accessible
ln -sf {fileexe} {linkexe}
""".format(
namenix=NAME_NIX, dirvar=DIR_VAR, dirlog=DIR_LOG, diretc=DIR_ETC, fileadmins=FILE_ADMINS,
fileenv=FILE_ENV, fileconf=FILE_CONF, fileservice=FILE_SERVICE, nameservice=NAME_SERVICE,
username=USERNAME, groupname=GROUPNAME, dirangelos=DIR_ANGELOS, fileexe=FILE_EXE, linkexe=LINK_EXE
)
SCRIPTLET_PRE_UNINSTALL = """
# Remove systemd entry
systemctl stop {0}
systemctl disable {0}
rm {1}
systemctl daemon-reload
# Remove angelos link
rm {2}
""".format(NAME_SERVICE, FILE_SERVICE, LINK_EXE)
SCRIPTLET_POST_UNINSTALL = """
# Remove all angelos files
rm -fR {0}/*
rm -fR {1}/*
# Remove all angelos directories
rmdir {0}
rmdir {1}
""".format(DIR_ETC, DIR_ANGELOS)
print("#" * 80)
print("%pre")
print(SCRIPTLET_PRE_INSTALL)
print("%post")
print(SCRIPTLET_POST_INSTALL)
print("%preun")
print(SCRIPTLET_PRE_UNINSTALL)
print("%postun")
print(SCRIPTLET_POST_UNINSTALL)
print("#" * 80) | [
"[email protected]"
] | |
5207e2fe277102a4816b5464211aae69802fef1a | f63c4eb29ce57319441f5469d1d049b63bc220de | /swu_cycle_variance/run671.py | aded12c6c5cfe22af1f876f6a4907f4b1dcf3433 | [] | no_license | a-co/diversion_models | 0237642153668b16035699e9e734ff0538568582 | 69eed2687b1cd2b48f5717d15919eccd24a0eabc | refs/heads/main | 2023-05-02T19:04:26.333677 | 2020-06-18T20:50:18 | 2020-06-18T20:50:18 | 216,904,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,362 | py | SIMULATION = {'simulation': {'agent': [{'name': 'deployer_military', 'prototype': 'military_deployer'}, {'name': 'deployer_civilian', 'prototype': 'civilian_deployer'}, {'name': 'deployer_shared', 'prototype': 'shared_deployer'}], 'archetypes': {'spec': [{'lib': 'cycamore', 'name': 'DeployInst'}, {'lib': 'cycamore', 'name': 'Source'}, {'lib': 'cycamore', 'name': 'Sink'}, {'lib': 'cycamore', 'name': 'Storage'}, {'lib': 'cycamore', 'name': 'Reactor'}, {'lib': 'cycamore', 'name': 'Separations'}, {'lib': 'cycamore', 'name': 'Enrichment'}]}, 'control': {'duration': '144', 'explicit_inventory': 'true', 'startmonth': '1', 'startyear': '2020'}, 'prototype': [{'config': {'Source': {'inventory_size': '1e30', 'outcommod': 'u_ore', 'outrecipe': 'r_u_ore', 'throughput': '1e10'}}, 'name': 'mine'}, {'config': {'Separations': {'feed_commod_prefs': {'val': ['1.0', '10.0', '100.0']}, 'feed_commods': {'val': ['u_ore', 'u_ore1', 'u_ore2']}, 'feedbuf_size': '2e8', 'leftover_commod': 'waste', 'streams': {'item': {'commod': 'u_nat', 'info': {'buf_size': '150000', 'efficiencies': {'item': [{'comp': 'U', 'eff': '.99'}, {'comp': 'O', 'eff': '.99'}]}}}}, 'throughput': '2e8'}}, 'name': 'milling'}, {'config': {'Separations': {'feed_commod_prefs': {'val': '1.0'}, 'feed_commods': {'val': 'u_nat'}, 'feedbuf_size': '200000', 'leftover_commod': 'waste', 'streams': {'item': {'commod': 'uf6', 'info': {'buf_size': '200000', 'efficiencies': {'item': {'comp': 'U', 'eff': '.99'}}}}}, 'throughput': '200000'}}, 'name': 'conversion'}, {'config': {'Enrichment': {'feed_commod_prefs': {'val': '1'}, 'feed_commods': {'val': 'uf6'}, 'feed_recipe': 'r_uox', 'max_feed_inventory': '20000', 'product_commod': 'mil_fiss', 'swu_capacity': '3086.3337015201478', 'tails_assay': '0.003', 'tails_commod': 'mil_u_dep'}}, 'name': 'mil_enrichment'}, {'config': {'Storage': {'in_commods': {'val': 'mil_u_dep'}, 'out_commods': {'val': 'mil_u_dep_str'}, 'residence_time': '0'}}, 'name': 'mil_str_u_dep'}, {'config': {'Storage': {'in_commod_prefs': {'val': '1'}, 'in_commods': {'val': 'uf6'}, 'in_recipe': 'r_mil_uox', 'max_inv_size': '30000', 'out_commods': {'val': 'mil_uox'}, 'residence_time': '0'}}, 'name': 'mil_uox_fabrication'}, {'config': {'Reactor': {'assem_size': '14000', 'cycle_time': '29', 'fuel_incommods': {'val': 'mil_uox'}, 'fuel_inrecipes': {'val': 'r_mil_uox'}, 'fuel_outcommods': {'val': 'mil_uox_spent'}, 'fuel_outrecipes': {'val': 'r_mil_uox_spent'}, 'fuel_prefs': {'val': '1'}, 'n_assem_batch': '1', 'n_assem_core': '1', 'power_cap': '0.15', 'refuel_time': '0'}}, 'lifetime': '960', 'name': 'mil_lwr'}, {'config': {'Storage': {'in_commods': {'val': 'mil_mox_spent'}, 'out_commods': {'val': 'mil_mox_spent_str'}, 'residence_time': '60'}}, 'name': 'mil_str_mox_spent'}, {'config': {'Separations': {'feed_commod_prefs': {'val': '1.0'}, 'feed_commods': {'val': 'mil_uox_spent'}, 'feedbuf_size': '30000000000', 'leftover_commod': 'waste', 'streams': {'item': {'commod': 'mil_fiss', 'info': {'buf_size': '3000000000', 'efficiencies': {'item': {'comp': 'Pu', 'eff': '.95'}}}}}, 'throughput': 
'1e100'}}, 'name': 'reprocessing'}, {'config': {'Storage': {'in_commod_prefs': {'val': '10'}, 'in_commods': {'val': 'mil_fiss'}, 'in_recipe': 'r_mil_heu', 'max_inv_size': '1e100', 'out_commods': {'val': 'mil_heu'}, 'residence_time': '0'}}, 'name': 'mil_str_fiss'}, {'config': {'Enrichment': {'feed_commod_prefs': {'val': ['1', '20']}, 'feed_commods': {'val': ['uf6', 'mil_uf6']}, 'feed_recipe': 'r_natl_u', 'max_feed_inventory': '100000', 'product_commod': 'civ_leu', 'swu_capacity': '35000', 'tails_assay': '0.003', 'tails_commod': 'u_dep'}}, 'name': 'civ_enrichment'}, {'config': {'Storage': {'in_commods': {'val': 'u_dep'}, 'out_commods': {'val': 'u_dep_str'}, 'residence_time': '0'}}, 'name': 'civ_str_u_dep'}, {'config': {'Storage': {'in_commod_prefs': {'val': '1000'}, 'in_commods': {'val': 'civ_leu'}, 'in_recipe': 'r_uox', 'max_inv_size': '30000', 'out_commods': {'val': 'uox'}, 'residence_time': '1'}}, 'name': 'civ_fabrication'}, {'config': {'Reactor': {'assem_size': '29565', 'cycle_time': '18', 'fuel_incommods': {'val': 'uox'}, 'fuel_inrecipes': {'val': 'r_uox'}, 'fuel_outcommods': {'val': 'uox_spent'}, 'fuel_outrecipes': {'val': 'r_uox_spent'}, 'n_assem_batch': '1', 'n_assem_core': '3', 'power_cap': '900', 'refuel_time': '0'}}, 'lifetime': '960', 'name': 'civ_lwr'}, {'config': {'Storage': {'in_commods': {'val': 'uox_spent'}, 'out_commods': {'val': 'uox_spent_str'}, 'residence_time': '60'}}, 'name': 'civ_str_uox_spent'}, {'config': {'DeployInst': {'build_times': {'val': ['37', '37', '61', '73']}, 'n_build': {'val': ['1', '1', '1', '1']}, 'prototypes': {'val': ['mil_enrichment', 'mil_str_u_dep', 'mil_uox_fabrication', 'mil_str_fiss']}}}, 'name': 'military_deployer'}, {'config': {'DeployInst': {'build_times': {'val': ['121', '121', '121', '145', '157', '169']}, 'n_build': {'val': ['1', '1', '1', '1', '1', '1']}, 'prototypes': {'val': ['civ_enrichment', 'civ_str_u_dep', 'civ_fabrication', 'civ_lwr', 'civ_str_uox_spent', 'civ_lwr']}}}, 'name': 'civilian_deployer'}, {'config': {'DeployInst': {'build_times': {'val': ['1', '1', '1']}, 'n_build': {'val': ['1', '1', '1']}, 'prototypes': {'val': ['mine', 'milling', 'conversion']}}}, 'name': 'shared_deployer'}], 'recipe': [{'basis': 'mass', 'name': 'r_u_ore', 'nuclide': [{'comp': '0.0071', 'id': '922350000'}, {'comp': '0.9929', 'id': '922380000'}, {'comp': '999', 'id': '120240000'}]}, {'basis': 'mass', 'name': 'r_natl_u', 'nuclide': [{'comp': '0.0071', 'id': '922350000'}, {'comp': '0.9929', 'id': '922380000'}]}, {'basis': 'mass', 'name': 'r_uox', 'nuclide': [{'comp': '0.05', 'id': '922350000'}, {'comp': '0.95', 'id': '922380000'}]}, {'basis': 'mass', 'name': 'r_uox_spent', 'nuclide': [{'comp': '0.01', 'id': '922350000'}, {'comp': '0.94', 'id': '922380000'}, {'comp': '0.01', 'id': '942390000'}, {'comp': '0.001', 'id': '952410000'}, {'comp': '0.03', 'id': '551350000'}]}, {'basis': 'mass', 'name': 'r_mil_uox', 'nuclide': [{'comp': '0.0071', 'id': '922350000'}, {'comp': '0.9929', 'id': '922380000'}]}, {'basis': 'mass', 'name': 'r_mil_uox_spent', 'nuclide': [{'comp': '0.0071', 'id': '922350000'}, {'comp': '0.9919', 'id': '922380000'}, {'comp': '0.001', 'id': '942390000'}]}, {'basis': 'mass', 'name': 'r_mil_heu', 'nuclide': [{'comp': '0.90', 'id': '922350000'}, {'comp': '0.10', 'id': '922380000'}]}]}} | [
"[email protected]"
] | |
c8e42c16e81852f668c4cafab9d26de10083ccb8 | 0baa30c66444dd6937cb9d10651688dd8a7ed3d7 | /Socratica/Classes_Objects.py | be27a0eefce17d99b4ea64f201504f54117a9da9 | [] | no_license | rajeshgokuls/Socratica-Learning | 06943d9a98db3b9c2c7b6e3925a1fc8b8110d5c1 | 16f5dc12560c19246f3659c6cebe8dc9d762ac81 | refs/heads/master | 2020-07-06T07:50:29.148659 | 2019-08-18T01:11:59 | 2019-08-18T01:11:59 | 202,945,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | #using class keyword
#we write a simplest class possible
class User:
pass #pass is way to type a line to does nothing
user1 = User()
user1.first_name ="DAVE"
user1.last_name = "BOWMAN"
print(user1.first_name)
print(user1.last_name)
first_name = "Arthur"
last_name = "Clarke"
print(first_name, last_name) | [
"[email protected]"
] | |
2b9a97de395f4fc77376760d89f988897e87e1d4 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=2.5_rd=0.65_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=93/sched.py | c3937279e8510f4da2fef29ff85908ec01e9b1b7 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | -X FMLP -Q 0 -L 2 106 400
-X FMLP -Q 0 -L 2 76 300
-X FMLP -Q 0 -L 2 52 300
-X FMLP -Q 1 -L 1 47 400
-X FMLP -Q 1 -L 1 43 300
-X FMLP -Q 1 -L 1 35 300
-X FMLP -Q 2 -L 1 34 100
-X FMLP -Q 2 -L 1 27 125
-X FMLP -Q 3 -L 1 25 150
-X FMLP -Q 3 -L 1 24 100
20 175
19 175
16 175
15 150
10 200
| [
"[email protected]"
] | |
dab23e001001cbdcec2ba4be3034eb5f756b2430 | 5275ad8abc39dc033119e0217b41eaab016ebb83 | /scripts/api-exploration/kindai-ocr/decoder.py | 08cad772d544c15812aeb5f2c84e657a33145971 | [] | no_license | amycfarrow/japanese_dictionaries | 0c042d610420e24827a30062e1d96927ad5e0c63 | 72be67e817490fb5235b4a13fd648706bb657bee | refs/heads/master | 2023-03-23T09:47:27.845066 | 2021-03-19T15:44:16 | 2021-03-19T15:44:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,821 | py | import torch
import torch.nn as nn
# two layers of GRU
class Gru_cond_layer(nn.Module):
def __init__(self, params):
super(Gru_cond_layer, self).__init__()
self.cuda = params['cuda']
# attention
self.conv_Ua = nn.Conv2d(params['D'], params['dim_attention'], kernel_size=1)
self.fc_Wa = nn.Linear(params['n'], params['dim_attention'], bias=False)
self.conv_Q = nn.Conv2d(1, 512, kernel_size=11, bias=False, padding=5)
self.fc_Uf = nn.Linear(512, params['dim_attention'])
self.fc_va = nn.Linear(params['dim_attention'], 1)
# the first GRU layer
self.fc_Wyz = nn.Linear(params['m'], params['n'])
self.fc_Wyr = nn.Linear(params['m'], params['n'])
self.fc_Wyh = nn.Linear(params['m'], params['n'])
self.fc_Uhz = nn.Linear(params['n'], params['n'], bias=False)
self.fc_Uhr = nn.Linear(params['n'], params['n'], bias=False)
self.fc_Uhh = nn.Linear(params['n'], params['n'], bias=False)
# the second GRU layer
self.fc_Wcz = nn.Linear(params['D'], params['n'], bias=False)
self.fc_Wcr = nn.Linear(params['D'], params['n'], bias=False)
self.fc_Wch = nn.Linear(params['D'], params['n'], bias=False)
self.fc_Uhz2 = nn.Linear(params['n'], params['n'])
self.fc_Uhr2 = nn.Linear(params['n'], params['n'])
self.fc_Uhh2 = nn.Linear(params['n'], params['n'])
def forward(self, params, embedding, mask=None, context=None, context_mask=None, one_step=False, init_state=None,
alpha_past=None):
n_steps = embedding.shape[0]
n_samples = embedding.shape[1]
Ua_ctx = self.conv_Ua(context)
Ua_ctx = Ua_ctx.permute(2, 3, 0, 1)
state_below_z = self.fc_Wyz(embedding)
state_below_r = self.fc_Wyr(embedding)
state_below_h = self.fc_Wyh(embedding)
if one_step:
if mask is None:
mask = torch.ones(embedding.shape[0])
if self.cuda:
                    mask = mask.cuda()
h2ts, cts, alphas, alpha_pasts = self._step_slice(mask, state_below_r, state_below_z, state_below_h,
init_state, context, context_mask, alpha_past, Ua_ctx)
else:
alpha_past = torch.zeros(n_samples, context.shape[2], context.shape[3])
h2t = init_state
h2ts = torch.zeros(n_steps, n_samples, params['n'])
cts = torch.zeros(n_steps, n_samples, params['D'])
alphas = (torch.zeros(n_steps, n_samples, context.shape[2], context.shape[3]))
alpha_pasts = torch.zeros(n_steps, n_samples, context.shape[2], context.shape[3])
if self.cuda:
                alpha_past = alpha_past.cuda()
                h2ts = h2ts.cuda()
                cts = cts.cuda()
                alphas = alphas.cuda()
                alpha_pasts = alpha_pasts.cuda()
for i in range(n_steps):
h2t, ct, alpha, alpha_past = self._step_slice(mask[i], state_below_r[i], state_below_z[i],
state_below_h[i], h2t, context, context_mask, alpha_past,
Ua_ctx)
h2ts[i] = h2t
cts[i] = ct
alphas[i] = alpha
alpha_pasts[i] = alpha_past
return h2ts, cts, alphas, alpha_pasts
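# The attention computed in _step_slice below is a coverage-based additive attention: past
# attention maps (alpha_past) are convolved by conv_Q into a coverage vector, added to the
# projected annotations (Ua_ctx) and the projected hidden state (Wa_h1), passed through tanh
# and reduced to one score per spatial position by fc_va; alpha is then normalised over the
# feature map (masked by ctx_mask if given) and the context vector ct is the alpha-weighted
# sum of the encoder features.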
# one step of two GRU layers
def _step_slice(self, mask, state_below_r, state_below_z, state_below_h, h, ctx, ctx_mask, alpha_past, Ua_ctx):
# the first GRU layer
z1 = torch.sigmoid(self.fc_Uhz(h) + state_below_z)
r1 = torch.sigmoid(self.fc_Uhr(h) + state_below_r)
h1_p = torch.tanh(self.fc_Uhh(h) * r1 + state_below_h)
h1 = z1 * h + (1. - z1) * h1_p
h1 = mask[:, None] * h1 + (1. - mask)[:, None] * h
# attention
Wa_h1 = self.fc_Wa(h1)
alpha_past_ = alpha_past[:, None, :, :]
cover_F = self.conv_Q(alpha_past_).permute(2, 3, 0, 1)
cover_vector = self.fc_Uf(cover_F)
attention_score = torch.tanh(Ua_ctx + Wa_h1[None, None, :, :] + cover_vector)
alpha = self.fc_va(attention_score)
alpha = alpha.view(alpha.shape[0], alpha.shape[1], alpha.shape[2])
alpha = torch.exp(alpha)
if (ctx_mask is not None):
alpha = alpha * ctx_mask.permute(1, 2, 0)
alpha = alpha / alpha.sum(1).sum(0)[None, None, :]
alpha_past = alpha_past + alpha.permute(2, 0, 1)
ct = (ctx * alpha.permute(2, 0, 1)[:, None, :, :]).sum(3).sum(2)
# the second GRU layer
z2 = torch.sigmoid(self.fc_Wcz(ct) + self.fc_Uhz2(h1))
r2 = torch.sigmoid(self.fc_Wcr(ct) + self.fc_Uhr2(h1))
h2_p = torch.tanh(self.fc_Wch(ct) + self.fc_Uhh2(h1) * r2)
h2 = z2 * h1 + (1. - z2) * h2_p
h2 = mask[:, None] * h2 + (1. - mask)[:, None] * h1
return h2, ct, alpha.permute(2, 0, 1), alpha_past
# calculate probabilities
class Gru_prob(nn.Module):
def __init__(self, params):
super(Gru_prob, self).__init__()
self.fc_Wct = nn.Linear(params['D'], params['m'])
self.fc_Wht = nn.Linear(params['n'], params['m'])
self.fc_Wyt = nn.Linear(params['m'], params['m'])
self.dropout = nn.Dropout(p=0.2)
self.fc_W0 = nn.Linear(int(params['m'] / 2), params['K'])
def forward(self, cts, hts, emb, use_dropout):
logit = self.fc_Wct(cts) + self.fc_Wht(hts) + self.fc_Wyt(emb)
# maxout
shape = logit.shape
shape2 = int(shape[2] / 2)
shape3 = 2
logit = logit.view(shape[0], shape[1], shape2, shape3)
logit = logit.max(3)[0]
if use_dropout:
logit = self.dropout(logit)
out = self.fc_W0(logit)
return out
| [
"[email protected]"
] | |
fd0671e95920ab6be217404063704a625e4abcbd | c14272849ea87cc96d4380b6fb93d5d58405c9aa | /commands/say.py | 05cff4c2ac07780ddbe89549fd444724faf8c048 | [] | no_license | ikuyarihS/Charlotte | 39634c3d0a41d95bc66fac5d7b32e3ebbe9e528c | 16a08cae674055960bc213630f7961d60c02e048 | refs/heads/master | 2020-05-01T03:04:51.587237 | 2019-03-17T17:32:33 | 2019-03-17T22:01:14 | 177,236,955 | 1 | 0 | null | 2019-03-23T02:57:30 | 2019-03-23T02:57:30 | null | UTF-8 | Python | false | false | 363 | py | from discord import Message
def say(command_message, database, what_to_say, *args):
"""Makes Charlotte say something.
Args:
what_to_say: The content of the message Charlotte will send.
Returns:
A discord message.
"""
response = Message(what_to_say, command_message.channel_id, "Charlotte", "Charlotte")
return response
| [
"[email protected]"
] | |
3d45027671db3cfe2941a98b184d4fab0adbf631 | 648132a9a8fef33c02be1b9109fd8f5ed0e88e5c | /monitor.py | af4190c98dc4516c426528d8980ed4a495acc224 | [] | no_license | mr-sk/pool-notification | f1532611d335ade2acccea535048e1d500c8f803 | 387190cc2f3818a58c5a143b412d8918cc622537 | refs/heads/master | 2021-01-01T06:50:01.515398 | 2013-05-11T15:25:38 | 2013-05-11T15:25:38 | 9,782,273 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,398 | py | #!/usr/bin/env python
# -----------------------------------------------------------------------------
# monitor.py
#
# Shoutz: All the BTC & LTC guys on the forums: zif, Delarock.
# efnet #innercircle, freenode #give-me-ltc
# Fuckz: MKEGuy <-- scammer!
#
# Author: sk / [email protected]
# -----------------------------------------------------------------------------
import boto
import pprint
import config
import urllib2
import json
import sys
import pickle
import os
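# The config module is expected to provide at least: config.poolKey (give-me-ltc API key),
# config.snsTopic (ARN of the AWS SNS topic used for alerts) and config.workerDict (a mapping
# of worker name to minimum acceptable hashrate). Illustrative contents (values hypothetical):
#   poolKey = "..."
#   snsTopic = "arn:aws:sns:us-east-1:123456789012:pool-alerts"
#   workerDict = {"rig1": 200}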
class Monitor:
def __init__(self):
self.inmemoryDict = {}
self.loadedDict = {}
self.giveMeLTCAPI = 'https://give-me-ltc.com/api?api_key='
if os.path.exists("workers.p"):
self.loadedDict = pickle.load(open("workers.p", "rb"))
print "[x] Loaded dictionary"
def sendAlert(self, msg):
sns = boto.connect_sns()
mytopic_arn = config.snsTopic
res = sns.publish(mytopic_arn, msg, '')
def heartbeat(self):
req = urllib2.Request(self.giveMeLTCAPI + config.poolKey)
opener = urllib2.build_opener()
for parentKey, subDict in json.loads(opener.open(req).read())['workers'].iteritems():
if 'last_share_timestamp' in subDict:
self.inmemoryDict[parentKey] = subDict['hashrate']
if len(self.loadedDict.keys())< 1:
print "[!] Loaded dictionary is empty, assign (Probably first time running)"
pickle.dump(self.inmemoryDict, open("workers.p", "wb"))
sys.exit()
dictIter = self.loadedDict.iteritems()
for wName, wHash in config.workerDict.iteritems():
for lKey, lHash in dictIter:
if wName not in self.inmemoryDict:
print "[!] Missing worker %s, sending alert" % wName
self.sendAlert("Worker %s has dropped off" % wName)
if lKey in self.inmemoryDict:
print "[x] Worker %s current hash %s, threshold %s" % (lKey, lHash, wHash)
if int(lHash) < wHash:
print "[!] Issue found with worker %s, sending alert" % lKey
self.sendAlert("Worker %s is below %s" % (lKey, wHash))
break
pickle.dump(self.inmemoryDict, open("workers.p", "wb"))
print "[x] Complete\n"
#
# Main
#
monitor = Monitor()
monitor.heartbeat();
| [
"[email protected]"
] | |
aec820532e0b36a76bbf9bea45ae7be6b568d53d | 2b3746d8c7416a22c9cda91f547af6db95896fa9 | /bdd/group_steps.py | da3680d82d30ed1f81d9da86f926080c88c7fe7d | [
"Apache-2.0"
] | permissive | Floskaa/python_training | 4bb5131ce693919164eafe7ddb16c10e6000a08f | 16e126678f0893465add925721a0238e00ae033f | refs/heads/master | 2023-04-06T15:39:59.519297 | 2021-04-15T10:09:51 | 2021-04-15T10:09:51 | 288,427,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,666 | py | from pytest_bdd import given, when, then
from model.group import Group
import random
@given("a group list")
def group_list(db):
return db.get_group_list()
@given("a group with <name>, <header> and <footer>")
def new_group(name, header, footer):
return Group(name=name, header=header, footer=footer)
@when("I add the group to the list")
def add_new_group(app, new_group):
app.group.create(new_group)
@then("the new group list is equal to the old list with the added group")
def verify_group_added(db, group_list, new_group):
old_groups = group_list
new_groups = db.get_group_list()
old_groups.append(new_group)
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
@given("a non-empty group list")
def non_empty_group_list(db, app):
    if len(db.get_group_list()) == 0:
app.group.create(Group(name="str name"))
return db.get_group_list()
@given("a random group from the list")
def random_group(non_empty_group_list):
return random.choice(non_empty_group_list)
@when("I delete the group from the list")
def delete_group(app, random_group):
app.group.delete_group_by_id(random_group.id)
@then("the new group list is equal to the old list without the group")
def verify_group_deleted(db, non_empty_group_list, random_group, app, check_ui):
old_groups = non_empty_group_list
new_groups = db.get_group_list()
assert len(old_groups) - 1 == len(new_groups)
old_groups.remove(random_group)
assert old_groups == new_groups
if check_ui:
assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
| [
"[email protected]"
] | |
76924257ed453cc22baabe2db4fa3eea2c5677ca | c9be87c83eec1f2a0d6f4fce991b72bc3cb15bc2 | /examples/postman_echo/cookie_manipulation/set_delete_cookies_test.py | 371a5372b87ade34d40be270454b9a0aefae0e24 | [
"Apache-2.0"
] | permissive | yanghuizhi/httprunner | e65d95644bc6292e1078b8963b41d2b0d2db6632 | 2cdca180836d78a4005e914a3ee7b66df810a2d4 | refs/heads/master | 2022-06-19T06:48:44.824685 | 2022-04-28T12:35:13 | 2022-04-28T12:35:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,592 | py | # NOTE: Generated By HttpRunner v4.0.0-beta
# FROM: cookie_manipulation/set_delete_cookies.yml
from httprunner import HttpRunner, Config, Step, RunRequest, RunTestCase
class TestCaseSetDeleteCookies(HttpRunner):
config = (
Config("set & delete cookies.")
.variables(**{"foo1": "bar1", "foo2": "bar2"})
.base_url("https://postman-echo.com")
.verify(False)
.export(*["cookie_foo1", "cookie_foo3"])
)
teststeps = [
Step(
RunRequest("set cookie foo1 & foo2 & foo3")
.with_variables(**{"foo3": "bar3"})
.get("/cookies/set")
.with_params(**{"foo1": "bar111", "foo2": "$foo2", "foo3": "$foo3"})
.with_headers(**{"User-Agent": "HttpRunner/${get_httprunner_version()}"})
.extract()
.with_jmespath("$.cookies.foo1", "cookie_foo1")
.with_jmespath("$.cookies.foo3", "cookie_foo3")
.validate()
.assert_equal("status_code", 200)
.assert_not_equal("$.cookies.foo3", "$foo3")
),
Step(
RunRequest("delete cookie foo2")
.get("/cookies/delete?foo2")
.with_headers(**{"User-Agent": "HttpRunner/${get_httprunner_version()}"})
.validate()
.assert_equal("status_code", 200)
.assert_not_equal("$.cookies.foo1", "$foo1")
.assert_equal("$.cookies.foo1", "$cookie_foo1")
.assert_equal("$.cookies.foo3", "$cookie_foo3")
),
]
if __name__ == "__main__":
TestCaseSetDeleteCookies().test_start()
| [
"[email protected]"
] | |
8309c49dbba8df1aeb689118f5b9c1a2892f3c63 | 0a3a0e619f2899dc2730223554ad63b269ff565f | /tests/test_node.py | ac151e1594553f39b7712bfa5d4d77e64ebc63a3 | [
"MIT"
] | permissive | andhus/scantree | 10b4c79217660e6f01fdba37a26875f5ea312a14 | 6f83758fc22db028b8b55aebf55c1db4697697bc | refs/heads/master | 2021-12-24T23:31:54.544064 | 2021-12-11T23:33:18 | 2021-12-11T23:33:18 | 175,146,501 | 11 | 3 | MIT | 2021-12-11T23:33:18 | 2019-03-12T06:09:49 | Python | UTF-8 | Python | false | false | 3,712 | py | from __future__ import print_function, division
import pytest
from scantree import (
RecursionPath,
DirNode,
LinkedDir,
CyclicLinkedDir
)
from scantree.test_utils import get_mock_recursion_path
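# create_basic_entries lays out a small fixture tree inside the given tmpdir:
#   d1/ (directory), f1 (file containing "file1"), ld1 -> d1 (symlinked directory),
#   lf1 -> f1 (symlinked file).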
def create_basic_entries(local_path):
d1 = local_path.join('d1')
d1.mkdir()
f1 = local_path.join('f1')
f1.write('file1')
local_path.join('ld1').mksymlinkto(d1)
local_path.join('lf1').mksymlinkto(f1)
class TestDirNode(object):
test_class = DirNode
def test_init(self):
dn = self.test_class(RecursionPath.from_root('.'), [], [None])
assert dn.directories == (None,)
assert dn.files == tuple()
def test_empty(self):
dn = self.test_class(RecursionPath.from_root('.'), [], [])
assert dn.empty
def test_apply(self, tmpdir):
create_basic_entries(tmpdir)
root = RecursionPath.from_root(tmpdir)
d1 = next((rp for rp in root.scandir() if rp.name == 'd1'))
dn = self.test_class(
path=root,
directories=[self.test_class(d1, files=[1., 2.])],
files=[0.5]
)
dn_new = dn.apply(
file_apply=lambda x: x*2,
dir_apply=lambda dn_: sum(dn_.directories) ** 2 + sum(dn_.files)
)
assert dn_new == ((2 + 4) ** 2 + 1)
def test_leafpaths_filepaths(self):
rp_file1 = get_mock_recursion_path('file1')
rp_dir1 = get_mock_recursion_path('dir1')
rp_file2 = get_mock_recursion_path('dir1/file2')
rp_linked_dir = get_mock_recursion_path('linked_dir')
rp_cyclic = get_mock_recursion_path('cyclic')
rp_cyclic_target = get_mock_recursion_path('cyclic_target')
ordered_leafpaths = [rp_cyclic, rp_file2, rp_file1, rp_linked_dir]
ordered_filepaths = [rp_file2, rp_file1]
tree = self.test_class(
path=get_mock_recursion_path(''),
files=[rp_file1],
directories=[
CyclicLinkedDir(path=rp_cyclic, target_path=rp_cyclic_target),
self.test_class(
path=rp_dir1,
files=[rp_file2]
),
LinkedDir(path=rp_linked_dir),
]
)
assert tree.leafpaths() == ordered_leafpaths
assert tree.filepaths() == ordered_filepaths
def test_entries(self):
dn = self.test_class(
RecursionPath.from_root('.'),
files=[None],
directories=['d1', 'd2']
)
assert dn.entries == dn.files + dn.directories
class TestLinkedDir(object):
test_class = LinkedDir
@staticmethod
def get_default_kwargs():
return {'path': get_mock_recursion_path('path/to/ld')}
def test_undefined_attributes(self):
ld = self.test_class(**self.get_default_kwargs())
for attribute in ['files', 'directories', 'entries']:
with pytest.raises(AttributeError):
getattr(ld, attribute)
def test_empty(self):
ld = self.test_class(**self.get_default_kwargs())
with pytest.raises(AttributeError):
ld.empty
def test_apply(self):
ld = self.test_class(**self.get_default_kwargs())
res = ld.apply(dir_apply=lambda x: (x, 1), file_apply=None)
assert res == (ld, 1)
class TestCyclicLinkedDir(TestLinkedDir):
test_class = CyclicLinkedDir
@staticmethod
def get_default_kwargs():
return {
'path': get_mock_recursion_path('path/to/ld'),
'target_path': get_mock_recursion_path('target')
}
def test_empty(self):
cld = self.test_class(**self.get_default_kwargs())
        assert cld.empty is False
| [
"[email protected]"
] | |
bfc7bdefb066f9d97cd24b4d0a3d96d75ffd7301 | a62e9f76baaab0fa06b7a342b80e53a4afa9bfc7 | /shop/migrations/0004_contact.py | c25155b846f338875ad30349aa5e0c2e08f95565 | [] | no_license | mohakshines/My-Awesome-Cart | c7ffb5af6ccf37a7bfd2829cf8b9bd47e4e2efcc | 6adf2e66b757572ff6916b8e289272fe437600ae | refs/heads/main | 2023-02-12T00:19:27.440646 | 2021-01-07T12:29:25 | 2021-01-07T12:29:25 | 327,587,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | # Generated by Django 3.1.1 on 2020-10-15 13:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0003_auto_20201015_0231'),
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('contact_id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=50)),
                ('email', models.EmailField(default='', max_length=40)),
('phone', models.CharField(default='', max_length=12)),
('desc', models.CharField(max_length=1000)),
],
),
]
| [
"[email protected]"
] | |
3893666a24322d2307aa3aeefcc6c03747cf215c | 87ff4662af7401ed9a37f729dafbe413394b1592 | /By_Glove/data_helpers.py | 3d0c7cd03ffdad2c97399989eb2aebac73d78d1f | [] | no_license | mike1201/CNN_Topic_Classification | 0c449991f82ddd6ab4dc4530fb5a844c0b226155 | 50daea6cb322df90c9ea86bb66e64fbe0b585ed1 | refs/heads/master | 2021-09-01T10:31:25.429728 | 2017-12-26T13:40:16 | 2017-12-26T13:40:16 | 113,930,165 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,762 | py | import numpy as np
import re
import os
import itertools
from collections import Counter
def clean_str(string):
"""
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
# 1-1
def load_data_and_labels_another():
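    # Read the cleaned questions for each of the five topics and pair them with one-hot topic labels.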
    x_text = []
    y = []
    topics = ['Technology', 'Business', 'Food', 'Design', 'Books']
os.chdir("./data")
for idx, topic in enumerate(topics):
# made x
clean_questions = list(open(topic+'clean_question.txt', mode ='r').readlines())
clean_questions = [s.strip() for s in clean_questions]
x_text = x_text + clean_questions
# made y
if topic == 'Technology':
y = y + [[1,0,0,0,0] for _ in clean_questions]
elif topic == 'Business':
y = y + [[0,1,0,0,0] for _ in clean_questions]
elif topic == 'Food':
y = y + [[0,0,1,0,0] for _ in clean_questions]
elif topic == 'Design':
y = y + [[0,0,0,1,0] for _ in clean_questions]
elif topic == 'Books':
            y = y + [[0,0,0,0,1] for _ in clean_questions]
y = np.array(y)
os.chdir("..")
return [x_text, y]
# 1-5
def batch_iter(data, batch_size, num_epochs, shuffle=True):
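    # Yield shuffled mini-batches over the data for the requested number of epochs.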
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int(len(data)/batch_size) + 1
for epoch in range(num_epochs):
if shuffle: # Shuffle the data at each epoch
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
# np.arange(3) = array([0,1,2]) , np.random.permutation() : randomly shuffle
else:
shuffled_data = data
# make batches at each epoch
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index]
| [
"[email protected]"
] | |
1c356dac6d2345b8c54e24b077185a63696f6f3a | 9f844facbc5ae26fd601e4f94ecec9e78f068f6a | /PollApplication/chartHandling/charts.py | 03f8256171d492005c976cdfb8316f3418f63e7d | [] | no_license | Luksos9/PythonSQL | 78923e338f1bf4ea493471e83608da32243906ed | 2b5df36f8c274fb428e590121bd46399091679e9 | refs/heads/master | 2022-12-15T17:12:38.534535 | 2020-09-11T18:55:53 | 2020-09-11T18:55:53 | 288,772,329 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | import matplotlib.pyplot as plt
def create_pie_chart(options):
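    # options is expected to be an iterable of (label, value) pairs; returns a matplotlib Figure with a pie chart.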
figure = plt.figure()
axes = figure.add_subplot(1, 1, 1)
axes.pie(
[option[1] for option in options],
labels=[option[0] for option in options],
        autopct="%1.1f%%"
)
return figure
def create_bar_chart(polls):
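    # polls is expected to be an iterable of (poll_title, total_votes) pairs; returns a matplotlib Figure with a bar chart.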
figure = plt.figure(figsize=(10, 10))
figure.subplots_adjust(bottom=0.35) # gives more room underneath (works without it too)
axes = figure.add_subplot(1, 1, 1)
axes.set_title("Polls and their total votes")
axes.set_ylabel("Vote count")
axes.bar(
range(len(polls)), # how wide (x coordinates)
[poll[1] for poll in polls], # how many % of total voices were for this poll (height)
tick_label=[poll[0] for poll in polls] # labels for each bar
)
plt.xticks(rotation=30, ha="center") # adds rotation, and center alignment to labels
return figure
| [
"[email protected]"
] | |
ebf6852a1c6b8b97a07825287c5b90b7c7403ed2 | dca0351e62f4e801fcf733f26cabcfe4bb0bff0b | /opm_data_analyze/gift_pack_type.py | 76ffcb73f06064dee58a1d87d78214b5d92675d5 | [] | no_license | lengye49/opm_data_analyzing_system | 8cefc639af89274363d7fdbd7280e22cc63fa65f | 85fc13425fab1dc561b0bbc49f859673551e7692 | refs/heads/main | 2023-06-22T16:23:59.242825 | 2021-07-23T11:36:38 | 2021-07-23T11:36:38 | 323,501,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,393 | py | # 这个脚本的目标是判断某个阶段玩家在缺某种类型的资源时应该在哪里付费
# 目前只考虑了giftpack相关的产出,todo 基金、通行证、月卡、周卡等系列产出
# todo 各礼包限制条件及玩家所处状态和可购买次数限制
# 目前尚未考虑活动道具转换为目标资源的途径
import pandas as pd
from datetime import datetime
def is_on_time(start, end):
# 判断礼包是否还是时间段内
start = str(start)
end = str(end)
_t = datetime.strptime(target_time, '%Y%m%d')
if start != '0':
_s = datetime.strptime(start, '%Y%m%d%H%M')
if _t < _s:
return False
if end != '0':
_e = datetime.strptime(end, '%Y%m%d%H%M')
if _t > _e:
return False
return True
def check_reward_type(reward):
    # Parse one entry of the reward field and return its (type, value).
s = reward.split(',')
if s[0] == 'prop':
_id = int(s[1])
count = int(s[2])
_type = prop.loc[_id, 'type']
value = prop.loc[_id, 'value'] * count
return _type, value
elif s[0] == 'hero':
_id = int(s[1])
_type = '品质'
value = hero.loc[_id, 'value']
if len(s) > 2:
if s[2] == '6':
value *= 2
return _type, value
elif s[0] == 'equip':
_id = int(s[1])
_type = '装备'
value = equip.loc[_id, 'value']
return _type, value
elif s[0] == 'vip_exp':
return 'vip_exp', int(s[1])
else:
_type = resource.loc[s[0], 'type']
value = resource.loc[s[0], 'value'] * int(s[1])
return _type, value
def check_desire_type(desire_id):
    # Look up the type and value of a "desire" reward by id.
_type = desire.loc[desire_id, 'type']
value = desire.loc[desire_id, 'value']
return _type, value
def check_choose_type(choose_id):
    # Look up the type and value of a "choose" reward by id.
_type = choose.loc[choose_id, 'type']
value = choose.loc[choose_id, 'value']
return _type, value
def cal_type_percentage(_dict, vip, _id):
    # Work out each reward type's share of the pack's total value.
total_value = 0
for k in _dict.keys():
if k != 'vip_exp':
total_value += _dict[k]
    # Total value and overall value-for-money.
result = {'Total': total_value, 'Ratio': total_value / vip}
# '等级%': 0, '品质%': 0, '装备%': 0, '天赋%': 0, '职阶%': 0, '限制器%': 0, '机器人%': 0, '机械核心%': 0, '其它%': 0}
for k in _dict:
result[k] = _dict[k]
for k in _dict:
ks = k.split('|')
for _k in ks:
if _k == 'vip_exp':
if _dict[_k] != vip:
print(_id, 'Vip Dis-Match!')
else:
                # Share of the total value and per-type value-for-money.
result[_k + '%'] = _dict[k] / total_value
result[_k + '_ratio'] = _dict[k] / vip
return result
def update_dict(_dict, k, v):
if k in _dict:
_dict[k] += v
else:
_dict[k] = v
return _dict
def get_rewards(rewards, desires, chooses, vip, _id):
r = {}
if rewards != '0' and rewards != 0:
if '#S#' in rewards:
rewards = rewards.split('#S#')[0]
s = rewards.split(';')
for ss in s:
k, v = check_reward_type(ss)
r = update_dict(r, k, v)
if desires != '0':
s = desires.split(',')
for ss in s:
k, v = check_desire_type(int(ss))
r = update_dict(r, k, v)
if chooses != 0:
k, v = check_choose_type(chooses)
r = update_dict(r, k, v)
r = cal_type_percentage(r, vip, _id)
return r
def get_price(_id):
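    # Look up the pack's price in the Commodity table (commodity id is the pack id times 10); return 0 if not found.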
price = 0
try:
price = commodity.loc[_id * 10, 'price']
finally:
return price
def get_basic_vip(price):
price = float(price)
return ratio.loc[price, 'vip_exp']
def get_cost_performance(total_value, price):
v = ratio.loc[price, 'vip_exp']
return total_value / v
def get_order_key(_dict):
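    # Sorting key taken from the pack's reward breakdown; 0 when the pack has nothing of the chosen type.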
if order_key in _dict:
return _dict[order_key]
else:
return 0
order_key = '品质_ratio'
target_time = '20200505'
prop = pd.read_excel('resource_types.xlsx', header=0, sheet_name='prop', index_col=0)
prop.dropna(axis=0, how='all') # drop empty rows
prop = prop.fillna('未知')
hero = pd.read_excel('resource_types.xlsx', header=0, sheet_name='hero', index_col=0)
hero.dropna(axis=0, how='all') # drop empty rows
hero = hero.fillna('未知')
equip = pd.read_excel('resource_types.xlsx', header=0, sheet_name='equip', index_col=0)
equip.dropna(axis=0, how='all') # drop empty rows
equip = equip.fillna('未知')
desire = pd.read_excel('resource_types.xlsx', header=0, sheet_name='desire', index_col=0)
desire.dropna(axis=0, how='all') # drop empty rows
desire = desire.fillna('未知')
choose = pd.read_excel('resource_types.xlsx', header=0, sheet_name='choose', index_col=0)
choose.dropna(axis=0, how='all') # drop empty rows
choose = choose.fillna('未知')
resource = pd.read_excel('resource_types.xlsx', header=0, sheet_name='others', index_col=0)
resource.dropna(axis=0, how='all') # drop empty rows
resource = resource.fillna('未知')
ratio = pd.read_excel('resource_types.xlsx', header=0, sheet_name='ratio', index_col=0)
ratio.dropna(axis=0, how='all') # drop empty rows
commodity = pd.read_excel('Commodity.xlsx', header=2, sheet_name='Commodity', index_col=0)
commodity.dropna(axis=0, how='all') # drop empty rows
commodity = commodity.drop(['id']) # drop the Chinese label row
commodity = commodity.drop(['Platform', 'ProductId', 'CurrencyPrice', 'Tier'], axis=1)
giftpack = pd.read_excel('GiftPack.xlsx', header=2, sheet_name='GiftPack')
giftpack.dropna(axis=0, how='all') # drop empty rows
giftpack = giftpack.drop([0]) # drop the Chinese label row
giftpack = giftpack.fillna(0) # replace NaN values with 0
# Look up the listed price of each gift pack.
giftpack['Price'] = giftpack['Id'].apply(get_price)
giftpack['Vip'] = giftpack['Price'].apply(get_basic_vip)
# Break each gift pack's rewards down by type.
s = giftpack.apply(lambda g: get_rewards(g['MainStageReward'], str(g['DesireList']), g['ChooseReward'], g['Vip'],
g['Id']), axis=1)
s.name = 'Contents'
giftpack = giftpack.join(s)
# Drop gift packs that are outside their active time window.
giftpack['OnTime'] = giftpack.apply(lambda g: is_on_time(g['StartTime'], g['EndTime']), axis=1)
giftpack = giftpack.drop(giftpack[giftpack['OnTime'] == False].index)
# Drop gift packs whose stage requirements do not match.
# Clean up the GiftPack columns.
giftpack = giftpack.drop(
['Order', 'Name', 'Icon', 'Reward', 'RewardPreview', 'Desire', 'DesirePos', 'DefaultShow', 'NextId', 'IsFree',
'Value', 'Prefab', 'DiamondDes', 'IconBg', 'HotVisible', 'VisibleCondition', 'ExtraPurchaseTimes', 'ImgSource',
'Server', 'MainStage', 'MainStageRewardPreview', 'MainStageReward', 'Version', 'ChooseIdx', 'DesireList',
'ChooseReward',], axis=1)
# ['Id', 'Type', 'SubType', 'TimeType', 'PurchaseTimes', 'StartTime', 'EndTime', 'Note', 'Price', 'Vip', 'Contents']
month_card1 = {'Id': 1, 'Type': 0, 'SubType': 0, 'TimeType': 0, 'PurchaseTimes': 0, 'StartTime': 0, 'EndTime': 0,
'Note': 0, 'Price': 0, 'Vip': 0, 'Contents': 0}
month_card2 = {'Id': 1, 'Type': 0, 'SubType': 0, 'TimeType': 0, 'PurchaseTimes': 0, 'StartTime': 0, 'EndTime': 0,
'Note': 0, 'Price': 0, 'Vip': 0, 'Contents': 0}
growth_fund1 = {'Id': 1, 'Type': 0, 'SubType': 0, 'TimeType': 0, 'PurchaseTimes': 0, 'StartTime': 0, 'EndTime': 0,
'Note': 0, 'Price': 0, 'Vip': 0, 'Contents': 0}
growth_fund2 = {'Id': 1, 'Type': 0, 'SubType': 0, 'TimeType': 0, 'PurchaseTimes': 0, 'StartTime': 0, 'EndTime': 0,
'Note': 0, 'Price': 0, 'Vip': 0, 'Contents': 0}
growth_fund3 = {'Id': 1, 'Type': 0, 'SubType': 0, 'TimeType': 0, 'PurchaseTimes': 0, 'StartTime': 0, 'EndTime': 0,
'Note': 0, 'Price': 0, 'Vip': 0, 'Contents': 0}
battle_pass_1 = {'Id': 1, 'Type': 0, 'SubType': 0, 'TimeType': 0, 'PurchaseTimes': 0, 'StartTime': 0, 'EndTime': 0,
'Note': 0, 'Price': 0, 'Vip': 0, 'Contents': 0}
battle_pass_2 = {'Id': 1, 'Type': 0, 'SubType': 0, 'TimeType': 0, 'PurchaseTimes': 0, 'StartTime': 0, 'EndTime': 0,
'Note': 0, 'Price': 0, 'Vip': 0, 'Contents': 0}
battle_pass_3 = {'Id': 1, 'Type': 0, 'SubType': 0, 'TimeType': 0, 'PurchaseTimes': 0, 'StartTime': 0, 'EndTime': 0,
'Note': 0, 'Price': 0, 'Vip': 0, 'Contents': 0}
hero_card = {'Id': 1, 'Type': 0, 'SubType': 0, 'TimeType': 0, 'PurchaseTimes': 0, 'StartTime': 0, 'EndTime': 0,
'Note': 0, 'Price': 0, 'Vip': 0, 'Contents': 0}
week_card = {'Id': 1, 'Type': 0, 'SubType': 0, 'TimeType': 0, 'PurchaseTimes': 0, 'StartTime': 0, 'EndTime': 0,
'Note': 0, 'Price': 0, 'Vip': 0, 'Contents': 0}
other_packs = pd.DataFrame([month_card1, month_card2, growth_fund1, growth_fund2, growth_fund3, battle_pass_1,
battle_pass_2, battle_pass_3, week_card])
# Sort the gift packs by the chosen metric.
while True:
req = input('选择当前缺少的资源类型:等级,品质,装备,天赋,职阶,限制器,机器人,机械核心,其它\n')
if req in ['等级', '品质', '装备', '天赋', '职阶', '限制器', '机器人', '机械核心', '其它']:
order_key = req + '_ratio'
giftpack['order_key'] = giftpack['Contents'].apply(get_order_key)
giftpack = giftpack.sort_values(by='order_key', ascending=False)
print(giftpack.head(10))
| [
"[email protected]"
] | |
4176072074b7adf073d715e672c081732fbb7b49 | 554b3239a0bfb5c898548625493655dfce9695b1 | /0x0F-python-object_relational_mapping/14-model_city_fetch_by_state.py | 1683382d0a9220058ff72a0645a5eafd2ff7a0dc | [] | no_license | jose120918/holbertonschool-higher_level_programming | 2e03ec12229a1e9f7fa35e95a96fb2d06836d017 | 470eb36fcf5e0cd996ad51c7dae26a1abf144984 | refs/heads/master | 2023-04-07T02:17:44.617525 | 2021-04-15T14:56:39 | 2021-04-15T14:56:39 | 319,470,766 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | #!/usr/bin/python3
"""First usage of SLQAlchemy"""
from sys import argv
from model_state import Base, State
from model_city import City
from sqlalchemy import (create_engine)
from sqlalchemy.orm import sessionmaker, relationship
if __name__ == "__main__":
myU = argv[1]
myP = argv[2]
myDB = argv[3]
myH = "localhost"
city = "City"
states = "states"
db = 'mysql+mysqldb://{}:{}@localhost/{}'.format(myU, myP, myDB)
engine = create_engine(db, pool_pre_ping=True)
# State.cities = relationship(city, order_by=City.id, back_populates=states)
Base.metadata.create_all(engine)
presession = sessionmaker(bind=engine)
session = presession()
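    # Join each city to its state and print them as "State: (city_id) city_name".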
result = session.query(State.name, City.id, City.name.label('cname'))\
.filter(City.state_id == State.id)
# print(result)
for c in result:
print("{}: ({}) {}".format(c.name, c.id, c.cname))
session.close()
| [
"[email protected]"
] | |
3fc9fbce1f4a89f4653f600bd6a9cfa8b4c7af15 | 55f5c87e3a5206ff474f551bb886b6e62979c96a | /unsorted/Player.py | 660377b0682f8f7e0c4ad26533dcfae27492d868 | [] | no_license | robcecc27/miscellaneous_code | 8b2ba83e05dfa40dee93def81429b4aeb7c76fa6 | c528710ec0d51f8b4a90a6626991ec1d19e1b57a | refs/heads/master | 2020-03-29T20:09:03.590291 | 2018-09-26T18:10:22 | 2018-09-26T18:10:22 | 150,298,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,962 | py | from Snake import Game as game
import pygame
from pygame.locals import *
env = game()
env.reset()
action = -1
import random
goal_steps = 300
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
from statistics import median, mean
from collections import Counter
import numpy as np
LR = 1e-3
goal_steps = 300
score_requirement = 50
initial_games = 5000
def some_random_games_first():
# Each of these is its own game.
for episode in range(10):
env = game()
env.reset()
first = True
for _ in range(goal_steps):
# action = random.randrange(0, 3)
action = random.randrange(0, 3)
# action = 2
if first:
first = False
action = 2
# do it! render the previous view
env.render()
observation, reward, done, info = env.step(action)
a = 0
if done: break
def generate_population(model):
# [OBS, MOVES]
global score_requirement
training_data = []
# all scores:
scores = []
# just the scores that met our threshold:
accepted_scores = []
# iterate through however many games we want:
print('Score Requirement:', score_requirement)
for _ in range(initial_games):
print('Simulation ', _, " out of ", str(initial_games), '\r', end='')
# reset env to play again
env.reset()
score = 0
# moves specifically from this environment:
game_memory = []
# previous observation that we saw
prev_observation = []
# for each frame in 200
for _ in range(goal_steps):
# choose random action (0 or 1)
if len(prev_observation) == 0:
action = random.randrange(0, 3)
else:
if not model:
action = random.randrange(0, 3)
else:
prediction = model.predict(prev_observation.reshape(-1, len(prev_observation), 1))
action = np.argmax(prediction[0])
# do it!
observation, reward, done, info = env.step(action)
# notice that the observation is returned FROM the action
# so we'll store the previous observation here, pairing
# the prev observation to the action we'll take.
if len(prev_observation) > 0:
game_memory.append([prev_observation, action])
prev_observation = observation
score += reward
if done: break
# IF our score is higher than our threshold, we'd like to save
# every move we made
# NOTE the reinforcement methodology here.
# all we're doing is reinforcing the score, we're not trying
# to influence the machine in any way as to HOW that score is
# reached.
if score >= score_requirement:
accepted_scores.append(score)
for data in game_memory:
# convert to one-hot (this is the output layer for our neural network)
action_sample = [0, 0, 0]
action_sample[data[1]] = 1
output = action_sample
# saving our training data
training_data.append([data[0], output])
# save overall scores
scores.append(score)
# some stats here, to further illustrate the neural network magic!
print('Average accepted score:', mean(accepted_scores))
print('Score Requirement:', score_requirement)
print('Median score for accepted scores:', median(accepted_scores))
print(Counter(accepted_scores))
score_requirement = mean(accepted_scores)
# just in case you wanted to reference later
training_data_save = np.array([training_data, score_requirement])
np.save('saved.npy', training_data_save)
return training_data
def create_dummy_model(training_data):
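    # Infer the observation length from the training data and build an untrained network of matching shape.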
shape_second_parameter = len(training_data[0][0])
x = np.array([i[0] for i in training_data])
X = x.reshape(-1, shape_second_parameter, 1)
y = [i[1] for i in training_data]
model = create_neural_network_model(input_size=len(X[0]), output_size=len(y[0]))
return model
def create_neural_network_model(input_size, output_size):
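    # Simple fully connected network: two hidden layers of 32 units followed by a softmax output layer.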
network = input_data(shape=[None, input_size, 1], name='input')
network = tflearn.fully_connected(network, 32)
network = tflearn.fully_connected(network, 32)
network = fully_connected(network, output_size, activation='softmax')
network = regression(network, name='targets')
model = tflearn.DNN(network, tensorboard_dir='tflearn_logs')
return model
def train_model(training_data, model=False):
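    # Reshape the observations to (samples, features, 1), fit the network on the recorded actions and save it.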
shape_second_parameter = len(training_data[0][0])
x = np.array([i[0] for i in training_data])
X = x.reshape(-1, shape_second_parameter, 1)
y = [i[1] for i in training_data]
model.fit({'input': X}, {'targets': y}, n_epoch=10, batch_size=16, show_metric=True)
model.save('miniskake_trained.tflearn')
return model
def evaluate(model):
# now it's time to evaluate the trained model
scores = []
choices = []
for each_game in range(20):
score = 0
game_memory = []
prev_obs = []
env.reset()
for _ in range(goal_steps):
env.render()
if len(prev_obs) == 0:
action = random.randrange(0, 3)
else:
prediction = model.predict(prev_obs.reshape(-1, len(prev_obs), 1))
action = np.argmax(prediction[0])
choices.append(action)
new_observation, reward, done, info = env.step(action)
prev_obs = new_observation
game_memory.append([new_observation, action])
score += reward
if done: break
scores.append(score)
print('Average Score is')
print('Average Score:', sum(scores) / len(scores))
print('choice 1:{} choice 0:{}'.format(choices.count(1) / len(choices), choices.count(0) / len(choices)))
print('Score Requirement:', score_requirement)
if __name__ == "__main__":
some_random_games_first()
# initial_population
training_data = generate_population(None)
# creating a dummy model
model = create_dummy_model(training_data)
# training with first dataset
model = train_model(training_data, model)
# evaluating
evaluate(model)
# recursive learning
generation = 1
while True:
generation += 1
print('Generation: ', generation)
# training_data = initial_population(model)
training_data = np.append(training_data, generate_population(None), axis=0)
print('generation: ', generation, ' initial population: ', len(training_data))
if len(training_data) == 0:
break
model = train_model(training_data, model)
evaluate(model)
| [
"[email protected]"
] | |
4e01ac8a3419db9e9765ad01f7ada984c3377735 | ce0f43e330a32376ccf50559e175d1662b64cc2e | /deploy_driver.py | 2650804a5616085fa45552be7783fd26754130fc | [] | no_license | dannteMich/DriverTemplate | 6d8c4208c7efb7fc474c9afd10d67056b531d099 | 9d615a0b6e5dd7464319ce741ae42f4622185108 | refs/heads/master | 2023-02-17T04:44:15.134364 | 2021-01-16T19:38:52 | 2021-01-16T19:38:52 | 328,702,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,355 | py | import argparse
import subprocess
from os import path
import rpyc
from rpyc.utils.classic import DEFAULT_SERVER_PORT
# YOU should run on the target pc this (with admin): rpyc_classic.py -m threaded --host 0.0.0.0
DRIVER_FILE_TO_UPLOAD = 'sample.sys'
DRIVER_DEST_DIR = "C:\\"
PROJECT_DIR = path.dirname(__file__)
KERNEL_SERVICE_NAME = "DriverTest"
def parse_arguments(args=None):
parser = argparse.ArgumentParser(description="Connect to testing VM for deployment and testing of the Driver")
parser.add_argument('host', help="IP of the target computer")
parser.add_argument('-p', '--port', type=int, default=DEFAULT_SERVER_PORT,
help="Listening port on the target computer")
parser.add_argument('--release', action="store_const", const="release", default="debug",
help="Use the Release version of the driver instead of the debug")
return parser.parse_args(args)
def install_driver_on_input(connection, driver_name, bin_path):
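    # Register the driver as a kernel service on the remote machine via "sc create" once the user confirms.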
input("Trying to install using SC. Press Enter to start the driver")
print(connection.modules.subprocess.check_output(
"sc create {} type= kernel binPath= {}".format(driver_name, bin_path)).decode('utf-8'))
print("Driver installed")
def start_driver_on_input(connection, driver_name):
input("Ready to start. Press Enter to start the driver")
print(connection.modules.subprocess.check_output(
"sc start {}".format(driver_name)).decode('utf-8'))
def stop_driver_on_input(connection, driver_name):
input("Stopping using SC. Press Enter to stop the driver")
print(connection.modules.subprocess.check_output(
"sc stop {}".format(driver_name)).decode('utf-8'))
def uninstall_driver_on_input(connection, driver_name):
input("Ready to uninstall. Press Enter to uninstall the driver and delete the service")
print(connection.modules.subprocess.check_output(
"sc delete {}".format(driver_name)).decode('utf-8'))
if __name__ == "__main__":
args = parse_arguments()
dest_driver_path = path.join(DRIVER_DEST_DIR, DRIVER_FILE_TO_UPLOAD)
connection = rpyc.classic.connect(args.host)
driver_file = path.join(PROJECT_DIR, 'Sample', 'x64', args.release, DRIVER_FILE_TO_UPLOAD)
rpyc.utils.classic.upload(connection, driver_file, dest_driver_path)
print("Upload {} to {}".format(DRIVER_FILE_TO_UPLOAD, dest_driver_path))
command = input("What should we do? ")
while command.lower() not in ['q', 'quit', 'exit']:
try:
if command.lower() in ['i', 'install']:
install_driver_on_input(connection, KERNEL_SERVICE_NAME, dest_driver_path)
elif command.lower() in ['start', 's']:
start_driver_on_input(connection, KERNEL_SERVICE_NAME)
elif command.lower() == 'stop':
stop_driver_on_input(connection, KERNEL_SERVICE_NAME)
elif command.lower() in ['u', 'uninstall']:
uninstall_driver_on_input(connection, KERNEL_SERVICE_NAME)
else:
print("unknown command")
except subprocess.CalledProcessError as e:
print("Command {} failed with the following output:\n{}".format(e.cmd, e.output.decode('utf-8')))
command = input("What do you want to do now? ")
print("Exiting")
| [
"[email protected]"
] | |
30742a92f5d10eec0a4e654054344e547238e600 | 3f0a9d9ce56846a81120a71d5eca595524c8674b | /Remove Punctuation not spaces.py | de1f8cdf9b022d664b7ac4c6d1ade05934ba4a1d | [] | no_license | Harry212001/Cipher-Challenge-2018 | ae3f9929bc12e56ffb838a1e16e9994071a3e395 | 16827136f7844ce15fb088f8732cd5242a53dc7d | refs/heads/master | 2020-03-31T22:54:05.823955 | 2018-12-10T21:59:32 | 2018-12-10T21:59:32 | 152,635,228 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | ciphText = input('Input text: ')
plainText = ''
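# Keep only uppercase letters and spaces; punctuation and anything else is dropped.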
for lett in ciphText:
if (lett >= 'A' and lett <= 'Z') or lett == ' ':
plainText += lett
print(plainText) | [
"[email protected]"
] | |
6f005e2ca978b50f19d6378a549b739e355f2ecb | d8010fd033ec0b36a87d9a058487d738befc3c17 | /detectron2/modeling/weaklygrounding/phrase_embedding_weakly.py | 5627de5ec2fd5621e2d8354ecd6da712dad9ff8c | [] | no_license | bobwan1995/ReIR-WeaklyGrounding.pytorch | 0efa0a616b18f9fa3d89f0afde1a6528efca1a8d | 2a962c335541c981149a042794ee508e0e7226f4 | refs/heads/main | 2023-08-11T12:55:42.977775 | 2021-10-09T00:25:15 | 2021-10-09T00:25:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,803 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from allennlp.modules.elmo import Elmo, batch_to_ids
from detectron2.config import global_cfg as cfg
import json
import numpy as np
import pickle
class PhraseEmbeddingPhr(torch.nn.Module):
def __init__(self, cfg, phrase_embed_dim=1024, bidirectional=True):
super(PhraseEmbeddingPhr, self).__init__()
self.device = torch.device('cuda')
self.bidirectional = bidirectional
phr_vocab_file = open(cfg.MODEL.VG.VOCAB_PHR_FILE)
self.phr_vocab = json.load(phr_vocab_file)
phr_vocab_file.close()
self.phr_vocab_to_id = {v:i+1 for i, v in enumerate(self.phr_vocab)}
self.phr_vocab_size = len(self.phr_vocab) + 1
self.embed_dim = phrase_embed_dim
if self.bidirectional:
self.hidden_dim = phrase_embed_dim // 2
else:
self.hidden_dim = phrase_embed_dim
self.enc_embedding = nn.Embedding(num_embeddings=self.phr_vocab_size,
embedding_dim=self.embed_dim,
padding_idx=0, sparse=False)
self.sent_rnn = nn.GRU(input_size=self.embed_dim, hidden_size=self.hidden_dim, num_layers=1,
batch_first=True, dropout=0, bidirectional=self.bidirectional, bias=True)
if cfg.MODEL.VG.USING_DET_KNOWLEDGE:
with open(cfg.MODEL.VG.GLOVE_DICT_FILE, 'rb') as load_f:
                self.glove_embedding = pickle.load(load_f) ## dict that contains the word embeddings.
if cfg.SOLVER.INIT_PARA:
self.init_para()
def init_para(self, ):
# Initialize LSTM Weights and Biases
for layer in self.sent_rnn._all_weights:
for param_name in layer:
if 'weight' in param_name:
weight = getattr(self.sent_rnn, param_name)
nn.init.xavier_normal_(weight.data)
else:
bias = getattr(self.sent_rnn, param_name)
bias.data.zero_()
# nn.init.uniform_(bias.data, a=-0.01, b=0.01)
if not cfg.MODEL.VG.USING_ELMO:
nn.init.xavier_normal_(self.enc_embedding.weight.data)
@staticmethod
def filtering_phrase(phrases, all_phrase):
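        # Keep only the phrases whose ids appear in the list of valid phrase ids for this sample.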
phrase_valid = []
for phr in phrases:
if phr['phrase_id'] in all_phrase:
phrase_valid.append(phr)
return phrase_valid
def forward(self, all_sentences, all_phrase_ids, all_sent_sgs):
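        # Encode every valid phrase of each sentence with the phrase GRU and collect ids, masks and pooled embeddings per batch item.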
batch_phrase_ids = []
batch_phrase_types = []
batch_phrase_embed = []
batch_phrase_len = []
batch_phrase_dec_ids = []
batch_phrase_mask = []
batch_decoder_word_embed = []
batch_glove_phrase_embed = []
for idx, sent in enumerate(all_sentences):
seq = sent['sentence'].lower()
phrases = sent['phrases']
phrase_ids = []
phrase_types = []
input_phr = []
lengths = []
valid_phrases = self.filtering_phrase(phrases, all_phrase_ids[idx])
tokenized_seq = seq.split(' ')
# tokenized the phrase
max_len = np.array([len(phr['phrase'].split(' ')) for phr in valid_phrases]).max()
phrase_enc_ids = np.zeros((len(valid_phrases), max_len))
phrase_dec_ids = np.zeros((len(valid_phrases), max_len+1)) ## to predict end token
phrase_mask = np.zeros((len(valid_phrases), max_len+1)) ## to predict the "end" token
# phrase_mask_last = np.zeros((len(valid_phrases), max_len))
phrase_glove_embedding = []
for pid, phr in enumerate(valid_phrases):
phrase_ids.append(phr['phrase_id'])
phrase_types.append(phr['phrase_type'])
tokenized_phr = phr['phrase'].lower().split(' ')
word_glove_embedding = []
for tid, w in enumerate(tokenized_phr):
phrase_enc_ids[pid, tid] = self.phr_vocab_to_id[w]
phrase_dec_ids[pid, tid] = self.phr_vocab_to_id[w]
phrase_mask[pid, tid] = 1
if cfg.MODEL.VG.USING_DET_KNOWLEDGE:
phr_glo_vec = self.glove_embedding.get(w)
if phr_glo_vec is not None:
word_glove_embedding.append(phr_glo_vec)
if cfg.MODEL.VG.USING_DET_KNOWLEDGE:
if len(word_glove_embedding) == 0:
word_glove_embedding = 0 * torch.as_tensor(self.glove_embedding.get('a')).float().unsqueeze(0) ## 1*300
else:
word_glove_embedding = torch.as_tensor(np.array(word_glove_embedding)).float().mean(0, keepdim=True)
phrase_glove_embedding.append(word_glove_embedding.to(self.device))
phrase_mask[pid, tid+1] = 1
# phrase_dec_ids[:, :-1] = phrase_enc_ids[:, 1:]
phr_len = len(tokenized_phr)
lengths.append(phr_len)
input_phr.append(tokenized_phr)
phrase_word_embeds_b = self.enc_embedding(torch.as_tensor(phrase_enc_ids).long().to(self.device))
phrase_mask = torch.as_tensor(phrase_mask).float().to(self.device)
if self.bidirectional:
phrase_embeds = []
for pid, phr in enumerate(input_phr):
phrase_embed_phr, last_embed = self.sent_rnn(phrase_word_embeds_b[[pid]][:, :len(phr)])
if cfg.MODEL.VG.PHRASE_SELECT_TYPE == 'Sum':
phrase_embeds.append(phrase_embed_phr.sum(1)) # average the embedding
elif cfg.MODEL.VG.PHRASE_SELECT_TYPE == 'Mean':
phrase_embeds.append(phrase_embed_phr.mean(1))
phrase_embeds = torch.cat(phrase_embeds, dim=0) ## n*1024
else:
phrase_word_embeds, last_embed = self.sent_rnn(phrase_word_embeds_b)
phrase_word_embeds = phrase_word_embeds * phrase_mask[:, 1:, None]
phrase_embeds = None
if cfg.MODEL.VG.PHRASE_SELECT_TYPE == 'Sum':
phrase_embeds = phrase_word_embeds.sum(1) # average the embedding
elif cfg.MODEL.VG.PHRASE_SELECT_TYPE == 'Mean':
phrase_embeds = phrase_word_embeds.sum(1)/phrase_mask[:, 1:].sum(1).unsqueeze(1)
phrase_decoder_word_embeds = phrase_word_embeds_b
batch_phrase_ids.append(phrase_ids)
batch_phrase_types.append(phrase_types)
batch_phrase_embed.append(phrase_embeds)
batch_phrase_len.append(lengths)
batch_phrase_dec_ids.append(phrase_dec_ids)
batch_phrase_mask.append(phrase_mask)
batch_decoder_word_embed.append(phrase_decoder_word_embeds)
batch_glove_phrase_embed.append(phrase_glove_embedding)
return batch_phrase_ids, batch_phrase_types, batch_phrase_embed, batch_phrase_len, \
batch_phrase_dec_ids, batch_phrase_mask, batch_decoder_word_embed, batch_glove_phrase_embed
class PhraseEmbeddingSent(torch.nn.Module):
def __init__(self, cfg, phrase_embed_dim=1024, bidirectional=True):
super(PhraseEmbeddingSent, self).__init__()
self.device = torch.device('cuda')
self.bidirectional = bidirectional
vocab_file = open(cfg.MODEL.VG.VOCAB_FILE)
self.vocab = json.load(vocab_file)
vocab_file.close()
add_vocab = ['relate', 'butted']
self.vocab.extend(add_vocab)
self.vocab_to_id = {v: i + 1 for i, v in enumerate(self.vocab)}
self.vocab_size = len(self.vocab) + 1
phr_vocab_file = open(cfg.MODEL.VG.VOCAB_PHR_FILE)
self.phr_vocab = json.load(phr_vocab_file)
self.phr_vocab_to_id = {v:i+1 for i, v in enumerate(self.phr_vocab)}
self.phr_vocab_size = len(self.phr_vocab) + 1
self.embed_dim = phrase_embed_dim
if self.bidirectional:
self.hidden_dim = phrase_embed_dim // 2
else:
self.hidden_dim = self.embed_dim
if cfg.MODEL.VG.USING_ELMO:
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5"
self.elmo = Elmo(options_file, weight_file, 2, dropout=0, requires_grad=False)
self.elmo.eval()
else:
self.enc_embedding = nn.Embedding(num_embeddings=self.vocab_size,
embedding_dim=self.embed_dim,
padding_idx=0, sparse=False)
self.sent_rnn = nn.GRU(input_size=self.embed_dim, hidden_size=self.hidden_dim, num_layers=1,
batch_first=True, dropout=0, bidirectional=self.bidirectional, bias=True)
if cfg.MODEL.VG.USING_DET_KNOWLEDGE:
with open(cfg.MODEL.VG.GLOVE_DICT_FILE, 'rb') as load_f:
                self.glove_embedding = pickle.load(load_f) ## dict that contains the word embeddings.
if cfg.SOLVER.INIT_PARA:
self.init_para()
def init_para(self, ):
# Initialize LSTM Weights and Biases
for layer in self.sent_rnn._all_weights:
for param_name in layer:
if 'weight' in param_name:
weight = getattr(self.sent_rnn, param_name)
nn.init.xavier_normal_(weight.data)
else:
bias = getattr(self.sent_rnn, param_name)
# bias.data.zero_()
nn.init.uniform_(bias.data, a=-0.01, b=0.01)
nn.init.uniform_(self.enc_embedding.weight.data, a=-0.01, b=0.01)
@staticmethod
def filtering_phrase(phrases, all_phrase):
phrase_valid = []
for phr in phrases:
if phr['phrase_id'] in all_phrase:
phrase_valid.append(phr)
return phrase_valid
def forward(self, all_sentences, all_phrase_ids, all_sent_sgs):
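        # Run each whole sentence through the encoder and pool the hidden states over every phrase's word span.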
batch_phrase_ids = []
batch_phrase_types = []
batch_phrase_embed = []
batch_phrase_len = []
batch_phrase_dec_ids = []
batch_phrase_mask = []
batch_decoder_word_embed = []
batch_glove_phrase_embed = []
for idx, sent in enumerate(all_sentences):
seq = sent['sentence'].lower()
phrases = sent['phrases']
phrase_ids = []
phrase_types = []
input_phr = []
lengths = []
valid_phrases = self.filtering_phrase(phrases, all_phrase_ids[idx])
tokenized_seq = seq.split(' ')
seq_enc_ids = [[self.vocab_to_id[w] for w in tokenized_seq]]
""" Extract the word embedding and feed into sent_rnn"""
if cfg.MODEL.VG.USING_ELMO:
input_seq_idx = batch_to_ids([tokenized_seq]).to(self.device)
seq_embed_b = self.elmo(input_seq_idx)['elmo_representations'][1] ## 1*L*1024
seq_embed, hn = self.sent_rnn(seq_embed_b)
else:
seq_embed_b = self.enc_embedding(torch.as_tensor(seq_enc_ids).long().to(self.device)) # 1*L*1024
seq_embed, hn = self.sent_rnn(seq_embed_b)
# tokenized the phrase
max_len = np.array([len(phr['phrase'].split(' ')) for phr in valid_phrases]).max()
phrase_dec_ids = np.zeros((len(valid_phrases), max_len+1)) ## to predict end token
phrase_mask = np.zeros((len(valid_phrases), max_len+1)) ## to predict the "end" token
phrase_decoder_word_embeds = torch.zeros(len(valid_phrases), max_len, seq_embed.shape[-1]).to(self.device) ##
phrase_embeds = []
phrase_glove_embedding = []
for pid, phr in enumerate(valid_phrases):
phrase_ids.append(phr['phrase_id'])
phrase_types.append(phr['phrase_type'])
tokenized_phr = phr['phrase'].lower().split(' ')
phr_len = len(tokenized_phr)
start_ind = phr['first_word_index']
word_glove_embedding = []
for wid, word in enumerate(tokenized_phr):
phrase_dec_ids[pid][wid] = self.phr_vocab_to_id[word]
if cfg.MODEL.VG.USING_DET_KNOWLEDGE:
phr_glo_vec = self.glove_embedding.get(word)
if phr_glo_vec is not None:
word_glove_embedding.append(phr_glo_vec)
if cfg.MODEL.VG.USING_DET_KNOWLEDGE:
if len(word_glove_embedding) == 0:
word_glove_embedding = 0 * torch.as_tensor(self.glove_embedding.get('a')).float().unsqueeze(0) ## 1*300
else:
word_glove_embedding = torch.as_tensor(np.array(word_glove_embedding)).float().mean(0, keepdim=True)
phrase_glove_embedding.append(word_glove_embedding)
phrase_mask[pid][:phr_len+1] = 1
phrase_decoder_word_embeds[pid, :phr_len, :] = phrase_decoder_word_embeds[pid, :phr_len, :] + seq_embed_b[0][start_ind:start_ind+phr_len]
if cfg.MODEL.VG.PHRASE_SELECT_TYPE == 'Sum':
phrase_embeds.append(seq_embed[[0], start_ind:start_ind+phr_len].sum(1)) # average the embedding
elif cfg.MODEL.VG.PHRASE_SELECT_TYPE == 'Mean':
phrase_embeds.append(seq_embed[[0], start_ind:start_ind+phr_len].mean(1))
phrase_embeds = torch.cat(phrase_embeds, dim=0)
phrase_mask = torch.as_tensor(phrase_mask).float().to(self.device)
if cfg.MODEL.VG.USING_DET_KNOWLEDGE:
phrase_glove_embedding = torch.cat(phrase_glove_embedding, dim=0).to(self.device) ## numP, 300
batch_phrase_ids.append(phrase_ids)
batch_phrase_types.append(phrase_types)
batch_phrase_embed.append(phrase_embeds)
batch_phrase_len.append(lengths)
batch_phrase_dec_ids.append(phrase_dec_ids)
batch_phrase_mask.append(phrase_mask)
batch_decoder_word_embed.append(phrase_decoder_word_embeds)
batch_glove_phrase_embed.append(phrase_glove_embedding)
return batch_phrase_ids, batch_phrase_types, batch_phrase_embed, batch_phrase_len, \
batch_phrase_dec_ids, batch_phrase_mask, batch_decoder_word_embed, batch_glove_phrase_embed
| [
"[email protected]"
] | |
eca23f8b65fb36e4ff4efb7bdb706dbf319c5292 | ffb98b376b030162ff04573ce012614db03bcc91 | /honeypot_app/honeypot_manager/opencanary/logger.py | a0660b28cbca8502aa5fc85107091e5dfec3012d | [] | no_license | tarunprakash/honeypotmanager | 586ce1c01f1234e234275bdcb46c1909f4840fa9 | 258938fe006a9abcd616e4a45b0d05559cda32a7 | refs/heads/master | 2023-07-11T08:01:02.839915 | 2021-08-13T16:36:50 | 2021-08-13T16:36:50 | 385,729,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,595 | py | from __future__ import generator_stop, print_function
import re
import simplejson as json
import logging.config
import socket
import hpfeeds
import sys
from datetime import datetime
from logging.handlers import SocketHandler
from twisted.internet import reactor
import requests
from opencanary.iphelper import *
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
def getLogger(config):
try:
d = config.getVal('logger')
except Exception as e:
print("Error: config does not have 'logger' section", file=sys.stderr)
exit(1)
classname = d.get('class', None)
if classname is None:
print("Logger section is missing the class key.", file=sys.stderr)
exit(1)
LoggerClass = globals().get(classname, None)
if LoggerClass is None:
print("Logger class (%s) is not defined." % classname, file=sys.stderr)
exit(1)
kwargs = d.get('kwargs', None)
if kwargs is None:
print("Logger section is missing the kwargs key.", file=sys.stderr)
exit(1)
try:
logger = LoggerClass(config, **kwargs)
except Exception as e:
print("An error occured initialising the logger class", file=sys.stderr)
print(e)
exit(1)
return logger
class LoggerBase(object):
LOG_BASE_BOOT = 1000
LOG_BASE_MSG = 1001
LOG_BASE_DEBUG = 1002
LOG_BASE_ERROR = 1003
LOG_BASE_PING = 1004
LOG_BASE_CONFIG_SAVE = 1005
LOG_BASE_EXAMPLE = 1006
LOG_FTP_LOGIN_ATTEMPT = 2000
LOG_HTTP_GET = 3000
LOG_HTTP_POST_LOGIN_ATTEMPT = 3001
LOG_SSH_NEW_CONNECTION = 4000
LOG_SSH_REMOTE_VERSION_SENT = 4001
LOG_SSH_LOGIN_ATTEMPT = 4002
LOG_SMB_FILE_OPEN = 5000
LOG_PORT_SYN = 5001
LOG_PORT_NMAPOS = 5002
LOG_PORT_NMAPNULL = 5003
LOG_PORT_NMAPXMAS = 5004
LOG_PORT_NMAPFIN = 5005
LOG_TELNET_LOGIN_ATTEMPT = 6001
LOG_HTTPPROXY_LOGIN_ATTEMPT = 7001
LOG_MYSQL_LOGIN_ATTEMPT = 8001
LOG_MSSQL_LOGIN_SQLAUTH = 9001
LOG_MSSQL_LOGIN_WINAUTH = 9002
LOG_TFTP = 10001
LOG_NTP_MONLIST = 11001
LOG_VNC = 12001
LOG_SNMP_CMD = 13001
LOG_RDP = 14001
LOG_SIP_REQUEST = 15001
LOG_GIT_CLONE_REQUEST = 16001
LOG_REDIS_COMMAND = 17001
LOG_TCP_BANNER_CONNECTION_MADE = 18001
LOG_TCP_BANNER_KEEP_ALIVE_CONNECTION_MADE = 18002
LOG_TCP_BANNER_KEEP_ALIVE_SECRET_RECEIVED = 18003
LOG_TCP_BANNER_KEEP_ALIVE_DATA_RECEIVED = 18004
LOG_TCP_BANNER_DATA_RECEIVED = 18005
LOG_USER_0 = 99000
LOG_USER_1 = 99001
LOG_USER_2 = 99002
LOG_USER_3 = 99003
LOG_USER_4 = 99004
LOG_USER_5 = 99005
LOG_USER_6 = 99006
LOG_USER_7 = 99007
LOG_USER_8 = 99008
LOG_USER_9 = 99009
def sanitizeLog(self, logdata):
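        # Fill in the node id, timestamps and default fields so every log record has a consistent shape.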
logdata['node_id'] = self.node_id
logdata['local_time'] = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
logdata['utc_time'] = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
logdata['local_time_adjusted'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if 'src_host' not in logdata:
logdata['src_host'] = ''
if 'src_port' not in logdata:
logdata['src_port'] = -1
if 'dst_host' not in logdata:
logdata['dst_host'] = ''
if 'dst_port' not in logdata:
logdata['dst_port'] = -1
if 'logtype' not in logdata:
logdata['logtype'] = self.LOG_BASE_MSG
if 'logdata' not in logdata:
logdata['logdata'] = {}
## get full logtype name from code
codes = LoggerBase.__dict__
logdata['logtype_msg'] = ''
for msg, code in codes.items():
if code == logdata['logtype']:
logdata['logtype_msg'] = msg.replace('LOG_', '')
break
return logdata
class PyLogger(LoggerBase):
"""
Generic python logging
"""
__metaclass__ = Singleton
def __init__(self, config, handlers, formatters={}):
self.node_id = config.getVal('device.node_id')
# Build config dict to initialise
# Ensure all handlers don't drop logs based on severity level
for h in handlers:
handlers[h]["level"] = "NOTSET"
logconfig = {
"version": 1,
"formatters" : formatters,
"handlers": handlers,
# initialise all defined logger handlers
"loggers": {
self.node_id : {
"handlers": handlers.keys()
}
}
}
try:
logging.config.dictConfig(logconfig)
except Exception as e:
print("Invalid logging config", file=sys.stderr)
print(type(e))
print(e)
exit(1)
# Check if ignorelist is populated
self.ignorelist = config.getVal('ip.ignorelist', default='')
self.logger = logging.getLogger(self.node_id)
def error(self, data):
data['local_time'] = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S.%f")
msg = '[ERR] %r' % json.dumps(data, sort_keys=True)
print(msg, file=sys.stderr)
self.logger.warn(msg)
def log(self, logdata, retry=True):
logdata = self.sanitizeLog(logdata)
# Log only if not in ignorelist
notify = True
if 'src_host' in logdata:
for ip in self.ignorelist:
if check_ip(logdata['src_host'], ip) == True:
notify = False
break
if notify == True:
self.logger.warn(json.dumps(logdata, sort_keys=True))
class SocketJSONHandler(SocketHandler):
"""Emits JSON messages over TCP delimited by newlines ('\n')"""
def makeSocket(self, timeout=1):
s = SocketHandler.makeSocket(self,timeout)
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
return s
def __init__(self, *args, **kwargs):
SocketHandler.__init__(self, *args, **kwargs)
self.retryStart = 0
self.retryMax = 0
self.retryFactor = 0
def send(self, s, attempt=0):
if attempt >= 10:
print("Dropping log message due to too many failed sends")
return
if self.sock is None:
self.createSocket()
if self.sock:
try:
# TODO: Weirdly, one the other ends drops the
# connection for the next msg, sendall still reports
# successful write on a disconnected socket but then
# on subsequent writes it fails correctly.
self.sock.sendall(s.encode("utf-8"))
return
except socket.error:
self.sock.close()
self.sock = None
# Here, we've failed to send s so retry
reactor.callLater(1.5, lambda x: self.send(s, attempt + 1), None)
def makePickle(self, record):
return record.getMessage() + "\n"
class HpfeedsHandler(logging.Handler):
def __init__(self,host,port,ident, secret,channels):
logging.Handler.__init__(self)
self.host=str(host)
self.port=int(port)
self.ident=str(ident)
self.secret=str(secret)
self.channels=map(str,channels)
hpc=hpfeeds.new(self.host, self.port, self.ident, self.secret)
hpc.subscribe(channels)
self.hpc=hpc
def emit(self, record):
try:
msg = self.format(record)
self.hpc.publish(self.channels,msg)
except:
print("Error on publishing to server")
class SlackHandler(logging.Handler):
def __init__(self,webhook_url):
logging.Handler.__init__(self)
self.webhook_url=webhook_url
def generate_msg(self, alert):
msg = {}
msg['pretext'] = "OpenCanary Alert"
data=json.loads(alert.msg)
msg['fields']=[]
for k,v in data.items():
msg['fields'].append({'title':k, 'value':json.dumps(v) if type(v) is dict else v})
return {'attachments':[msg]}
def emit(self, record):
data = self.generate_msg(record)
response = requests.post(
self.webhook_url, json=data
)
if response.status_code != 200:
print("Error %s sending Slack message, the response was:\n%s" % (response.status_code, response.text))
class TeamsHandler(logging.Handler):
def __init__(self,webhook_url):
logging.Handler.__init__(self)
self.webhook_url=webhook_url
def message(self, data):
message = {
"@type": "MessageCard",
"@context": "http://schema.org/extensions",
"themeColor": "49c176",
"summary": "OpenCanary Notification",
"title": "OpenCanary Alert",
"sections": [{
"facts": self.facts(data)
}]
}
return message
def facts(self, data, prefix=None):
facts = []
for k, v in data.items():
key = str(k).lower() if prefix is None else prefix + '__' + str(k).lower()
if type(v) is not dict:
facts.append({"name": key, "value": str(v)})
else:
nested = self.facts(v, key)
facts.extend(nested)
return facts
def emit(self, record):
data = json.loads(record.msg)
payload = self.message(data)
headers = {'Content-Type': 'application/json'}
response = requests.post(self.webhook_url, headers=headers, json=payload)
if response.status_code != 200:
print("Error %s sending Teams message, the response was:\n%s" % (response.status_code, response.text))
class DjangoHandler(logging.Handler):
"""
Our custom handler for Django web interface
"""
def __init__(self,webhook_url):
logging.Handler.__init__(self)
self.webhook_url=webhook_url
def emit(self, record):
response = requests.post(
self.webhook_url, data={
'msg': record.msg
}
)
if response.status_code != 200:
print("Error %s sending Django message, the response was:\n%s" % (response.status_code, response.text)) | [
"[email protected]"
] | |
ceb9c1360c669083360797d69f1da5dc4d25f04e | 6781180f6d0f5f2450650bebcacc3a6e01bc6e1f | /sum_recursive.py | 6b2a4aed3aae005135f3f155d0c1c39ee0af0c10 | [] | no_license | Arytur/Algorithms | cc927d6fdf95ef498f48db1bb4bb3ebc74994f03 | bc8566fb5ece1b5068291b7876741664117a9d13 | refs/heads/master | 2020-03-09T08:08:56.536317 | 2018-04-14T19:13:37 | 2018-04-14T19:13:37 | 128,682,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | def array_sum_recursive(array):
if not array:
return 0
else:
return array[0] + array_sum_recursive(array[1:])
print(array_sum_recursive([2, 14, 44, 101, 555]))
print(array_sum_recursive([202, 104, 44, 1201, 56]))
| [
"[email protected]"
] | |
d7ebc25563c971e33598d6d4801017bceab2042f | a8609e6508254b33d0792ec8b3c2c9a29ca1018d | /app.py | 36f01d51ad4609fb3d5aeaecac7ffd1f9b93e5a4 | [] | no_license | coderguider/Data-Centric-Development-Mini-Project | 21ee7e6ed1b69d601601a8cda34024b18ab9d088 | 18a0ed9d19405b45fa7bc34d41f5dfe1cb622b2f | refs/heads/master | 2020-04-21T17:00:35.277770 | 2019-02-08T11:31:40 | 2019-02-08T11:31:40 | 169,722,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,106 | py | import os
from flask import Flask, render_template, redirect, request, url_for
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
app = Flask(__name__)
app.config["MONGO_DBNAME"] = 'task_manager'
app.config["MONGO_URI"] = 'mongodb://coderguider:[email protected]:25125/task_manager'
mongo = PyMongo(app)
@app.route('/')
@app.route('/get_tasks')
def get_tasks():
return render_template("tasks.html",
tasks=mongo.db.tasks.find())
@app.route('/add_task')
def add_task():
return render_template('addtask.html',
categories=mongo.db.categories.find())
@app.route('/insert_task', methods=['POST'])
def insert_task():
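    # Store the submitted form fields as a new task document.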
tasks = mongo.db.tasks
tasks.insert_one(request.form.to_dict())
return redirect(url_for('get_tasks'))
@app.route('/edit_task/<task_id>')
def edit_task(task_id):
the_task = mongo.db.tasks.find_one({"_id": ObjectId(task_id)})
all_categories = mongo.db.categories.find()
return render_template('edittask.html', task=the_task, categories=all_categories)
@app.route('/update_task/<task_id>', methods=["POST"])
def update_task(task_id):
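    # Replace the stored task document with the values submitted from the edit form.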
tasks = mongo.db.tasks
tasks.update( {'_id': ObjectId(task_id)},
{
        'task_name': request.form.get('task_name'),
        'category_name': request.form.get('category_name'),
        'task_description': request.form.get('task_description'),
        'due_date': request.form.get('due_date'),
        'is_urgent': request.form.get('is_urgent')
})
return redirect(url_for('get_tasks'))
@app.route('/delete_task/<task_id>')
def delete_task(task_id):
mongo.db.tasks.remove({'_id': ObjectId(task_id)})
return redirect(url_for('get_tasks'))
@app.route('/get_categories')
def get_categories():
return render_template('categories.html',
categories=mongo.db.categories.find())
@app.route('/delete_category/<category_id>')
def delete_category(category_id):
mongo.db.categories.remove({'_id': ObjectId(category_id)})
return redirect(url_for('get_categories'))
@app.route('/edit_category/<category_id>')
def edit_category(category_id):
return render_template('editcategory.html',
category=mongo.db.categories.find_one({'_id': ObjectId(category_id)}))
@app.route('/update_category/<category_id>', methods=['POST'])
def update_category(category_id):
mongo.db.categories.update(
{'_id': ObjectId(category_id)},
        {'category_name': request.form.get('category_name')})
return redirect(url_for('get_categories'))
@app.route('/insert_category', methods=['POST'])
def insert_category():
category_doc = {'category_name': request.form.get('category_name')}
mongo.db.categories.insert_one(category_doc)
return redirect(url_for('get_categories'))
@app.route('/add_category')
def add_category():
return render_template('addcategory.html')
@app.route('/new_category')
def new_category():
return render_template('addcategory.html')
if __name__ == '__main__':
app.run(host=os.environ.get('IP'),
port=int(os.environ.get('PORT')),
debug=True) | [
"[email protected]"
] | |
086555baf9b1d577fe4ed21275782332790ea0aa | 4c0b8e3885de5ae5cbf0f1378443b3535ee7dab3 | /acme-large/tools/unetlab.py | cfae2b0d5cba06cc21ddd23144ffee8c89de4aab | [] | no_license | networkop/network-ci | f930028206727949aa75679d8849ccf2692a7956 | 8a0c5d7c636133ba4b101d926474fbfe7a6869fa | refs/heads/master | 2021-01-10T07:09:43.025745 | 2016-03-22T07:01:18 | 2016-03-22T07:01:18 | 51,913,133 | 13 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,746 | py | from restunl.unetlab import UnlServer
from restunl.device import Router, Switch
from globals import *
import file_io
import decorators
import os
class UNetLab(object):
def __init__(self, ip='', user='', pwd='', lab_name=''):
self.ip, self.user, self.pwd, self.lab_name = ip, user, pwd, lab_name
if os.environ.get('UNL_IP'):
self.ip = os.environ.get('UNL_IP')
self.unl = UnlServer(self.ip)
self.unl.login(self.user, self.pwd)
self.lab = None
self.nodes = dict()
def create_lab(self):
self.lab = self.unl.create_lab(self.lab_name)
self.lab.cleanup()
def get_lab(self):
return self.unl.get_lab(self.lab_name)
def build_topo(self, topology):
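        # Create a node for every device in the real topology and wire up the links, reusing any saved interface mappings.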
real_topo = topology.real
intf_conv = file_io.read_yaml(INTF_CONV_FILE)
for ((a_name, a_intf), (b_name, b_intf)) in real_topo:
a_device = Switch(a_name, L2_IMAGE) if 'sw' in a_name.lower() else Router(a_name, L3_IMAGE)
b_device = Switch(b_name, L2_IMAGE) if 'sw' in b_name.lower() else Router(b_name, L3_IMAGE)
if a_name not in self.nodes:
self.nodes[a_name] = self.lab.create_node(a_device)
# print("*** NODE {} CREATED".format(a_name))
if b_name not in self.nodes:
self.nodes[b_name] = self.lab.create_node(b_device)
# print("*** NODE {} CREATED".format(b_name))
node_a = self.nodes[a_name]
node_b = self.nodes[b_name]
if intf_conv.get(a_name, {}).get(a_intf, None):
a_intf_lab = intf_conv[a_name][a_intf]
else:
a_intf_lab = node_a.get_next_intf()
if intf_conv.get(b_name, {}).get(b_intf, None):
b_intf_lab = intf_conv[b_name][b_intf]
else:
b_intf_lab = node_b.get_next_intf()
intf_conv.setdefault(a_name, {})[a_intf] = a_intf_lab
intf_conv.setdefault(b_name, {})[b_intf] = b_intf_lab
node_a.connect_node(a_intf_lab, node_b, b_intf_lab)
# print("*** NODES {} and {} ARE CONNECTED".format(a_name, b_name))
file_io.write_yaml(INTF_CONV_FILE, intf_conv)
return None
def ext_connect(self, topo):
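        # Attach nodes to external (cloud) networks and record the interface mapping used.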
ext_topo = topo.ext_net
intf_conv = file_io.read_yaml(INTF_CONV_FILE)
for (node_name, node_intf), pnet in ext_topo.iteritems():
ext_net = self.lab.create_net('cloud', net_type=pnet)
the_node = self.nodes[node_name]
node_intf_lab = the_node.get_next_intf()
the_node.connect_interface(node_intf_lab, ext_net)
intf_conv.setdefault(node_name, {})[node_intf] = node_intf_lab
file_io.write_yaml(INTF_CONV_FILE, intf_conv)
return None
@decorators.timer
@decorators.progress
def configure_nodes(self, path):
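        # Push each node's config file in its own thread and wait for all of them to finish.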
import threading
processes = []
for node_name in self.nodes:
conf = 'no\renable\r configure terminal\r no ip domain-lookup\r'
conf += file_io.read_txt('{0}/{1}.txt'.format(path, node_name))
conf += '\rend\r write\r'
process = threading.Thread(target=self.nodes[node_name].configure, args=(conf,))
# self.nodes[node_name].configure(conf)
process.start()
processes.append(process)
# print("*** NODE {} CONFIGURED".format(node_name))
[p.join() for p in processes]
return None
def start(self):
return self.lab.start_all_nodes()
@decorators.timer
@decorators.progress
def destroy(self):
self.lab = self.get_lab()
self.lab.cleanup()
self.unl.delete_lab(self.lab_name)
| [
"[email protected]"
] | |
1fbffc44fe3a1f8dc22d4a1813e1688d61f401b8 | 5b8d0cd314fdd4537bc77ce9209ca903694b02e8 | /datasets/mkb/mkb.py | 80739ddae41ce478ce787a9c4c101b3f197adc14 | [
"CC-BY-4.0",
"CC-BY-SA-4.0",
"Apache-2.0"
] | permissive | amankhandelia/datasets | 97106f6d98b9cd17c50b1bf0c91f4ced6240dfd6 | 1a138f9bd2d1b62a255736375001bf918d36508d | refs/heads/master | 2023-06-21T01:08:25.212378 | 2021-07-26T13:27:59 | 2021-07-26T13:27:59 | 389,644,974 | 1 | 0 | Apache-2.0 | 2021-07-26T14:36:09 | 2021-07-26T13:36:08 | null | UTF-8 | Python | false | false | 3,951 | py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mann Ki Baat (mkb) Corpus"""
import os
import datasets
_CITATION = """\
@misc{siripragada2020multilingual,
title={A Multilingual Parallel Corpora Collection Effort for Indian Languages},
author={Shashank Siripragada and Jerin Philip and Vinay P. Namboodiri and C V Jawahar},
year={2020},
eprint={2007.07691},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The Prime Minister's speeches - Mann Ki Baat, on All India Radio, translated into many languages.
"""
_URL = "http://preon.iiit.ac.in/~jerin/resources/datasets/mkb-v0.tar"
_LanguagePairs = [
"or-ur",
"ml-or",
"bn-ta",
"gu-mr",
"hi-or",
"en-or",
"mr-ur",
"en-ta",
"hi-ta",
"bn-en",
"bn-or",
"ml-ta",
"gu-ur",
"bn-ml",
"bn-hi",
"gu-te",
"hi-ml",
"or-te",
"en-ml",
"en-hi",
"mr-te",
"bn-te",
"gu-hi",
"ta-ur",
"te-ur",
"gu-ml",
"hi-te",
"en-te",
"ml-te",
"hi-ur",
"mr-or",
"en-ur",
"ml-ur",
"bn-mr",
"gu-ta",
"bn-gu",
"bn-ur",
"ml-mr",
"or-ta",
"ta-te",
"gu-or",
"en-gu",
"hi-mr",
"mr-ta",
"en-mr",
]
class MkbConfig(datasets.BuilderConfig):
"""BuilderConfig for Mkb"""
def __init__(self, language_pair, **kwargs):
super().__init__(**kwargs)
"""
Args:
language_pair: language pair, you want to load
**kwargs: keyword arguments forwarded to super.
"""
self.language_pair = language_pair
class Mkb(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("0.0.0")
BUILDER_CONFIG_CLASS = MkbConfig
BUILDER_CONFIGS = [MkbConfig(name=pair, description=_DESCRIPTION, language_pair=pair) for pair in _LanguagePairs]
def _info(self):
src_tag, tgt_tag = self.config.language_pair.split("-")
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"translation": datasets.features.Translation(languages=(src_tag, tgt_tag))}),
supervised_keys=(src_tag, tgt_tag),
homepage="http://preon.iiit.ac.in/~jerin/resources/datasets/mkb-v0.tar",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
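        # Download and extract the corpus archive, then point the train split at the two language files of this pair.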
lang_pair = self.config.language_pair
src_tag, tgt_tag = lang_pair.split("-")
dl_dir = dl_manager.download_and_extract(_URL)
data_dir = os.path.join(dl_dir, "mkb", lang_pair)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": os.path.join(data_dir, f"mkb.{src_tag}"),
"labelpath": os.path.join(data_dir, f"mkb.{tgt_tag}"),
},
)
]
def _generate_examples(self, filepath, labelpath):
"""Yields examples."""
src_tag, tgt_tag = self.config.language_pair.split("-")
with open(filepath, encoding="utf-8") as f1, open(labelpath, encoding="utf-8") as f2:
src = f1.read().split("\n")[:-1]
tgt = f2.read().split("\n")[:-1]
for idx, (s, t) in enumerate(zip(src, tgt)):
yield idx, {"translation": {src_tag: s, tgt_tag: t}}
| [
"[email protected]"
] | |
74d4254225644a5b1a14f0a45d3f7ce0f4fb4193 | 09f8a3825c5109a6cec94ae34ea17d9ace66f381 | /cohesity_management_sdk/models/feature_flag.py | 18ad38ff068262b38f7274aa79f43aa9b48b037c | [
"Apache-2.0"
] | permissive | cohesity/management-sdk-python | 103ee07b2f047da69d7b1edfae39d218295d1747 | e4973dfeb836266904d0369ea845513c7acf261e | refs/heads/master | 2023-08-04T06:30:37.551358 | 2023-07-19T12:02:12 | 2023-07-19T12:02:12 | 134,367,879 | 24 | 20 | Apache-2.0 | 2023-08-31T04:37:28 | 2018-05-22T06:04:19 | Python | UTF-8 | Python | false | false | 2,269 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Cohesity Inc.
class FeatureFlag(object):
"""Implementation of the 'FeatureFlag' model.
Specify feature flag override status.
Attributes:
is_approved (bool): Specifies the overridden approval status.
is_ui_feature (bool): Specifies if it's a front-end(UI) or back-end
feature.
name (string, required): Specifies name of the feature.
reason (string): Specifies the reason for override.
timestamp (long|int): Specifies the timestamp of override.
"""
# Create a mapping from Model property names to API property names
_names = {
"is_approved":'isApproved',
"is_ui_feature":'isUiFeature',
"name":'name',
"reason":'reason',
"timestamp":'timestamp',
}
def __init__(self,
is_approved=None,
is_ui_feature=None,
name=None,
reason=None,
timestamp=None,
):
"""Constructor for the FeatureFlag class"""
# Initialize members of the class
self.is_approved = is_approved
self.is_ui_feature = is_ui_feature
self.name = name
self.reason = reason
self.timestamp = timestamp
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
is_approved = dictionary.get('isApproved')
is_ui_feature = dictionary.get('isUiFeature')
name = dictionary.get('name')
reason = dictionary.get('reason')
timestamp = dictionary.get('timestamp')
# Return an object of this model
return cls(
is_approved,
is_ui_feature,
name,
reason,
timestamp
        )
| [
"[email protected]"
] | |
83f6a062397608c339be3356e3db59f67d030bed | cc37d7b21bba0684b0965af67509059cef15295c | /lab_09/main.py | 34c92de124ac6d7e729a739306513f26a447a167 | [] | no_license | wasp-lahis/PED-1s2020 | 170d4272b84e5e9786cc6a2b5d8432d4ea749d6c | b8b6ee3c2e63c0ca37750eb3661f35c758028e81 | refs/heads/master | 2022-12-01T21:58:15.123065 | 2020-08-18T01:12:14 | 2020-08-18T01:12:14 | 286,138,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,981 | py | def tupla_float_int(x):
x = x[1:-1]
x = x.split(",")
f = float(x[0])
i = int(x[1])
return (f,i)
notas_laboratorio = [tupla_float_int(x) for x in input().split()]
prova1, prova2 = [float(x) for x in input().split()]
def med_lab(notas_laboratorio):
peso_labs = 0
nota_labs = 0
for notas in notas_laboratorio:
nota_labs += notas[0] * notas[1]
peso_labs += notas[1]
med_lab = nota_labs/peso_labs
return float(med_lab)
med_provas = ((prova1 * 3) + (prova2 * 4))/7
print(med_provas, med_lab(notas_laboratorio))
if med_lab(notas_laboratorio) >= 5.0 and med_provas >= 5.0:
media_final2 = 0.7*med_provas + 0.3*med_lab(notas_laboratorio)
print("Aprovado(a) por nota e frequencia")
print("Media final:", format(media_final2, '.1f'))
if float(med_lab(notas_laboratorio)) >= 2.5 and med_provas >= 2.5:
if ((med_lab(notas_laboratorio)) < 5 and med_provas < 5) and (med_lab(notas_laboratorio) >= 2.5 and med_provas >= 2.5):
print("--------------------------------------------")
med_preliminar = min(4.9, 0.7*med_provas + 0.3*med_lab(notas_laboratorio))
exame = float(input())
print("Media das tarefas de laboratorio:", format(med_lab(notas_laboratorio),'.1f'))
print("Media das provas:", format(med_provas, '.1f'))
print("Media preliminar:", format(med_preliminar, '.1f'))
print("Nota do exame:", exame)
media_final1 = (med_preliminar + exame)/2
if media_final1 >= 5.0:
print("Aprovado(a) por nota e frequencia")
print("Media final:", format((media_final1), '.1f'))
else:
print("Reprovado por nota")
print("Media final:", format(media_final1, '.1f'))
else:
media_final3 = min(med_provas, med_lab(notas_laboratorio))
print("Media das tarefas de laboratorio:", format(med_lab(notas_laboratorio),'.1f'))
print("Media das provas:", format(med_provas, '.1f'))
print("Reprovado por nota")
print("Media final:", format(media_final3, '.1f'))
| [
"[email protected]"
] | |
43e5fcfa1c598bd10c9bc6d8f75ac70622d698a0 | f05d749793086a85c9b41c06bb5843c851b08ea7 | /tests/test_hypothesis.py | 89953068fe1a9fd683ed2375c58077657c6b9601 | [
"MIT"
] | permissive | theaeolianmachine/hypothesis_1085_repro | adbec4b795dec6c06f0aa98c88d910b76b7d102a | 9d886ea8a8860eadd22767271d85786209f5abe6 | refs/heads/master | 2021-05-05T06:12:13.777721 | 2018-01-24T16:40:05 | 2018-01-24T16:40:05 | 118,790,203 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | # import unittest
# from hypothesis import given
# from hypothesis.strategies import integers
# from repro_package.hit_by_unittest import sum
# class TestSumHypothesis(unittest.TestCase):
# @given(x=integers(), y=integers())
# def test_add(self, x, y):
# self.assertEquals(sum(x, y), x + y)
# if __name__ == '__main__':
# unittest.main()
| [
"[email protected]"
] | |
e8e77bc84accacabe71c0788f0c0ff74d027d1a1 | 4982ab3e48f9e5c40b6a8edfd926901ca2b4cb23 | /LeetCode/17_letter_combinations.py | de9195e46345c4ff203d9f3c981b91f7d5131073 | [] | no_license | Kacper20/Algorithmic-Tasks | 17c55c365bce5f0bac5c86135d29d6aec3c833da | 38f5b9053e29808a95064ed23ecd4bc1d2d65f0b | refs/heads/master | 2020-04-18T22:06:59.142960 | 2016-11-09T22:38:30 | 2016-11-09T22:38:30 | 66,146,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | class Solution(object):
def letterCombinations(self, digits):
def helper(dict, result, temp_string, digits, index):
if len(temp_string) == len(digits):
if len(temp_string) != 0:
result.append(temp_string)
return
current_char = digits[index]
for character in dict[current_char]:
helper(dict, result, temp_string + character, digits, index + 1)
dict = {
'2': ['a', 'b', 'c'],
'3': ['d', 'e', 'f'],
'4': ['g', 'h', 'i'],
'5': ['j', 'k', 'l'],
'6': ['m', 'n', 'o'],
'7': ['p', 'q', 'r', 's'],
'8': ['t', 'u', 'v'],
'9': ['w', 'x', 'y', 'z'],
}
result = []
helper(dict, result, '', digits, 0)
return result
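# Usage example (added note): the digits are expanded depth-first, so the first
# digit's letters vary slowest in the output.
#
#     Solution().letterCombinations("23")
#     # -> ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']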
| [
"[email protected]"
] | |
256cd50fecacde6339f36e2b1ead2f784278f38b | 1aeb828e57be9b046ee25433fff05956f01db53b | /python_bms/ALGORITHMS/1103/1531.투명.py | f3a9dec6c5174ae073dcdf772ae8d3165b85e665 | [] | no_license | LynnYeonjuLee/TIL_BMS2 | 11f2753e2e82c4898a782d6907a21e973c34cf69 | f363723391598caf5ec6b33925fcb8a13a252d9f | refs/heads/master | 2023-01-22T00:45:25.091512 | 2020-12-04T00:22:44 | 2020-12-04T00:22:44 | 290,238,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | paint = [[0]*100 for _ in range(100)]
N, M = map(int,input().split())
cnt = 0
for _ in range(N):
x1, y1, x2, y2 = list(map(int,input().split()))
for r in range(y1, y2+1):
for c in range(x1, x2+1):
paint[c][r] += 1
for r in range(y1, y2+1):
for c in range(x1, x2+1):
if paint[c][r] > M:
cnt += 1
print(cnt) | [
"[email protected]"
] | |
29869e3a2716f4098e3a5e8e9dcd9a17fbd7b539 | 90389bee115b79c8e187236b0509bdd372ca625f | /aws_lambda_powertools/utilities/data_classes/code_pipeline_job_event.py | e13d32fb169699797159dae04532038e443f60fa | [
"MIT-0",
"Apache-2.0"
] | permissive | pcolazurdo/aws-lambda-powertools-python | ebc7da963c09af69fcc32f34f0828f48df0ace0e | 4c41ec5c0b8f4864819561bc71494029131135c9 | refs/heads/develop | 2023-08-19T14:15:23.608733 | 2021-10-25T06:21:50 | 2021-10-25T06:21:50 | 360,665,741 | 0 | 0 | MIT-0 | 2021-05-17T14:29:17 | 2021-04-22T19:53:33 | Python | UTF-8 | Python | false | false | 7,124 | py | import json
import tempfile
import zipfile
from typing import Any, Dict, List, Optional
from urllib.parse import unquote_plus
import boto3
from aws_lambda_powertools.utilities.data_classes.common import DictWrapper
class CodePipelineConfiguration(DictWrapper):
@property
def function_name(self) -> str:
"""Function name"""
return self["FunctionName"]
@property
def user_parameters(self) -> str:
"""User parameters"""
return self["UserParameters"]
@property
def decoded_user_parameters(self) -> Dict[str, Any]:
"""Json Decoded user parameters"""
return json.loads(self.user_parameters)
class CodePipelineActionConfiguration(DictWrapper):
"""CodePipeline Action Configuration"""
@property
def configuration(self) -> CodePipelineConfiguration:
return CodePipelineConfiguration(self["configuration"])
class CodePipelineS3Location(DictWrapper):
@property
def bucket_name(self) -> str:
return self["bucketName"]
@property
def key(self) -> str:
"""Raw S3 object key"""
return self["objectKey"]
@property
def object_key(self) -> str:
"""Unquote plus of the S3 object key"""
return unquote_plus(self["objectKey"])
class CodePipelineLocation(DictWrapper):
@property
def get_type(self) -> str:
"""Location type eg: S3"""
return self["type"]
@property
def s3_location(self) -> CodePipelineS3Location:
"""S3 location"""
return CodePipelineS3Location(self["s3Location"])
class CodePipelineArtifact(DictWrapper):
@property
def name(self) -> str:
"""Name"""
return self["name"]
@property
def revision(self) -> Optional[str]:
return self.get("revision")
@property
def location(self) -> CodePipelineLocation:
return CodePipelineLocation(self["location"])
class CodePipelineArtifactCredentials(DictWrapper):
@property
def access_key_id(self) -> str:
return self["accessKeyId"]
@property
def secret_access_key(self) -> str:
return self["secretAccessKey"]
@property
def session_token(self) -> str:
return self["sessionToken"]
@property
def expiration_time(self) -> Optional[int]:
return self.get("expirationTime")
class CodePipelineData(DictWrapper):
"""CodePipeline Job Data"""
@property
def action_configuration(self) -> CodePipelineActionConfiguration:
"""CodePipeline action configuration"""
return CodePipelineActionConfiguration(self["actionConfiguration"])
@property
def input_artifacts(self) -> List[CodePipelineArtifact]:
"""Represents a CodePipeline input artifact"""
return [CodePipelineArtifact(item) for item in self["inputArtifacts"]]
@property
def output_artifacts(self) -> List[CodePipelineArtifact]:
"""Represents a CodePipeline output artifact"""
return [CodePipelineArtifact(item) for item in self["outputArtifacts"]]
@property
def artifact_credentials(self) -> CodePipelineArtifactCredentials:
"""Represents a CodePipeline artifact credentials"""
return CodePipelineArtifactCredentials(self["artifactCredentials"])
@property
def continuation_token(self) -> Optional[str]:
"""A continuation token if continuing job"""
return self.get("continuationToken")
class CodePipelineJobEvent(DictWrapper):
"""AWS CodePipeline Job Event
Documentation:
-------------
- https://docs.aws.amazon.com/codepipeline/latest/userguide/actions-invoke-lambda-function.html
- https://docs.aws.amazon.com/lambda/latest/dg/services-codepipeline.html
"""
def __init__(self, data: Dict[str, Any]):
super().__init__(data)
self._job = self["CodePipeline.job"]
@property
def get_id(self) -> str:
"""Job id"""
return self._job["id"]
@property
def account_id(self) -> str:
"""Account id"""
return self._job["accountId"]
@property
def data(self) -> CodePipelineData:
"""Code pipeline jab data"""
return CodePipelineData(self._job["data"])
@property
def user_parameters(self) -> str:
"""Action configuration user parameters"""
return self.data.action_configuration.configuration.user_parameters
@property
def decoded_user_parameters(self) -> Dict[str, Any]:
"""Json Decoded action configuration user parameters"""
return self.data.action_configuration.configuration.decoded_user_parameters
@property
def input_bucket_name(self) -> str:
"""Get the first input artifact bucket name"""
return self.data.input_artifacts[0].location.s3_location.bucket_name
@property
def input_object_key(self) -> str:
"""Get the first input artifact order key unquote plus"""
return self.data.input_artifacts[0].location.s3_location.object_key
def setup_s3_client(self):
"""Creates an S3 client
Uses the credentials passed in the event by CodePipeline. These
credentials can be used to access the artifact bucket.
Returns
-------
BaseClient
An S3 client with the appropriate credentials
"""
return boto3.client(
"s3",
aws_access_key_id=self.data.artifact_credentials.access_key_id,
aws_secret_access_key=self.data.artifact_credentials.secret_access_key,
aws_session_token=self.data.artifact_credentials.session_token,
)
def find_input_artifact(self, artifact_name: str) -> Optional[CodePipelineArtifact]:
"""Find an input artifact by artifact name
Parameters
----------
artifact_name : str
The name of the input artifact to look for
Returns
-------
CodePipelineArtifact, None
Matching CodePipelineArtifact if found
"""
for artifact in self.data.input_artifacts:
if artifact.name == artifact_name:
return artifact
return None
def get_artifact(self, artifact_name: str, filename: str) -> Optional[str]:
"""Get a file within an artifact zip on s3
Parameters
----------
artifact_name : str
Name of the S3 artifact to download
filename : str
The file name within the artifact zip to extract as a string
Returns
-------
str, None
Returns the contents file contents as a string
"""
artifact = self.find_input_artifact(artifact_name)
if artifact is None:
return None
with tempfile.NamedTemporaryFile() as tmp_file:
s3 = self.setup_s3_client()
bucket = artifact.location.s3_location.bucket_name
key = artifact.location.s3_location.key
s3.download_file(bucket, key, tmp_file.name)
with zipfile.ZipFile(tmp_file.name, "r") as zip_file:
return zip_file.read(filename).decode("UTF-8")
| [
"[email protected]"
] | |
725b1ecc0e3da1cae4765e332b02c4bac3fa7d1b | 01061bb17b26173a93b199d5988e3163e5516137 | /y2021/day07_align.py | b33ea433221b517804e772a0c1fbc204ae1c4a0f | [] | no_license | cathackk/advent-of-code | 514c337fc242414c6b82593b910614f0ba0f6e9b | 1886ec0092728fccb9da7dd9aaaf232059b661df | refs/heads/main | 2023-02-07T06:08:45.252807 | 2023-01-22T11:16:56 | 2023-01-22T11:16:56 | 231,619,831 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,366 | py | """
Advent of Code 2021
Day 7: The Treachery of Whales
https://adventofcode.com/2021/day/7
"""
from typing import Callable
from common.iteration import minmax
from meta.aoc_tools import data_path
def part_1(positions: list[int]) -> int:
"""
...
You quickly make a list of **the horizontal position of each crab** (your puzzle input). Crab
submarines have limited fuel, so you need to find a way to make all of their horizontal
positions match while requiring them to spend as little fuel as possible.
For example, consider the following horizontal positions:
>>> crabs = [16, 1, 2, 0, 4, 2, 7, 1, 2, 14]
This means there's a crab with horizontal position `16`, a crab with horizontal position `1`,
and so on.
Each change of `1` step in horizontal position of a single crab costs `1` fuel. You could choose
any horizontal position to align them all on, but the one that costs the least fuel is
horizontal position `2`:
- Move from `16` to `2`: `14` fuel
- Move from `1` to `2`: `1` fuel
- Move from `2` to `2`: `0` fuel
- Move from `0` to `2`: `2` fuel
- Move from `4` to `2`: `2` fuel
- Move from `2` to `2`: `0` fuel
- Move from `7` to `2`: `5` fuel
- Move from `1` to `2`: `1` fuel
- Move from `2` to `2`: `0` fuel
- Move from `14` to `2`: `12` fuel
This costs a total of `37` fuel.
>>> alignment_cost(crabs, destination=2)
37
This is the cheapest possible outcome:
>>> best_destination(crabs)
2
More expensive outcomes include:
>>> alignment_cost(crabs, destination=1)
41
>>> alignment_cost(crabs, destination=3)
39
>>> alignment_cost(crabs, destination=10)
71
Determine the horizontal position that the crabs can align to using the least fuel possible.
**How much fuel must they spend to align to that position?**
>>> part_1(crabs)
part 1: crabs align at position 2, which costs 37 fuel
37
"""
destination = best_destination(positions)
cost = alignment_cost(positions, destination)
print(f"part 1: crabs align at position {destination}, which costs {cost} fuel")
return cost
def part_2(positions: list[int]) -> int:
"""
As it turns out, crab submarine engines don't burn fuel at a constant rate. Instead, each change
of 1 step in horizontal position costs 1 more unit of fuel than the last: the first step costs
`1`, the second step costs `2`, the third step costs `3`, and so on.
As each crab moves, moving further becomes more expensive. This changes the best horizontal
position to align them all on; in the example above, this becomes `5`:
- Move from `16` to `5`: `66` fuel
- Move from `1` to `5`: `10` fuel
- Move from `2` to `5`: `6` fuel
- Move from `0` to `5`: `15` fuel
- Move from `4` to `5`: `1` fuel
- Move from `2` to `5`: `6` fuel
- Move from `7` to `5`: `3` fuel
- Move from `1` to `5`: `10` fuel
- Move from `2` to `5`: `6` fuel
- Move from `14` to `5`: `45` fuel
This costs a total of `168` fuel.
>>> crabs = [16, 1, 2, 0, 4, 2, 7, 1, 2, 14]
>>> alignment_cost(crabs, 5, cost_quadratic)
168
This is the new cheapest possible outcome:
>>> best_destination(crabs, cost_quadratic)
5
The old alignment position `2` now costs `206` fuel instead:
>>> alignment_cost(crabs, 2, cost_quadratic)
206
Determine the horizontal position that the crabs can align to using the least fuel possible so
they can make you an escape route! **How much fuel must they spend to align to that position?**
>>> part_2(crabs)
part 2: crabs align at position 5, which costs 168 fuel
168
"""
destination = best_destination(positions, cost_quadratic)
cost = alignment_cost(positions, destination, cost_quadratic)
print(f"part 2: crabs align at position {destination}, which costs {cost} fuel")
return cost
CostFunction = Callable[[int, int], int]
def cost_linear(pos_1: int, pos_2: int) -> int:
# distance
return abs(pos_1 - pos_2)
def cost_quadratic(pos_1: int, pos_2: int) -> int:
# 1 + 2 + 3 + 4 + ... per unit of distance
dist = abs(pos_1 - pos_2)
return (dist * (dist + 1)) // 2
def alignment_cost(
positions: list[int],
destination: int,
cost_fn: CostFunction = cost_linear,
) -> int:
return sum(cost_fn(pos, destination) for pos in positions)
def best_destination(positions: list[int], cost_fn: CostFunction = cost_linear):
# TODO: optimize
# convex function -> stop after it starts to grow
# can be likely estimated with a quadratic function (for both cost_fns?)
# part 1 -> probably median works?
return min(
range(*minmax(positions)),
key=lambda d: alignment_cost(positions, d, cost_fn)
)
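def best_destination_linear_median(positions: list[int]) -> int:
    # Sketch of the median shortcut mentioned in the TODO above (added for
    # illustration, not wired into main()): for the linear cost function the
    # total distance is minimised at any median of the positions, so the scan
    # over the whole range is unnecessary.
    ordered = sorted(positions)
    return ordered[len(ordered) // 2]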
def positions_from_file(fn: str) -> list[int]:
return [int(v) for v in next(open(fn)).strip().split(',')]
def main(input_path: str = data_path(__file__)) -> tuple[int, int]:
positions = positions_from_file(input_path)
result_1 = part_1(positions)
result_2 = part_2(positions)
return result_1, result_2
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
bd73eb5364831ce6092cee4003958ccbd7521ec5 | 44523252201190e2ab2b5d2ea494651d2674ab85 | /app.py | 15d314973aff44f6d6dbdf067de9862ee7645d63 | [] | no_license | akhmetov-bulat/Sky_pro_test | 6e1716d5dab1be844e5978f909fd5638627e2453 | ee122de7f463cff0afa1b3eeb76bb8d0d800d301 | refs/heads/main | 2023-08-10T20:23:01.417725 | 2021-09-30T11:33:34 | 2021-09-30T11:33:34 | 412,019,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,252 | py | from flask import Flask, request, render_template
from utils import read_users, write_users
app = Flask(__name__)
users = read_users()
@app.route('/')
def index():
return render_template('index.html', users=users)
@app.route('/search', methods = ['GET', 'POST'])
def search_page():
if request.method == 'POST':
found_users = []
text = request.form.get("seek_req")
if text:
for user in users:
if text in user['name']:
found_users.append(user)
return render_template('search.html', users=users, found_users=found_users)
return render_template('search.html')
@app.route('/add_user', methods = ['GET','POST'])
def add_user():
global users
if request.method == 'POST':
name = request.form.get("name")
age = request.form.get('age')
is_blocked = request.form.get('is_blocked')
date = request.form.get('unblock_date')
users = write_users(name, age, is_blocked, date)
return '''<p> пользователь добавлен</p>
<a href="/">Вернутся на главную</a>
'''
return render_template('add_user.html')
if __name__ == '__main__':
app.run()
| [
"[email protected]"
] | |
64901ebc6d6a569b5f7dbafb2930b138ce233f0a | b8106a0cd1e1604de98ef2b4b5311ec3f36d122a | /ask-sdk-core/tests/unit/data/model_test_object_2.py | ddf7d881186a082e8197aeae149f29a137c0137f | [
"Apache-2.0"
] | permissive | alexa/alexa-skills-kit-sdk-for-python | b5e8288c6dd7a3ff6e13b19a7f0026561087ed93 | 7e13ca69b240985584dff6ec633a27598a154ca1 | refs/heads/master | 2023-06-26T02:01:58.858446 | 2023-06-08T18:20:05 | 2023-06-08T18:20:05 | 130,283,857 | 560 | 239 | Apache-2.0 | 2023-05-23T18:51:30 | 2018-04-19T23:40:46 | Python | UTF-8 | Python | false | false | 890 | py | # -*- coding: utf-8 -*-
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the
# License.
#
class ModelTestObject2(object):
deserialized_types = {
'int_var': 'int'
}
attribute_map = {
'int_var': 'var4Int'
}
def __init__(self, int_var=None):
self.int_var = int_var
def __eq__(self, other):
return self.__dict__ == other.__dict__
| [
"[email protected]"
] | |
e86c60d40b6e21775f17c40b7e188da5bf5f7913 | bb03266d7a2134e59ee300754e227e9e8d2b891b | /oblig4_2f.py | bc92927e7122109c46ae340c66922a6915876d95 | [] | no_license | linegpe/FYS3120 | 66cb1d01f81af2b5210f528cc41860794d6fc3f2 | 37f6bdb003b084b53d05127c4191f069393d2cc0 | refs/heads/master | 2021-01-19T10:25:32.543556 | 2017-02-19T21:52:06 | 2017-02-19T21:52:06 | 82,178,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | import numpy as np
from pylab import meshgrid,cm,imshow,contour,clabel,colorbar,axis,title,show,xlabel,ylabel
m = 1
R = 1
g = 1
omega = 1.5
def Hamiltonian(theta,p):
return p**2/(2*m*R) - m*g*R*np.cos(theta) - 0.5*m*omega**2*R**2*(np.sin(theta))**2
x = np.arange(-np.pi,np.pi,0.1)
y = np.arange(-2.5,2.5,0.1)
X,Y = meshgrid(x, y) # grid of point
Z = Hamiltonian(X, Y) # evaluation of the function on the grid
cset = contour(X,Y,Z,np.arange(-1.5,1.5,0.1),linewidths=2,cmap=cm.Set2)
clabel(cset,inline=True,fmt='%1.1f',fontsize=15)
colorbar()
xlabel("Theta",fontsize=15)
ylabel("Momentum",fontsize=15)
show() | [
"[email protected]"
] | |
1f8fa2b568db0e9a6bdc909f701ec4e5457ce522 | 15f7312f0b18f8fc28bab1d81f3769d25556e85c | /zhihu/zhihu/views.py | 07ec87741d68968f29ec52929087edd26c85631f | [] | no_license | JatWaston/Django | 6a6b3f2ed5e79882ed0e1b6bc413e396ac105e93 | dfdcfc30a3722a2fae3fcf08528e2a515b954b34 | refs/heads/master | 2016-08-06T21:42:10.797625 | 2015-07-08T10:50:58 | 2015-07-08T10:50:58 | 38,563,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,638 | py | # -*- coding:UTF-8 -*-
__author__ = 'JatWaston'
from django.http import HttpResponse,Http404
from django.shortcuts import render_to_response
from daily.models import ZhDaily
import datetime
import time
import urllib2,urllib,cookielib
from bs4 import BeautifulSoup
from django.views.decorators.csrf import csrf_exempt
from daily.models import *
import hashlib
from zhihu import settings
import json
from django.utils.timezone import utc
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def hello(request):
return HttpResponse("Hello World")
def home(request):
return HttpResponse("Home Page")
def time(request):
time = datetime.datetime.now()
html = "<html><body>现在时间为:%s.</body></html>" % time
return HttpResponse(html)
def hours_ahead(request,offset):
try:
offset = int(offset)
except ValueError:
raise Http404()
dt = datetime.datetime.now() + datetime.timedelta(hours=offset)
html = "<html><body>再过%s小时的时间为%s</body></html>" % (offset,dt)
return HttpResponse(html)
def current_datetime(request):
now = datetime.datetime.now()
return render_to_response('current_datetime.html', {'current_date': now})
def daily(request):
items = ZhDaily.objects.all()
today = datetime.datetime.today().replace(tzinfo=utc).strftime("%Y-%m-%d")
print today
return render_to_response('daily.html',{'item_list':items,'date_value':today})
def daily_time(request,timeoffset):
try:
timeoffset = str(timeoffset)
except ValueError:
raise Http404()
print timeoffset
oneday = datetime.timedelta(days=1)
    # Be especially careful with time zones: if USE_TZ = True is enabled, time conversions must handle the time zone explicitly
today = datetime.datetime.strptime(timeoffset,"%Y-%m-%d").replace(tzinfo=utc) #%Y%m%d %H:%M:%S
tomorrow = today + oneday
    # Fetch the records whose date lies between today and tomorrow
print today
print tomorrow
items = ZhDaily.objects.filter(publish_date__gte = today,publish_date__lt = tomorrow)
return render_to_response('daily.html',{'item_list':items,'date_value':timeoffset})
def daily_search(request):
if 'dateTime1' in request.GET:
message = 'You searched for:%s' % request.GET['time']
else:
message = 'You submitted an empty form.'
return daily_time(request,request.GET['time'])
# return HttpResponse(message)
def json_test(request):
response_data = {}
response_data['message'] = '获取失败'
response_data['code'] = '-1'
return HttpResponse(json.dumps(response_data),content_type="application/json")
def info(request):
values = request.META.items()
values.sort()
html = []
for k,v in values:
html.append('<tr><td>%s</td></tr>%s</td></tr>' % (k,v))
return HttpResponse('<table>%s</table>' % '\n'.join(html))
def search_form(request):
return render_to_response('search_form.html')
def search(request):
if 'q' in request.GET:
message = 'You searched for:%s' % request.GET['q']
else:
message = 'You submitted an empty form.'
return HttpResponse(message)
def craw_daily(request):
url = "http://daily.zhihu.com/"
content = htmlContent(url)
soup = BeautifulSoup(content)
items = soup.find_all('div',attrs={'class':'box'})
for item in items:
# print str(item.contents)
# print "xxxxxx"
# print type(str(item.contents))
contentSoup = BeautifulSoup(str(item.contents))
imgs = contentSoup.find_all("img",attrs={"class":"preview-image"})
if len(imgs) > 0:
preview_img = imgs[0].get('src')
title = contentSoup.find_all('span',attrs={'class':'title'})
        title = title[0].get_text()  # get the text content
link = contentSoup.find_all('a',attrs={'class':'link-button'})
if len(link) > 0:
link = link[0].get('href')
hash = hashlib.md5()
hash.update(link)
time = datetime.datetime.now().replace(tzinfo=utc)
print time
dailyModel = ZhDaily(title=title,link=link,img=preview_img,md5=hash.hexdigest(),publish_date=time)
dailyModel.save()
print 'link: %s title: %s img: %s' % (link,title,preview_img)
return HttpResponse("爬取知乎日报...")
# Fetch the content of a web page
def htmlContent(url):
cj = cookielib.CookieJar()
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
urllib2.install_opener(opener)
req = urllib2.Request(url,headers=headers);
response = urllib2.urlopen(req)
return response.read().decode('utf-8')
| [
"[email protected]"
] | |
cf486499b4658c2ec1a1e82ef0354e5906243bcd | c373b35bb5b88f735326895af73504f8016055b8 | /manage.py | 2249dfb8ba1c0331ace470ed6c5f77c5cc3f80bf | [] | no_license | coylec/django_todo | 33f73b86ce87c400ae276e6136a2dd6c49373044 | 205eb457949c60bf2eb5d9bb6aedbc7c5d3f7702 | refs/heads/master | 2021-01-01T06:15:15.957807 | 2017-07-16T15:45:43 | 2017-07-16T15:45:43 | 97,393,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "todo_list_django.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
6bfdc00bf049b406898e609832d0b6620596ba3c | 9f541271263dceb0b7b0134ab6cff5ed324c6444 | /mqtt/conf.py | 39075bcb13bc4d5982cd9ed9e39b1f7124cd336e | [] | no_license | shannondec/bonaventureli.github.io | bd0fa5286ab4077b690f2621425ddd78f16e72dc | ab442ff704dc539b5c9da1d7cacb826256392c17 | refs/heads/main | 2023-06-04T10:34:40.224571 | 2021-06-29T08:38:53 | 2021-06-29T08:38:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,837 | py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'mqtt'
copyright = '2021, bona'
author = 'bona'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static'] | [
"[email protected]"
] | |
6b52fab4e9d87a86650d05adc02f08a5b581a36c | 847a03b24e3c2e9f99b66490ab1022cd3fc634a4 | /10 spectrum analyzer/msgeq7_read.py | d2cf1e9c41a8c292aaf2d449f1be390ba91be9a8 | [] | no_license | raspberry-pi-maker/RaspberryPi-For-Makers | a4f77a361bf86d32390e26ff39759f765e9f0d2e | 00b985ee3b4bd17a330fd7ba8278130c14f04b69 | refs/heads/master | 2022-12-24T20:40:51.944004 | 2022-12-18T11:54:05 | 2022-12-18T11:54:05 | 90,004,858 | 4 | 10 | null | null | null | null | UTF-8 | Python | false | false | 2,099 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
#If this code works, it was written by Seunghyun Lee(www.bluebaynetworks.co.kr).
#If not, I don't know who wrote it
import spidev
import time
import os
import RPi.GPIO as GPIO
# The analog voltages delivered by the two MSGEQ7 chips will be read on ADC channels 0 and 1
LEFT_AUDIO = 1
RIGHT_AUDIO = 0
delay = 0.1
index = 0
# Buffer storing the Left audio data
# 7-band buffer
left = [0,0,0,0,0,0,0]
right = [0,0,0,0,0,0,0]
# Connect the MSGEQ7 strobe pin (pin 4) to GPIO 17
strobe = 17
# Connect the MSGEQ7 reset pin (pin 7) to GPIO 27
res = 27
# Function that reads a value from the MCP3008 chip; channels 0-7 are allowed.
def ReadChannel(channel):
# adc = spi.xfer2([1,(8+channel)<<4,0])
adc = spi.xfer([1,(8+channel)<<4,0])
data = ((adc[1]&3) << 8) + adc[2]
return data
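# (Added note) In ReadChannel above, the 3-byte SPI frame sends the MCP3008
# start bit, then single-ended mode plus the channel number ((8+channel)<<4);
# the reply's last 10 bits ((adc[1] & 3) << 8 | adc[2]) form the 0-1023 sample.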
# Read the band data from the MSGEQ7 chips.
def readMSGEQ7():
GPIO.output(res, 1)
GPIO.output(res, 0)
time.sleep(0.00001)
for band in range(0, 7):
GPIO.output(strobe, 0)
time.sleep(0.00001)
        # Read the MSGEQ7 output values from the MCP3008 chip over SPI
left[band] = ReadChannel(LEFT_AUDIO)
right[band] = ReadChannel(RIGHT_AUDIO)
time.sleep(0.00001)
GPIO.output(strobe,1)
# Initialize GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(strobe, GPIO.OUT)
GPIO.setup(res, GPIO.OUT)
# In the initial state, keep the reset pin LOW and the strobe pin HIGH.
GPIO.output(res, 0)
GPIO.output(strobe, 1)
# Initialize the SPI bus to read the MSGEQ7 output values through the MCP3008 ADC chip.
spi = spidev.SpiDev()
# Open the SPI device (/dev/spidev0.0).
spi.open(0,0)
try:
while True:
readMSGEQ7()
print("Left: %5d %5d %5d %5d %5d %5d %5d Right: %5d %5d %5d %5d %5d %5d %5d" % (left[0], left[1], left[2], left[3], left[4], left[5], left[6], right[0], right[1], right[2], right[3], right[4], right[5], right[6]))
time.sleep(delay)
except KeyboardInterrupt:
print "Now Exit"
finally:
GPIO.cleanup()
spi.close()
| [
"[email protected]"
] | |
d82afcb1df18763a5ddb8124c7591f323607a15c | 045cb1a5638c3575296f83471758dc09a8065725 | /addons/account/models/account_move.py | 18f08e13ba1df4a68c9b07ef4dfad67d973a6b25 | [] | no_license | marionumza/saas | 7236842b0db98d1a0d0c3c88df32d268509629cb | 148dd95d991a348ebbaff9396759a7dd1fe6e101 | refs/heads/main | 2023-03-27T14:08:57.121601 | 2021-03-20T07:59:08 | 2021-03-20T07:59:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237,826 | py | # -*- coding: utf-8 -*-
from harpiya import api, fields, models, _
from harpiya.exceptions import RedirectWarning, UserError, ValidationError, AccessError
from harpiya.tools import float_is_zero, float_compare, safe_eval, date_utils, email_split, email_escape_char, email_re
from harpiya.tools.misc import formatLang, format_date, get_lang
from datetime import date, timedelta
from itertools import groupby
from itertools import zip_longest
from hashlib import sha256
from json import dumps
import json
import re
#forbidden fields
INTEGRITY_HASH_MOVE_FIELDS = ('date', 'journal_id', 'company_id')
INTEGRITY_HASH_LINE_FIELDS = ('debit', 'credit', 'account_id', 'partner_id')
def calc_check_digits(number):
"""Calculate the extra digits that should be appended to the number to make it a valid number.
Source: python-stdnum iso7064.mod_97_10.calc_check_digits
"""
number_base10 = ''.join(str(int(x, 36)) for x in number)
checksum = int(number_base10) % 97
return '%02d' % ((98 - 100 * checksum) % 97)
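# Worked note (added for illustration; the sample value is hypothetical): for a
# base string such as 'INV2021', appending calc_check_digits('INV2021') yields
# a string whose ISO 7064 mod 97-10 checksum is 1, because
# (100 * checksum + (98 - 100 * checksum)) % 97 == 98 % 97 == 1.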
class AccountMove(models.Model):
_name = "account.move"
_inherit = ['portal.mixin', 'mail.thread', 'mail.activity.mixin']
_description = "Journal Entries"
_order = 'date desc, name desc, id desc'
_mail_post_access = 'read'
@api.model
def _get_default_journal(self):
''' Get the default journal.
It could either be passed through the context using the 'default_journal_id' key containing its id,
either be determined by the default type.
'''
move_type = self._context.get('default_type', 'entry')
journal_type = 'general'
if move_type in self.get_sale_types(include_receipts=True):
journal_type = 'sale'
elif move_type in self.get_purchase_types(include_receipts=True):
journal_type = 'purchase'
if self._context.get('default_journal_id'):
journal = self.env['account.journal'].browse(self._context['default_journal_id'])
if move_type != 'entry' and journal.type != journal_type:
raise UserError(_("Cannot create an invoice of type %s with a journal having %s as type.") % (move_type, journal.type))
else:
company_id = self._context.get('force_company', self._context.get('default_company_id', self.env.company.id))
domain = [('company_id', '=', company_id), ('type', '=', journal_type)]
journal = None
if self._context.get('default_currency_id'):
currency_domain = domain + [('currency_id', '=', self._context['default_currency_id'])]
journal = self.env['account.journal'].search(currency_domain, limit=1)
if not journal:
journal = self.env['account.journal'].search(domain, limit=1)
if not journal:
error_msg = _('Please define an accounting miscellaneous journal in your company')
if journal_type == 'sale':
error_msg = _('Please define an accounting sale journal in your company')
elif journal_type == 'purchase':
error_msg = _('Please define an accounting purchase journal in your company')
raise UserError(error_msg)
return journal
@api.model
def _get_default_invoice_date(self):
return fields.Date.today() if self._context.get('default_type', 'entry') in ('in_invoice', 'in_refund', 'in_receipt') else False
@api.model
def _get_default_currency(self):
''' Get the default currency from either the journal, either the default journal's company. '''
journal = self._get_default_journal()
return journal.currency_id or journal.company_id.currency_id
@api.model
def _get_default_invoice_incoterm(self):
''' Get the default incoterm for invoice. '''
return self.env.company.incoterm_id
# ==== Business fields ====
name = fields.Char(string='Number', required=True, readonly=True, copy=False, default='/')
date = fields.Date(string='Date', required=True, index=True, readonly=True,
states={'draft': [('readonly', False)]},
default=fields.Date.context_today)
ref = fields.Char(string='Reference', copy=False)
narration = fields.Text(string='Terms and Conditions')
state = fields.Selection(selection=[
('draft', 'Draft'),
('posted', 'Posted'),
('cancel', 'Cancelled')
], string='Status', required=True, readonly=True, copy=False, tracking=True,
default='draft')
type = fields.Selection(selection=[
('entry', 'Journal Entry'),
('out_invoice', 'Customer Invoice'),
('out_refund', 'Customer Credit Note'),
('in_invoice', 'Vendor Bill'),
('in_refund', 'Vendor Credit Note'),
('out_receipt', 'Sales Receipt'),
('in_receipt', 'Purchase Receipt'),
], string='Type', required=True, store=True, index=True, readonly=True, tracking=True,
default="entry", change_default=True)
type_name = fields.Char('Type Name', compute='_compute_type_name')
to_check = fields.Boolean(string='To Check', default=False,
help='If this checkbox is ticked, it means that the user was not sure of all the related informations at the time of the creation of the move and that the move needs to be checked again.')
journal_id = fields.Many2one('account.journal', string='Journal', required=True, readonly=True,
states={'draft': [('readonly', False)]},
domain="[('company_id', '=', company_id)]",
default=_get_default_journal)
user_id = fields.Many2one(related='invoice_user_id', string='User')
company_id = fields.Many2one(string='Company', store=True, readonly=True,
related='journal_id.company_id', change_default=True)
company_currency_id = fields.Many2one(string='Company Currency', readonly=True,
related='journal_id.company_id.currency_id')
currency_id = fields.Many2one('res.currency', store=True, readonly=True, tracking=True, required=True,
states={'draft': [('readonly', False)]},
string='Currency',
default=_get_default_currency)
line_ids = fields.One2many('account.move.line', 'move_id', string='Journal Items', copy=True, readonly=True,
states={'draft': [('readonly', False)]})
partner_id = fields.Many2one('res.partner', readonly=True, tracking=True,
states={'draft': [('readonly', False)]},
domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]",
string='Partner', change_default=True)
commercial_partner_id = fields.Many2one('res.partner', string='Commercial Entity', store=True, readonly=True,
compute='_compute_commercial_partner_id')
# === Amount fields ===
amount_untaxed = fields.Monetary(string='Untaxed Amount', store=True, readonly=True, tracking=True,
compute='_compute_amount')
amount_tax = fields.Monetary(string='Tax', store=True, readonly=True,
compute='_compute_amount')
amount_total = fields.Monetary(string='Total', store=True, readonly=True,
compute='_compute_amount',
inverse='_inverse_amount_total')
amount_residual = fields.Monetary(string='Amount Due', store=True,
compute='_compute_amount')
amount_untaxed_signed = fields.Monetary(string='Untaxed Amount Signed', store=True, readonly=True,
compute='_compute_amount', currency_field='company_currency_id')
amount_tax_signed = fields.Monetary(string='Tax Signed', store=True, readonly=True,
compute='_compute_amount', currency_field='company_currency_id')
amount_total_signed = fields.Monetary(string='Total Signed', store=True, readonly=True,
compute='_compute_amount', currency_field='company_currency_id')
amount_residual_signed = fields.Monetary(string='Amount Due Signed', store=True,
compute='_compute_amount', currency_field='company_currency_id')
amount_by_group = fields.Binary(string="Tax amount by group",
compute='_compute_invoice_taxes_by_group')
# ==== Cash basis feature fields ====
tax_cash_basis_rec_id = fields.Many2one(
'account.partial.reconcile',
string='Tax Cash Basis Entry of',
help="Technical field used to keep track of the tax cash basis reconciliation. "
"This is needed when cancelling the source: it will post the inverse journal entry to cancel that part too.")
# ==== Auto-post feature fields ====
auto_post = fields.Boolean(string='Post Automatically', default=False,
help='If this checkbox is ticked, this entry will be automatically posted at its date.')
# ==== Reverse feature fields ====
reversed_entry_id = fields.Many2one('account.move', string="Reversal of", readonly=True, copy=False)
reversal_move_id = fields.One2many('account.move', 'reversed_entry_id')
# =========================================================
# Invoice related fields
# =========================================================
# ==== Business fields ====
fiscal_position_id = fields.Many2one('account.fiscal.position', string='Fiscal Position', readonly=True,
states={'draft': [('readonly', False)]},
domain="[('company_id', '=', company_id)]",
help="Fiscal positions are used to adapt taxes and accounts for particular customers or sales orders/invoices. "
"The default value comes from the customer.")
invoice_user_id = fields.Many2one('res.users', copy=False, tracking=True,
string='Salesperson',
default=lambda self: self.env.user)
user_id = fields.Many2one(string='User', related='invoice_user_id',
help='Technical field used to fit the generic behavior in mail templates.')
invoice_payment_state = fields.Selection(selection=[
('not_paid', 'Not Paid'),
('in_payment', 'In Payment'),
('paid', 'Paid')],
string='Payment', store=True, readonly=True, copy=False, tracking=True,
compute='_compute_amount')
invoice_date = fields.Date(string='Invoice/Bill Date', readonly=True, index=True, copy=False,
states={'draft': [('readonly', False)]},
default=_get_default_invoice_date)
invoice_date_due = fields.Date(string='Due Date', readonly=True, index=True, copy=False,
states={'draft': [('readonly', False)]})
invoice_payment_ref = fields.Char(string='Payment Reference', index=True, copy=False,
help="The payment reference to set on journal items.")
invoice_sent = fields.Boolean(readonly=True, default=False, copy=False,
help="It indicates that the invoice has been sent.")
invoice_origin = fields.Char(string='Origin', readonly=True, tracking=True,
help="The document(s) that generated the invoice.")
invoice_payment_term_id = fields.Many2one('account.payment.term', string='Payment Terms',
domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]",
readonly=True, states={'draft': [('readonly', False)]})
# /!\ invoice_line_ids is just a subset of line_ids.
invoice_line_ids = fields.One2many('account.move.line', 'move_id', string='Invoice lines',
copy=False, readonly=True,
domain=[('exclude_from_invoice_tab', '=', False)],
states={'draft': [('readonly', False)]})
invoice_partner_bank_id = fields.Many2one('res.partner.bank', string='Bank Account',
help='Bank Account Number to which the invoice will be paid. A Company bank account if this is a Customer Invoice or Vendor Credit Note, otherwise a Partner bank account number.',
domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]")
invoice_incoterm_id = fields.Many2one('account.incoterms', string='Incoterm',
default=_get_default_invoice_incoterm,
help='International Commercial Terms are a series of predefined commercial terms used in international transactions.')
# ==== Payment widget fields ====
invoice_outstanding_credits_debits_widget = fields.Text(groups="account.group_account_invoice",
compute='_compute_payments_widget_to_reconcile_info')
invoice_payments_widget = fields.Text(groups="account.group_account_invoice",
compute='_compute_payments_widget_reconciled_info')
invoice_has_outstanding = fields.Boolean(groups="account.group_account_invoice",
compute='_compute_payments_widget_to_reconcile_info')
# ==== Vendor bill fields ====
invoice_vendor_bill_id = fields.Many2one('account.move', store=False,
domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]",
string='Vendor Bill',
help="Auto-complete from a past bill.")
invoice_source_email = fields.Char(string='Source Email', tracking=True)
invoice_partner_display_name = fields.Char(compute='_compute_invoice_partner_display_info', store=True)
invoice_partner_icon = fields.Char(compute='_compute_invoice_partner_display_info', store=False, compute_sudo=True)
# ==== Cash rounding fields ====
invoice_cash_rounding_id = fields.Many2one('account.cash.rounding', string='Cash Rounding Method',
readonly=True, states={'draft': [('readonly', False)]},
help='Defines the smallest coinage of the currency that can be used to pay by cash.')
# ==== Fields to set the sequence, on the first invoice of the journal ====
invoice_sequence_number_next = fields.Char(string='Next Number',
compute='_compute_invoice_sequence_number_next',
inverse='_inverse_invoice_sequence_number_next')
invoice_sequence_number_next_prefix = fields.Char(string='Next Number Prefix',
compute="_compute_invoice_sequence_number_next")
# ==== Display purpose fields ====
invoice_filter_type_domain = fields.Char(compute='_compute_invoice_filter_type_domain',
help="Technical field used to have a dynamic domain on journal / taxes in the form view.")
bank_partner_id = fields.Many2one('res.partner', help='Technical field to get the domain on the bank', compute='_compute_bank_partner_id')
invoice_has_matching_suspense_amount = fields.Boolean(compute='_compute_has_matching_suspense_amount',
groups='account.group_account_invoice',
help="Technical field used to display an alert on invoices if there is at least a matching amount in any supsense account.")
tax_lock_date_message = fields.Char(
compute='_compute_tax_lock_date_message',
help="Technical field used to display a message when the invoice's accounting date is prior of the tax lock date.")
# Technical field to hide Reconciled Entries stat button
has_reconciled_entries = fields.Boolean(compute="_compute_has_reconciled_entries")
# ==== Hash Fields ====
restrict_mode_hash_table = fields.Boolean(related='journal_id.restrict_mode_hash_table')
secure_sequence_number = fields.Integer(string="Inalteralbility No Gap Sequence #", readonly=True, copy=False)
inalterable_hash = fields.Char(string="Inalterability Hash", readonly=True, copy=False)
string_to_hash = fields.Char(compute='_compute_string_to_hash', readonly=True)
@api.model
def _field_will_change(self, record, vals, field_name):
if field_name not in vals:
return False
field = record._fields[field_name]
if field.type == 'many2one':
return record[field_name].id != vals[field_name]
if field.type == 'many2many':
current_ids = set(record[field_name].ids)
after_write_ids = set(record.new({field_name: vals[field_name]})[field_name].ids)
return current_ids != after_write_ids
if field.type == 'one2many':
return True
if field.type == 'monetary' and record[field.currency_field]:
return not record[field.currency_field].is_zero(record[field_name] - vals[field_name])
if field.type == 'float':
record_value = field.convert_to_cache(record[field_name], record)
to_write_value = field.convert_to_cache(vals[field_name], record)
return record_value != to_write_value
return record[field_name] != vals[field_name]
@api.model
def _cleanup_write_orm_values(self, record, vals):
cleaned_vals = dict(vals)
for field_name, value in vals.items():
if not self._field_will_change(record, vals, field_name):
del cleaned_vals[field_name]
return cleaned_vals
# -------------------------------------------------------------------------
# ONCHANGE METHODS
# -------------------------------------------------------------------------
@api.onchange('invoice_date')
def _onchange_invoice_date(self):
if self.invoice_date:
if not self.invoice_payment_term_id:
self.invoice_date_due = self.invoice_date
self.date = self.invoice_date
self._onchange_currency()
@api.onchange('journal_id')
def _onchange_journal(self):
if self.journal_id and self.journal_id.currency_id:
new_currency = self.journal_id.currency_id
if new_currency != self.currency_id:
self.currency_id = new_currency
self._onchange_currency()
@api.onchange('partner_id')
def _onchange_partner_id(self):
self = self.with_context(force_company=self.journal_id.company_id.id)
warning = {}
if self.partner_id:
rec_account = self.partner_id.property_account_receivable_id
pay_account = self.partner_id.property_account_payable_id
if not rec_account and not pay_account:
action = self.env.ref('account.action_account_config')
msg = _('Cannot find a chart of accounts for this company, You should configure it. \nPlease go to Account Configuration.')
raise RedirectWarning(msg, action.id, _('Go to the configuration panel'))
p = self.partner_id
if p.invoice_warn == 'no-message' and p.parent_id:
p = p.parent_id
if p.invoice_warn and p.invoice_warn != 'no-message':
# Block if partner only has warning but parent company is blocked
if p.invoice_warn != 'block' and p.parent_id and p.parent_id.invoice_warn == 'block':
p = p.parent_id
warning = {
'title': _("Warning for %s") % p.name,
'message': p.invoice_warn_msg
}
if p.invoice_warn == 'block':
self.partner_id = False
return {'warning': warning}
if self.is_sale_document(include_receipts=True) and self.partner_id.property_payment_term_id:
self.invoice_payment_term_id = self.partner_id.property_payment_term_id
new_term_account = self.partner_id.commercial_partner_id.property_account_receivable_id
elif self.is_purchase_document(include_receipts=True) and self.partner_id.property_supplier_payment_term_id:
self.invoice_payment_term_id = self.partner_id.property_supplier_payment_term_id
new_term_account = self.partner_id.commercial_partner_id.property_account_payable_id
else:
new_term_account = None
for line in self.line_ids:
line.partner_id = self.partner_id.commercial_partner_id
if new_term_account and line.account_id.user_type_id.type in ('receivable', 'payable'):
line.account_id = new_term_account
self._compute_bank_partner_id()
self.invoice_partner_bank_id = self.bank_partner_id.bank_ids and self.bank_partner_id.bank_ids[0]
# Find the new fiscal position.
delivery_partner_id = self._get_invoice_delivery_partner_id()
new_fiscal_position_id = self.env['account.fiscal.position'].with_context(force_company=self.company_id.id).get_fiscal_position(
self.partner_id.id, delivery_id=delivery_partner_id)
self.fiscal_position_id = self.env['account.fiscal.position'].browse(new_fiscal_position_id)
self._recompute_dynamic_lines()
if warning:
return {'warning': warning}
@api.onchange('date', 'currency_id')
def _onchange_currency(self):
if not self.currency_id:
return
if self.is_invoice(include_receipts=True):
company_currency = self.company_id.currency_id
has_foreign_currency = self.currency_id and self.currency_id != company_currency
for line in self._get_lines_onchange_currency():
new_currency = has_foreign_currency and self.currency_id
line.currency_id = new_currency
line._onchange_currency()
else:
self.line_ids._onchange_currency()
self._recompute_dynamic_lines(recompute_tax_base_amount=True)
@api.onchange('invoice_payment_ref')
def _onchange_invoice_payment_ref(self):
for line in self.line_ids.filtered(lambda line: line.account_id.user_type_id.type in ('receivable', 'payable')):
line.name = self.invoice_payment_ref
@api.onchange('invoice_vendor_bill_id')
def _onchange_invoice_vendor_bill(self):
if self.invoice_vendor_bill_id:
# Copy invoice lines.
for line in self.invoice_vendor_bill_id.invoice_line_ids:
copied_vals = line.copy_data()[0]
copied_vals['move_id'] = self.id
new_line = self.env['account.move.line'].new(copied_vals)
new_line.recompute_tax_line = True
# Copy payment terms.
self.invoice_payment_term_id = self.invoice_vendor_bill_id.invoice_payment_term_id
# Copy currency.
if self.currency_id != self.invoice_vendor_bill_id.currency_id:
self.currency_id = self.invoice_vendor_bill_id.currency_id
# Reset
self.invoice_vendor_bill_id = False
self._recompute_dynamic_lines()
@api.onchange('type')
def _onchange_type(self):
''' Onchange made to filter the partners depending of the type. '''
if self.is_sale_document(include_receipts=True):
if self.env['ir.config_parameter'].sudo().get_param('account.use_invoice_terms'):
self.narration = self.company_id.invoice_terms or self.env.company.invoice_terms
@api.onchange('invoice_line_ids')
def _onchange_invoice_line_ids(self):
current_invoice_lines = self.line_ids.filtered(lambda line: not line.exclude_from_invoice_tab)
others_lines = self.line_ids - current_invoice_lines
if others_lines and current_invoice_lines - self.invoice_line_ids:
others_lines[0].recompute_tax_line = True
self.line_ids = others_lines + self.invoice_line_ids
self._onchange_recompute_dynamic_lines()
@api.onchange('line_ids', 'invoice_payment_term_id', 'invoice_date_due', 'invoice_cash_rounding_id', 'invoice_vendor_bill_id')
def _onchange_recompute_dynamic_lines(self):
self._recompute_dynamic_lines()
@api.model
def _get_tax_grouping_key_from_tax_line(self, tax_line):
''' Create the dictionary based on a tax line that will be used as key to group taxes together.
/!\ Must be consistent with '_get_tax_grouping_key_from_base_line'.
:param tax_line: An account.move.line being a tax line (with 'tax_repartition_line_id' set then).
:return: A dictionary containing all fields on which the tax will be grouped.
'''
return {
'tax_repartition_line_id': tax_line.tax_repartition_line_id.id,
'account_id': tax_line.account_id.id,
'currency_id': tax_line.currency_id.id,
'analytic_tag_ids': [(6, 0, tax_line.tax_line_id.analytic and tax_line.analytic_tag_ids.ids or [])],
'analytic_account_id': tax_line.tax_line_id.analytic and tax_line.analytic_account_id.id,
'tax_ids': [(6, 0, tax_line.tax_ids.ids)],
'tag_ids': [(6, 0, tax_line.tag_ids.ids)],
}
@api.model
def _get_tax_grouping_key_from_base_line(self, base_line, tax_vals):
''' Create the dictionary based on a base line that will be used as key to group taxes together.
/!\ Must be consistent with '_get_tax_grouping_key_from_tax_line'.
:param base_line: An account.move.line being a base line (that could contains something in 'tax_ids').
:param tax_vals: An element of compute_all(...)['taxes'].
:return: A dictionary containing all fields on which the tax will be grouped.
'''
tax_repartition_line = self.env['account.tax.repartition.line'].browse(tax_vals['tax_repartition_line_id'])
account = base_line._get_default_tax_account(tax_repartition_line) or base_line.account_id
return {
'tax_repartition_line_id': tax_vals['tax_repartition_line_id'],
'account_id': account.id,
'currency_id': base_line.currency_id.id,
'analytic_tag_ids': [(6, 0, tax_vals['analytic'] and base_line.analytic_tag_ids.ids or [])],
'analytic_account_id': tax_vals['analytic'] and base_line.analytic_account_id.id,
'tax_ids': [(6, 0, tax_vals['tax_ids'])],
'tag_ids': [(6, 0, tax_vals['tag_ids'])],
}
def _recompute_tax_lines(self, recompute_tax_base_amount=False):
''' Compute the dynamic tax lines of the journal entry.
:param lines_map: The line_ids dispatched by type containing:
* base_lines: The lines having a tax_ids set.
* tax_lines: The lines having a tax_line_id set.
* terms_lines: The lines generated by the payment terms of the invoice.
* rounding_lines: The cash rounding lines of the invoice.
'''
self.ensure_one()
in_draft_mode = self != self._origin
def _serialize_tax_grouping_key(grouping_dict):
''' Serialize the dictionary values to be used in the taxes_map.
:param grouping_dict: The values returned by '_get_tax_grouping_key_from_tax_line' or '_get_tax_grouping_key_from_base_line'.
:return: A string representing the values.
'''
return '-'.join(str(v) for v in grouping_dict.values())
def _compute_base_line_taxes(base_line):
''' Compute taxes amounts both in company currency / foreign currency as the ratio between
amount_currency & balance could not be the same as the expected currency rate.
The 'amount_currency' value will be set on compute_all(...)['taxes'] in multi-currency.
:param base_line: The account.move.line owning the taxes.
:return: The result of the compute_all method.
'''
move = base_line.move_id
if move.is_invoice(include_receipts=True):
sign = -1 if move.is_inbound() else 1
quantity = base_line.quantity
if base_line.currency_id:
price_unit_foreign_curr = sign * base_line.price_unit * (1 - (base_line.discount / 100.0))
price_unit_comp_curr = base_line.currency_id._convert(price_unit_foreign_curr, move.company_id.currency_id, move.company_id, move.date)
else:
price_unit_foreign_curr = 0.0
price_unit_comp_curr = sign * base_line.price_unit * (1 - (base_line.discount / 100.0))
else:
quantity = 1.0
price_unit_foreign_curr = base_line.amount_currency
price_unit_comp_curr = base_line.balance
handle_price_include = move.is_invoice(include_receipts=True)
balance_taxes_res = base_line.tax_ids._origin.compute_all(
price_unit_comp_curr,
currency=base_line.company_currency_id,
quantity=quantity,
product=base_line.product_id,
partner=base_line.partner_id,
is_refund=self.type in ('out_refund', 'in_refund'),
handle_price_include=handle_price_include,
)
if base_line.currency_id:
# Multi-currencies mode: Taxes are computed both in company's currency / foreign currency.
amount_currency_taxes_res = base_line.tax_ids._origin.compute_all(
price_unit_foreign_curr,
currency=base_line.currency_id,
quantity=quantity,
product=base_line.product_id,
partner=base_line.partner_id,
is_refund=self.type in ('out_refund', 'in_refund'),
)
for b_tax_res, ac_tax_res in zip(balance_taxes_res['taxes'], amount_currency_taxes_res['taxes']):
tax = self.env['account.tax'].browse(b_tax_res['id'])
b_tax_res['amount_currency'] = ac_tax_res['amount']
# A tax having a fixed amount must be converted into the company currency when dealing with a
# foreign currency.
if tax.amount_type == 'fixed':
b_tax_res['amount'] = base_line.currency_id._convert(b_tax_res['amount'], move.company_id.currency_id, move.company_id, move.date)
return balance_taxes_res
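# taxes_map maps a serialized grouping key to the (possibly pre-existing) tax line of that group together
# with the accumulated balance, amount_currency and tax_base_amount.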
taxes_map = {}
# ==== Add tax lines ====
to_remove = self.env['account.move.line']
for line in self.line_ids.filtered('tax_repartition_line_id'):
grouping_dict = self._get_tax_grouping_key_from_tax_line(line)
grouping_key = _serialize_tax_grouping_key(grouping_dict)
if grouping_key in taxes_map:
# A line with the same key already exists; we only need one line per key
# to update, so drop this duplicate.
to_remove += line
else:
taxes_map[grouping_key] = {
'tax_line': line,
'balance': 0.0,
'amount_currency': 0.0,
'tax_base_amount': 0.0,
'grouping_dict': False,
}
self.line_ids -= to_remove
# ==== Mount base lines ====
for line in self.line_ids.filtered(lambda line: not line.tax_repartition_line_id):
# Don't call compute_all if there is no tax.
if not line.tax_ids:
line.tag_ids = [(5, 0, 0)]
continue
compute_all_vals = _compute_base_line_taxes(line)
# Assign tags on base line
line.tag_ids = compute_all_vals['base_tags']
tax_exigible = True
for tax_vals in compute_all_vals['taxes']:
grouping_dict = self._get_tax_grouping_key_from_base_line(line, tax_vals)
grouping_key = _serialize_tax_grouping_key(grouping_dict)
tax_repartition_line = self.env['account.tax.repartition.line'].browse(tax_vals['tax_repartition_line_id'])
tax = tax_repartition_line.invoice_tax_id or tax_repartition_line.refund_tax_id
if tax.tax_exigibility == 'on_payment':
tax_exigible = False
taxes_map_entry = taxes_map.setdefault(grouping_key, {
'tax_line': None,
'balance': 0.0,
'amount_currency': 0.0,
'tax_base_amount': 0.0,
'grouping_dict': False,
})
taxes_map_entry['balance'] += tax_vals['amount']
taxes_map_entry['amount_currency'] += tax_vals.get('amount_currency', 0.0)
taxes_map_entry['tax_base_amount'] += tax_vals['base']
taxes_map_entry['grouping_dict'] = grouping_dict
line.tax_exigible = tax_exigible
# ==== Process taxes_map ====
for taxes_map_entry in taxes_map.values():
# Don't create tax lines with zero balance.
if self.currency_id.is_zero(taxes_map_entry['balance']) and self.currency_id.is_zero(taxes_map_entry['amount_currency']):
taxes_map_entry['grouping_dict'] = False
tax_line = taxes_map_entry['tax_line']
tax_base_amount = -taxes_map_entry['tax_base_amount'] if self.is_inbound() else taxes_map_entry['tax_base_amount']
if not tax_line and not taxes_map_entry['grouping_dict']:
continue
elif tax_line and recompute_tax_base_amount:
tax_line.tax_base_amount = tax_base_amount
elif tax_line and not taxes_map_entry['grouping_dict']:
# The tax line is no longer used, drop it.
self.line_ids -= tax_line
elif tax_line:
tax_line.update({
'amount_currency': taxes_map_entry['amount_currency'],
'debit': taxes_map_entry['balance'] > 0.0 and taxes_map_entry['balance'] or 0.0,
'credit': taxes_map_entry['balance'] < 0.0 and -taxes_map_entry['balance'] or 0.0,
'tax_base_amount': tax_base_amount,
})
else:
create_method = in_draft_mode and self.env['account.move.line'].new or self.env['account.move.line'].create
tax_repartition_line_id = taxes_map_entry['grouping_dict']['tax_repartition_line_id']
tax_repartition_line = self.env['account.tax.repartition.line'].browse(tax_repartition_line_id)
tax = tax_repartition_line.invoice_tax_id or tax_repartition_line.refund_tax_id
tax_line = create_method({
'name': tax.name,
'move_id': self.id,
'partner_id': line.partner_id.id,
'company_id': line.company_id.id,
'company_currency_id': line.company_currency_id.id,
'quantity': 1.0,
'date_maturity': False,
'amount_currency': taxes_map_entry['amount_currency'],
'debit': taxes_map_entry['balance'] > 0.0 and taxes_map_entry['balance'] or 0.0,
'credit': taxes_map_entry['balance'] < 0.0 and -taxes_map_entry['balance'] or 0.0,
'tax_base_amount': tax_base_amount,
'exclude_from_invoice_tab': True,
'tax_exigible': tax.tax_exigibility == 'on_invoice',
**taxes_map_entry['grouping_dict'],
})
if in_draft_mode:
tax_line._onchange_amount_currency()
tax_line._onchange_balance()
def _recompute_cash_rounding_lines(self):
''' Handle the cash rounding feature on invoices.
In some countries, the smallest coins do not exist. For example, in Switzerland, there is no coin for 0.01 CHF.
For this reason, if invoices are paid in cash, you have to round their total amount to the smallest coin that
exists in the currency. For the CHF, the smallest coin is 0.05 CHF.
There are two strategies for the rounding:
1) Add a line on the invoice for the rounding: The cash rounding line is added as a new invoice line.
2) Add the rounding in the biggest tax amount: The cash rounding line is added as a new tax line on the tax
having the biggest balance.
'''
self.ensure_one()
in_draft_mode = self != self._origin
def _compute_cash_rounding(self, total_balance, total_amount_currency):
''' Compute the amount differences due to the cash rounding.
:param self: The current account.move record.
:param total_balance: The invoice's total in company's currency.
:param total_amount_currency: The invoice's total in invoice's currency.
:return: The amount differences both in company's currency & invoice's currency.
'''
if self.currency_id == self.company_id.currency_id:
diff_balance = self.invoice_cash_rounding_id.compute_difference(self.currency_id, total_balance)
diff_amount_currency = 0.0
else:
diff_amount_currency = self.invoice_cash_rounding_id.compute_difference(self.currency_id, total_amount_currency)
diff_balance = self.currency_id._convert(diff_amount_currency, self.company_id.currency_id, self.company_id, self.date)
return diff_balance, diff_amount_currency
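# Illustrative example for _compute_cash_rounding above: with a 0.05 rounding precision, a 100.03 CHF
# total would typically yield a 0.02 difference (the exact value depends on the configured rounding method).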
def _apply_cash_rounding(self, diff_balance, diff_amount_currency, cash_rounding_line):
''' Apply the cash rounding.
:param self: The current account.move record.
:param diff_balance: The computed balance to set on the new rounding line.
:param diff_amount_currency: The computed amount in invoice's currency to set on the new rounding line.
:param cash_rounding_line: The existing cash rounding line.
:return: The newly created rounding line.
'''
rounding_line_vals = {
'debit': diff_balance > 0.0 and diff_balance or 0.0,
'credit': diff_balance < 0.0 and -diff_balance or 0.0,
'quantity': 1.0,
'amount_currency': diff_amount_currency,
'partner_id': self.partner_id.id,
'move_id': self.id,
'currency_id': self.currency_id if self.currency_id != self.company_id.currency_id else False,
'company_id': self.company_id.id,
'company_currency_id': self.company_id.currency_id.id,
'is_rounding_line': True,
'sequence': 9999,
}
if self.invoice_cash_rounding_id.strategy == 'biggest_tax':
biggest_tax_line = None
for tax_line in self.line_ids.filtered('tax_repartition_line_id'):
if not biggest_tax_line or tax_line.price_subtotal > biggest_tax_line.price_subtotal:
biggest_tax_line = tax_line
# No tax found.
if not biggest_tax_line:
return
rounding_line_vals.update({
'name': _('%s (rounding)') % biggest_tax_line.name,
'account_id': biggest_tax_line.account_id.id,
'tax_repartition_line_id': biggest_tax_line.tax_repartition_line_id.id,
'tax_exigible': biggest_tax_line.tax_exigible,
'exclude_from_invoice_tab': True,
})
elif self.invoice_cash_rounding_id.strategy == 'add_invoice_line':
if diff_balance > 0.0:
account_id = self.invoice_cash_rounding_id._get_loss_account_id().id
else:
account_id = self.invoice_cash_rounding_id._get_profit_account_id().id
rounding_line_vals.update({
'name': self.invoice_cash_rounding_id.name,
'account_id': account_id,
})
# Create or update the cash rounding line.
if cash_rounding_line:
cash_rounding_line.update({
'amount_currency': rounding_line_vals['amount_currency'],
'debit': rounding_line_vals['debit'],
'credit': rounding_line_vals['credit'],
'account_id': rounding_line_vals['account_id'],
})
else:
create_method = in_draft_mode and self.env['account.move.line'].new or self.env['account.move.line'].create
cash_rounding_line = create_method(rounding_line_vals)
if in_draft_mode:
cash_rounding_line._onchange_amount_currency()
cash_rounding_line._onchange_balance()
existing_cash_rounding_line = self.line_ids.filtered(lambda line: line.is_rounding_line)
# The cash rounding has been removed.
if not self.invoice_cash_rounding_id:
self.line_ids -= existing_cash_rounding_line
return
# The cash rounding strategy has changed.
if self.invoice_cash_rounding_id and existing_cash_rounding_line:
strategy = self.invoice_cash_rounding_id.strategy
old_strategy = 'biggest_tax' if existing_cash_rounding_line.tax_line_id else 'add_invoice_line'
if strategy != old_strategy:
self.line_ids -= existing_cash_rounding_line
existing_cash_rounding_line = self.env['account.move.line']
others_lines = self.line_ids.filtered(lambda line: line.account_id.user_type_id.type not in ('receivable', 'payable'))
others_lines -= existing_cash_rounding_line
total_balance = sum(others_lines.mapped('balance'))
total_amount_currency = sum(others_lines.mapped('amount_currency'))
diff_balance, diff_amount_currency = _compute_cash_rounding(self, total_balance, total_amount_currency)
# The invoice is already rounded.
if self.currency_id.is_zero(diff_balance) and self.currency_id.is_zero(diff_amount_currency):
self.line_ids -= existing_cash_rounding_line
return
_apply_cash_rounding(self, diff_balance, diff_amount_currency, existing_cash_rounding_line)
def _recompute_payment_terms_lines(self):
''' Compute the dynamic payment term lines of the journal entry.'''
self.ensure_one()
in_draft_mode = self != self._origin
today = fields.Date.context_today(self)
self = self.with_context(force_company=self.journal_id.company_id.id)
def _get_payment_terms_computation_date(self):
''' Get the date from invoice that will be used to compute the payment terms.
:param self: The current account.move record.
:return: A datetime.date object.
'''
if self.invoice_payment_term_id:
return self.invoice_date or today
else:
return self.invoice_date_due or self.invoice_date or today
def _get_payment_terms_account(self, payment_terms_lines):
''' Get the account from invoice that will be set as receivable / payable account.
:param self: The current account.move record.
:param payment_terms_lines: The current payment terms lines.
:return: An account.account record.
'''
if payment_terms_lines:
# Retrieve account from previous payment terms lines in order to allow the user to set a custom one.
return payment_terms_lines[0].account_id
elif self.partner_id:
# Retrieve account from partner.
if self.is_sale_document(include_receipts=True):
return self.partner_id.property_account_receivable_id
else:
return self.partner_id.property_account_payable_id
else:
# Search new account.
domain = [
('company_id', '=', self.company_id.id),
('internal_type', '=', 'receivable' if self.type in ('out_invoice', 'out_refund', 'out_receipt') else 'payable'),
]
return self.env['account.account'].search(domain, limit=1)
def _compute_payment_terms(self, date, total_balance, total_amount_currency):
''' Compute the payment terms.
:param self: The current account.move record.
:param date: The date computed by '_get_payment_terms_computation_date'.
:param total_balance: The invoice's total in company's currency.
:param total_amount_currency: The invoice's total in invoice's currency.
:return: A list of tuples <due_date, amount_in_company_currency, amount_in_invoice_currency>.
'''
if self.invoice_payment_term_id:
to_compute = self.invoice_payment_term_id.compute(total_balance, date_ref=date, currency=self.currency_id)
if self.currency_id != self.company_id.currency_id:
# Multi-currencies.
to_compute_currency = self.invoice_payment_term_id.compute(total_amount_currency, date_ref=date, currency=self.currency_id)
return [(b[0], b[1], ac[1]) for b, ac in zip(to_compute, to_compute_currency)]
else:
# Single-currency.
return [(b[0], b[1], 0.0) for b in to_compute]
else:
return [(fields.Date.to_string(date), total_balance, total_amount_currency)]
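# Illustrative shape of the result above: a 50%-now / 50%-in-30-days payment term would yield two
# (due_date, amount, amount_currency) tuples, one per installment.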
def _compute_diff_payment_terms_lines(self, existing_terms_lines, account, to_compute):
''' Process the result of the '_compute_payment_terms' method and creates/updates corresponding invoice lines.
:param self: The current account.move record.
:param existing_terms_lines: The current payment terms lines.
:param account: The account.account record returned by '_get_payment_terms_account'.
:param to_compute: The list returned by '_compute_payment_terms'.
'''
# As we try to update existing lines, sort them by due date.
existing_terms_lines = existing_terms_lines.sorted(lambda line: line.date_maturity or today)
existing_terms_lines_index = 0
# Recompute amls: update existing line or create new one for each payment term.
new_terms_lines = self.env['account.move.line']
for date_maturity, balance, amount_currency in to_compute:
if self.journal_id.company_id.currency_id.is_zero(balance) and len(to_compute) > 1:
continue
if existing_terms_lines_index < len(existing_terms_lines):
# Update existing line.
candidate = existing_terms_lines[existing_terms_lines_index]
existing_terms_lines_index += 1
candidate.update({
'date_maturity': date_maturity,
'amount_currency': -amount_currency,
'debit': balance < 0.0 and -balance or 0.0,
'credit': balance > 0.0 and balance or 0.0,
})
else:
# Create new line.
create_method = in_draft_mode and self.env['account.move.line'].new or self.env['account.move.line'].create
candidate = create_method({
'name': self.invoice_payment_ref or '',
'debit': balance < 0.0 and -balance or 0.0,
'credit': balance > 0.0 and balance or 0.0,
'quantity': 1.0,
'amount_currency': -amount_currency,
'date_maturity': date_maturity,
'move_id': self.id,
'currency_id': self.currency_id.id if self.currency_id != self.company_id.currency_id else False,
'account_id': account.id,
'partner_id': self.commercial_partner_id.id,
'exclude_from_invoice_tab': True,
})
new_terms_lines += candidate
if in_draft_mode:
candidate._onchange_amount_currency()
candidate._onchange_balance()
return new_terms_lines
existing_terms_lines = self.line_ids.filtered(lambda line: line.account_id.user_type_id.type in ('receivable', 'payable'))
others_lines = self.line_ids.filtered(lambda line: line.account_id.user_type_id.type not in ('receivable', 'payable'))
company_currency_id = self.company_id.currency_id
total_balance = sum(others_lines.mapped(lambda l: company_currency_id.round(l.balance)))
total_amount_currency = sum(others_lines.mapped('amount_currency'))
if not others_lines:
self.line_ids -= existing_terms_lines
return
computation_date = _get_payment_terms_computation_date(self)
account = _get_payment_terms_account(self, existing_terms_lines)
to_compute = _compute_payment_terms(self, computation_date, total_balance, total_amount_currency)
new_terms_lines = _compute_diff_payment_terms_lines(self, existing_terms_lines, account, to_compute)
# Remove old terms lines that are no longer needed.
self.line_ids -= existing_terms_lines - new_terms_lines
if new_terms_lines:
self.invoice_payment_ref = new_terms_lines[-1].name or ''
self.invoice_date_due = new_terms_lines[-1].date_maturity
def _recompute_dynamic_lines(self, recompute_all_taxes=False, recompute_tax_base_amount=False):
''' Recompute all lines that depend on others.
For example, tax lines depend on base lines (lines having tax_ids set). This is also the case of cash rounding
lines that depend on base lines or tax lines, depending on the cash rounding strategy. When a payment term is set,
this method will auto-balance the move with payment term lines.
:param recompute_all_taxes: Force the computation of taxes. If set to False, the computation will be done
or not depending on the field 'recompute_tax_line' in lines.
:param recompute_tax_base_amount: Force the recomputation of the tax_base_amount on existing tax lines.
'''
for invoice in self:
# Dispatch lines and pre-compute some aggregated values like taxes.
for line in invoice.line_ids:
if line.recompute_tax_line:
recompute_all_taxes = True
line.recompute_tax_line = False
# Compute taxes.
if recompute_all_taxes:
invoice._recompute_tax_lines()
if recompute_tax_base_amount:
invoice._recompute_tax_lines(recompute_tax_base_amount=True)
if invoice.is_invoice(include_receipts=True):
# Compute cash rounding.
invoice._recompute_cash_rounding_lines()
# Compute payment terms.
invoice._recompute_payment_terms_lines()
# Only synchronize one2many in onchange.
if invoice != invoice._origin:
invoice.invoice_line_ids = invoice.line_ids.filtered(lambda line: not line.exclude_from_invoice_tab)
def _get_lines_onchange_currency(self):
# Override needed for COGS
return self.line_ids
def onchange(self, values, field_name, field_onchange):
# OVERRIDE
# As the dynamic lines in this model are quite complex, we need to ensure some computations are done exactly
# at the beginning / at the end of the onchange mechanism. So, the onchange recursivity is disabled.
return super(AccountMove, self.with_context(recursive_onchanges=False)).onchange(values, field_name, field_onchange)
# -------------------------------------------------------------------------
# COMPUTE METHODS
# -------------------------------------------------------------------------
@api.depends('type')
def _compute_type_name(self):
type_name_mapping = {k: v for k, v in
self._fields['type']._description_selection(self.env)}
replacements = {'out_invoice': _('Invoice'), 'out_refund': _('Credit Note')}
for record in self:
name = type_name_mapping[record.type]
record.type_name = replacements.get(record.type, name)
@api.depends('type')
def _compute_invoice_filter_type_domain(self):
for move in self:
if move.is_sale_document(include_receipts=True):
move.invoice_filter_type_domain = 'sale'
elif move.is_purchase_document(include_receipts=True):
move.invoice_filter_type_domain = 'purchase'
else:
move.invoice_filter_type_domain = False
@api.depends('partner_id')
def _compute_commercial_partner_id(self):
for move in self:
move.commercial_partner_id = move.partner_id.commercial_partner_id
@api.depends('commercial_partner_id')
def _compute_bank_partner_id(self):
for move in self:
if move.is_outbound():
move.bank_partner_id = move.commercial_partner_id
else:
move.bank_partner_id = move.company_id.partner_id
@api.depends(
'line_ids.debit',
'line_ids.credit',
'line_ids.currency_id',
'line_ids.amount_currency',
'line_ids.amount_residual',
'line_ids.amount_residual_currency',
'line_ids.payment_id.state')
def _compute_amount(self):
invoice_ids = [move.id for move in self if move.id and move.is_invoice(include_receipts=True)]
self.env['account.payment'].flush(['state'])
if invoice_ids:
self._cr.execute(
'''
SELECT move.id
FROM account_move move
JOIN account_move_line line ON line.move_id = move.id
JOIN account_partial_reconcile part ON part.debit_move_id = line.id OR part.credit_move_id = line.id
JOIN account_move_line rec_line ON
(rec_line.id = part.debit_move_id AND line.id = part.credit_move_id)
JOIN account_payment payment ON payment.id = rec_line.payment_id
JOIN account_journal journal ON journal.id = rec_line.journal_id
WHERE payment.state IN ('posted', 'sent')
AND journal.post_at = 'bank_rec'
AND move.id IN %s
UNION
SELECT move.id
FROM account_move move
JOIN account_move_line line ON line.move_id = move.id
JOIN account_partial_reconcile part ON part.debit_move_id = line.id OR part.credit_move_id = line.id
JOIN account_move_line rec_line ON
(rec_line.id = part.credit_move_id AND line.id = part.debit_move_id)
JOIN account_payment payment ON payment.id = rec_line.payment_id
JOIN account_journal journal ON journal.id = rec_line.journal_id
WHERE payment.state IN ('posted', 'sent')
AND journal.post_at = 'bank_rec'
AND move.id IN %s
''', [tuple(invoice_ids), tuple(invoice_ids)]
)
in_payment_set = set(res[0] for res in self._cr.fetchall())
else:
in_payment_set = set()
for move in self:
total_untaxed = 0.0
total_untaxed_currency = 0.0
total_tax = 0.0
total_tax_currency = 0.0
total_residual = 0.0
total_residual_currency = 0.0
total = 0.0
total_currency = 0.0
currencies = set()
for line in move.line_ids:
if line.currency_id:
currencies.add(line.currency_id)
if move.is_invoice(include_receipts=True):
# === Invoices ===
if not line.exclude_from_invoice_tab:
# Untaxed amount.
total_untaxed += line.balance
total_untaxed_currency += line.amount_currency
total += line.balance
total_currency += line.amount_currency
elif line.tax_line_id:
# Tax amount.
total_tax += line.balance
total_tax_currency += line.amount_currency
total += line.balance
total_currency += line.amount_currency
elif line.account_id.user_type_id.type in ('receivable', 'payable'):
# Residual amount.
total_residual += line.amount_residual
total_residual_currency += line.amount_residual_currency
else:
# === Miscellaneous journal entry ===
if line.debit:
total += line.balance
total_currency += line.amount_currency
if move.type == 'entry' or move.is_outbound():
sign = 1
else:
sign = -1
move.amount_untaxed = sign * (total_untaxed_currency if len(currencies) == 1 else total_untaxed)
move.amount_tax = sign * (total_tax_currency if len(currencies) == 1 else total_tax)
move.amount_total = sign * (total_currency if len(currencies) == 1 else total)
move.amount_residual = -sign * (total_residual_currency if len(currencies) == 1 else total_residual)
move.amount_untaxed_signed = -total_untaxed
move.amount_tax_signed = -total_tax
move.amount_total_signed = abs(total) if move.type == 'entry' else -total
move.amount_residual_signed = total_residual
currency = len(currencies) == 1 and currencies.pop() or move.company_id.currency_id
is_paid = currency and currency.is_zero(move.amount_residual) or not move.amount_residual
# Compute 'invoice_payment_state'.
if move.type == 'entry':
move.invoice_payment_state = False
elif move.state == 'posted' and is_paid:
if move.id in in_payment_set:
move.invoice_payment_state = 'in_payment'
else:
move.invoice_payment_state = 'paid'
else:
move.invoice_payment_state = 'not_paid'
def _inverse_amount_total(self):
for move in self:
if len(move.line_ids) != 2 or move.is_invoice(include_receipts=True):
continue
to_write = []
if move.currency_id != move.company_id.currency_id:
amount_currency = abs(move.amount_total)
balance = move.currency_id._convert(amount_currency, move.company_currency_id, move.company_id, move.date)
else:
balance = abs(move.amount_total)
amount_currency = 0.0
for line in move.line_ids:
if float_compare(abs(line.balance), balance, precision_rounding=move.currency_id.rounding) != 0:
to_write.append((1, line.id, {
'debit': line.balance > 0.0 and balance or 0.0,
'credit': line.balance < 0.0 and balance or 0.0,
'amount_currency': line.balance > 0.0 and amount_currency or -amount_currency,
}))
move.write({'line_ids': to_write})
def _get_domain_matching_suspense_moves(self):
self.ensure_one()
domain = self.env['account.move.line']._get_suspense_moves_domain()
domain += ['|', ('partner_id', '=?', self.partner_id.id), ('partner_id', '=', False)]
if self.is_inbound():
domain.append(('balance', '=', -self.amount_residual))
else:
domain.append(('balance', '=', self.amount_residual))
return domain
def _compute_has_matching_suspense_amount(self):
for r in self:
res = False
if r.state == 'posted' and r.is_invoice() and r.invoice_payment_state == 'not_paid':
domain = r._get_domain_matching_suspense_moves()
# there is at least one, but fewer than 5, suspense moves matching the residual amount
if (0 < self.env['account.move.line'].search_count(domain) < 5):
domain2 = [
('invoice_payment_state', '=', 'not_paid'),
('state', '=', 'posted'),
('amount_residual', '=', r.amount_residual),
('type', '=', r.type)]
# there are fewer than 5 other open invoices of the same type with the same residual
if self.env['account.move'].search_count(domain2) < 5:
res = True
r.invoice_has_matching_suspense_amount = res
@api.depends('partner_id', 'invoice_source_email')
def _compute_invoice_partner_display_info(self):
for move in self:
vendor_display_name = move.partner_id.display_name
if not vendor_display_name:
if move.invoice_source_email:
vendor_display_name = _('From: ') + move.invoice_source_email
move.invoice_partner_icon = '@'
else:
vendor_display_name = _('Created by: %s') % (move.sudo().create_uid.name or self.env.user.name)
move.invoice_partner_icon = '#'
else:
move.invoice_partner_icon = False
move.invoice_partner_display_name = vendor_display_name
@api.depends('state', 'journal_id', 'date', 'invoice_date')
def _compute_invoice_sequence_number_next(self):
""" computes the prefix of the number that will be assigned to the first invoice/bill/refund of a journal, in order to
let the user manually change it.
"""
# Check user group.
system_user = self.env.is_system()
if not system_user:
self.invoice_sequence_number_next_prefix = False
self.invoice_sequence_number_next = False
return
# Check moves being candidates to set a custom number next.
moves = self.filtered(lambda move: move.is_invoice() and move.name == '/')
if not moves:
self.invoice_sequence_number_next_prefix = False
self.invoice_sequence_number_next = False
return
treated = self.browse()
for key, group in groupby(moves, key=lambda move: (move.journal_id, move._get_sequence())):
journal, sequence = key
domain = [('journal_id', '=', journal.id), ('state', '=', 'posted')]
if self.ids:
domain.append(('id', 'not in', self.ids))
if journal.type == 'sale':
domain.append(('type', 'in', ('out_invoice', 'out_refund')))
elif journal.type == 'purchase':
domain.append(('type', 'in', ('in_invoice', 'in_refund')))
else:
continue
if self.search_count(domain):
continue
for move in group:
sequence_date = move.date or move.invoice_date
prefix, dummy = sequence._get_prefix_suffix(date=sequence_date, date_range=sequence_date)
number_next = sequence._get_current_sequence(sequence_date=sequence_date).number_next_actual
move.invoice_sequence_number_next_prefix = prefix
move.invoice_sequence_number_next = '%%0%sd' % sequence.padding % number_next
treated |= move
remaining = (self - treated)
remaining.invoice_sequence_number_next_prefix = False
remaining.invoice_sequence_number_next = False
def _inverse_invoice_sequence_number_next(self):
''' Set the number_next on the sequence related to the invoice/bill/refund'''
# Check user group.
if not self.env.is_admin():
return
# Set the next number in the sequence.
for move in self:
if not move.invoice_sequence_number_next:
continue
sequence = move._get_sequence()
nxt = re.sub("[^0-9]", '', move.invoice_sequence_number_next)
result = re.match("(0*)([0-9]+)", nxt)
if result and sequence:
sequence_date = move.date or move.invoice_date
date_sequence = sequence._get_current_sequence(sequence_date=sequence_date)
date_sequence.number_next_actual = int(result.group(2))
def _compute_payments_widget_to_reconcile_info(self):
for move in self:
move.invoice_outstanding_credits_debits_widget = json.dumps(False)
move.invoice_has_outstanding = False
if move.state != 'posted' or move.invoice_payment_state != 'not_paid' or not move.is_invoice(include_receipts=True):
continue
pay_term_line_ids = move.line_ids.filtered(lambda line: line.account_id.user_type_id.type in ('receivable', 'payable'))
domain = [('account_id', 'in', pay_term_line_ids.mapped('account_id').ids),
'|', ('move_id.state', '=', 'posted'), '&', ('move_id.state', '=', 'draft'), ('journal_id.post_at', '=', 'bank_rec'),
('partner_id', '=', move.commercial_partner_id.id),
('reconciled', '=', False), '|', ('amount_residual', '!=', 0.0),
('amount_residual_currency', '!=', 0.0)]
if move.is_inbound():
domain.extend([('credit', '>', 0), ('debit', '=', 0)])
type_payment = _('Outstanding credits')
else:
domain.extend([('credit', '=', 0), ('debit', '>', 0)])
type_payment = _('Outstanding debits')
info = {'title': '', 'outstanding': True, 'content': [], 'move_id': move.id}
lines = self.env['account.move.line'].search(domain)
currency_id = move.currency_id
if len(lines) != 0:
for line in lines:
# get the outstanding residual value in invoice currency
if line.currency_id and line.currency_id == move.currency_id:
amount_to_show = abs(line.amount_residual_currency)
else:
currency = line.company_id.currency_id
amount_to_show = currency._convert(abs(line.amount_residual), move.currency_id, move.company_id,
line.date or fields.Date.today())
if float_is_zero(amount_to_show, precision_rounding=move.currency_id.rounding):
continue
info['content'].append({
'journal_name': line.ref or line.move_id.name,
'amount': amount_to_show,
'currency': currency_id.symbol,
'id': line.id,
'position': currency_id.position,
'digits': [69, move.currency_id.decimal_places],
'payment_date': fields.Date.to_string(line.date),
})
info['title'] = type_payment
move.invoice_outstanding_credits_debits_widget = json.dumps(info)
move.invoice_has_outstanding = True
def _get_reconciled_info_JSON_values(self):
self.ensure_one()
foreign_currency = self.currency_id if self.currency_id != self.company_id.currency_id else False
reconciled_vals = []
pay_term_line_ids = self.line_ids.filtered(lambda line: line.account_id.user_type_id.type in ('receivable', 'payable'))
partials = pay_term_line_ids.mapped('matched_debit_ids') + pay_term_line_ids.mapped('matched_credit_ids')
for partial in partials:
counterpart_lines = partial.debit_move_id + partial.credit_move_id
counterpart_line = counterpart_lines.filtered(lambda line: line not in self.line_ids)
if foreign_currency and partial.currency_id == foreign_currency:
amount = partial.amount_currency
else:
amount = partial.company_currency_id._convert(partial.amount, self.currency_id, self.company_id, self.date)
if float_is_zero(amount, precision_rounding=self.currency_id.rounding):
continue
ref = counterpart_line.move_id.name
if counterpart_line.move_id.ref:
ref += ' (' + counterpart_line.move_id.ref + ')'
reconciled_vals.append({
'name': counterpart_line.name,
'journal_name': counterpart_line.journal_id.name,
'amount': amount,
'currency': self.currency_id.symbol,
'digits': [69, self.currency_id.decimal_places],
'position': self.currency_id.position,
'date': counterpart_line.date,
'payment_id': counterpart_line.id,
'account_payment_id': counterpart_line.payment_id.id,
'payment_method_name': counterpart_line.payment_id.payment_method_id.name if counterpart_line.journal_id.type == 'bank' else None,
'move_id': counterpart_line.move_id.id,
'ref': ref,
})
return reconciled_vals
@api.depends('type', 'line_ids.amount_residual')
def _compute_payments_widget_reconciled_info(self):
for move in self:
if move.state != 'posted' or not move.is_invoice(include_receipts=True):
move.invoice_payments_widget = json.dumps(False)
continue
reconciled_vals = move._get_reconciled_info_JSON_values()
if reconciled_vals:
info = {
'title': _('Less Payment'),
'outstanding': False,
'content': reconciled_vals,
}
move.invoice_payments_widget = json.dumps(info, default=date_utils.json_default)
else:
move.invoice_payments_widget = json.dumps(False)
@api.depends('line_ids.price_subtotal', 'line_ids.tax_base_amount', 'line_ids.tax_line_id', 'partner_id', 'currency_id')
def _compute_invoice_taxes_by_group(self):
''' Helper to get the taxes grouped according their account.tax.group.
This method is only used when printing the invoice.
'''
for move in self:
lang_env = move.with_context(lang=move.partner_id.lang).env
tax_lines = move.line_ids.filtered(lambda line: line.tax_line_id)
tax_balance_multiplicator = -1 if move.is_inbound(True) else 1
res = {}
# There are as many tax lines as there are repartition lines
done_taxes = set()
for line in tax_lines:
res.setdefault(line.tax_line_id.tax_group_id, {'base': 0.0, 'amount': 0.0})
res[line.tax_line_id.tax_group_id]['amount'] += tax_balance_multiplicator * (line.amount_currency if line.currency_id else line.balance)
tax_key_add_base = tuple(move._get_tax_key_for_group_add_base(line))
if tax_key_add_base not in done_taxes:
if line.currency_id and line.company_currency_id and line.currency_id != line.company_currency_id:
amount = line.company_currency_id._convert(line.tax_base_amount, line.currency_id, line.company_id, line.date or fields.Date.today())
else:
amount = line.tax_base_amount
res[line.tax_line_id.tax_group_id]['base'] += amount
# The base should be added ONCE
done_taxes.add(tax_key_add_base)
# At this point we only want to keep the taxes with a zero amount since they do not
# generate a tax line.
for line in move.line_ids:
for tax in line.tax_ids.flatten_taxes_hierarchy().filtered(lambda t: t.amount == 0.0):
res.setdefault(tax.tax_group_id, {'base': 0.0, 'amount': 0.0})
res[tax.tax_group_id]['base'] += tax_balance_multiplicator * (line.amount_currency if line.currency_id else line.balance)
res = sorted(res.items(), key=lambda l: l[0].sequence)
move.amount_by_group = [(
group.name, amounts['amount'],
amounts['base'],
formatLang(lang_env, amounts['amount'], currency_obj=move.currency_id),
formatLang(lang_env, amounts['base'], currency_obj=move.currency_id),
len(res),
group.id
) for group, amounts in res]
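# Each entry above is (group name, tax amount, tax base, formatted amount, formatted base, number of groups,
# group id): the structure used when printing the invoice, as stated in the docstring of this method.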
@api.model
def _get_tax_key_for_group_add_base(self, line):
"""
Useful for _compute_invoice_taxes_by_group
must be consistent with _get_tax_grouping_key_from_tax_line
@return list
"""
return [line.tax_line_id.id]
@api.depends('date', 'line_ids.debit', 'line_ids.credit', 'line_ids.tax_line_id', 'line_ids.tax_ids', 'line_ids.tag_ids')
def _compute_tax_lock_date_message(self):
for move in self:
if move._affect_tax_report() and move.company_id.tax_lock_date and move.date and move.date <= move.company_id.tax_lock_date:
move.tax_lock_date_message = _(
"The accounting date is prior to the tax lock date which is set on %s. "
"It will therefore be moved to the next available date during the invoice validation."
) % format_date(self.env, move.company_id.tax_lock_date)
else:
move.tax_lock_date_message = False
# -------------------------------------------------------------------------
# CONSTRAINS METHODS
# -------------------------------------------------------------------------
@api.constrains('line_ids', 'journal_id')
def _validate_move_modification(self):
if 'posted' in self.mapped('line_ids.payment_id.state'):
raise ValidationError(_("You cannot modify a journal entry linked to a posted payment."))
@api.constrains('name', 'journal_id', 'state')
def _check_unique_sequence_number(self):
moves = self.filtered(lambda move: move.state == 'posted')
if not moves:
return
self.flush()
# /!\ Computed stored fields are not yet inside the database.
self._cr.execute('''
SELECT move2.id
FROM account_move move
INNER JOIN account_move move2 ON
move2.name = move.name
AND move2.journal_id = move.journal_id
AND move2.type = move.type
AND move2.id != move.id
WHERE move.id IN %s AND move2.state = 'posted'
''', [tuple(moves.ids)])
res = self._cr.fetchone()
if res:
raise ValidationError(_('Posted journal entry must have a unique sequence number per company.'))
@api.constrains('ref', 'type', 'partner_id', 'journal_id', 'invoice_date')
def _check_duplicate_supplier_reference(self):
moves = self.filtered(lambda move: move.is_purchase_document() and move.ref)
if not moves:
return
self.env["account.move"].flush([
"ref", "type", "invoice_date", "journal_id",
"company_id", "partner_id", "commercial_partner_id",
])
self.env["account.journal"].flush(["company_id"])
self.env["res.partner"].flush(["commercial_partner_id"])
# /!\ Computed stored fields are not yet inside the database.
self._cr.execute('''
SELECT move2.id
FROM account_move move
JOIN account_journal journal ON journal.id = move.journal_id
JOIN res_partner partner ON partner.id = move.partner_id
INNER JOIN account_move move2 ON
move2.ref = move.ref
AND move2.company_id = journal.company_id
AND move2.commercial_partner_id = partner.commercial_partner_id
AND move2.type = move.type
AND (move.invoice_date is NULL OR move2.invoice_date = move.invoice_date)
AND move2.id != move.id
WHERE move.id IN %s
''', [tuple(moves.ids)])
duplicated_moves = self.browse([r[0] for r in self._cr.fetchall()])
if duplicated_moves:
raise ValidationError(_('Duplicated vendor reference detected. You probably encoded twice the same vendor bill/credit note:\n%s') % "\n".join(
duplicated_moves.mapped(lambda m: "%(partner)s - %(ref)s - %(date)s" % {'ref': m.ref, 'partner': m.partner_id.display_name, 'date': format_date(self.env, m.date)})
))
def _check_balanced(self):
''' Assert the move is fully balanced debit = credit.
An error is raised if it's not the case.
'''
moves = self.filtered(lambda move: move.line_ids)
if not moves:
return
# /!\ As this method is called in create / write, we cannot assume that the computed stored fields
# are already up to date. Therefore, this query MUST NOT depend on computed stored fields (e.g. balance).
# This happens because the ORM performs the create with the 'no_recompute' statement.
self.env['account.move.line'].flush(['debit', 'credit', 'move_id'])
self.env['account.move'].flush(['journal_id'])
self._cr.execute('''
SELECT line.move_id, ROUND(SUM(line.debit - line.credit), currency.decimal_places)
FROM account_move_line line
JOIN account_move move ON move.id = line.move_id
JOIN account_journal journal ON journal.id = move.journal_id
JOIN res_company company ON company.id = journal.company_id
JOIN res_currency currency ON currency.id = company.currency_id
WHERE line.move_id IN %s
GROUP BY line.move_id, currency.decimal_places
HAVING ROUND(SUM(line.debit - line.credit), currency.decimal_places) != 0.0;
''', [tuple(self.ids)])
query_res = self._cr.fetchall()
if query_res:
ids = [res[0] for res in query_res]
sums = [res[1] for res in query_res]
raise UserError(_("Cannot create unbalanced journal entry. Ids: %s\nDifferences debit - credit: %s") % (ids, sums))
def _check_fiscalyear_lock_date(self):
for move in self.filtered(lambda move: move.state == 'posted'):
lock_date = max(move.company_id.period_lock_date or date.min, move.company_id.fiscalyear_lock_date or date.min)
if self.user_has_groups('account.group_account_manager'):
lock_date = move.company_id.fiscalyear_lock_date
if move.date <= (lock_date or date.min):
if self.user_has_groups('account.group_account_manager'):
message = _("You cannot add/modify entries prior to and inclusive of the lock date %s.") % format_date(self.env, lock_date)
else:
message = _("You cannot add/modify entries prior to and inclusive of the lock date %s. Check the company settings or ask someone with the 'Adviser' role") % format_date(self.env, lock_date)
raise UserError(message)
return True
# -------------------------------------------------------------------------
# LOW-LEVEL METHODS
# -------------------------------------------------------------------------
def _move_autocomplete_invoice_lines_values(self):
''' This method recomputes dynamic lines on the current journal entry that include taxes, cash rounding
and payment terms lines.
'''
self.ensure_one()
line_currency = self.currency_id if self.currency_id != self.company_id.currency_id else False
for line in self.line_ids:
# Do something only on invoice lines.
if line.exclude_from_invoice_tab:
continue
# Shortcut to load the demo data.
# Doing line.account_id triggers a default_get(['account_id']) that could return a result.
# A section / note must not have an account_id set.
if not line._cache.get('account_id') and not line.display_type and not line._origin:
line.account_id = line._get_computed_account()
if not line.account_id:
if self.is_sale_document(include_receipts=True):
line.account_id = self.journal_id.default_credit_account_id
elif self.is_purchase_document(include_receipts=True):
line.account_id = self.journal_id.default_debit_account_id
if line.product_id and not line._cache.get('name'):
line.name = line._get_computed_name()
# Compute the account before the partner_id (relevant when account_followup is installed):
# setting the partner reads account_id from the cache, and if account_id is not in the cache yet,
# its default value is triggered, which is wrong in some cases.
# It is therefore better to set the account_id before the partner_id.
# Ensure related fields are well copied.
line.partner_id = self.partner_id
line.date = self.date
line.recompute_tax_line = True
line.currency_id = line_currency
self.line_ids._onchange_price_subtotal()
self._recompute_dynamic_lines(recompute_all_taxes=True)
values = self._convert_to_write(self._cache)
values.pop('invoice_line_ids', None)
return values
@api.model
def _move_autocomplete_invoice_lines_create(self, vals_list):
''' During the create of an account.move with only 'invoice_line_ids' set and not 'line_ids', this method is called
to auto compute accounting lines of the invoice. In that case, accounts will be retrieved and taxes, cash rounding
and payment terms will be computed. At the end, the values will contains all accounting lines in 'line_ids'
and the moves should be balanced.
:param vals_list: The list of values passed to the 'create' method.
:return: Modified list of values.
'''
new_vals_list = []
for vals in vals_list:
if not vals.get('invoice_line_ids'):
new_vals_list.append(vals)
continue
if vals.get('line_ids'):
vals.pop('invoice_line_ids', None)
new_vals_list.append(vals)
continue
if not vals.get('type') and not self._context.get('default_type'):
vals.pop('invoice_line_ids', None)
new_vals_list.append(vals)
continue
vals['type'] = vals.get('type', self._context.get('default_type', 'entry'))
if vals['type'] not in self.get_invoice_types(include_receipts=True):
new_vals_list.append(vals)
continue
vals['line_ids'] = vals.pop('invoice_line_ids')
if vals.get('invoice_date') and not vals.get('date'):
vals['date'] = vals['invoice_date']
ctx_vals = {'default_type': vals.get('type') or self._context.get('default_type')}
if vals.get('journal_id'):
ctx_vals['default_journal_id'] = vals['journal_id']
# reorder the companies in the context so that the company of the journal
# (which will be the company of the move) is the main one, ensuring all
# property fields are read with the correct company
journal_company = self.env['account.journal'].browse(vals['journal_id']).company_id
allowed_companies = self._context.get('allowed_company_ids', journal_company.ids)
reordered_companies = sorted(allowed_companies, key=lambda cid: cid != journal_company.id)
ctx_vals['allowed_company_ids'] = reordered_companies
self_ctx = self.with_context(**ctx_vals)
new_vals = self_ctx._add_missing_default_values(vals)
move = self_ctx.new(new_vals)
new_vals_list.append(move._move_autocomplete_invoice_lines_values())
return new_vals_list
def _move_autocomplete_invoice_lines_write(self, vals):
''' During the write of an account.move with only 'invoice_line_ids' set and not 'line_ids', this method is called
to auto compute accounting lines of the invoice. In that case, accounts will be retrieved and taxes, cash rounding
and payment terms will be computed. At the end, the values will contains all accounting lines in 'line_ids'
and the moves should be balanced.
:param vals_list: A python dict representing the values to write.
:return: True if the auto-completion did something, False otherwise.
'''
enable_autocomplete = 'invoice_line_ids' in vals and 'line_ids' not in vals
if not enable_autocomplete:
return False
vals['line_ids'] = vals.pop('invoice_line_ids')
for invoice in self:
invoice_new = invoice.with_context(default_type=invoice.type, default_journal_id=invoice.journal_id.id).new(origin=invoice)
invoice_new.update(vals)
values = invoice_new._move_autocomplete_invoice_lines_values()
values.pop('invoice_line_ids', None)
invoice.write(values)
return True
@api.model_create_multi
def create(self, vals_list):
# OVERRIDE
if any('state' in vals and vals.get('state') == 'posted' for vals in vals_list):
raise UserError(_('You cannot create a move already in the posted state. Please create a draft move and post it after.'))
vals_list = self._move_autocomplete_invoice_lines_create(vals_list)
return super(AccountMove, self).create(vals_list)
def write(self, vals):
for move in self:
if (move.restrict_mode_hash_table and move.state == "posted" and set(vals).intersection(INTEGRITY_HASH_MOVE_FIELDS)):
raise UserError(_("You cannot edit the following fields due to restrict mode being activated on the journal: %s.") % ', '.join(INTEGRITY_HASH_MOVE_FIELDS))
if (move.restrict_mode_hash_table and move.inalterable_hash and 'inalterable_hash' in vals) or (move.secure_sequence_number and 'secure_sequence_number' in vals):
raise UserError(_('You cannot overwrite the values ensuring the inalterability of the accounting.'))
if (move.name != '/' and 'journal_id' in vals and move.journal_id.id != vals['journal_id']):
raise UserError(_('You cannot edit the journal of an account move if it has been posted once.'))
# You can't change the date of a move being inside a locked period.
if 'date' in vals and move.date != vals['date']:
move._check_fiscalyear_lock_date()
move.line_ids._check_tax_lock_date()
# You can't reset a posted move back to draft inside a locked period.
if 'state' in vals and move.state == 'posted' and vals['state'] != 'posted':
move._check_fiscalyear_lock_date()
move.line_ids._check_tax_lock_date()
if self._move_autocomplete_invoice_lines_write(vals):
res = True
else:
vals.pop('invoice_line_ids', None)
res = super(AccountMove, self.with_context(check_move_validity=False)).write(vals)
# You can't change the date of a not-locked move to a locked period.
# You can't post a new journal entry inside a locked period.
if 'date' in vals or 'state' in vals:
self._check_fiscalyear_lock_date()
self.mapped('line_ids')._check_tax_lock_date()
if ('state' in vals and vals.get('state') == 'posted') and self.restrict_mode_hash_table:
for move in self.filtered(lambda m: not(m.secure_sequence_number or m.inalterable_hash)):
new_number = move.journal_id.secure_sequence_id.next_by_id()
vals_hashing = {'secure_sequence_number': new_number,
'inalterable_hash': move._get_new_hash(new_number)}
res |= super(AccountMove, move).write(vals_hashing)
# Ensure the move is still well balanced.
if 'line_ids' in vals and self._context.get('check_move_validity', True):
self._check_balanced()
return res
def unlink(self):
for move in self:
#if move.name != '/' and not self._context.get('force_delete'):
# raise UserError(_("You cannot delete an entry which has been posted once."))
move.line_ids.unlink()
return super(AccountMove, self).unlink()
@api.depends('name', 'state')
def name_get(self):
result = []
for move in self:
if self._context.get('name_groupby'):
name = '**%s**, %s' % (format_date(self.env, move.date), move._get_move_display_name())
if move.ref:
name += ' (%s)' % move.ref
if move.partner_id.name:
name += ' - %s' % move.partner_id.name
else:
name = move._get_move_display_name(show_ref=True)
result.append((move.id, name))
return result
def _creation_subtype(self):
# OVERRIDE
if self.type in ('out_invoice', 'out_refund', 'out_receipt'):
return self.env.ref('account.mt_invoice_created')
else:
return super(AccountMove, self)._creation_subtype()
def _track_subtype(self, init_values):
# OVERRIDE to add custom subtype depending of the state.
self.ensure_one()
if not self.is_invoice(include_receipts=True):
return super(AccountMove, self)._track_subtype(init_values)
if 'invoice_payment_state' in init_values and self.invoice_payment_state == 'paid':
return self.env.ref('account.mt_invoice_paid')
elif 'state' in init_values and self.state == 'posted' and self.is_sale_document(include_receipts=True):
return self.env.ref('account.mt_invoice_validated')
return super(AccountMove, self)._track_subtype(init_values)
def _get_creation_message(self):
# OVERRIDE
if not self.is_invoice(include_receipts=True):
return super()._get_creation_message()
return {
'out_invoice': _('Invoice Created'),
'out_refund': _('Refund Created'),
'in_invoice': _('Vendor Bill Created'),
'in_refund': _('Credit Note Created'),
'out_receipt': _('Sales Receipt Created'),
'in_receipt': _('Purchase Receipt Created'),
}[self.type]
# -------------------------------------------------------------------------
# BUSINESS METHODS
# -------------------------------------------------------------------------
@api.model
def get_invoice_types(self, include_receipts=False):
return ['out_invoice', 'out_refund', 'in_refund', 'in_invoice'] + (include_receipts and ['out_receipt', 'in_receipt'] or [])
def is_invoice(self, include_receipts=False):
return self.type in self.get_invoice_types(include_receipts)
@api.model
def get_sale_types(self, include_receipts=False):
return ['out_invoice', 'out_refund'] + (include_receipts and ['out_receipt'] or [])
def is_sale_document(self, include_receipts=False):
return self.type in self.get_sale_types(include_receipts)
@api.model
def get_purchase_types(self, include_receipts=False):
return ['in_invoice', 'in_refund'] + (include_receipts and ['in_receipt'] or [])
def is_purchase_document(self, include_receipts=False):
return self.type in self.get_purchase_types(include_receipts)
@api.model
def get_inbound_types(self, include_receipts=True):
return ['out_invoice', 'in_refund'] + (include_receipts and ['out_receipt'] or [])
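# 'Inbound' documents are those expected to bring money in (customer invoices, vendor credit notes);
# the 'outbound' documents below are the mirror case (vendor bills, customer credit notes).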
def is_inbound(self, include_receipts=True):
return self.type in self.get_inbound_types(include_receipts)
@api.model
def get_outbound_types(self, include_receipts=True):
return ['in_invoice', 'out_refund'] + (include_receipts and ['in_receipt'] or [])
def is_outbound(self, include_receipts=True):
return self.type in self.get_outbound_types(include_receipts)
def _affect_tax_report(self):
return any(line._affect_tax_report() for line in self.line_ids)
def _get_invoice_reference_euro_invoice(self):
""" This computes the reference based on the RF Creditor Reference.
The data of the reference is the database id number of the invoice.
For instance, if an invoice is issued with id 43, the check number
is 07 so the reference will be 'RF07 43'.
"""
self.ensure_one()
base = self.id
check_digits = calc_check_digits('{}RF'.format(base))
reference = 'RF{} {}'.format(check_digits, " ".join(["".join(x) for x in zip_longest(*[iter(str(base))]*4, fillvalue="")]))
return reference
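# Illustrative check, reusing the docstring example: for an invoice with id 43 the check digits are '07',
# so the reference is 'RF07 43' (digits grouped by blocks of 4).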
def _get_invoice_reference_euro_partner(self):
""" This computes the reference based on the RF Creditor Reference.
The data of the reference is the user defined reference of the
partner or the database id number of the partner.
For instance, if an invoice is issued for the partner with internal
reference 'food buyer 654', the digits will be extracted and used as
the data. This will lead to a check number equal to 00 and the
reference will be 'RF00 654'.
If no reference is set for the partner, its id in the database will
be used.
"""
self.ensure_one()
partner_ref = self.partner_id.ref
partner_ref_nr = re.sub(r'\D', '', partner_ref or '')[-21:] or str(self.partner_id.id)[-21:]
check_digits = calc_check_digits('{}RF'.format(partner_ref_nr))
reference = 'RF{} {}'.format(check_digits, " ".join(["".join(x) for x in zip_longest(*[iter(partner_ref_nr)]*4, fillvalue="")]))
return reference
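# Illustrative check, reusing the docstring example: a partner reference 'food buyer 654' keeps only the
# digits '654', whose check digits are '00', giving 'RF00 654'.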
def _get_invoice_reference_harpiya_invoice(self):
""" This computes the reference based on the Harpiya format.
We simply return the number of the invoice, defined on the journal
sequence.
"""
self.ensure_one()
return self.name
def _get_invoice_reference_harpiya_partner(self):
""" This computes the reference based on the Harpiya format.
The data used is the reference set on the partner or its database
id otherwise. For instance if the reference of the customer is
'dumb customer 97', the reference will be 'CUST/dumb customer 97'.
"""
ref = self.partner_id.ref or str(self.partner_id.id)
prefix = _('CUST')
return '%s/%s' % (prefix, ref)
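# Illustrative result, reusing the docstring example: a partner with reference 'dumb customer 97'
# gets 'CUST/dumb customer 97'.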
def _get_invoice_computed_reference(self):
self.ensure_one()
if self.journal_id.invoice_reference_type == 'none':
return ''
else:
ref_function = getattr(self, '_get_invoice_reference_{}_{}'.format(self.journal_id.invoice_reference_model, self.journal_id.invoice_reference_type))
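# The method name is built from the journal configuration, e.g. reference model 'euro' combined with
# reference type 'invoice' resolves to '_get_invoice_reference_euro_invoice' defined above.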
if ref_function:
return ref_function()
else:
raise UserError(_('The combination of reference model and reference type on the journal is not implemented'))
def _get_sequence(self):
''' Return the sequence to be used during the post of the current move.
:return: An ir.sequence record or False.
'''
self.ensure_one()
journal = self.journal_id
if self.type in ('entry', 'out_invoice', 'in_invoice', 'out_receipt', 'in_receipt') or not journal.refund_sequence:
return journal.sequence_id
if not journal.refund_sequence_id:
return
return journal.refund_sequence_id
def _get_move_display_name(self, show_ref=False):
''' Helper to get the display name of an invoice depending on its type.
:param show_ref: A flag indicating whether the display name must include the journal entry reference.
:return: A string representing the invoice.
'''
self.ensure_one()
draft_name = ''
if self.state == 'draft':
draft_name += {
'out_invoice': _('Draft Invoice'),
'out_refund': _('Draft Credit Note'),
'in_invoice': _('Draft Bill'),
'in_refund': _('Draft Vendor Credit Note'),
'out_receipt': _('Draft Sales Receipt'),
'in_receipt': _('Draft Purchase Receipt'),
'entry': _('Draft Entry'),
}[self.type]
if not self.name or self.name == '/':
draft_name += ' (* %s)' % str(self.id)
else:
draft_name += ' ' + self.name
return (draft_name or self.name) + (show_ref and self.ref and ' (%s%s)' % (self.ref[:50], '...' if len(self.ref) > 50 else '') or '')
def _get_invoice_delivery_partner_id(self):
''' Hook allowing to retrieve the right delivery address depending on installed modules.
:return: A res.partner record's id representing the delivery address.
'''
self.ensure_one()
return self.partner_id.address_get(['delivery'])['delivery']
def _get_invoice_intrastat_country_id(self):
''' Hook allowing to retrieve the intrastat country depending on installed modules.
:return: A res.country record's id.
'''
self.ensure_one()
return self.partner_id.country_id.id
def _get_cash_basis_matched_percentage(self):
"""Compute the percentage to apply for cash basis method. This value is relevant only for moves that
involve journal items on receivable or payable accounts.
"""
self.ensure_one()
query = '''
SELECT
(
SELECT COALESCE(SUM(line.balance), 0.0)
FROM account_move_line line
JOIN account_account account ON account.id = line.account_id
JOIN account_account_type account_type ON account_type.id = account.user_type_id
WHERE line.move_id = %s AND account_type.type IN ('receivable', 'payable')
) AS total_amount,
(
SELECT COALESCE(SUM(partial.amount), 0.0)
FROM account_move_line line
JOIN account_account account ON account.id = line.account_id
JOIN account_account_type account_type ON account_type.id = account.user_type_id
LEFT JOIN account_partial_reconcile partial ON
partial.debit_move_id = line.id
OR
partial.credit_move_id = line.id
WHERE line.move_id = %s AND account_type.type IN ('receivable', 'payable')
) AS total_reconciled
'''
params = [self.id, self.id]
self._cr.execute(query, params)
total_amount, total_reconciled = self._cr.fetchone()
currency = self.company_id.currency_id
if float_is_zero(total_amount, precision_rounding=currency.rounding):
return 1.0
else:
return abs(currency.round(total_reconciled) / currency.round(total_amount))
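# A result of 1.0 from the method above means the move is treated as fully matched (everything reconciled,
# or nothing on a receivable/payable account); e.g. 0.5 means half of the receivable/payable balance is reconciled.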
def _get_reconciled_payments(self):
"""Helper used to retrieve the reconciled payments on this journal entry"""
pay_term_line_ids = self.line_ids.filtered(lambda line: line.account_id.user_type_id.type in ('receivable', 'payable'))
reconciled_amls = pay_term_line_ids.mapped('matched_debit_ids.debit_move_id') + \
pay_term_line_ids.mapped('matched_credit_ids.credit_move_id')
return reconciled_amls.mapped('payment_id')
def _reverse_move_vals(self, default_values, cancel=True):
''' Reverse the values passed as parameter, these being the copied values of the original journal entry.
For example, debit / credit must be switched. The tax lines must be edited in case of refunds.
:param default_values: A copy_data result of the original journal entry.
:param cancel: A flag indicating the reverse is made to cancel the original journal entry.
:return: The updated default_values.
'''
self.ensure_one()
def compute_tax_repartition_lines_mapping(move_vals):
''' Computes and returns a mapping between the current repartition lines to the new expected one.
:param move_vals: The newly created invoice as a python dictionary to be passed to the 'create' method.
:return: A map invoice_repartition_line => refund_repartition_line.
'''
# invoice_repartition_line => refund_repartition_line
mapping = {}
# Do nothing if the move is not a credit note.
if move_vals['type'] not in ('out_refund', 'in_refund'):
return mapping
for line_command in move_vals.get('line_ids', []):
line_vals = line_command[2] # (0, 0, {...})
if line_vals.get('tax_ids') and line_vals['tax_ids'][0][2]:
# Base line.
tax_ids = line_vals['tax_ids'][0][2]
elif line_vals.get('tax_line_id'):
# Tax line.
tax_ids = [line_vals['tax_line_id']]
else:
continue
for tax in self.env['account.tax'].browse(tax_ids).flatten_taxes_hierarchy():
for inv_rep_line, ref_rep_line in zip(tax.invoice_repartition_line_ids, tax.refund_repartition_line_ids):
mapping[inv_rep_line] = ref_rep_line
return mapping
move_vals = self.with_context(include_business_fields=True).copy_data(default=default_values)[0]
tax_repartition_lines_mapping = compute_tax_repartition_lines_mapping(move_vals)
for line_command in move_vals.get('line_ids', []):
line_vals = line_command[2] # (0, 0, {...})
# ==== Inverse debit / credit / amount_currency ====
amount_currency = -line_vals.get('amount_currency', 0.0)
balance = line_vals['credit'] - line_vals['debit']
line_vals.update({
'amount_currency': amount_currency,
'debit': balance > 0.0 and balance or 0.0,
'credit': balance < 0.0 and -balance or 0.0,
})
if move_vals['type'] not in ('out_refund', 'in_refund'):
continue
# ==== Map tax repartition lines ====
if line_vals.get('tax_ids') and line_vals['tax_ids'][0][2]:
# Base line.
taxes = self.env['account.tax'].browse(line_vals['tax_ids'][0][2]).flatten_taxes_hierarchy()
invoice_repartition_lines = taxes\
.mapped('invoice_repartition_line_ids')\
.filtered(lambda line: line.repartition_type == 'base')
refund_repartition_lines = invoice_repartition_lines\
.mapped(lambda line: tax_repartition_lines_mapping[line])
line_vals['tag_ids'] = [(6, 0, refund_repartition_lines.mapped('tag_ids').ids)]
elif line_vals.get('tax_repartition_line_id'):
# Tax line.
invoice_repartition_line = self.env['account.tax.repartition.line'].browse(line_vals['tax_repartition_line_id'])
refund_repartition_line = tax_repartition_lines_mapping[invoice_repartition_line]
# Find the right account.
account_id = self.env['account.move.line']._get_default_tax_account(refund_repartition_line).id
if not account_id:
if not invoice_repartition_line.account_id:
# Keep the current account as the current one comes from the base line.
account_id = line_vals['account_id']
else:
tax = invoice_repartition_line.invoice_tax_id
base_line = self.line_ids.filtered(lambda line: tax in line.tax_ids.flatten_taxes_hierarchy())[0]
account_id = base_line.account_id.id
line_vals.update({
'tax_repartition_line_id': refund_repartition_line.id,
'account_id': account_id,
'tag_ids': [(6, 0, refund_repartition_line.tag_ids.ids)],
})
return move_vals
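# Illustrative effect (hypothetical line values): a copied line with
# {'debit': 100.0, 'credit': 0.0, 'amount_currency': 120.0} comes out as
# {'debit': 0.0, 'credit': 100.0, 'amount_currency': -120.0}. For refund types
# ('out_refund' / 'in_refund'), the tax repartition lines and their tags are also
# remapped from the invoice repartition to the refund repartition of each tax.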
def _reverse_moves(self, default_values_list=None, cancel=False):
''' Reverse a recordset of account.move.
If cancel parameter is true, the reconcilable or liquidity lines
of each original move will be reconciled with its reverse's.
:param default_values_list: A list of default values to consider per move.
('type' & 'reversed_entry_id' are computed in the method).
:return: An account.move recordset, reverse of the current self.
'''
if not default_values_list:
default_values_list = [{} for move in self]
if cancel:
lines = self.mapped('line_ids')
# Avoid maximum recursion depth.
if lines:
lines.remove_move_reconcile()
reverse_type_map = {
'entry': 'entry',
'out_invoice': 'out_refund',
'out_refund': 'entry',
'in_invoice': 'in_refund',
'in_refund': 'entry',
'out_receipt': 'entry',
'in_receipt': 'entry',
}
move_vals_list = []
for move, default_values in zip(self, default_values_list):
default_values.update({
'type': reverse_type_map[move.type],
'reversed_entry_id': move.id,
})
move_vals_list.append(move._reverse_move_vals(default_values, cancel=cancel))
reverse_moves = self.env['account.move'].create(move_vals_list)
for move, reverse_move in zip(self, reverse_moves.with_context(check_move_validity=False)):
# Update amount_currency if the date has changed.
if move.date != reverse_move.date:
for line in reverse_move.line_ids:
if line.currency_id:
line._onchange_currency()
reverse_move._recompute_dynamic_lines(recompute_all_taxes=False)
reverse_moves._check_balanced()
# Reconcile moves together to cancel the previous one.
if cancel:
reverse_moves.with_context(move_reverse_cancel=cancel).post()
for move, reverse_move in zip(self, reverse_moves):
accounts = move.mapped('line_ids.account_id') \
.filtered(lambda account: account.reconcile or account.internal_type == 'liquidity')
for account in accounts:
(move.line_ids + reverse_move.line_ids)\
.filtered(lambda line: line.account_id == account and line.balance)\
.reconcile()
return reverse_moves
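# Illustrative usage (a sketch; assumes `invoice` is a posted customer invoice record):
#   reversal = invoice._reverse_moves(
#       default_values_list=[{'date': fields.Date.today(), 'ref': 'Reversal of %s' % invoice.name}],
#       cancel=True)
# With cancel=True the reverse moves are posted and their receivable/payable and
# liquidity lines are reconciled with the original ones; with cancel=False the
# reversals are returned as draft entries.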
def open_reconcile_view(self):
return self.line_ids.open_reconcile_view()
@api.model
def message_new(self, msg_dict, custom_values=None):
# OVERRIDE
# Add custom behavior when receiving a new invoice through the mail's gateway.
if (custom_values or {}).get('type', 'entry') not in ('out_invoice', 'in_invoice'):
return super().message_new(msg_dict, custom_values=custom_values)
def is_internal_partner(partner):
# Helper to know if the partner is an internal one.
return partner.user_ids and all(user.has_group('base.group_user') for user in partner.user_ids)
# Search for partners in copy.
cc_mail_addresses = email_split(msg_dict.get('cc', ''))
followers = [partner for partner in self._mail_find_partner_from_emails(cc_mail_addresses) if partner]
# Search for partner that sent the mail.
from_mail_addresses = email_split(msg_dict.get('from', ''))
senders = partners = [partner for partner in self._mail_find_partner_from_emails(from_mail_addresses) if partner]
# Search for partners using the user.
if not senders:
senders = partners = list(self._mail_search_on_user(from_mail_addresses))
if partners:
# Check we are not in the case when an internal user forwarded the mail manually.
if is_internal_partner(partners[0]):
# Search for partners in the mail's body.
body_mail_addresses = set(email_re.findall(msg_dict.get('body')))
partners = [partner for partner in self._mail_find_partner_from_emails(body_mail_addresses) if not is_internal_partner(partner)]
# Little hack: Inject the mail's subject in the body.
if msg_dict.get('subject') and msg_dict.get('body'):
msg_dict['body'] = '<div><div><h3>%s</h3></div>%s</div>' % (msg_dict['subject'], msg_dict['body'])
# Create the invoice.
values = {
'name': self.default_get(['name'])['name'],
'invoice_source_email': from_mail_addresses[0],
'partner_id': partners and partners[0].id or False,
}
move_ctx = self.with_context(default_type=custom_values['type'], default_journal_id=custom_values['journal_id'])
move = super(AccountMove, move_ctx).message_new(msg_dict, custom_values=values)
# Assign followers.
all_followers_ids = set(partner.id for partner in followers + senders + partners if is_internal_partner(partner))
move.message_subscribe(list(all_followers_ids))
return move
def post(self):
# `user_has_group` won't be bypassed by `sudo()` since it doesn't change the user anymore.
if not self.env.su and not self.env.user.has_group('account.group_account_invoice'):
raise AccessError(_("You don't have the access rights to post an invoice."))
for move in self:
if not move.line_ids.filtered(lambda line: not line.display_type):
raise UserError(_('You need to add a line before posting.'))
if move.auto_post and move.date > fields.Date.today():
date_msg = move.date.strftime(get_lang(self.env).date_format)
raise UserError(_("This move is configured to be auto-posted on %s" % date_msg))
if not move.partner_id:
if move.is_sale_document():
raise UserError(_("The field 'Customer' is required, please complete it to validate the Customer Invoice."))
elif move.is_purchase_document():
raise UserError(_("The field 'Vendor' is required, please complete it to validate the Vendor Bill."))
if move.is_invoice(include_receipts=True) and float_compare(move.amount_total, 0.0, precision_rounding=move.currency_id.rounding) < 0:
raise UserError(_("You cannot validate an invoice with a negative total amount. You should create a credit note instead. Use the action menu to transform it into a credit note or refund."))
# Handle case when the invoice_date is not set. In that case, the invoice_date is set at today and then,
# lines are recomputed accordingly.
# /!\ 'check_move_validity' must be there since the dynamic lines will be recomputed outside the 'onchange'
# environment.
if not move.invoice_date and move.is_invoice(include_receipts=True):
move.invoice_date = fields.Date.context_today(self)
move.with_context(check_move_validity=False)._onchange_invoice_date()
# When the accounting date is prior to the tax lock date, move it automatically to the next available date.
# /!\ 'check_move_validity' must be there since the dynamic lines will be recomputed outside the 'onchange'
# environment.
if (move.company_id.tax_lock_date and move.date <= move.company_id.tax_lock_date) and (move.line_ids.tax_ids or move.line_ids.tag_ids):
move.date = move.company_id.tax_lock_date + timedelta(days=1)
move.with_context(check_move_validity=False)._onchange_currency()
# Create the analytic lines in batch is faster as it leads to less cache invalidation.
self.mapped('line_ids').create_analytic_lines()
for move in self:
if move.auto_post and move.date > fields.Date.today():
raise UserError(_("This move is configured to be auto-posted on {}".format(move.date.strftime(get_lang(self.env).date_format))))
move.message_subscribe([p.id for p in [move.partner_id] if p not in move.sudo().message_partner_ids])
to_write = {'state': 'posted'}
if move.name == '/':
# Get the journal's sequence.
sequence = move._get_sequence()
if not sequence:
raise UserError(_('Please define a sequence on your journal.'))
# Consume a new number.
to_write['name'] = sequence.with_context(ir_sequence_date=move.date).next_by_id()
move.write(to_write)
# Compute 'ref' for 'out_invoice'.
if move.type == 'out_invoice' and not move.invoice_payment_ref:
to_write = {
'invoice_payment_ref': move._get_invoice_computed_reference(),
'line_ids': []
}
for line in move.line_ids.filtered(lambda line: line.account_id.user_type_id.type in ('receivable', 'payable')):
to_write['line_ids'].append((1, line.id, {'name': to_write['invoice_payment_ref']}))
move.write(to_write)
if move == move.company_id.account_opening_move_id and not move.company_id.account_bank_reconciliation_start:
# For opening moves, we set the reconciliation date threshold
# to the move's date if it wasn't already set (we don't want
# to have to reconcile all the older payments -made before
# installing Accounting- with bank statements)
move.company_id.account_bank_reconciliation_start = move.date
for move in self:
if not move.partner_id: continue
if move.type.startswith('out_'):
move.partner_id._increase_rank('customer_rank')
elif move.type.startswith('in_'):
move.partner_id._increase_rank('supplier_rank')
else:
continue
# Trigger action for paid invoices if the amount is zero
self.filtered(
lambda m: m.is_invoice(include_receipts=True) and m.currency_id.is_zero(m.amount_total)
).action_invoice_paid()
# Force balance check since nothing prevents another module to create an incorrect entry.
# This is performed at the very end to avoid flushing fields before the whole processing.
self._check_balanced()
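# Illustrative usage (sketch): `move.post()` on a balanced draft entry assigns the
# next number of the journal sequence (when the name is still '/'), computes the
# payment reference of customer invoices and switches the state to 'posted'.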
def action_reverse(self):
action = self.env.ref('account.action_view_account_move_reversal').read()[0]
if self.is_invoice():
action['name'] = _('Credit Note')
return action
def action_post(self):
if self.filtered(lambda x: x.journal_id.post_at == 'bank_rec').mapped('line_ids.payment_id').filtered(lambda x: x.state != 'reconciled'):
raise UserError(_("A payment journal entry generated in a journal configured to post entries only when payments are reconciled with a bank statement cannot be manually posted. Those will be posted automatically after performing the bank reconciliation."))
return self.post()
def js_assign_outstanding_line(self, line_id):
self.ensure_one()
lines = self.env['account.move.line'].browse(line_id)
lines += self.line_ids.filtered(lambda line: line.account_id == lines[0].account_id and not line.reconciled)
return lines.reconcile()
def button_draft(self):
AccountMoveLine = self.env['account.move.line']
excluded_move_ids = []
if self._context.get('suspense_moves_mode'):
excluded_move_ids = AccountMoveLine.search(AccountMoveLine._get_suspense_moves_domain() + [('move_id', 'in', self.ids)]).mapped('move_id').ids
for move in self:
if move in move.line_ids.mapped('full_reconcile_id.exchange_move_id'):
raise UserError(_('You cannot reset to draft an exchange difference journal entry.'))
if move.tax_cash_basis_rec_id:
raise UserError(_('You cannot reset to draft a tax cash basis journal entry.'))
if move.restrict_mode_hash_table and move.state == 'posted' and move.id not in excluded_move_ids:
raise UserError(_('You cannot modify a posted entry of this journal because it is in strict mode.'))
# We remove all the analytics entries for this journal
move.mapped('line_ids.analytic_line_ids').unlink()
self.mapped('line_ids').remove_move_reconcile()
self.write({'state': 'draft'})
def button_cancel(self):
self.write({'state': 'cancel'})
def action_invoice_sent(self):
""" Open a window to compose an email, with the edi invoice template
message loaded by default
"""
self.ensure_one()
template = self.env.ref('account.email_template_edi_invoice', raise_if_not_found=False)
lang = get_lang(self.env)
if template and template.lang:
lang = template._render_template(template.lang, 'account.move', self.id)
else:
lang = lang.code
compose_form = self.env.ref('account.account_invoice_send_wizard_form', raise_if_not_found=False)
ctx = dict(
default_model='account.move',
default_res_id=self.id,
default_use_template=bool(template),
default_template_id=template and template.id or False,
default_composition_mode='comment',
mark_invoice_as_sent=True,
custom_layout="mail.mail_notification_paynow",
model_description=self.with_context(lang=lang).type_name,
force_email=True
)
return {
'name': _('Send Invoice'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'account.invoice.send',
'views': [(compose_form.id, 'form')],
'view_id': compose_form.id,
'target': 'new',
'context': ctx,
}
def _get_new_hash(self, secure_seq_number):
""" Returns the hash to write on journal entries when they get posted"""
self.ensure_one()
# get the single previous posted move in the secured (inalterability) sequence
prev_move = self.search([('state', '=', 'posted'),
('company_id', '=', self.company_id.id),
('journal_id', '=', self.journal_id.id),
('secure_sequence_number', '!=', 0),
('secure_sequence_number', '=', int(secure_seq_number) - 1)])
if prev_move and len(prev_move) != 1:
raise UserError(
_('An error occurred when computing the inalterability. Impossible to get the unique previous posted journal entry.'))
#build and return the hash
return self._compute_hash(prev_move.inalterable_hash if prev_move else u'')
def _compute_hash(self, previous_hash):
""" Computes the hash of the browse_record given as self, based on the hash
of the previous record in the company's securisation sequence given as parameter"""
self.ensure_one()
hash_string = sha256((previous_hash + self.string_to_hash).encode('utf-8'))
return hash_string.hexdigest()
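# Illustrative chaining (hypothetical values): if the previous posted entry carries
# inalterable_hash 'abc...' and this entry's string_to_hash is '{"date": ...}', then
# sha256(('abc...' + '{"date": ...}').encode('utf-8')).hexdigest() becomes the hash
# stored on this entry, linking each posted entry to its predecessor.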
def _compute_string_to_hash(self):
def _getattrstring(obj, field_str):
field_value = obj[field_str]
if obj._fields[field_str].type == 'many2one':
field_value = field_value.id
return str(field_value)
for move in self:
values = {}
for field in INTEGRITY_HASH_MOVE_FIELDS:
values[field] = _getattrstring(move, field)
for line in move.line_ids:
for field in INTEGRITY_HASH_LINE_FIELDS:
k = 'line_%d_%s' % (line.id, field)
values[k] = _getattrstring(line, field)
#make the json serialization canonical
# (https://tools.ietf.org/html/draft-staykov-hu-json-canonical-form-00)
move.string_to_hash = dumps(values, sort_keys=True,
ensure_ascii=True, indent=None,
separators=(',',':'))
def action_invoice_print(self):
""" Print the invoice and mark it as sent, so that we can see more
easily the next step of the workflow
"""
if any(not move.is_invoice(include_receipts=True) for move in self):
raise UserError(_("Only invoices could be printed."))
self.filtered(lambda inv: not inv.invoice_sent).write({'invoice_sent': True})
if self.user_has_groups('account.group_account_invoice'):
return self.env.ref('account.account_invoices').report_action(self)
else:
return self.env.ref('account.account_invoices_without_payment').report_action(self)
def action_invoice_paid(self):
''' Hook to be overridden, called when the invoice moves to the paid state. '''
pass
def action_open_matching_suspense_moves(self):
self.ensure_one()
domain = self._get_domain_matching_suspense_moves()
ids = self.env['account.move.line'].search(domain).mapped('statement_line_id').ids
action_context = {'show_mode_selector': False, 'company_ids': self.mapped('company_id').ids}
action_context.update({'suspense_moves_mode': True})
action_context.update({'statement_line_ids': ids})
action_context.update({'partner_id': self.partner_id.id})
action_context.update({'partner_name': self.partner_id.name})
return {
'type': 'ir.actions.client',
'tag': 'bank_statement_reconciliation_view',
'context': action_context,
}
def action_invoice_register_payment(self):
return self.env['account.payment']\
.with_context(active_ids=self.ids, active_model='account.move', active_id=self.id)\
.action_register_payment()
def action_switch_invoice_into_refund_credit_note(self):
if any(move.type not in ('in_invoice', 'out_invoice') for move in self):
raise ValidationError(_("This action isn't available for this document."))
for move in self:
move.type = move.type.replace('invoice', 'refund')
reversed_move = move._reverse_move_vals({}, False)
new_invoice_line_ids = []
for cmd, virtualid, line_vals in reversed_move['line_ids']:
if not line_vals['exclude_from_invoice_tab']:
new_invoice_line_ids.append((0, 0, line_vals))
if move.amount_total < 0:
# Inverse all invoice_line_ids
for cmd, virtualid, line_vals in new_invoice_line_ids:
line_vals.update({
'quantity': -line_vals['quantity'],
'amount_currency': -line_vals['amount_currency'],
'debit': line_vals['credit'],
'credit': line_vals['debit'],
})
move.write({'invoice_line_ids': [(5, 0, 0)], 'invoice_partner_bank_id': False})
move.write({'invoice_line_ids': new_invoice_line_ids})
def _get_report_base_filename(self):
if any(not move.is_invoice() for move in self):
raise UserError(_("Only invoices could be printed."))
return self._get_move_display_name()
def preview_invoice(self):
self.ensure_one()
return {
'type': 'ir.actions.act_url',
'target': 'self',
'url': self.get_portal_url(),
}
def _compute_access_url(self):
super(AccountMove, self)._compute_access_url()
for move in self.filtered(lambda move: move.is_invoice()):
move.access_url = '/my/invoices/%s' % (move.id)
@api.depends('line_ids')
def _compute_has_reconciled_entries(self):
for move in self:
move.has_reconciled_entries = len(move.line_ids._reconciled_lines()) > 1
def action_view_reverse_entry(self):
self.ensure_one()
# Create action.
action = {
'name': _('Reverse Moves'),
'type': 'ir.actions.act_window',
'res_model': 'account.move',
}
reverse_entries = self.env['account.move'].search([('reversed_entry_id', '=', self.id)])
if len(reverse_entries) == 1:
action.update({
'view_mode': 'form',
'res_id': reverse_entries.id,
})
else:
action.update({
'view_mode': 'tree',
'domain': [('id', 'in', reverse_entries.ids)],
})
return action
@api.model
def _autopost_draft_entries(self):
''' This method is called from a cron job.
It is used to post entries such as those created by the module
account_asset.
'''
records = self.search([
('state', '=', 'draft'),
('date', '<=', fields.Date.today()),
('auto_post', '=', True),
])
records.post()
# Offer the possibility to duplicate via a dedicated button, which is more visible than the hidden contextual menu.
def action_duplicate(self):
self.ensure_one()
action = self.env.ref('account.action_move_journal_line').read()[0]
action['context'] = dict(self.env.context)
action['context']['form_view_initial_mode'] = 'edit'
action['context']['view_no_maturity'] = False
action['views'] = [(self.env.ref('account.view_move_form').id, 'form')]
action['res_id'] = self.copy().id
return action
class AccountMoveLine(models.Model):
_name = "account.move.line"
_description = "Journal Item"
_order = "date desc, move_name desc, id"
_check_company_auto = True
# ==== Business fields ====
move_id = fields.Many2one('account.move', string='Journal Entry',
index=True, required=True, readonly=True, auto_join=True, ondelete="cascade",
help="The move of this entry line.")
move_name = fields.Char(string='Number', related='move_id.name', store=True, index=True)
date = fields.Date(related='move_id.date', store=True, readonly=True, index=True, copy=False, group_operator='min')
ref = fields.Char(related='move_id.ref', store=True, copy=False, index=True, readonly=False)
parent_state = fields.Selection(related='move_id.state', store=True, readonly=True)
journal_id = fields.Many2one(related='move_id.journal_id', store=True, index=True, copy=False)
company_id = fields.Many2one(related='move_id.company_id', store=True, readonly=True)
company_currency_id = fields.Many2one(related='company_id.currency_id', string='Company Currency',
readonly=True, store=True,
help='Utility field to express amount currency')
country_id = fields.Many2one(comodel_name='res.country', related='move_id.company_id.country_id')
account_id = fields.Many2one('account.account', string='Account',
index=True, ondelete="restrict", check_company=True,
domain=[('deprecated', '=', False)])
account_internal_type = fields.Selection(related='account_id.user_type_id.type', string="Internal Type", store=True, readonly=True)
account_root_id = fields.Many2one(related='account_id.root_id', string="Account Root", store=True, readonly=True)
sequence = fields.Integer(default=10)
name = fields.Char(string='Label')
quantity = fields.Float(string='Quantity',
default=1.0, digits='Product Unit of Measure',
help="The optional quantity expressed by this line, eg: number of product sold. "
"The quantity is not a legal requirement but is very useful for some reports.")
price_unit = fields.Float(string='Unit Price', digits='Product Price')
discount = fields.Float(string='Discount (%)', digits='Discount', default=0.0)
debit = fields.Monetary(string='Debit', default=0.0, currency_field='company_currency_id')
credit = fields.Monetary(string='Credit', default=0.0, currency_field='company_currency_id')
balance = fields.Monetary(string='Balance', store=True,
currency_field='company_currency_id',
compute='_compute_balance',
help="Technical field holding the debit - credit in order to open meaningful graph views from reports")
amount_currency = fields.Monetary(string='Amount in Currency', store=True, copy=True,
help="The amount expressed in an optional other currency if it is a multi-currency entry.")
price_subtotal = fields.Monetary(string='Subtotal', store=True, readonly=True,
currency_field='always_set_currency_id')
price_total = fields.Monetary(string='Total', store=True, readonly=True,
currency_field='always_set_currency_id')
reconciled = fields.Boolean(compute='_amount_residual', store=True)
blocked = fields.Boolean(string='No Follow-up', default=False,
help="You can check this box to mark this journal item as a litigation with the associated partner")
date_maturity = fields.Date(string='Due Date', index=True,
help="This field is used for payable and receivable journal entries. You can put the limit date for the payment of this line.")
currency_id = fields.Many2one('res.currency', string='Currency')
partner_id = fields.Many2one('res.partner', string='Partner', ondelete='restrict')
product_uom_id = fields.Many2one('uom.uom', string='Unit of Measure')
product_id = fields.Many2one('product.product', string='Product')
# ==== Origin fields ====
reconcile_model_id = fields.Many2one('account.reconcile.model', string="Reconciliation Model", copy=False, readonly=True)
payment_id = fields.Many2one('account.payment', string="Originator Payment", copy=False,
help="Payment that created this entry")
statement_line_id = fields.Many2one('account.bank.statement.line',
string='Bank statement line reconciled with this entry',
index=True, copy=False, readonly=True)
statement_id = fields.Many2one(related='statement_line_id.statement_id', store=True, index=True, copy=False,
help="The bank statement used for bank reconciliation")
# ==== Tax fields ====
tax_ids = fields.Many2many('account.tax', string='Taxes', help="Taxes that apply on the base amount")
tax_line_id = fields.Many2one('account.tax', string='Originator Tax', ondelete='restrict', store=True,
compute='_compute_tax_line_id', help="Indicates that this journal item is a tax line")
tax_group_id = fields.Many2one(related='tax_line_id.tax_group_id', string='Originator tax group',
readonly=True, store=True,
help='technical field for widget tax-group-custom-field')
tax_base_amount = fields.Monetary(string="Base Amount", store=True, readonly=True,
currency_field='company_currency_id')
tax_exigible = fields.Boolean(string='Appears in VAT report', default=True, readonly=True,
help="Technical field used to mark a tax line as exigible in the vat report or not (only exigible journal items"
" are displayed). By default all new journal items are directly exigible, but with the feature cash_basis"
" on taxes, some will become exigible only when the payment is recorded.")
tax_repartition_line_id = fields.Many2one(comodel_name='account.tax.repartition.line',
string="Originator Tax Repartition Line", ondelete='restrict', readonly=True,
help="Tax repartition line that caused the creation of this move line, if any")
tag_ids = fields.Many2many(string="Tags", comodel_name='account.account.tag', ondelete='restrict',
help="Tags assigned to this line by the tax creating it, if any. It determines its impact on financial reports.")
tax_audit = fields.Char(string="Tax Audit String", compute="_compute_tax_audit", store=True,
help="Computed field, listing the tax grids impacted by this line, and the amount it applies to each of them.")
# ==== Reconciliation fields ====
amount_residual = fields.Monetary(string='Residual Amount', store=True,
currency_field='company_currency_id',
compute='_amount_residual',
help="The residual amount on a journal item expressed in the company currency.")
amount_residual_currency = fields.Monetary(string='Residual Amount in Currency', store=True,
compute='_amount_residual',
help="The residual amount on a journal item expressed in its currency (possibly not the company currency).")
full_reconcile_id = fields.Many2one('account.full.reconcile', string="Matching #", copy=False, index=True, readonly=True)
matched_debit_ids = fields.One2many('account.partial.reconcile', 'credit_move_id', string='Matched Debits',
help='Debit journal items that are matched with this journal item.', readonly=True)
matched_credit_ids = fields.One2many('account.partial.reconcile', 'debit_move_id', string='Matched Credits',
help='Credit journal items that are matched with this journal item.', readonly=True)
# ==== Analytic fields ====
analytic_line_ids = fields.One2many('account.analytic.line', 'move_id', string='Analytic lines')
analytic_account_id = fields.Many2one('account.analytic.account', string='Analytic Account', index=True)
analytic_tag_ids = fields.Many2many('account.analytic.tag', string='Analytic Tags')
# ==== Onchange / display purpose fields ====
recompute_tax_line = fields.Boolean(store=False, readonly=True,
help="Technical field used to know on which lines the taxes must be recomputed.")
display_type = fields.Selection([
('line_section', 'Section'),
('line_note', 'Note'),
], default=False, help="Technical field for UX purpose.")
is_rounding_line = fields.Boolean(help="Technical field used to retrieve the cash rounding line.")
exclude_from_invoice_tab = fields.Boolean(help="Technical field used to exclude some lines from the invoice_line_ids tab in the form view.")
always_set_currency_id = fields.Many2one('res.currency', string='Foreign Currency',
compute='_compute_always_set_currency_id',
help="Technical field used to compute the monetary field. As currency_id is not a required field, we need to use either the foreign currency, either the company one.")
_sql_constraints = [
(
'check_credit_debit',
'CHECK(credit + debit>=0 AND credit * debit=0)',
'Wrong credit or debit value in accounting entry !'
),
(
'check_accountable_required_fields',
"CHECK(COALESCE(display_type IN ('line_section', 'line_note'), 'f') OR account_id IS NOT NULL)",
"Missing required account on accountable invoice line."
),
(
'check_non_accountable_fields_null',
"CHECK(display_type NOT IN ('line_section', 'line_note') OR (amount_currency = 0 AND debit = 0 AND credit = 0 AND account_id IS NULL))",
"Forbidden unit price, account and quantity on non-accountable invoice line"
),
(
'check_amount_currency_balance_sign',
'''CHECK(
currency_id IS NULL
OR
company_currency_id IS NULL
OR
(
(currency_id != company_currency_id)
AND
(
(balance > 0 AND amount_currency > 0)
OR (balance <= 0 AND amount_currency <= 0)
OR (balance >= 0 AND amount_currency >= 0)
)
)
)''',
"The amount expressed in the secondary currency must be positive when account is debited and negative when account is credited. Moreover, the currency field has to be left empty when the amount is expressed in the company currency."
),
]
# -------------------------------------------------------------------------
# HELPERS
# -------------------------------------------------------------------------
@api.model
def _get_default_tax_account(self, repartition_line):
tax = repartition_line.invoice_tax_id or repartition_line.refund_tax_id
if tax.tax_exigibility == 'on_payment':
account = tax.cash_basis_transition_account_id
else:
account = repartition_line.account_id
return account
def _get_computed_name(self):
self.ensure_one()
if not self.product_id:
return ''
if self.partner_id.lang:
product = self.product_id.with_context(lang=self.partner_id.lang)
else:
product = self.product_id
values = []
if product.partner_ref:
values.append(product.partner_ref)
if self.journal_id.type == 'sale':
if product.description_sale:
values.append(product.description_sale)
elif self.journal_id.type == 'purchase':
if product.description_purchase:
values.append(product.description_purchase)
return '\n'.join(values)
def _get_computed_price_unit(self):
self.ensure_one()
if not self.product_id:
return self.price_unit
elif self.move_id.is_sale_document(include_receipts=True):
# Out invoice.
price_unit = self.product_id.lst_price
elif self.move_id.is_purchase_document(include_receipts=True):
# In invoice.
price_unit = self.product_id.standard_price
else:
return self.price_unit
if self.product_uom_id != self.product_id.uom_id:
price_unit = self.product_id.uom_id._compute_price(price_unit, self.product_uom_id)
return price_unit
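# Illustrative conversion (hypothetical product): for a sale line whose product is
# priced 100.0 per Unit while the line's unit of measure is Dozen, the returned
# price unit would be 1200.0 (uom_id._compute_price converts the price from the
# product's UoM to the line's UoM).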
def _get_computed_account(self):
self.ensure_one()
self = self.with_context(force_company=self.move_id.journal_id.company_id.id)
if not self.product_id:
return
fiscal_position = self.move_id.fiscal_position_id
accounts = self.product_id.product_tmpl_id.get_product_accounts(fiscal_pos=fiscal_position)
if self.move_id.is_sale_document(include_receipts=True):
# Out invoice.
return accounts['income']
elif self.move_id.is_purchase_document(include_receipts=True):
# In invoice.
return accounts['expense']
def _get_computed_taxes(self):
self.ensure_one()
if self.move_id.is_sale_document(include_receipts=True):
# Out invoice.
if self.product_id.taxes_id:
tax_ids = self.product_id.taxes_id.filtered(lambda tax: tax.company_id == self.move_id.company_id)
elif self.account_id.tax_ids:
tax_ids = self.account_id.tax_ids
else:
tax_ids = self.env['account.tax']
if not tax_ids and not self.exclude_from_invoice_tab:
tax_ids = self.move_id.company_id.account_sale_tax_id
elif self.move_id.is_purchase_document(include_receipts=True):
# In invoice.
if self.product_id.supplier_taxes_id:
tax_ids = self.product_id.supplier_taxes_id.filtered(lambda tax: tax.company_id == self.move_id.company_id)
elif self.account_id.tax_ids:
tax_ids = self.account_id.tax_ids
else:
tax_ids = self.env['account.tax']
if not tax_ids and not self.exclude_from_invoice_tab:
tax_ids = self.move_id.company_id.account_purchase_tax_id
else:
# Miscellaneous operation.
tax_ids = self.account_id.tax_ids
if self.company_id and tax_ids:
tax_ids = tax_ids.filtered(lambda tax: tax.company_id == self.company_id)
return tax_ids
def _get_computed_uom(self):
self.ensure_one()
if self.product_id:
return self.product_id.uom_id
return False
def _get_price_total_and_subtotal(self, price_unit=None, quantity=None, discount=None, currency=None, product=None, partner=None, taxes=None, move_type=None):
self.ensure_one()
return self._get_price_total_and_subtotal_model(
price_unit=price_unit or self.price_unit,
quantity=quantity or self.quantity,
discount=discount or self.discount,
currency=currency or self.currency_id,
product=product or self.product_id,
partner=partner or self.partner_id,
taxes=taxes or self.tax_ids,
move_type=move_type or self.move_id.type,
)
@api.model
def _get_price_total_and_subtotal_model(self, price_unit, quantity, discount, currency, product, partner, taxes, move_type):
''' This method is used to compute 'price_total' & 'price_subtotal'.
:param price_unit: The current price unit.
:param quantity: The current quantity.
:param discount: The current discount.
:param currency: The line's currency.
:param product: The line's product.
:param partner: The line's partner.
:param taxes: The applied taxes.
:param move_type: The type of the move.
:return: A dictionary containing 'price_subtotal' & 'price_total'.
'''
res = {}
# Compute 'price_subtotal'.
price_unit_wo_discount = price_unit * (1 - (discount / 100.0))
subtotal = quantity * price_unit_wo_discount
# Compute 'price_total'.
if taxes:
taxes_res = taxes._origin.compute_all(price_unit_wo_discount,
quantity=quantity, currency=currency, product=product, partner=partner, is_refund=move_type in ('out_refund', 'in_refund'))
res['price_subtotal'] = taxes_res['total_excluded']
res['price_total'] = taxes_res['total_included']
else:
res['price_total'] = res['price_subtotal'] = subtotal
# In case of multi-currency, round before it is used for computing debit/credit
if currency:
res = {k: currency.round(v) for k, v in res.items()}
return res
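# Worked example (hypothetical values): price_unit=100.0, quantity=2, discount=10.0
# and a single 15% price-excluded tax give price_unit_wo_discount = 90.0,
# price_subtotal = 180.0 and price_total = 207.0 (180.0 + 15% of 180.0).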
def _get_fields_onchange_subtotal(self, price_subtotal=None, move_type=None, currency=None, company=None, date=None):
self.ensure_one()
return self._get_fields_onchange_subtotal_model(
price_subtotal=price_subtotal or self.price_subtotal,
move_type=move_type or self.move_id.type,
currency=currency or self.currency_id,
company=company or self.move_id.company_id,
date=date or self.move_id.date,
)
@api.model
def _get_fields_onchange_subtotal_model(self, price_subtotal, move_type, currency, company, date):
''' This method is used to recompute the values of 'amount_currency', 'debit', 'credit' due to a change made
in some business fields (affecting the 'price_subtotal' field).
:param price_subtotal: The untaxed amount.
:param move_type: The type of the move.
:param currency: The line's currency.
:param company: The move's company.
:param date: The move's date.
:return: A dictionary containing 'debit', 'credit', 'amount_currency'.
'''
if move_type in self.move_id.get_outbound_types():
sign = 1
elif move_type in self.move_id.get_inbound_types():
sign = -1
else:
sign = 1
price_subtotal *= sign
if currency and currency != company.currency_id:
# Multi-currencies.
balance = currency._convert(price_subtotal, company.currency_id, company, date)
return {
'amount_currency': price_subtotal,
'debit': balance > 0.0 and balance or 0.0,
'credit': balance < 0.0 and -balance or 0.0,
}
else:
# Single-currency.
return {
'amount_currency': 0.0,
'debit': price_subtotal > 0.0 and price_subtotal or 0.0,
'credit': price_subtotal < 0.0 and -price_subtotal or 0.0,
}
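# Worked example (hypothetical values, assuming 'out_invoice' belongs to the inbound
# types returned by get_inbound_types()): a price_subtotal of 100.0 on a customer
# invoice is negated to -100.0, i.e. {'debit': 0.0, 'credit': 100.0}; in a
# multi-currency setup the credit would instead hold the converted company-currency
# amount while 'amount_currency' keeps -100.0.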
def _get_fields_onchange_balance(self, quantity=None, discount=None, balance=None, move_type=None, currency=None, taxes=None, price_subtotal=None):
self.ensure_one()
return self._get_fields_onchange_balance_model(
quantity=quantity or self.quantity,
discount=discount or self.discount,
balance=balance or self.balance,
move_type=move_type or self.move_id.type,
currency=currency or self.currency_id or self.move_id.currency_id,
taxes=taxes or self.tax_ids,
price_subtotal=price_subtotal or self.price_subtotal,
)
@api.model
def _get_fields_onchange_balance_model(self, quantity, discount, balance, move_type, currency, taxes, price_subtotal):
''' This method is used to recompute the values of 'quantity', 'discount', 'price_unit' due to a change made
in some accounting fields such as 'balance'.
This method is a bit complex as we need to handle some special cases.
For example, setting a positive balance with a 100% discount.
:param quantity: The current quantity.
:param discount: The current discount.
:param balance: The new balance.
:param move_type: The type of the move.
:param currency: The currency.
:param taxes: The applied taxes.
:param price_subtotal: The price_subtotal.
:return: A dictionary containing 'quantity', 'discount', 'price_unit'.
'''
if move_type in self.move_id.get_outbound_types():
sign = 1
elif move_type in self.move_id.get_inbound_types():
sign = -1
else:
sign = 1
balance *= sign
# Avoid rounding issue when dealing with price included taxes. For example, when the price_unit is 2300.0 and
# a 5.5% price included tax is applied on it, a balance of 2300.0 / 1.055 = 2180.094 ~ 2180.09 is computed.
# However, when triggering the inverse, 2180.09 + (2180.09 * 0.055) = 2180.09 + 119.90 = 2299.99 is computed.
# To avoid that, set the price_subtotal at the balance if the difference between them looks like a rounding
# issue.
if currency.is_zero(balance - price_subtotal):
return {}
taxes = taxes.flatten_taxes_hierarchy()
if taxes and any(tax.price_include for tax in taxes):
# Inverse taxes. E.g:
#
# Price Unit | Taxes | Originator Tax |Price Subtotal | Price Total
# -----------------------------------------------------------------------------------
# 110 | 10% incl, 5% | | 100 | 115
# 10 | | 10% incl | 10 | 10
# 5 | | 5% | 5 | 5
#
# When setting the balance to -200, the expected result is:
#
# Price Unit | Taxes | Originator Tax |Price Subtotal | Price Total
# -----------------------------------------------------------------------------------
# 220 | 10% incl, 5% | | 200 | 230
# 20 | | 10% incl | 20 | 20
# 10 | | 5% | 10 | 10
taxes_res = taxes._origin.compute_all(balance, currency=currency, handle_price_include=False)
for tax_res in taxes_res['taxes']:
tax = self.env['account.tax'].browse(tax_res['id'])
if tax.price_include:
balance += tax_res['amount']
discount_factor = 1 - (discount / 100.0)
if balance and discount_factor:
# discount != 100%
vals = {
'quantity': quantity or 1.0,
'price_unit': balance / discount_factor / (quantity or 1.0),
}
elif balance and not discount_factor:
# discount == 100%
vals = {
'quantity': quantity or 1.0,
'discount': 0.0,
'price_unit': balance / (quantity or 1.0),
}
elif not discount_factor:
# balance of line is 0, but discount == 100% so we display the normal unit_price
vals = {}
else:
# balance is 0, so unit price is 0 as well
vals = {'price_unit': 0.0}
return vals
# -------------------------------------------------------------------------
# ONCHANGE METHODS
# -------------------------------------------------------------------------
@api.onchange('amount_currency', 'currency_id', 'debit', 'credit', 'tax_ids', 'account_id')
def _onchange_mark_recompute_taxes(self):
''' Recompute the dynamic onchange based on taxes.
If the edited line is a tax line, don't recompute anything as the user must be able to
set a custom value.
'''
for line in self:
if not line.tax_repartition_line_id:
line.recompute_tax_line = True
@api.onchange('analytic_account_id', 'analytic_tag_ids')
def _onchange_mark_recompute_taxes_analytic(self):
''' Trigger the tax recomputation only when the line carries taxes using analytic accounting.
'''
for line in self:
if not line.tax_repartition_line_id and any(tax.analytic for tax in line.tax_ids):
line.recompute_tax_line = True
@api.onchange('product_id')
def _onchange_product_id(self):
for line in self:
if not line.product_id or line.display_type in ('line_section', 'line_note'):
continue
line.name = line._get_computed_name()
line.account_id = line._get_computed_account()
line.tax_ids = line._get_computed_taxes()
line.product_uom_id = line._get_computed_uom()
line.price_unit = line._get_computed_price_unit()
# Manage the fiscal position after that and adapt the price_unit.
# E.g. mapping a price-included-tax to a price-excluded-tax must
# remove the tax amount from the price_unit.
# However, mapping a price-included tax to another price-included tax must preserve the balance but
# adapt the price_unit to the new tax.
# E.g. mapping a 10% price-included tax to a 20% price-included tax for a price_unit of 110 should preserve
# 100 as balance but set 120 as price_unit.
if line.tax_ids and line.move_id.fiscal_position_id:
line.price_unit = line._get_price_total_and_subtotal()['price_subtotal']
line.tax_ids = line.move_id.fiscal_position_id.map_tax(line.tax_ids._origin, partner=line.move_id.partner_id)
accounting_vals = line._get_fields_onchange_subtotal(price_subtotal=line.price_unit, currency=line.move_id.company_currency_id)
balance = accounting_vals['debit'] - accounting_vals['credit']
line.price_unit = line._get_fields_onchange_balance(balance=balance).get('price_unit', line.price_unit)
# Convert the unit price to the invoice's currency.
company = line.move_id.company_id
line.price_unit = company.currency_id._convert(line.price_unit, line.move_id.currency_id, company, line.move_id.date)
if len(self) == 1:
return {'domain': {'product_uom_id': [('category_id', '=', self.product_uom_id.category_id.id)]}}
@api.onchange('product_uom_id')
def _onchange_uom_id(self):
''' Recompute the 'price_unit' depending on the unit of measure. '''
price_unit = self._get_computed_price_unit()
# See '_onchange_product_id' for details.
taxes = self._get_computed_taxes()
if taxes and self.move_id.fiscal_position_id:
price_subtotal = self._get_price_total_and_subtotal(price_unit=price_unit, taxes=taxes)['price_subtotal']
accounting_vals = self._get_fields_onchange_subtotal(price_subtotal=price_subtotal, currency=self.move_id.company_currency_id)
balance = accounting_vals['debit'] - accounting_vals['credit']
price_unit = self._get_fields_onchange_balance(balance=balance).get('price_unit', price_unit)
# Convert the unit price to the invoice's currency.
company = self.move_id.company_id
self.price_unit = company.currency_id._convert(price_unit, self.move_id.currency_id, company, self.move_id.date)
@api.onchange('account_id')
def _onchange_account_id(self):
''' Recompute 'tax_ids' based on 'account_id'.
/!\ Don't remove existing taxes if there is no explicit taxes set on the account.
'''
if not self.display_type and (self.account_id.tax_ids or not self.tax_ids):
taxes = self._get_computed_taxes()
if taxes and self.move_id.fiscal_position_id:
taxes = self.move_id.fiscal_position_id.map_tax(taxes, partner=self.partner_id)
self.tax_ids = taxes
def _onchange_balance(self):
for line in self:
if line.currency_id:
continue
if not line.move_id.is_invoice(include_receipts=True):
continue
line.update(line._get_fields_onchange_balance())
line.update(line._get_price_total_and_subtotal())
@api.onchange('debit')
def _onchange_debit(self):
if self.debit:
self.credit = 0.0
self._onchange_balance()
@api.onchange('credit')
def _onchange_credit(self):
if self.credit:
self.debit = 0.0
self._onchange_balance()
@api.onchange('amount_currency')
def _onchange_amount_currency(self):
for line in self:
if not line.currency_id:
continue
if not line.move_id.is_invoice(include_receipts=True):
line._recompute_debit_credit_from_amount_currency()
continue
line.update(line._get_fields_onchange_balance(
balance=line.amount_currency,
))
line.update(line._get_price_total_and_subtotal())
@api.onchange('quantity', 'discount', 'price_unit', 'tax_ids')
def _onchange_price_subtotal(self):
for line in self:
if not line.move_id.is_invoice(include_receipts=True):
continue
line.update(line._get_price_total_and_subtotal())
line.update(line._get_fields_onchange_subtotal())
@api.onchange('currency_id')
def _onchange_currency(self):
for line in self:
if line.move_id.is_invoice(include_receipts=True):
line._onchange_price_subtotal()
elif not line.move_id.reversed_entry_id:
line._recompute_debit_credit_from_amount_currency()
def _recompute_debit_credit_from_amount_currency(self):
for line in self:
# Recompute the debit/credit based on amount_currency/currency_id and date.
company_currency = line.account_id.company_id.currency_id
balance = line.amount_currency
if line.currency_id and company_currency and line.currency_id != company_currency:
balance = line.currency_id._convert(balance, company_currency, line.account_id.company_id, line.move_id.date or fields.Date.today())
line.debit = balance > 0 and balance or 0.0
line.credit = balance < 0 and -balance or 0.0
# -------------------------------------------------------------------------
# COMPUTE METHODS
# -------------------------------------------------------------------------
@api.depends('currency_id')
def _compute_always_set_currency_id(self):
for line in self:
line.always_set_currency_id = line.currency_id or line.company_currency_id
@api.depends('debit', 'credit')
def _compute_balance(self):
for line in self:
line.balance = line.debit - line.credit
@api.depends('debit', 'credit', 'account_id', 'amount_currency', 'currency_id', 'matched_debit_ids', 'matched_credit_ids', 'matched_debit_ids.amount', 'matched_credit_ids.amount', 'move_id.state', 'company_id')
def _amount_residual(self):
""" Computes the residual amount of a move line from a reconcilable account in the company currency and the line's currency.
This amount will be 0 for fully reconciled lines or lines from a non-reconcilable account, the original line amount
for unreconciled lines, and something in-between for partially reconciled lines.
"""
for line in self:
if not line.account_id.reconcile and line.account_id.internal_type != 'liquidity':
line.reconciled = False
line.amount_residual = 0
line.amount_residual_currency = 0
continue
#amounts in the partial reconcile table aren't signed, so we need to use abs()
amount = abs(line.debit - line.credit)
amount_residual_currency = abs(line.amount_currency) or 0.0
sign = 1 if (line.debit - line.credit) > 0 else -1
if not line.debit and not line.credit and line.amount_currency and line.currency_id:
#residual for exchange rate entries
sign = 1 if float_compare(line.amount_currency, 0, precision_rounding=line.currency_id.rounding) == 1 else -1
for partial_line in (line.matched_debit_ids + line.matched_credit_ids):
# If line is a credit (sign = -1) we:
# - subtract matched_debit_ids (partial_line.credit_move_id == line)
# - add matched_credit_ids (partial_line.credit_move_id != line)
# If line is a debit (sign = 1), do the opposite.
sign_partial_line = sign if partial_line.credit_move_id == line else (-1 * sign)
amount += sign_partial_line * partial_line.amount
#getting the date of the matched item to compute the amount_residual in currency
if line.currency_id and line.amount_currency:
if partial_line.currency_id and partial_line.currency_id == line.currency_id:
amount_residual_currency += sign_partial_line * partial_line.amount_currency
else:
if line.balance and line.amount_currency:
rate = line.amount_currency / line.balance
else:
date = partial_line.credit_move_id.date if partial_line.debit_move_id == line else partial_line.debit_move_id.date
rate = line.currency_id.with_context(date=date).rate
amount_residual_currency += sign_partial_line * line.currency_id.round(partial_line.amount * rate)
#computing the `reconciled` field.
reconciled = False
digits_rounding_precision = line.move_id.company_id.currency_id.rounding
if float_is_zero(amount, precision_rounding=digits_rounding_precision):
if line.currency_id and line.amount_currency:
if float_is_zero(amount_residual_currency, precision_rounding=line.currency_id.rounding):
reconciled = True
else:
reconciled = True
line.reconciled = reconciled
line.amount_residual = line.move_id.company_id.currency_id.round(amount * sign) if line.move_id.company_id else amount * sign
line.amount_residual_currency = line.currency_id and line.currency_id.round(amount_residual_currency * sign) or 0.0
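# Worked example (hypothetical figures): a receivable line with debit 100.0 matched
# by a single partial reconciliation of 40.0 ends up with amount_residual = 60.0 and
# reconciled = False; once a second partial of 60.0 is registered, the residual
# becomes 0.0 and reconciled is set to True.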
@api.depends('tax_repartition_line_id.invoice_tax_id', 'tax_repartition_line_id.refund_tax_id')
def _compute_tax_line_id(self):
""" tax_line_id is computed as the tax linked to the repartition line creating
the move.
"""
for record in self:
rep_line = record.tax_repartition_line_id
# A constraint on account.tax.repartition.line ensures both those fields are mutually exclusive
record.tax_line_id = rep_line.invoice_tax_id or rep_line.refund_tax_id
@api.depends('tag_ids', 'debit', 'credit')
def _compute_tax_audit(self):
separator = ' '
for record in self:
currency = record.company_id.currency_id
audit_str = ''
for tag in record.tag_ids:
caba_origin_inv_type = record.move_id.type
caba_origin_inv_journal_type = record.journal_id.type
if record.move_id.tax_cash_basis_rec_id:
# Cash basis entries are always treated as misc operations, applying the tag sign directly to the balance
type_multiplicator = 1
else:
type_multiplicator = (record.journal_id.type == 'sale' and -1 or 1) * (record.move_id.type in ('in_refund', 'out_refund') and -1 or 1)
tag_amount = type_multiplicator * (tag.tax_negate and -1 or 1) * record.balance
if tag.tax_report_line_ids:
#Then, the tag comes from a report line, and hence has a + or - sign (also in its name)
for report_line in tag.tax_report_line_ids:
audit_str += separator if audit_str else ''
audit_str += report_line.tag_name + ': ' + formatLang(self.env, tag_amount, currency_obj=currency)
else:
# Then, it's a financial tag (sign is always +, and never shown in tag name)
audit_str += separator if audit_str else ''
audit_str += tag.name + ': ' + formatLang(self.env, tag_amount, currency_obj=currency)
record.tax_audit = audit_str
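# Illustrative output (hypothetical tag): a sale journal line with a credit of 100.0
# carrying a tag linked to a report line whose tag_name is '05' would produce a
# tax_audit string such as "05: $ 100.00" (the exact rendering depends on the
# company currency and formatLang).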
# -------------------------------------------------------------------------
# CONSTRAINT METHODS
# -------------------------------------------------------------------------
@api.constrains('currency_id', 'account_id')
def _check_account_currency(self):
for line in self:
account_currency = line.account_id.currency_id
if account_currency and account_currency != line.company_currency_id and account_currency != line.currency_id:
raise UserError(_('The account selected on your journal entry forces you to provide a secondary currency. You should remove the secondary currency on the account.'))
@api.constrains('account_id')
def _check_constrains_account_id(self):
for line in self.filtered(lambda x: x.display_type not in ('line_section', 'line_note')):
account = line.account_id
journal = line.journal_id
if account.deprecated:
raise UserError(_('The account %s (%s) is deprecated.') % (account.name, account.code))
control_type_failed = journal.type_control_ids and account.user_type_id not in journal.type_control_ids
control_account_failed = journal.account_control_ids and account not in journal.account_control_ids
if control_type_failed or control_account_failed:
raise UserError(_('You cannot use this general account in this journal, check the tab \'Entry Controls\' on the related journal.'))
@api.constrains('account_id', 'tax_ids', 'tax_line_id', 'reconciled')
def _check_off_balance(self):
for line in self:
if line.account_id.internal_group == 'off_balance':
if any(a.internal_group != line.account_id.internal_group for a in line.move_id.line_ids.account_id):
raise UserError(_('If you want to use "Off-Balance Sheet" accounts, all the accounts of the journal entry must be of this type'))
if line.tax_ids or line.tax_line_id:
raise UserError(_('You cannot use taxes on lines with an Off-Balance account'))
if line.reconciled:
raise UserError(_('Lines from "Off-Balance Sheet" accounts cannot be reconciled'))
def _affect_tax_report(self):
self.ensure_one()
return self.tax_ids or self.tax_line_id or self.tag_ids.filtered(lambda x: x.applicability == "taxes")
def _check_tax_lock_date(self):
for line in self.filtered(lambda l: l.move_id.state == 'posted'):
move = line.move_id
if move.company_id.tax_lock_date and move.date <= move.company_id.tax_lock_date and line._affect_tax_report():
raise UserError(_("The operation is refused as it would impact an already issued tax statement. "
"Please change the journal entry date or the tax lock date set in the settings (%s) to proceed.")
% format_date(self.env, move.company_id.tax_lock_date))
def _check_reconciliation(self):
for line in self:
if line.matched_debit_ids or line.matched_credit_ids:
raise UserError(_("You cannot do this modification on a reconciled journal entry. "
"You can just change some non legal fields or you must unreconcile first.\n"
"Journal Entry (id): %s (%s)") % (line.move_id.name, line.move_id.id))
# -------------------------------------------------------------------------
# LOW-LEVEL METHODS
# -------------------------------------------------------------------------
def init(self):
""" change index on partner_id to a multi-column index on (partner_id, ref), the new index will behave in the
same way when we search on partner_id, with the addition of being optimal when having a query that will
search on partner_id and ref at the same time (which is the case when we open the bank reconciliation widget)
"""
cr = self._cr
cr.execute('DROP INDEX IF EXISTS account_move_line_partner_id_index')
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('account_move_line_partner_id_ref_idx',))
if not cr.fetchone():
cr.execute('CREATE INDEX account_move_line_partner_id_ref_idx ON account_move_line (partner_id, ref)')
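# Note on the composite index: PostgreSQL can still use the leftmost column of
# (partner_id, ref) for queries filtering on partner_id alone, so dropping the
# single-column index does not hurt those lookups while speeding up the combined
# partner_id + ref search used by the reconciliation widget.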
@api.model_create_multi
def create(self, vals_list):
# OVERRIDE
ACCOUNTING_FIELDS = ('debit', 'credit', 'amount_currency')
BUSINESS_FIELDS = ('price_unit', 'quantity', 'discount', 'tax_ids')
for vals in vals_list:
move = self.env['account.move'].browse(vals['move_id'])
vals.setdefault('company_currency_id', move.company_id.currency_id.id) # important to bypass the ORM limitation where monetary fields are not rounded; more info in the commit message
if move.is_invoice(include_receipts=True):
currency = move.currency_id
partner = self.env['res.partner'].browse(vals.get('partner_id'))
taxes = self.resolve_2many_commands('tax_ids', vals.get('tax_ids', []), fields=['id'])
tax_ids = set(tax['id'] for tax in taxes)
taxes = self.env['account.tax'].browse(tax_ids)
# Ensure consistency between accounting & business fields.
# As we can't express such synchronization as computed fields without cycling, we need to do it both
# in onchange and in create/write. So, if something changed in accounting [resp. business] fields,
# business [resp. accounting] fields are recomputed.
if any(vals.get(field) for field in ACCOUNTING_FIELDS):
if vals.get('currency_id'):
balance = vals.get('amount_currency', 0.0)
else:
balance = vals.get('debit', 0.0) - vals.get('credit', 0.0)
price_subtotal = self._get_price_total_and_subtotal_model(
vals.get('price_unit', 0.0),
vals.get('quantity', 0.0),
vals.get('discount', 0.0),
currency,
self.env['product.product'].browse(vals.get('product_id')),
partner,
taxes,
move.type,
).get('price_subtotal', 0.0)
vals.update(self._get_fields_onchange_balance_model(
vals.get('quantity', 0.0),
vals.get('discount', 0.0),
balance,
move.type,
currency,
taxes,
price_subtotal
))
vals.update(self._get_price_total_and_subtotal_model(
vals.get('price_unit', 0.0),
vals.get('quantity', 0.0),
vals.get('discount', 0.0),
currency,
self.env['product.product'].browse(vals.get('product_id')),
partner,
taxes,
move.type,
))
elif any(vals.get(field) for field in BUSINESS_FIELDS):
vals.update(self._get_price_total_and_subtotal_model(
vals.get('price_unit', 0.0),
vals.get('quantity', 0.0),
vals.get('discount', 0.0),
currency,
self.env['product.product'].browse(vals.get('product_id')),
partner,
taxes,
move.type,
))
vals.update(self._get_fields_onchange_subtotal_model(
vals['price_subtotal'],
move.type,
currency,
move.company_id,
move.date,
))
# Ensure consistency between taxes & tax exigibility fields.
if 'tax_exigible' in vals:
continue
if vals.get('tax_repartition_line_id'):
repartition_line = self.env['account.tax.repartition.line'].browse(vals['tax_repartition_line_id'])
tax = repartition_line.invoice_tax_id or repartition_line.refund_tax_id
vals['tax_exigible'] = tax.tax_exigibility == 'on_invoice'
elif vals.get('tax_ids'):
tax_ids = [v['id'] for v in self.resolve_2many_commands('tax_ids', vals['tax_ids'], fields=['id'])]
taxes = self.env['account.tax'].browse(tax_ids).flatten_taxes_hierarchy()
vals['tax_exigible'] = not any(tax.tax_exigibility == 'on_payment' for tax in taxes)
lines = super(AccountMoveLine, self).create(vals_list)
moves = lines.mapped('move_id')
if self._context.get('check_move_validity', True):
moves._check_balanced()
moves._check_fiscalyear_lock_date()
lines._check_tax_lock_date()
return lines
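    # Hedged usage sketch (assumed, not from the original source): creating an invoice
    # line with business fields only lets the accounting fields be derived by the
    # synchronization above; all identifiers below are placeholders.
    #   self.env['account.move.line'].create([{
    #       'move_id': invoice.id,          # an account.move of an invoice type
    #       'product_id': product.id,
    #       'quantity': 2.0,
    #       'price_unit': 50.0,             # debit/credit/amount_currency recomputed
    #   }])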
def write(self, vals):
# OVERRIDE
def field_will_change(line, field_name):
if field_name not in vals:
return False
field = line._fields[field_name]
if field.type == 'many2one':
return line[field_name].id != vals[field_name]
if field.type in ('one2many', 'many2many'):
current_ids = set(line[field_name].ids)
after_write_ids = set(r['id'] for r in line.resolve_2many_commands(field_name, vals[field_name], fields=['id']))
return current_ids != after_write_ids
if field.type == 'monetary' and line[field.currency_field]:
return not line[field.currency_field].is_zero(line[field_name] - vals[field_name])
return line[field_name] != vals[field_name]
ACCOUNTING_FIELDS = ('debit', 'credit', 'amount_currency')
BUSINESS_FIELDS = ('price_unit', 'quantity', 'discount', 'tax_ids')
PROTECTED_FIELDS_TAX_LOCK_DATE = ['debit', 'credit', 'tax_line_id', 'tax_ids', 'tag_ids']
PROTECTED_FIELDS_LOCK_DATE = PROTECTED_FIELDS_TAX_LOCK_DATE + ['account_id', 'journal_id', 'amount_currency', 'currency_id', 'partner_id']
PROTECTED_FIELDS_RECONCILIATION = ('account_id', 'date', 'debit', 'credit', 'amount_currency', 'currency_id')
account_to_write = self.env['account.account'].browse(vals['account_id']) if 'account_id' in vals else None
# Check writing a deprecated account.
if account_to_write and account_to_write.deprecated:
raise UserError(_('You cannot use a deprecated account.'))
# when making a reconciliation on an existing liquidity journal item, mark the payment as reconciled
for line in self:
if line.parent_state == 'posted':
if line.move_id.restrict_mode_hash_table and set(vals).intersection(INTEGRITY_HASH_LINE_FIELDS):
raise UserError(_("You cannot edit the following fields due to restrict mode being activated on the journal: %s.") % ', '.join(INTEGRITY_HASH_LINE_FIELDS))
if any(key in vals for key in ('tax_ids', 'tax_line_ids')):
raise UserError(_('You cannot modify the taxes related to a posted journal item, you should reset the journal entry to draft to do so.'))
if 'statement_line_id' in vals and line.payment_id:
# In case of an internal transfer, there are 2 liquidity move lines to match with a bank statement
if all(line.statement_id for line in line.payment_id.move_line_ids.filtered(
lambda r: r.id != line.id and r.account_id.internal_type == 'liquidity')):
line.payment_id.state = 'reconciled'
# Check the lock date.
if any(self.env['account.move']._field_will_change(line, vals, field_name) for field_name in PROTECTED_FIELDS_LOCK_DATE):
line.move_id._check_fiscalyear_lock_date()
# Check the tax lock date.
if any(self.env['account.move']._field_will_change(line, vals, field_name) for field_name in PROTECTED_FIELDS_TAX_LOCK_DATE):
line._check_tax_lock_date()
# Check the reconciliation.
if any(self.env['account.move']._field_will_change(line, vals, field_name) for field_name in PROTECTED_FIELDS_RECONCILIATION):
line._check_reconciliation()
# Check switching receivable / payable accounts.
if account_to_write:
account_type = line.account_id.user_type_id.type
if line.move_id.is_sale_document(include_receipts=True):
if (account_type == 'receivable' and account_to_write.user_type_id.type != account_type) \
or (account_type != 'receivable' and account_to_write.user_type_id.type == 'receivable'):
raise UserError(_("You can only set an account having the receivable type on payment terms lines for customer invoice."))
if line.move_id.is_purchase_document(include_receipts=True):
if (account_type == 'payable' and account_to_write.user_type_id.type != account_type) \
or (account_type != 'payable' and account_to_write.user_type_id.type == 'payable'):
raise UserError(_("You can only set an account having the payable type on payment terms lines for vendor bill."))
result = True
for line in self:
cleaned_vals = line.move_id._cleanup_write_orm_values(line, vals)
if not cleaned_vals:
continue
result |= super(AccountMoveLine, line).write(cleaned_vals)
if not line.move_id.is_invoice(include_receipts=True):
continue
# Ensure consistency between accounting & business fields.
# As we can't express such synchronization as computed fields without cycling, we need to do it both
# in onchange and in create/write. So, if something changed in accounting [resp. business] fields,
# business [resp. accounting] fields are recomputed.
if any(field in cleaned_vals for field in ACCOUNTING_FIELDS):
balance = line.currency_id and line.amount_currency or line.debit - line.credit
price_subtotal = line._get_price_total_and_subtotal().get('price_subtotal', 0.0)
to_write = line._get_fields_onchange_balance(
balance=balance,
price_subtotal=price_subtotal,
)
to_write.update(line._get_price_total_and_subtotal(
price_unit=to_write.get('price_unit', line.price_unit),
quantity=to_write.get('quantity', line.quantity),
discount=to_write.get('discount', line.discount),
))
result |= super(AccountMoveLine, line).write(to_write)
elif any(field in cleaned_vals for field in BUSINESS_FIELDS):
to_write = line._get_price_total_and_subtotal()
to_write.update(line._get_fields_onchange_subtotal(
price_subtotal=to_write['price_subtotal'],
))
result |= super(AccountMoveLine, line).write(to_write)
# Check total_debit == total_credit in the related moves.
if self._context.get('check_move_validity', True):
self.mapped('move_id')._check_balanced()
return result
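    # Hedged note (assumption about typical callers): code that needs to write
    # temporarily unbalanced amounts on several lines usually disables the balance
    # check via the context and triggers it again afterwards, e.g.:
    #   line.with_context(check_move_validity=False).write({'debit': 10.0, 'credit': 0.0})
    #   ...
    #   line.move_id._check_balanced()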
def unlink(self):
moves = self.mapped('move_id')
# Check the lines are not reconciled (partially or not).
self._check_reconciliation()
# Check the lock date.
moves._check_fiscalyear_lock_date()
# Check the tax lock date.
self._check_tax_lock_date()
res = super(AccountMoveLine, self).unlink()
# Check total_debit == total_credit in the related moves.
if self._context.get('check_move_validity', True):
moves._check_balanced()
return res
@api.model
def default_get(self, default_fields):
# OVERRIDE
values = super(AccountMoveLine, self).default_get(default_fields)
if 'account_id' in default_fields \
and (self._context.get('journal_id') or self._context.get('default_journal_id')) \
and not values.get('account_id') \
and self._context.get('default_type') in self.move_id.get_inbound_types():
# Fill missing 'account_id'.
journal = self.env['account.journal'].browse(self._context.get('default_journal_id') or self._context['journal_id'])
values['account_id'] = journal.default_credit_account_id.id
elif 'account_id' in default_fields \
and (self._context.get('journal_id') or self._context.get('default_journal_id')) \
and not values.get('account_id') \
and self._context.get('default_type') in self.move_id.get_outbound_types():
# Fill missing 'account_id'.
journal = self.env['account.journal'].browse(self._context.get('default_journal_id') or self._context['journal_id'])
values['account_id'] = journal.default_debit_account_id.id
elif self._context.get('line_ids') and any(field_name in default_fields for field_name in ('debit', 'credit', 'account_id', 'partner_id')):
move = self.env['account.move'].new({'line_ids': self._context['line_ids']})
# Suggest default value for debit / credit to balance the journal entry.
balance = sum(line['debit'] - line['credit'] for line in move.line_ids)
# if we are here, line_ids is in context, so journal_id should also be.
journal = self.env['account.journal'].browse(self._context.get('default_journal_id') or self._context['journal_id'])
currency = journal.exists() and journal.company_id.currency_id
if currency:
balance = currency.round(balance)
if balance < 0.0:
values.update({'debit': -balance})
if balance > 0.0:
values.update({'credit': balance})
# Suggest default value for 'partner_id'.
if 'partner_id' in default_fields and not values.get('partner_id'):
if len(move.line_ids[-2:]) == 2 and move.line_ids[-1].partner_id == move.line_ids[-2].partner_id != False:
values['partner_id'] = move.line_ids[-2:].mapped('partner_id').id
# Suggest default value for 'account_id'.
if 'account_id' in default_fields and not values.get('account_id'):
if len(move.line_ids[-2:]) == 2 and move.line_ids[-1].account_id == move.line_ids[-2].account_id != False:
values['account_id'] = move.line_ids[-2:].mapped('account_id').id
if values.get('display_type'):
values.pop('account_id', None)
return values
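    # Worked example (illustrative): if the context 'line_ids' already holds lines
    # summing to debit 100.0 / credit 60.0, the next line encoded in the form is
    # suggested with 'credit': 40.0 so that the journal entry stays balanced.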
@api.depends('ref', 'move_id')
def name_get(self):
result = []
for line in self:
name = line.move_id.name or ''
if line.ref:
name += " (%s)" % line.ref
name += (line.name or line.product_id.display_name) and (' ' + (line.name or line.product_id.display_name)) or ''
result.append((line.id, name))
return result
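    # Illustrative result (values are examples only): a line of move "INV/2020/0001"
    # with ref "SO0042" and label "Large Cabinet" is displayed as
    # "INV/2020/0001 (SO0042) Large Cabinet".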
# -------------------------------------------------------------------------
# RECONCILIATION
# -------------------------------------------------------------------------
def check_full_reconcile(self):
"""
        This method checks whether a move is totally reconciled and whether exchange rate entries need to be created for it.
        In case exchange rate entries need to be created, one will be created per currency present.
        In case of full reconciliation, all moves belonging to the reconciliation will belong to the same account_full_reconcile object.
"""
# Get first all aml involved
todo = self.env['account.partial.reconcile'].search_read(['|', ('debit_move_id', 'in', self.ids), ('credit_move_id', 'in', self.ids)], ['debit_move_id', 'credit_move_id'])
amls = set(self.ids)
seen = set()
while todo:
aml_ids = [rec['debit_move_id'][0] for rec in todo if rec['debit_move_id']] + [rec['credit_move_id'][0] for rec in todo if rec['credit_move_id']]
amls |= set(aml_ids)
seen |= set([rec['id'] for rec in todo])
todo = self.env['account.partial.reconcile'].search_read(['&', '|', ('credit_move_id', 'in', aml_ids), ('debit_move_id', 'in', aml_ids), '!', ('id', 'in', list(seen))], ['debit_move_id', 'credit_move_id'])
partial_rec_ids = list(seen)
if not amls:
return
else:
amls = self.browse(list(amls))
        # If we have multiple currencies, we can only base ourselves on debit-credit to see if the move is fully reconciled
currency = set([a.currency_id for a in amls if a.currency_id.id != False])
multiple_currency = False
if len(currency) != 1:
currency = False
multiple_currency = True
else:
currency = list(currency)[0]
# Get the sum(debit, credit, amount_currency) of all amls involved
total_debit = 0
total_credit = 0
total_amount_currency = 0
maxdate = date.min
to_balance = {}
cash_basis_partial = self.env['account.partial.reconcile']
for aml in amls:
cash_basis_partial |= aml.move_id.tax_cash_basis_rec_id
total_debit += aml.debit
total_credit += aml.credit
maxdate = max(aml.date, maxdate)
total_amount_currency += aml.amount_currency
# Convert in currency if we only have one currency and no amount_currency
if not aml.amount_currency and currency:
multiple_currency = True
total_amount_currency += aml.company_id.currency_id._convert(aml.balance, currency, aml.company_id, aml.date)
# If we still have residual value, it means that this move might need to be balanced using an exchange rate entry
if aml.amount_residual != 0 or aml.amount_residual_currency != 0:
if not to_balance.get(aml.currency_id):
to_balance[aml.currency_id] = [self.env['account.move.line'], 0]
to_balance[aml.currency_id][0] += aml
to_balance[aml.currency_id][1] += aml.amount_residual != 0 and aml.amount_residual or aml.amount_residual_currency
# Check if reconciliation is total
        # To check if the reconciliation is total, we have 4 different use cases:
        # 1) There are multiple currencies different from the company currency, in that case we check using debit-credit
        # 2) We only have one currency which is different from the company currency, in that case we check using amount_currency
        # 3) We have only one currency and some entries that don't have a secondary currency, in that case we check debit-credit
        #    or amount_currency.
# 4) Cash basis full reconciliation
# - either none of the moves are cash basis reconciled, and we proceed
# - or some moves are cash basis reconciled and we make sure they are all fully reconciled
digits_rounding_precision = amls[0].company_id.currency_id.rounding
if (
(
not cash_basis_partial or (cash_basis_partial and all([p >= 1.0 for p in amls._get_matched_percentage().values()]))
) and
(
currency and float_is_zero(total_amount_currency, precision_rounding=currency.rounding) or
multiple_currency and float_compare(total_debit, total_credit, precision_rounding=digits_rounding_precision) == 0
)
):
exchange_move_id = False
missing_exchange_difference = False
# Eventually create a journal entry to book the difference due to foreign currency's exchange rate that fluctuates
if to_balance and any([not float_is_zero(residual, precision_rounding=digits_rounding_precision) for aml, residual in to_balance.values()]):
if not self.env.context.get('no_exchange_difference'):
exchange_move_vals = self.env['account.full.reconcile']._prepare_exchange_diff_move(
move_date=maxdate, company=amls[0].company_id)
if len(amls.mapped('partner_id')) == 1 and amls[0].partner_id:
exchange_move_vals['partner_id'] = amls[0].partner_id.id
exchange_move = self.env['account.move'].with_context(default_type='entry').create(exchange_move_vals)
part_reconcile = self.env['account.partial.reconcile']
for aml_to_balance, total in to_balance.values():
if total:
rate_diff_amls, rate_diff_partial_rec = part_reconcile.create_exchange_rate_entry(aml_to_balance, exchange_move)
amls += rate_diff_amls
partial_rec_ids += rate_diff_partial_rec.ids
else:
aml_to_balance.reconcile()
exchange_move.post()
exchange_move_id = exchange_move.id
else:
missing_exchange_difference = True
if not missing_exchange_difference:
#mark the reference of the full reconciliation on the exchange rate entries and on the entries
self.env['account.full.reconcile'].create({
'partial_reconcile_ids': [(6, 0, partial_rec_ids)],
'reconciled_line_ids': [(6, 0, amls.ids)],
'exchange_move_id': exchange_move_id,
})
def _reconcile_lines(self, debit_moves, credit_moves, field):
""" This function loops on the 2 recordsets given as parameter as long as it
can find a debit and a credit to reconcile together. It returns the recordset of the
account move lines that were not reconciled during the process.
"""
(debit_moves + credit_moves).read([field])
to_create = []
cash_basis = debit_moves and debit_moves[0].account_id.internal_type in ('receivable', 'payable') or False
cash_basis_percentage_before_rec = {}
dc_vals ={}
while (debit_moves and credit_moves):
debit_move = debit_moves[0]
credit_move = credit_moves[0]
company_currency = debit_move.company_id.currency_id
            # We need those temporary values, otherwise the computation below might be wrong
temp_amount_residual = min(debit_move.amount_residual, -credit_move.amount_residual)
temp_amount_residual_currency = min(debit_move.amount_residual_currency, -credit_move.amount_residual_currency)
dc_vals[(debit_move.id, credit_move.id)] = (debit_move, credit_move, temp_amount_residual_currency)
amount_reconcile = min(debit_move[field], -credit_move[field])
#Remove from recordset the one(s) that will be totally reconciled
# For optimization purpose, the creation of the partial_reconcile are done at the end,
# therefore during the process of reconciling several move lines, there are actually no recompute performed by the orm
# and thus the amount_residual are not recomputed, hence we have to do it manually.
if amount_reconcile == debit_move[field]:
debit_moves -= debit_move
else:
debit_moves[0].amount_residual -= temp_amount_residual
debit_moves[0].amount_residual_currency -= temp_amount_residual_currency
if amount_reconcile == -credit_move[field]:
credit_moves -= credit_move
else:
credit_moves[0].amount_residual += temp_amount_residual
credit_moves[0].amount_residual_currency += temp_amount_residual_currency
#Check for the currency and amount_currency we can set
currency = False
amount_reconcile_currency = 0
if field == 'amount_residual_currency':
currency = credit_move.currency_id.id
amount_reconcile_currency = temp_amount_residual_currency
amount_reconcile = temp_amount_residual
elif bool(debit_move.currency_id) != bool(credit_move.currency_id):
# If only one of debit_move or credit_move has a secondary currency, also record the converted amount
# in that secondary currency in the partial reconciliation. That allows the exchange difference entry
# to be created, in case it is needed. It also allows to compute the amount residual in foreign currency.
currency = debit_move.currency_id or credit_move.currency_id
currency_date = debit_move.currency_id and credit_move.date or debit_move.date
amount_reconcile_currency = company_currency._convert(amount_reconcile, currency, debit_move.company_id, currency_date)
currency = currency.id
if cash_basis:
tmp_set = debit_move | credit_move
cash_basis_percentage_before_rec.update(tmp_set._get_matched_percentage())
to_create.append({
'debit_move_id': debit_move.id,
'credit_move_id': credit_move.id,
'amount': amount_reconcile,
'amount_currency': amount_reconcile_currency,
'currency_id': currency,
})
cash_basis_subjected = []
part_rec = self.env['account.partial.reconcile']
for partial_rec_dict in to_create:
debit_move, credit_move, amount_residual_currency = dc_vals[partial_rec_dict['debit_move_id'], partial_rec_dict['credit_move_id']]
# /!\ NOTE: Exchange rate differences shouldn't create cash basis entries
            # i.e. we don't really receive/give money in a customer/provider fashion
# Since those are not subjected to cash basis computation we process them first
if not amount_residual_currency and debit_move.currency_id and credit_move.currency_id:
part_rec.create(partial_rec_dict)
else:
cash_basis_subjected.append(partial_rec_dict)
for after_rec_dict in cash_basis_subjected:
new_rec = part_rec.create(after_rec_dict)
# if the pair belongs to move being reverted, do not create CABA entry
if cash_basis and not (
new_rec.debit_move_id.move_id == new_rec.credit_move_id.move_id.reversed_entry_id
or
new_rec.credit_move_id.move_id == new_rec.debit_move_id.move_id.reversed_entry_id
):
new_rec.create_tax_cash_basis_entry(cash_basis_percentage_before_rec)
return debit_moves+credit_moves
def auto_reconcile_lines(self):
# Create list of debit and list of credit move ordered by date-currency
debit_moves = self.filtered(lambda r: r.debit != 0 or r.amount_currency > 0)
credit_moves = self.filtered(lambda r: r.credit != 0 or r.amount_currency < 0)
debit_moves = debit_moves.sorted(key=lambda a: (a.date_maturity or a.date, a.currency_id))
credit_moves = credit_moves.sorted(key=lambda a: (a.date_maturity or a.date, a.currency_id))
# Compute on which field reconciliation should be based upon:
if self[0].account_id.currency_id and self[0].account_id.currency_id != self[0].account_id.company_id.currency_id:
field = 'amount_residual_currency'
else:
field = 'amount_residual'
#if all lines share the same currency, use amount_residual_currency to avoid currency rounding error
if self[0].currency_id and all([x.amount_currency and x.currency_id == self[0].currency_id for x in self]):
field = 'amount_residual_currency'
# Reconcile lines
ret = self._reconcile_lines(debit_moves, credit_moves, field)
return ret
def _check_reconcile_validity(self):
#Perform all checks on lines
company_ids = set()
all_accounts = []
for line in self:
company_ids.add(line.company_id.id)
all_accounts.append(line.account_id)
if line.reconciled:
raise UserError(_('You are trying to reconcile some entries that are already reconciled.'))
if len(company_ids) > 1:
raise UserError(_('To reconcile the entries company should be the same for all entries.'))
if len(set(all_accounts)) > 1:
raise UserError(_('Entries are not from the same account.'))
if not (all_accounts[0].reconcile or all_accounts[0].internal_type == 'liquidity'):
raise UserError(_('Account %s (%s) does not allow reconciliation. First change the configuration of this account to allow it.') % (all_accounts[0].name, all_accounts[0].code))
def reconcile(self, writeoff_acc_id=False, writeoff_journal_id=False):
# Empty self can happen if the user tries to reconcile entries which are already reconciled.
# The calling method might have filtered out reconciled lines.
if not self:
return
# List unpaid invoices
not_paid_invoices = self.mapped('move_id').filtered(
lambda m: m.is_invoice(include_receipts=True) and m.invoice_payment_state not in ('paid', 'in_payment')
)
reconciled_lines = self.filtered(lambda aml: float_is_zero(aml.balance, precision_rounding=aml.move_id.company_id.currency_id.rounding) and aml.reconciled)
(self - reconciled_lines)._check_reconcile_validity()
#reconcile everything that can be
remaining_moves = self.auto_reconcile_lines()
writeoff_to_reconcile = self.env['account.move.line']
#if writeoff_acc_id specified, then create write-off move with value the remaining amount from move in self
if writeoff_acc_id and writeoff_journal_id and remaining_moves:
all_aml_share_same_currency = all([x.currency_id == self[0].currency_id for x in self])
writeoff_vals = {
'account_id': writeoff_acc_id.id,
'journal_id': writeoff_journal_id.id
}
if not all_aml_share_same_currency:
writeoff_vals['amount_currency'] = False
writeoff_to_reconcile = remaining_moves._create_writeoff([writeoff_vals])
#add writeoff line to reconcile algorithm and finish the reconciliation
remaining_moves = (remaining_moves + writeoff_to_reconcile).auto_reconcile_lines()
# Check if reconciliation is total or needs an exchange rate entry to be created
(self + writeoff_to_reconcile).check_full_reconcile()
# Trigger action for paid invoices
not_paid_invoices.filtered(
lambda m: m.invoice_payment_state in ('paid', 'in_payment')
).action_invoice_paid()
return True
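    # Hedged usage sketch (identifiers are placeholders): matching the receivable line
    # of an invoice with the corresponding payment line, writing off any remainder:
    #   (invoice_receivable_line + payment_line).reconcile(
    #       writeoff_acc_id=writeoff_account,
    #       writeoff_journal_id=miscellaneous_journal)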
def _create_writeoff(self, writeoff_vals):
""" Create a writeoff move per journal for the account.move.lines in self. If debit/credit is not specified in vals,
the writeoff amount will be computed as the sum of amount_residual of the given recordset.
:param writeoff_vals: list of dicts containing values suitable for account_move_line.create(). The data in vals will
            be processed to create both the write-off account.move.line(s) and their enclosing account.move.
"""
def compute_writeoff_counterpart_vals(values):
line_values = values.copy()
line_values['debit'], line_values['credit'] = line_values['credit'], line_values['debit']
if 'amount_currency' in values:
line_values['amount_currency'] = -line_values['amount_currency']
return line_values
# Group writeoff_vals by journals
writeoff_dict = {}
for val in writeoff_vals:
journal_id = val.get('journal_id', False)
if not writeoff_dict.get(journal_id, False):
writeoff_dict[journal_id] = [val]
else:
writeoff_dict[journal_id].append(val)
partner_id = self.env['res.partner']._find_accounting_partner(self[0].partner_id).id
company_currency = self[0].account_id.company_id.currency_id
writeoff_currency = self[0].account_id.currency_id or company_currency
line_to_reconcile = self.env['account.move.line']
# Iterate and create one writeoff by journal
writeoff_moves = self.env['account.move']
for journal_id, lines in writeoff_dict.items():
total = 0
total_currency = 0
writeoff_lines = []
date = fields.Date.today()
for vals in lines:
# Check and complete vals
if 'account_id' not in vals or 'journal_id' not in vals:
raise UserError(_("It is mandatory to specify an account and a journal to create a write-off."))
if ('debit' in vals) ^ ('credit' in vals):
raise UserError(_("Either pass both debit and credit or none."))
if 'date' not in vals:
vals['date'] = self._context.get('date_p') or fields.Date.today()
vals['date'] = fields.Date.to_date(vals['date'])
if vals['date'] and vals['date'] < date:
date = vals['date']
if 'name' not in vals:
vals['name'] = self._context.get('comment') or _('Write-Off')
if 'analytic_account_id' not in vals:
vals['analytic_account_id'] = self.env.context.get('analytic_id', False)
#compute the writeoff amount if not given
if 'credit' not in vals and 'debit' not in vals:
amount = sum([r.amount_residual for r in self])
vals['credit'] = amount > 0 and amount or 0.0
vals['debit'] = amount < 0 and abs(amount) or 0.0
vals['partner_id'] = partner_id
total += vals['debit']-vals['credit']
if 'amount_currency' not in vals and writeoff_currency != company_currency:
vals['currency_id'] = writeoff_currency.id
sign = 1 if vals['debit'] > 0 else -1
vals['amount_currency'] = sign * abs(sum([r.amount_residual_currency for r in self]))
total_currency += vals['amount_currency']
writeoff_lines.append(compute_writeoff_counterpart_vals(vals))
# Create balance line
writeoff_lines.append({
'name': _('Write-Off'),
'debit': total > 0 and total or 0.0,
'credit': total < 0 and -total or 0.0,
'amount_currency': total_currency,
'currency_id': total_currency and writeoff_currency.id or False,
'journal_id': journal_id,
'account_id': self[0].account_id.id,
'partner_id': partner_id
})
# Create the move
writeoff_move = self.env['account.move'].create({
'journal_id': journal_id,
'date': date,
'state': 'draft',
'line_ids': [(0, 0, line) for line in writeoff_lines],
})
writeoff_moves += writeoff_move
line_to_reconcile += writeoff_move.line_ids.filtered(lambda r: r.account_id == self[0].account_id).sorted(key='id')[-1:]
#post all the writeoff moves at once
if writeoff_moves:
writeoff_moves.post()
# Return the writeoff move.line which is to be reconciled
return line_to_reconcile
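    # Hedged sketch of a writeoff_vals payload (one dict per write-off to create);
    # when debit/credit are omitted, the residual amount of `self` is used:
    #   [{'account_id': difference_account.id,
    #     'journal_id': misc_journal.id,
    #     'name': 'Rounding difference'}]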
def remove_move_reconcile(self):
""" Undo a reconciliation """
(self.mapped('matched_debit_ids') + self.mapped('matched_credit_ids')).unlink()
def _copy_data_extend_business_fields(self, values):
''' Hook allowing copying business fields under certain conditions.
E.g. The link to the sale order lines must be preserved in case of a refund.
'''
self.ensure_one()
def copy_data(self, default=None):
res = super(AccountMoveLine, self).copy_data(default=default)
for line, values in zip(self, res):
# Don't copy the name of a payment term line.
if line.move_id.is_invoice() and line.account_id.user_type_id.type in ('receivable', 'payable'):
values['name'] = ''
if self._context.get('include_business_fields'):
line._copy_data_extend_business_fields(values)
return res
# -------------------------------------------------------------------------
# MISC
# -------------------------------------------------------------------------
def _get_matched_percentage(self):
""" This function returns a dictionary giving for each move_id of self, the percentage to consider as cash basis factor.
This is actually computing the same as the matched_percentage field of account.move, except in case of multi-currencies
where we recompute the matched percentage based on the amount_currency fields.
Note that this function is used only by the tax cash basis module since we want to consider the matched_percentage only
based on the company currency amounts in reports.
"""
matched_percentage_per_move = {}
for line in self:
if not matched_percentage_per_move.get(line.move_id.id, False):
lines_to_consider = line.move_id.line_ids.filtered(lambda x: x.account_id.internal_type in ('receivable', 'payable'))
total_amount_currency = 0.0
total_reconciled_currency = 0.0
all_same_currency = False
#if all receivable/payable aml and their payments have the same currency, we can safely consider
#the amount_currency fields to avoid including the exchange rate difference in the matched_percentage
if lines_to_consider and all([x.currency_id.id == lines_to_consider[0].currency_id.id for x in lines_to_consider]):
all_same_currency = lines_to_consider[0].currency_id.id
for line in lines_to_consider:
if all_same_currency:
total_amount_currency += abs(line.amount_currency)
for partial_line in (line.matched_debit_ids + line.matched_credit_ids):
if partial_line.currency_id and partial_line.currency_id.id == all_same_currency:
total_reconciled_currency += partial_line.amount_currency
else:
all_same_currency = False
break
if not all_same_currency:
#we cannot rely on amount_currency fields as it is not present on all partial reconciliation
matched_percentage_per_move[line.move_id.id] = line.move_id._get_cash_basis_matched_percentage()
else:
#we can rely on amount_currency fields, which allow us to post a tax cash basis move at the initial rate
#to avoid currency rate difference issues.
if total_amount_currency == 0.0:
matched_percentage_per_move[line.move_id.id] = 1.0
else:
# lines_to_consider is always non-empty when total_amount_currency is 0
currency = lines_to_consider[0].currency_id or lines_to_consider[0].company_id.currency_id
matched_percentage_per_move[line.move_id.id] = currency.round(total_reconciled_currency) / currency.round(total_amount_currency)
return matched_percentage_per_move
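    # Worked example (illustrative): an invoice whose receivable line of 100.0 has been
    # partially paid with 40.0 yields {move_id: 0.4}; once fully paid the factor is 1.0.
    # When every receivable/payable line and its payments share one currency, the ratio
    # is computed on amount_currency instead of debit/credit, as implemented above.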
def _get_analytic_tag_ids(self):
self.ensure_one()
return self.analytic_tag_ids.filtered(lambda r: not r.active_analytic_distribution).ids
def create_analytic_lines(self):
""" Create analytic items upon validation of an account.move.line having an analytic account or an analytic distribution.
"""
lines_to_create_analytic_entries = self.env['account.move.line']
for obj_line in self:
for tag in obj_line.analytic_tag_ids.filtered('active_analytic_distribution'):
for distribution in tag.analytic_distribution_ids:
vals_line = obj_line._prepare_analytic_distribution_line(distribution)
self.env['account.analytic.line'].create(vals_line)
if obj_line.analytic_account_id:
lines_to_create_analytic_entries |= obj_line
# create analytic entries in batch
if lines_to_create_analytic_entries:
values_list = lines_to_create_analytic_entries._prepare_analytic_line()
self.env['account.analytic.line'].create(values_list)
def _prepare_analytic_line(self):
""" Prepare the values used to create() an account.analytic.line upon validation of an account.move.line having
an analytic account. This method is intended to be extended in other modules.
:return list of values to create analytic.line
:rtype list
"""
result = []
for move_line in self:
amount = (move_line.credit or 0.0) - (move_line.debit or 0.0)
default_name = move_line.name or (move_line.ref or '/' + ' -- ' + (move_line.partner_id and move_line.partner_id.name or '/'))
result.append({
'name': default_name,
'date': move_line.date,
'account_id': move_line.analytic_account_id.id,
'group_id': move_line.analytic_account_id.group_id.id,
'tag_ids': [(6, 0, move_line._get_analytic_tag_ids())],
'unit_amount': move_line.quantity,
'product_id': move_line.product_id and move_line.product_id.id or False,
'product_uom_id': move_line.product_uom_id and move_line.product_uom_id.id or False,
'amount': amount,
'general_account_id': move_line.account_id.id,
'ref': move_line.ref,
'move_id': move_line.id,
'user_id': move_line.move_id.invoice_user_id.id or self._uid,
'partner_id': move_line.partner_id.id,
'company_id': move_line.analytic_account_id.company_id.id or self.env.company.id,
})
return result
def _prepare_analytic_distribution_line(self, distribution):
""" Prepare the values used to create() an account.analytic.line upon validation of an account.move.line having
analytic tags with analytic distribution.
"""
self.ensure_one()
amount = -self.balance * distribution.percentage / 100.0
default_name = self.name or (self.ref or '/' + ' -- ' + (self.partner_id and self.partner_id.name or '/'))
return {
'name': default_name,
'date': self.date,
'account_id': distribution.account_id.id,
'partner_id': self.partner_id.id,
'tag_ids': [(6, 0, [distribution.tag_id.id] + self._get_analytic_tag_ids())],
'unit_amount': self.quantity,
'product_id': self.product_id and self.product_id.id or False,
'product_uom_id': self.product_uom_id and self.product_uom_id.id or False,
'amount': amount,
'general_account_id': self.account_id.id,
'ref': self.ref,
'move_id': self.id,
'user_id': self.move_id.invoice_user_id.id or self._uid,
'company_id': distribution.account_id.company_id.id or self.env.company.id,
}
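    # Worked example (illustrative): a journal item with balance -200.0 and an analytic
    # distribution of 30% produces an analytic line of amount 60.0 on the distribution's
    # analytic account (amount = -balance * percentage / 100).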
@api.model
def _query_get(self, domain=None):
self.check_access_rights('read')
context = dict(self._context or {})
domain = domain or []
if not isinstance(domain, (list, tuple)):
domain = safe_eval(domain)
date_field = 'date'
if context.get('aged_balance'):
date_field = 'date_maturity'
if context.get('date_to'):
domain += [(date_field, '<=', context['date_to'])]
if context.get('date_from'):
if not context.get('strict_range'):
domain += ['|', (date_field, '>=', context['date_from']), ('account_id.user_type_id.include_initial_balance', '=', True)]
elif context.get('initial_bal'):
domain += [(date_field, '<', context['date_from'])]
else:
domain += [(date_field, '>=', context['date_from'])]
if context.get('journal_ids'):
domain += [('journal_id', 'in', context['journal_ids'])]
state = context.get('state')
if state and state.lower() != 'all':
domain += [('move_id.state', '=', state)]
if context.get('company_id'):
domain += [('company_id', '=', context['company_id'])]
if 'company_ids' in context:
domain += [('company_id', 'in', context['company_ids'])]
if context.get('reconcile_date'):
domain += ['|', ('reconciled', '=', False), '|', ('matched_debit_ids.max_date', '>', context['reconcile_date']), ('matched_credit_ids.max_date', '>', context['reconcile_date'])]
if context.get('account_tag_ids'):
domain += [('account_id.tag_ids', 'in', context['account_tag_ids'].ids)]
if context.get('account_ids'):
domain += [('account_id', 'in', context['account_ids'].ids)]
if context.get('analytic_tag_ids'):
domain += [('analytic_tag_ids', 'in', context['analytic_tag_ids'].ids)]
if context.get('analytic_account_ids'):
domain += [('analytic_account_id', 'in', context['analytic_account_ids'].ids)]
if context.get('partner_ids'):
domain += [('partner_id', 'in', context['partner_ids'].ids)]
if context.get('partner_categories'):
domain += [('partner_id.category_id', 'in', context['partner_categories'].ids)]
where_clause = ""
where_clause_params = []
tables = ''
if domain:
domain.append(('display_type', 'not in', ('line_section', 'line_note')))
domain.append(('move_id.state', '!=', 'cancel'))
query = self._where_calc(domain)
# Wrap the query with 'company_id IN (...)' to avoid bypassing company access rights.
self._apply_ir_rules(query)
tables, where_clause, where_clause_params = query.get_sql()
return tables, where_clause, where_clause_params
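    # Hedged usage sketch (assumed typical caller, not from the original source):
    # reporting code builds its SQL from this helper, e.g.:
    #   tables, where_clause, params = self.env['account.move.line'].with_context(
    #       state='posted', date_from='2020-01-01', date_to='2020-12-31')._query_get()
    #   self._cr.execute("SELECT SUM(balance) FROM " + tables + " WHERE " + where_clause, params)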
def _reconciled_lines(self):
ids = []
for aml in self.filtered('account_id.reconcile'):
ids.extend([r.debit_move_id.id for r in aml.matched_debit_ids] if aml.credit > 0 else [r.credit_move_id.id for r in aml.matched_credit_ids])
ids.append(aml.id)
return ids
def open_reconcile_view(self):
[action] = self.env.ref('account.action_account_moves_all_a').read()
ids = self._reconciled_lines()
action['domain'] = [('id', 'in', ids)]
return action
def action_accrual_entry(self):
[action] = self.env.ref('account.account_accrual_accounting_wizard_action').read()
action['context'] = self.env.context
return action
@api.model
def _get_suspense_moves_domain(self):
return [
('move_id.to_check', '=', True),
('full_reconcile_id', '=', False),
('statement_line_id', '!=', False),
]
def _convert_tags_for_cash_basis(self, tags):
""" Cash basis entries are managed by the tax report just like misc operations.
So it means that the tax report will not apply any additional multiplicator
to the balance of the cash basis lines.
For invoices move lines whose multiplicator would have been -1 (if their
taxes had not CABA), it will hence cause sign inversion if we directly copy
the tags from those lines. Instead, we need to invert all the signs from these
tags (if they come from tax report lines; tags created in data for financial
        reports will stay unchanged).
"""
self.ensure_one()
tax_multiplicator = (self.journal_id.type == 'sale' and -1 or 1) * (self.move_id.type in ('in_refund', 'out_refund') and -1 or 1)
if tax_multiplicator == -1:
# Take the opposite tags instead
rslt = self.env['account.account.tag']
for tag in tags:
if tag.tax_report_line_ids:
# tag created by an account.tax.report.line
new_tag = tag.tax_report_line_ids[0].tag_ids.filtered(lambda x: x.tax_negate != tag.tax_negate)
rslt += new_tag
else:
# tag created in data for use by an account.financial.html.report.line
rslt += tag
return rslt
return tags
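    # Illustrative example (tag names are hypothetical): an invoice line carrying a "+"
    # tax grid tag gets the matching "-" tag on its cash basis counterpart when the
    # journal/move type implies a -1 multiplicator, so the tax report keeps its sign.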
class AccountPartialReconcile(models.Model):
_name = "account.partial.reconcile"
_description = "Partial Reconcile"
debit_move_id = fields.Many2one('account.move.line', index=True, required=True)
credit_move_id = fields.Many2one('account.move.line', index=True, required=True)
amount = fields.Monetary(currency_field='company_currency_id', help="Amount concerned by this matching. Assumed to be always positive")
amount_currency = fields.Monetary(string="Amount in Currency")
currency_id = fields.Many2one('res.currency', string='Currency')
company_currency_id = fields.Many2one('res.currency', string="Company Currency", related='company_id.currency_id', readonly=True,
help='Utility field to express amount currency')
company_id = fields.Many2one('res.company', related='debit_move_id.company_id', store=True, string='Company', readonly=False)
full_reconcile_id = fields.Many2one('account.full.reconcile', string="Full Reconcile", copy=False)
max_date = fields.Date(string='Max Date of Matched Lines', compute='_compute_max_date',
readonly=True, copy=False, store=True,
help='Technical field used to determine at which date this reconciliation needs to be shown on the aged receivable/payable reports.')
@api.depends('debit_move_id.date', 'credit_move_id.date')
def _compute_max_date(self):
for rec in self:
rec.max_date = max(
rec.debit_move_id.date,
rec.credit_move_id.date
)
@api.model
def _prepare_exchange_diff_partial_reconcile(self, aml, line_to_reconcile, currency):
"""
Prepares the values for the partial reconciliation between an account.move.line
that needs to be fixed by an exchange rate entry and the account.move.line that fixes it
@param {account.move.line} aml:
The line that needs fixing with exchange difference entry
(e.g. a receivable/payable from an invoice)
@param {account.move.line} line_to_reconcile:
The line that fixes the aml. it is the receivable/payable line
of the exchange difference entry move
@param {res.currency} currency
@return {dict} values of account.partial.reconcile; ready for create()
"""
        # the exchange rate difference that will be fixed may be of opposite direction
# than the original move line (i.e. the exchange difference may be negative whereas
# the move line on which it applies may be a debit -- positive)
# So we need to register both the move line and the exchange line
# to either debit_move or credit_move as a function of whether the direction (debit VS credit)
# of the exchange loss/gain is the same (or not) as the direction of the line that is fixed here
if aml.currency_id:
residual_same_sign = aml.amount_currency * aml.amount_residual_currency >= 0
else:
residual_same_sign = aml.balance * aml.amount_residual >= 0
if residual_same_sign:
debit_move_id = line_to_reconcile.id if aml.credit else aml.id
credit_move_id = line_to_reconcile.id if aml.debit else aml.id
else:
debit_move_id = aml.id if aml.credit else line_to_reconcile.id
credit_move_id = aml.id if aml.debit else line_to_reconcile.id
return {
'debit_move_id': debit_move_id,
'credit_move_id': credit_move_id,
'amount': abs(aml.amount_residual),
'amount_currency': abs(aml.amount_residual_currency),
'currency_id': currency and currency.id or False,
}
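    # Illustrative note (added): when the residual keeps the sign of the original line,
    # the exchange-difference line takes the opposite side (debit vs credit) in the
    # partial reconcile prepared above; otherwise the two roles are swapped.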
@api.model
def create_exchange_rate_entry(self, aml_to_fix, move):
"""
        Automatically create journal items to book the exchange rate
        differences that can occur in a multi-currency environment. That
new journal item will be made into the given `move` in the company
`currency_exchange_journal_id`, and one of its journal items is
matched with the other lines to balance the full reconciliation.
:param aml_to_fix: recordset of account.move.line (possible several
but sharing the same currency)
:param move: account.move
:return: tuple.
[0]: account.move.line created to balance the `aml_to_fix`
[1]: recordset of account.partial.reconcile created between the
tuple first element and the `aml_to_fix`
"""
partial_rec = self.env['account.partial.reconcile']
aml_model = self.env['account.move.line']
created_lines = self.env['account.move.line']
for aml in aml_to_fix:
#create the line that will compensate all the aml_to_fix
line_to_rec = aml_model.with_context(check_move_validity=False).create({
'name': _('Currency exchange rate difference'),
'debit': aml.amount_residual < 0 and -aml.amount_residual or 0.0,
'credit': aml.amount_residual > 0 and aml.amount_residual or 0.0,
'account_id': aml.account_id.id,
'move_id': move.id,
'currency_id': aml.currency_id.id,
'amount_currency': aml.amount_residual_currency and -aml.amount_residual_currency or 0.0,
'partner_id': aml.partner_id.id,
})
#create the counterpart on exchange gain/loss account
exchange_journal = move.company_id.currency_exchange_journal_id
aml_model.with_context(check_move_validity=False).create({
'name': _('Currency exchange rate difference'),
'debit': aml.amount_residual > 0 and aml.amount_residual or 0.0,
'credit': aml.amount_residual < 0 and -aml.amount_residual or 0.0,
'account_id': aml.amount_residual > 0 and exchange_journal.default_debit_account_id.id or exchange_journal.default_credit_account_id.id,
'move_id': move.id,
'currency_id': aml.currency_id.id,
'amount_currency': aml.amount_residual_currency and aml.amount_residual_currency or 0.0,
'partner_id': aml.partner_id.id,
})
#reconcile all aml_to_fix
partial_rec |= self.create(
self._prepare_exchange_diff_partial_reconcile(
aml=aml,
line_to_reconcile=line_to_rec,
currency=aml.currency_id or False)
)
created_lines |= line_to_rec
return created_lines, partial_rec
def _get_tax_cash_basis_base_account(self, line, tax):
''' Get the account of lines that will contain the base amount of taxes.
:param line: An account.move.line record
:param tax: An account.tax record
:return: An account record
'''
return tax.cash_basis_base_account_id or line.account_id
def _get_amount_tax_cash_basis(self, amount, line):
return line.company_id.currency_id.round(amount)
def _set_tax_cash_basis_entry_date(self, move_date, newly_created_move):
if move_date > (self.company_id.period_lock_date or date.min) and newly_created_move.date != move_date:
# The move date should be the maximum date between payment and invoice (in case
# of payment in advance). However, we should make sure the move date is not
# recorded before the period lock date as the tax statement for this period is
            # probably already sent to the state (tax authority).
newly_created_move.write({'date': move_date})
def create_tax_cash_basis_entry(self, percentage_before_rec):
self.ensure_one()
move_date = self.debit_move_id.date
newly_created_move = self.env['account.move']
# We use a set here in case the reconciled lines belong to the same move (it happens with POS)
for move in {self.debit_move_id.move_id, self.credit_move_id.move_id}:
#move_date is the max of the 2 reconciled items
if move_date < move.date:
move_date = move.date
percentage_before = percentage_before_rec[move.id]
percentage_after = move.line_ids[0]._get_matched_percentage()[move.id]
# update the percentage before as the move can be part of
# multiple partial reconciliations
percentage_before_rec[move.id] = percentage_after
for line in move.line_ids:
if not line.tax_exigible:
#amount is the current cash_basis amount minus the one before the reconciliation
amount = line.balance * percentage_after - line.balance * percentage_before
rounded_amt = self._get_amount_tax_cash_basis(amount, line)
if float_is_zero(rounded_amt, precision_rounding=line.company_id.currency_id.rounding):
continue
if line.tax_line_id and line.tax_line_id.tax_exigibility == 'on_payment':
if not newly_created_move:
newly_created_move = self._create_tax_basis_move()
#create cash basis entry for the tax line
to_clear_aml = self.env['account.move.line'].with_context(check_move_validity=False).create({
'name': line.move_id.name,
'debit': abs(rounded_amt) if rounded_amt < 0 else 0.0,
'credit': rounded_amt if rounded_amt > 0 else 0.0,
'account_id': line.account_id.id,
'analytic_account_id': line.analytic_account_id.id,
'analytic_tag_ids': line.analytic_tag_ids.ids,
'tax_exigible': True,
'amount_currency': line.amount_currency and line.currency_id.round(-line.amount_currency * amount / line.balance) or 0.0,
'currency_id': line.currency_id.id,
'move_id': newly_created_move.id,
'partner_id': line.partner_id.id,
'journal_id': newly_created_move.journal_id.id,
})
# Group by cash basis account and tax
self.env['account.move.line'].with_context(check_move_validity=False).create({
'name': line.name,
'debit': rounded_amt if rounded_amt > 0 else 0.0,
'credit': abs(rounded_amt) if rounded_amt < 0 else 0.0,
'account_id': line.tax_repartition_line_id.account_id.id or line.account_id.id,
'analytic_account_id': line.analytic_account_id.id,
'analytic_tag_ids': line.analytic_tag_ids.ids,
'tax_exigible': True,
'amount_currency': line.amount_currency and line.currency_id.round(line.amount_currency * amount / line.balance) or 0.0,
'currency_id': line.currency_id.id,
'move_id': newly_created_move.id,
'partner_id': line.partner_id.id,
'journal_id': newly_created_move.journal_id.id,
'tax_repartition_line_id': line.tax_repartition_line_id.id,
'tax_base_amount': line.tax_base_amount,
'tag_ids': [(6, 0, line._convert_tags_for_cash_basis(line.tag_ids).ids)],
})
if line.account_id.reconcile and not line.reconciled:
#setting the account to allow reconciliation will help to fix rounding errors
to_clear_aml |= line
to_clear_aml.reconcile()
taxes_payment_exigible = line.tax_ids.flatten_taxes_hierarchy().filtered(lambda tax: tax.tax_exigibility == 'on_payment')
if taxes_payment_exigible:
if not newly_created_move:
newly_created_move = self._create_tax_basis_move()
#create cash basis entry for the base
for tax in taxes_payment_exigible:
account_id = self._get_tax_cash_basis_base_account(line, tax)
self.env['account.move.line'].with_context(check_move_validity=False).create({
'name': line.name,
'debit': rounded_amt > 0 and rounded_amt or 0.0,
'credit': rounded_amt < 0 and abs(rounded_amt) or 0.0,
'account_id': account_id.id,
'tax_exigible': True,
'tax_ids': [(6, 0, [tax.id])],
'move_id': newly_created_move.id,
'currency_id': line.currency_id.id,
'amount_currency': line.currency_id.round(line.amount_currency * amount / line.balance) if line.currency_id and line.balance else 0.0,
'partner_id': line.partner_id.id,
'journal_id': newly_created_move.journal_id.id,
'tax_repartition_line_id': line.tax_repartition_line_id.id,
'tax_base_amount': line.tax_base_amount,
'tag_ids': [(6, 0, line._convert_tags_for_cash_basis(line.tag_ids).ids)],
})
self.env['account.move.line'].with_context(check_move_validity=False).create({
'name': line.name,
'credit': rounded_amt > 0 and rounded_amt or 0.0,
'debit': rounded_amt < 0 and abs(rounded_amt) or 0.0,
'account_id': account_id.id,
'tax_exigible': True,
'move_id': newly_created_move.id,
'currency_id': line.currency_id.id,
'amount_currency': line.currency_id.round(-line.amount_currency * amount / line.balance) if line.currency_id and line.balance else 0.0,
'partner_id': line.partner_id.id,
'journal_id': newly_created_move.journal_id.id,
})
if newly_created_move:
self._set_tax_cash_basis_entry_date(move_date, newly_created_move)
# post move
newly_created_move.post()
def _create_tax_basis_move(self):
# Check if company_journal for cash basis is set if not, raise exception
if not self.company_id.tax_cash_basis_journal_id:
raise UserError(_('There is no tax cash basis journal defined '
'for this company: "%s" \nConfigure it in Accounting/Configuration/Settings') %
(self.company_id.name))
move_vals = {
'journal_id': self.company_id.tax_cash_basis_journal_id.id,
'tax_cash_basis_rec_id': self.id,
'ref': self.credit_move_id.move_id.name if self.credit_move_id.payment_id else self.debit_move_id.move_id.name,
}
return self.env['account.move'].create(move_vals)
def unlink(self):
""" When removing a partial reconciliation, also unlink its full reconciliation if it exists """
full_to_unlink = self.env['account.full.reconcile']
for rec in self:
if rec.full_reconcile_id:
full_to_unlink |= rec.full_reconcile_id
#reverse the tax basis move created at the reconciliation time
for move in self.env['account.move'].search([('tax_cash_basis_rec_id', 'in', self._ids)]):
if move.date > (move.company_id.period_lock_date or date.min):
move._reverse_moves([{'ref': _('Reversal of %s') % move.name}], cancel=True)
else:
move._reverse_moves([{'date': fields.Date.today(), 'ref': _('Reversal of %s') % move.name}], cancel=True)
res = super(AccountPartialReconcile, self).unlink()
if full_to_unlink:
full_to_unlink.unlink()
return res
class AccountFullReconcile(models.Model):
_name = "account.full.reconcile"
_description = "Full Reconcile"
name = fields.Char(string='Number', required=True, copy=False, default=lambda self: self.env['ir.sequence'].next_by_code('account.reconcile'))
partial_reconcile_ids = fields.One2many('account.partial.reconcile', 'full_reconcile_id', string='Reconciliation Parts')
reconciled_line_ids = fields.One2many('account.move.line', 'full_reconcile_id', string='Matched Journal Items')
exchange_move_id = fields.Many2one('account.move')
def unlink(self):
""" When removing a full reconciliation, we need to revert the eventual journal entries we created to book the
fluctuation of the foreign currency's exchange rate.
We need also to reconcile together the origin currency difference line and its reversal in order to completely
cancel the currency difference entry on the partner account (otherwise it will still appear on the aged balance
for example).
"""
for rec in self:
if rec.exists() and rec.exchange_move_id:
# reverse the exchange rate entry after de-referencing it to avoid looping
# (reversing will cause a nested attempt to drop the full reconciliation)
to_reverse = rec.exchange_move_id
rec.exchange_move_id = False
if to_reverse.date > (to_reverse.company_id.period_lock_date or date.min):
reverse_date = to_reverse.date
else:
reverse_date = fields.Date.today()
to_reverse._reverse_moves([{
'date': reverse_date,
'ref': _('Reversal of: %s') % to_reverse.name,
}], cancel=True)
return super(AccountFullReconcile, self).unlink()
@api.model
def _prepare_exchange_diff_move(self, move_date, company):
if not company.currency_exchange_journal_id:
raise UserError(_("You should configure the 'Exchange Rate Journal' in the accounting settings, to manage automatically the booking of accounting entries related to differences between exchange rates."))
if not company.income_currency_exchange_account_id.id:
raise UserError(_("You should configure the 'Gain Exchange Rate Account' in the accounting settings, to manage automatically the booking of accounting entries related to differences between exchange rates."))
if not company.expense_currency_exchange_account_id.id:
raise UserError(_("You should configure the 'Loss Exchange Rate Account' in the accounting settings, to manage automatically the booking of accounting entries related to differences between exchange rates."))
res = {'journal_id': company.currency_exchange_journal_id.id}
# The move date should be the maximum date between payment and invoice
# (in case of payment in advance). However, we should make sure the
# move date is not recorded after the end of year closing.
if move_date > (company.fiscalyear_lock_date or date.min):
res['date'] = move_date
return res
# -------------------------------------------------------------------------
# File: apps/users/admin.py  (repo: Arisfello/Student_registration)
# -------------------------------------------------------------------------
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from .models import User, UserProfile
class UserAdmin(BaseUserAdmin):
list_display = ('email', 'is_staff', 'is_admin', 'is_student')
list_filter = ('is_student', 'is_admin',)
fieldsets = (
(None, {'fields': ('email', 'password')}),
('Permissions', {'fields': ('is_student','is_admin', 'is_staff')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2'),
}),
)
search_fields = ('email',)
ordering = ('email',)
filter_horizontal = ()
class UserProfileAdmin(admin.ModelAdmin):
pass
admin.site.register(User, UserAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
# -------------------------------------------------------------------------
# File: OpenRTM_aist/test/test_Topic.py  (repo: n-ando/OpenRTM-aist-Python)
# -------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: euc-jp -*-
#
# \file test_Topic.py
# \brief
# \date $Date: $
# \author Nobuhiko Miyamoto
#
import sys
sys.path.insert(1,"../")
try:
import unittest2 as unittest
except (ImportError):
import unittest
import time
#from Manager import *
import OpenRTM_aist
import RTC, RTC__POA
import OpenRTM, OpenRTM__POA
testcomp1_spec = ["implementation_id", "TestComp1",
"type_name", "TestComp1",
"description", "Test example component",
"version", "1.0",
"vendor", "Nobuhiko Myiyamoto",
"category", "example",
"activity_type", "DataFlowComponent",
"max_instance", "10",
"language", "C++",
"lang_type", "compile",
""]
testcomp2_spec = ["implementation_id", "TestComp2",
"type_name", "TestComp2",
"description", "Test example component",
"version", "1.0",
"vendor", "Nobuhiko Myiyamoto",
"category", "example",
"activity_type", "DataFlowComponent",
"max_instance", "10",
"language", "C++",
"lang_type", "compile",
""]
class Test_i(OpenRTM__POA.InPortCdr):
def __init__(self):
pass
def put(self, data):
return OpenRTM.PORT_OK
class TestComp1(OpenRTM_aist.DataFlowComponentBase):
def __init__(self, manager):
OpenRTM_aist.DataFlowComponentBase.__init__(self, manager)
self._d_out = RTC.TimedLong(RTC.Time(0,0),0)
self._outOut = OpenRTM_aist.OutPort("out", self._d_out)
self._servicePort_provided = OpenRTM_aist.CorbaPort("service")
self._Service_provided = Test_i()
self._d_topic_out = RTC.TimedLong(RTC.Time(0,0),0)
self._topic_outOut = OpenRTM_aist.OutPort("topic_out", self._d_topic_out)
self._topic_servicePort_provided = OpenRTM_aist.CorbaPort("topic_service")
self._topic_Service_provided = Test_i()
def onInitialize(self):
self.addOutPort("out",self._outOut)
self._servicePort_provided.registerProvider("service", "TestService", self._Service_provided)
self.addPort(self._servicePort_provided)
self.addOutPort("topic_out",self._topic_outOut)
self._topic_outOut.appendProperty("publish_topic","test")
self._topic_servicePort_provided.registerProvider("topic_service", "TestService", self._topic_Service_provided)
self.addPort(self._topic_servicePort_provided)
self._topic_servicePort_provided.appendProperty("publish_topic","test")
return RTC.RTC_OK

class TestComp2(OpenRTM_aist.DataFlowComponentBase):
    def __init__(self, manager):
        OpenRTM_aist.DataFlowComponentBase.__init__(self, manager)
        self._d_in = RTC.TimedLong(RTC.Time(0,0),0)
        self._inIn = OpenRTM_aist.InPort("in", self._d_in)
        self._servicePort_required = OpenRTM_aist.CorbaPort("service")
        self._Service_required = OpenRTM_aist.CorbaConsumer(interfaceType=OpenRTM.InPortCdr)
        self._d_topic_in = RTC.TimedLong(RTC.Time(0,0),0)
        self._topic_inIn = OpenRTM_aist.InPort("topic_in", self._d_topic_in)
        self._topic_servicePort_required = OpenRTM_aist.CorbaPort("topic_service")
        self._topic_Service_required = OpenRTM_aist.CorbaConsumer(interfaceType=OpenRTM.InPortCdr)
        return

    def onInitialize(self):
        self.addInPort("in", self._inIn)
        self._servicePort_required.registerConsumer("service", "TestService", self._Service_required)
        self.addPort(self._servicePort_required)
        self.addInPort("topic_in", self._topic_inIn)
        self._topic_inIn.appendProperty("publish_topic", "test")
        self._topic_servicePort_required.registerConsumer("topic_service", "TestService", self._topic_Service_required)
        self.addPort(self._topic_servicePort_required)
        self._topic_servicePort_required.appendProperty("publish_topic", "test")
        return RTC.RTC_OK
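
# Factory registration helpers: MyModuleInit is handed to the manager as its
# module-init procedure and creates one instance of each test component.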
def TestComp1Init(manager):
    profile = OpenRTM_aist.Properties(defaults_str=testcomp1_spec)
    manager.registerFactory(profile,
                            TestComp1,
                            OpenRTM_aist.Delete)

def TestComp2Init(manager):
    profile = OpenRTM_aist.Properties(defaults_str=testcomp2_spec)
    manager.registerFactory(profile,
                            TestComp2,
                            OpenRTM_aist.Delete)

def MyModuleInit(manager):
    TestComp1Init(manager)
    TestComp2Init(manager)
    com = manager.createComponent("TestComp1")
    com = manager.createComponent("TestComp2")
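
# test_port connects data and service ports explicitly through the manager and
# checks the name-service bindings; test_Topic asserts that the ports carrying
# the "publish_topic" property were connected without an explicit connect call.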
class test_Topic(unittest.TestCase):
    def setUp(self):
        #sys.argv.extend(['-o','port.outport.topic_out.publish_topic:test2'])
        #sys.argv.extend(['-o','port.inport.topic_in.publish_topic:test2'])
        #sys.argv.extend(['-o','port.corbaport.topic_service.publish_topic:test2'])
        self.manager = OpenRTM_aist.Manager.init(sys.argv)
        self.manager.setModuleInitProc(MyModuleInit)
        self.manager.activateManager()
        self.comps = []
        self.comps.append(self.manager.getComponent("TestComp10"))
        self.comps.append(self.manager.getComponent("TestComp20"))
        self.comp1 = self.comps[0].getObjRef()
        self.comp2 = self.comps[1].getObjRef()

    def tearDown(self):
        for comp in self.comps:
            self.manager.unregisterComponent(comp)
            comp_id = comp.getProperties()
            factory = self.manager._factory.find(comp_id)
            factory.destroy(comp)
        self.manager.shutdownNaming()
        time.sleep(0.1)

    def test_port(self):
        inport = OpenRTM_aist.get_port_by_name(self.comp2, "TestComp20.in")
        outport = OpenRTM_aist.get_port_by_name(self.comp1, "TestComp10.out")
        self.manager.connectDataPorts(inport, [outport])
        ans = OpenRTM_aist.already_connected(inport, outport)
        self.assertTrue(ans)

        service_required = OpenRTM_aist.get_port_by_name(self.comp2, "TestComp20.service")
        service_provided = OpenRTM_aist.get_port_by_name(self.comp1, "TestComp10.service")
        self.manager.connectServicePorts(service_required, [service_provided])
        ans = OpenRTM_aist.already_connected(service_required, service_provided)
        self.assertTrue(ans)

        ports = self.manager.getPortsOnNameServers("dataports.port_cxt/test.topic_cxt", "inport")
        name = ports[0].get_port_profile().name
        self.assertEqual(name, "TestComp20.topic_in")

        orb = self.manager.getORB()
        names = "localhost"
        cns = OpenRTM_aist.CorbaNaming(orb, names)
        bl = cns.listByKind("dataports.port_cxt/test.topic_cxt", "inport")
        name = bl[0].binding_name[0].id
        self.assertEqual(name, "TestComp20.topic_in")

        self._d_out = RTC.TimedOctetSeq(RTC.Time(0,0),[])
        self._outOut = OpenRTM_aist.OutPort("out", self._d_out)
        prop = OpenRTM_aist.Properties()
        self._outOut.init(prop)
        naming = self.manager.getNaming()
        naming.bindPortObject("test.port", self._outOut)
        port = cns.resolveStr("test.port")
        self.assertTrue(port is not None)

        naming = OpenRTM_aist.NamingOnCorba(orb, "localhost")
        naming.bindPortObject("test2.port", self._outOut)
        port = cns.resolveStr("test2.port")
        self.assertTrue(port is not None)

    def test_Topic(self):
        inport = OpenRTM_aist.get_port_by_name(self.comp2, "TestComp20.topic_in")
        outport = OpenRTM_aist.get_port_by_name(self.comp1, "TestComp10.topic_out")
        ans = OpenRTM_aist.already_connected(inport, outport)
        self.assertTrue(ans)
        provided = OpenRTM_aist.get_port_by_name(self.comp1, "TestComp10.topic_service")
        required = OpenRTM_aist.get_port_by_name(self.comp2, "TestComp20.topic_service")
        ans = OpenRTM_aist.already_connected(provided, required)
        self.assertTrue(ans)

############### test #################
if __name__ == '__main__':
    unittest.main()
| ["[email protected]"] | |
a3bb1eb015c0c06389061b47e44784f6e338ffa5 | 526d4626294ffd1e9d866972acfd7857845fe989 | /download_mars_control_pv02.py | af0937405d1d6337f5be0c5df84d01165d990214 | [] | no_license | EdurneMShadow/PrediccionSolarEnsembles | 0372c90b2b96c31bdf36a7eb8688f0af36b93b19 | debacd76c47dbc879062ac15ab767b537dc94cba | refs/heads/master | 2021-01-19T05:18:22.376802 | 2018-07-16T15:03:11 | 2018-07-16T15:03:11 | 87,425,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,062 | py | from ecmwfapi import ECMWFService
import os
import pandas as pd
from multiprocessing import Pool
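
# Fetch one day of ECMWF control-forecast surface fields (stream "ef", steps
# 0-21 h every 3 h) on a 0.5-degree grid covering roughly the Iberian Peninsula
# (N44/W-9.5/S35/E4.5) and save them as NetCDF, skipping dates already on disk.
# Note (assumption): download_mars relies on the module-level `server` handle
# created under __main__, so the Pool worker only sees it with the fork start
# method (the Unix default).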
def download_mars(d):
    print(d)
    if not os.path.isfile(
            "pv_control_ensembles_{}.nc".format(d.strftime("%Y%m%d"))):
        server.execute(
            {
                "class": "od",
                "date": "{}".format(d.strftime("%Y-%m-%d")),
                "expver": "1",
                "levtype": "sfc",
                "param": "21.228/22.228/164.128/165.128/166.128/167.128/169.128/176.128/210.128",
                "step": "0/to/21/by/3",
                "stream": "ef",
                "time": "00:00:00",
                "area": "44/-9.5/35/4.5",
                "grid": "0.5/0.5",
                "format": "netcdf",
                "type": "control forecast"
            },
            "pv_control_ensembles_{}.nc".format(d.strftime("%Y%m%d")), )
    return d

if __name__ == '__main__':
    server = ECMWFService('mars')
    dates = pd.date_range('20130101', '20140101', freq='D')
    p = Pool(1)
    p.map(download_mars, dates)
| ["[email protected]"] |