Dataset schema (one row per column: name, type, and the observed length range, value range, or number of distinct values):

| Column | Type | Range / distinct values |
|---|---|---|
| blob_id | string | length 40 to 40 |
| directory_id | string | length 40 to 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 to 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 distinct values |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 to 40 |
| revision_id | string | length 40 to 40 |
| branch_name | string | 777 distinct values |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M (nullable) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 distinct values |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 149 distinct values |
| src_encoding | string | 26 distinct values |
| language | string | 1 distinct value |
| is_vendor | bool | 2 distinct values |
| is_generated | bool | 2 distinct values |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 distinct values |
| content | string | length 3 to 10.2M |
| authors | list | length 1 to 1 |
| author_id | string | length 1 to 132 |
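Each record below follows this schema: repository and snapshot metadata, license information, GitHub activity counts, and the full source file in the `content` column. As a minimal, hedged sketch of how such a dataset could be inspected, the snippet below streams rows with the Hugging Face `datasets` library and keeps only permissively licensed files; the dataset path is a placeholder, not the name of an actual published dataset.

from itertools import islice
from datasets import load_dataset

# Placeholder dataset path -- substitute the real repository id.
ds = load_dataset("org/python-source-files", split="train", streaming=True)

# Keep only permissively licensed rows and print a few metadata fields.
permissive = (row for row in ds if row["license_type"] == "permissive")
for row in islice(permissive, 5):
    print(row["repo_name"], row["path"], row["detected_licenses"], row["length_bytes"])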
04194d3259bddb1b85dd569f81d42adde8bd6519
|
b64c45e75aa215ddcf7249fb92e047f3e7731187
|
/mainapp/migrations/0002_auto_20200217_2307.py
|
3ff8952ddbbcd3d0d5fc5ed54ba586369c9da0b1
|
[] |
no_license
|
johngaitho05/CohMat
|
6731b4dfb94475c75f1cd1d2ec55cc810729f939
|
ff5b8e5eb877f68a0477f4f19b78c6e7c407af2c
|
refs/heads/master
| 2022-12-12T15:55:53.363782 | 2021-04-04T13:17:05 | 2021-04-04T13:17:05 | 239,868,710 | 1 | 0 | null | 2022-11-04T19:31:50 | 2020-02-11T21:31:47 |
Python
|
UTF-8
|
Python
| false | false | 537 |
py
|
# Generated by Django 2.2.6 on 2020-02-17 20:07
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('mainapp', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='cohort',
            name='sub_groups',
            field=django.contrib.postgres.fields.ArrayField(base_field=django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None), size=None),
        ),
    ]
|
[
"[email protected]"
] | |
fb577e0bdcfa8c1aaeb76a7181b11a55ad7d13b8
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_093/ch15_2020_03_23_19_23_29_484996.py
|
5d206db34db42a05c7a3430435e11b5a24aa7869
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 84 |
py
|
programa=input('Olá, ')
if programa == 'Chris':
    print('Todo mundo odeia o Chris')
|
[
"[email protected]"
] | |
9c3aba948575031c83d273d634d1d6a34c7d502a
|
c70ac0b6d3ec292ab95626cbd519dee56a70289a
|
/embedded-software/mcu-hal/STM32F4xx_HAL_Driver/wscript
|
8daa2e183e3e623cacd3047f1f461cdf043ac7d9
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] |
permissive
|
dv1990/foxbms
|
c4b28fea533f681c04fae5bc4f004fd2f6bcb498
|
9176f75e8ebf42da0581d82be3db9ebcdfea4f0e
|
refs/heads/master
| 2020-04-17T08:27:07.168099 | 2018-12-14T12:43:17 | 2018-12-14T12:43:17 | 166,412,545 | 1 | 0 |
NOASSERTION
| 2019-01-18T13:52:42 | 2019-01-18T13:52:42 | null |
UTF-8
|
Python
| false | false | 3,967 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @copyright © 2010 - 2018, Fraunhofer-Gesellschaft zur Foerderung der
# angewandten Forschung e.V. All rights reserved.
#
# BSD 3-Clause License
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# We kindly request you to use one or more of the following phrases to refer to
# foxBMS in your hardware, software, documentation or advertising materials:
#
# ″This product uses parts of foxBMS®″
#
# ″This product includes parts of foxBMS®″
#
# ″This product is derived from foxBMS®″
import os
def build(bld):
    srcs = ' '.join([
        os.path.join('Src', 'stm32f4xx_hal.c'),
        os.path.join('Src', 'stm32f4xx_hal_adc.c'),
        os.path.join('Src', 'stm32f4xx_hal_adc_ex.c'),
        os.path.join('Src', 'stm32f4xx_hal_can.c'),
        os.path.join('Src', 'stm32f4xx_hal_cortex.c'),
        os.path.join('Src', 'stm32f4xx_hal_dma.c'),
        os.path.join('Src', 'stm32f4xx_ll_fmc.c'),
        os.path.join('Src', 'stm32f4xx_hal_flash.c'),
        os.path.join('Src', 'stm32f4xx_hal_flash_ex.c'),
        os.path.join('Src', 'stm32f4xx_hal_gpio.c'),
        os.path.join('Src', 'stm32f4xx_hal_iwdg.c'),
        os.path.join('Src', 'stm32f4xx_hal_pwr.c'),
        os.path.join('Src', 'stm32f4xx_hal_pwr_ex.c'),
        os.path.join('Src', 'stm32f4xx_hal_rcc.c'),
        os.path.join('Src', 'stm32f4xx_hal_rcc_ex.c'),
        os.path.join('Src', 'stm32f4xx_hal_rtc.c'),
        os.path.join('Src', 'stm32f4xx_hal_rtc_ex.c'),
        os.path.join('Src', 'stm32f4xx_hal_sdram.c'),
        os.path.join('Src', 'stm32f4xx_hal_spi.c'),
        os.path.join('Src', 'stm32f4xx_hal_tim.c'),
        os.path.join('Src', 'stm32f4xx_hal_uart.c')])
    includes = os.path.join(bld.bldnode.abspath()) + ' '
    includes += ' '.join([
        '.',
        os.path.join('..', 'CMSIS', 'Device', 'ST', 'STM32F4xx', 'Include'),
        os.path.join('..', 'CMSIS', 'Include'),
        os.path.join('Inc'),
        os.path.join('Inc', 'Legacy'),
        os.path.join('Src'),
        os.path.join(bld.top_dir, bld.env.__sw_dir, bld.env.__bld_project, 'src', 'general', 'config'),
        os.path.join(bld.top_dir, bld.env.__sw_dir, bld.env.__bld_project, 'src', 'general', 'config', bld.env.CPU_MAJOR)])
    bld.stlib(target='foxbms-stmhal',
              source=srcs,
              includes=includes)
|
[
"[email protected]"
] | ||
5a288ad3997042da67edb9b18baba20de636c05b
|
8cb6cba90622021549b94e62a7fd5ae9ebc3f55f
|
/simplemooc/simplemooc/courses/views.py
|
99291abeafc9581c1987db05ff5212db16ecc237
|
[] |
no_license
|
thiagorossener/course-udemy-django-simplemooc
|
4c319e23fb2d4dae2c7b152179f4f700904d3271
|
f23303f27712149722747dc3f6bcc8361acc1698
|
refs/heads/master
| 2021-06-16T03:11:10.527563 | 2017-05-11T14:35:16 | 2017-05-11T14:35:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,213 |
py
|
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from simplemooc.courses.models import (Course, Enrollment, Announcement, Lesson,
Material)
from simplemooc.courses.forms import ContactCourse, CommentForm
from .decorators import enrollment_required
def index(request):
courses = Course.objects.all()
template_name = 'courses/index.html'
context = {
'courses': courses
}
return render(request, template_name, context)
def details(request, slug):
course = get_object_or_404(Course, slug=slug)
context = {}
if request.method == 'POST':
form = ContactCourse(request.POST)
if form.is_valid():
context['is_valid'] = True
form.send_mail(course)
form = ContactCourse()
else:
form = ContactCourse()
context['form'] = form
context['course'] = course
template_name = 'courses/details.html'
return render(request, template_name, context)
@login_required
def enrollment(request, slug):
course = get_object_or_404(Course, slug=slug)
enrollment, created = Enrollment.objects.get_or_create(
user=request.user, course=course
)
if created:
enrollment.active()
messages.success(request, 'Você foi inscrito no curso com sucesso')
else:
messages.info(request, 'Você já está inscrito no curso')
return redirect('accounts:dashboard')
@login_required
def undo_enrollment(request, slug):
course = get_object_or_404(Course, slug=slug)
enrollment = get_object_or_404(
Enrollment, user=request.user, course=course
)
if request.method == 'POST':
enrollment.delete()
messages.success(request, 'Sua inscrição foi cancelada com sucesso')
return redirect('accounts:dashboard')
template_name = 'courses/undo_enrollment.html'
context = {
'enrollment': enrollment,
'course': course,
}
return render(request, template_name, context)
@login_required
@enrollment_required
def announcements(request, slug):
course = request.course
template_name = 'courses/announcements.html'
context = {
'course': course,
'announcements': course.announcements.all()
}
return render(request, template_name, context)
@login_required
@enrollment_required
def show_announcement(request, slug, pk):
course = request.course
announcement = get_object_or_404(course.announcements.all(), pk=pk)
form = CommentForm(request.POST or None)
if form.is_valid():
comment = form.save(commit=False)
comment.user = request.user
comment.announcement = announcement
comment.save()
form = CommentForm()
messages.success(request, 'Seu comentário foi salvo com sucesso')
template_name = 'courses/show_announcement.html'
context = {
'course': course,
'announcement': announcement,
'form': form,
}
return render(request, template_name, context)
@login_required
@enrollment_required
def lessons(request, slug):
course = request.course
template_name = 'courses/lessons.html'
lessons = course.release_lessons()
if request.user.is_staff:
lessons = course.lessons.all()
context = {
'course': course,
'lessons': lessons,
}
return render(request, template_name, context)
@login_required
@enrollment_required
def lesson(request, slug, pk):
course = request.course
lesson = get_object_or_404(Lesson, pk=pk, course=course)
if not request.user.is_staff or not lesson.is_available():
messages.error(request, 'Esta aula não está disponível')
return redirect('courses:lessons', slug=course.slug)
template_name = 'courses/lesson.html'
context = {
'course': course,
'lesson': lesson,
}
return render(request, template_name, context)
@login_required
@enrollment_required
def material(request, slug, pk):
course = request.course
material = get_object_or_404(Material, pk=pk, lesson__course=course)
lesson = material.lesson
if not request.user.is_staff or not lesson.is_available():
messages.error(request, 'Este material não está disponível')
return redirect('courses:lesson', slug=course.slug, pk=lesson.pk)
if not material.is_embedded():
return redirect(material.file.url)
template_name = 'courses/material.html'
context = {
'course': course,
'lesson': lesson,
'material': material,
}
return render(request, template_name, context)
|
[
"[email protected]"
] | |
342f75521d3abaf866851722cdfd35ec72b29d01
|
2befb6f2a5f1fbbd5340093db43a198abdd5f53b
|
/pythonProject/modelviewset/modelviewsetApp/migrations/0001_initial.py
|
f0099f330374e6813c4bfd515b8bb93cdc7d1383
|
[] |
no_license
|
JanardanPandey/RestAPI
|
1956d3529782d18ef2118961f6286e3213665aad
|
654933a4d9687076a00c6f4c57fc3dfee1a2c567
|
refs/heads/master
| 2023-06-14T07:02:31.702000 | 2021-07-02T07:50:59 | 2021-07-02T07:50:59 | 382,357,537 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 604 |
py
|
# Generated by Django 3.2.3 on 2021-06-06 08:35
from django.db import migrations, models
class Migration(migrations.Migration):
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='StudentModel',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('city', models.CharField(max_length=100)),
                ('roll', models.IntegerField()),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
b035d4c55236efb3af1c591684b28900e1f9eca8
|
3267fb38696d7b114a22f476f2c60425d6ee349a
|
/src/api/exceptions.py
|
6eab930e8268c1c57c0cb4c9d602481895dd8f7e
|
[] |
no_license
|
marcinowski/github-adapter
|
c0092e3f817f9dc1d97691e81b1c247ae281b2c7
|
2d7c6b9601da082de246450cc840412f0c4331b5
|
refs/heads/master
| 2022-12-10T00:53:39.386198 | 2017-09-06T10:57:09 | 2017-09-06T10:57:09 | 100,716,960 | 0 | 0 | null | 2021-06-01T22:02:20 | 2017-08-18T13:55:02 |
Python
|
UTF-8
|
Python
| false | false | 1,602 |
py
|
"""
:created on: 2017-08-21
:author: Marcin Muszynski
:contact: [email protected]
"""
RESPONSE_EXCEPTION_NAME_FORMAT = 'GitHubAdapter{}Error'
class GitHubAdapterBaseError(Exception):
""" Base Exception for GitHub adapter"""
class GitHubAdapterHTTPError(GitHubAdapterBaseError):
""" Base HTTP Error Exception"""
status_code = 400
reason = ''
class GitHubAdapter400Error(GitHubAdapterHTTPError):
""" Exception to raise for Bad Request """
status_code = 400
reason = 'Bad Request'
class GitHubAdapter401Error(GitHubAdapterHTTPError):
""" Exception to raise when authentication error """
status_code = 401
reason = 'Authentication error'
class GitHubAdapter403Error(GitHubAdapterHTTPError):
""" Exception to raise when authentication error """
status_code = 403
reason = 'Access denied'
class GitHubAdapter404Error(GitHubAdapterHTTPError):
""" Exception to raise when resource is not found """
status_code = 404
reason = 'Page not found'
class GitHubAdapter405Error(GitHubAdapterHTTPError):
""" Exception to raise when method is not allowed """
status_code = 405
reason = 'Method not allowed'
class GitHubAdapter422Error(GitHubAdapterHTTPError):
""" Exception to raise when method is not allowed """
status_code = 422
reason = 'Unprocessable Entity - invalid fields received'
class GitHubAdapter500Error(GitHubAdapterHTTPError):
status_code = 500
reason = 'Server Error'
class GitHubAdapter501Error(GitHubAdapterHTTPError):
status_code = 501
reason = 'Unrecognized Error'
|
[
"[email protected]"
] | |
31a9d3dc24bee09b099c63b756f0a4157ae58716
|
ba949e02c0f4a7ea0395a80bdc31ed3e5f5fcd54
|
/problems/dp/Solution727.py
|
398306eae193d167eb1b975cf6e85d29184f1f7b
|
[
"MIT"
] |
permissive
|
akaliutau/cs-problems-python
|
6bc0a74064f6e9687fe58b13763da1fdf2e1f626
|
9b1bd8e3932be62135a38a77f955ded9a766b654
|
refs/heads/master
| 2023-05-11T22:19:06.711001 | 2021-06-04T11:14:42 | 2021-06-04T11:14:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 627 |
py
|
""" Given strings S and T, find the minimum (contiguous) substring W of S, so
that T is a subsequence of W.
If there is no such window in S that covers all characters in T, return the
empty string "". If there are multiple such minimum-length windows, return
the one with the left-most starting index.
Example 1:
Input: S = "abcdebdde", T = "bde" Output: "bcde" Explanation: "bcde" is the
answer because it occurs before "bdde" which has the same length. "deb" is
not a smaller window because the elements of T in the window must occur in
order
"""
class Solution727:
    pass
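# --- Not part of the original file: a possible implementation sketch. ---
# Forward-match T inside S, then walk backward to shrink each candidate window;
# runs in O(len(S) * len(T)) time. The function name is illustrative only.
def min_window_subsequence(S, T):
    best = ""
    i = 0
    while i < len(S):
        if S[i] == T[0]:
            # scan forward until every character of T is matched in order
            j, k = i, 0
            while j < len(S):
                if S[j] == T[k]:
                    k += 1
                    if k == len(T):
                        break
                j += 1
            if k < len(T):
                break  # no further full match is possible
            # j sits on the match of T[-1]; walk backward to tighten the window
            end = j
            k = len(T) - 1
            while k >= 0:
                if S[j] == T[k]:
                    k -= 1
                j -= 1
            start = j + 1
            if not best or end - start + 1 < len(best):
                best = S[start:end + 1]
            i = start + 1
        else:
            i += 1
    return best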
|
[
"[email protected]"
] | |
dcd430ba643880ad3d9694ed4cc1931961efdb1c
|
b9eb496c4551fd091954675a61382636fc68e715
|
/src/ABC1xx/ABC17x/ABC172/ABC172D.py
|
22b78ec009546408cba2d57d41d58a2f5f8fe13c
|
[] |
no_license
|
kttaroha/AtCoder
|
af4c5783d89a61bc6a40f59be5e0992980cc8467
|
dc65ce640954da8c2ad0d1b97580da50fba98a55
|
refs/heads/master
| 2021-04-17T16:52:09.508706 | 2020-11-22T05:45:08 | 2020-11-22T05:45:08 | 249,460,649 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 290 |
py
|
def main():
    N = int(input())
    nums = [1] * (N + 1)
    s = 0
    for i in range(2, N+1):
        tmp = i
        while tmp <= N:
            nums[tmp] += 1
            tmp += i
    for i in range(1, N+1):
        s += i * nums[i]
    print(s)
if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
c64fd7d1fec9e8c0937b20a5f8df26495f60825d
|
9ffd14fa00dbea9a7e628825e999e73c1abded5f
|
/Lecture 04/request.py
|
f4a9213706c32a31d2e2de6b45187e0199904b73
|
[] |
no_license
|
riyadhswe/PythonDiyeProgrammingSikha2nd
|
d3b90071f1237e4be679609689dc96039dad868d
|
6e63e23e046aeb724f7cf4426423551afdc081a9
|
refs/heads/master
| 2023-03-03T08:33:47.301639 | 2021-02-14T15:59:53 | 2021-02-14T15:59:53 | 327,816,754 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 83 |
py
|
import requests
url = "http://google.com"
response = requests.get(url)
response.ok
|
[
"[email protected]"
] | |
2e582b4c164d4d1f684a12aed00aa4268b122446
|
e6d862a9df10dccfa88856cf16951de8e0eeff2b
|
/Core/worker/python/test/test_worker_performance_event_evaluation.py
|
b0bcb28658398e498344033f0e7a1a1d032bdb69
|
[] |
no_license
|
AllocateSoftware/API-Stubs
|
c3de123626f831b2bd37aba25050c01746f5e560
|
f19d153f8e9a37c7fb1474a63c92f67fc6c8bdf0
|
refs/heads/master
| 2022-06-01T07:26:53.264948 | 2020-01-09T13:44:41 | 2020-01-09T13:44:41 | 232,816,845 | 0 | 0 | null | 2022-05-20T21:23:09 | 2020-01-09T13:34:35 |
C#
|
UTF-8
|
Python
| false | false | 1,014 |
py
|
# coding: utf-8
"""
Workers
## Workers and events # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import api_server
from api_server.models.worker_performance_event_evaluation import WorkerPerformanceEventEvaluation # noqa: E501
from api_server.rest import ApiException
class TestWorkerPerformanceEventEvaluation(unittest.TestCase):
"""WorkerPerformanceEventEvaluation unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testWorkerPerformanceEventEvaluation(self):
"""Test WorkerPerformanceEventEvaluation"""
# FIXME: construct object with mandatory attributes with example values
# model = api_server.models.worker_performance_event_evaluation.WorkerPerformanceEventEvaluation() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
c0f1fb47b6faa9935dd1ef06dd48a6f9ebe66315
|
2fd087fbc5faf43940153693823969df6c8ec665
|
/pyc_decrypted/latest/distutils/versionpredicate.py
|
1ceb1d76a7d86286983c5fc23abde3ae1bbb4694
|
[] |
no_license
|
mickeystone/DropBoxLibrarySRC
|
ed132bbffda7f47df172056845e5f8f6c07fb5de
|
2e4a151caa88b48653f31a22cb207fff851b75f8
|
refs/heads/master
| 2021-05-27T05:02:30.255399 | 2013-08-27T13:16:55 | 2013-08-27T13:16:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,425 |
py
|
#Embedded file name: distutils/versionpredicate.py
import re
import distutils.version
import operator
re_validPackage = re.compile('(?i)^\\s*([a-z_]\\w*(?:\\.[a-z_]\\w*)*)(.*)')
re_paren = re.compile('^\\s*\\((.*)\\)\\s*$')
re_splitComparison = re.compile('^\\s*(<=|>=|<|>|!=|==)\\s*([^\\s,]+)\\s*$')
def splitUp(pred):
res = re_splitComparison.match(pred)
if not res:
raise ValueError('bad package restriction syntax: %r' % pred)
comp, verStr = res.groups()
return (comp, distutils.version.StrictVersion(verStr))
compmap = {'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'>': operator.gt,
'>=': operator.ge,
'!=': operator.ne}
class VersionPredicate:
def __init__(self, versionPredicateStr):
versionPredicateStr = versionPredicateStr.strip()
if not versionPredicateStr:
raise ValueError('empty package restriction')
match = re_validPackage.match(versionPredicateStr)
if not match:
raise ValueError('bad package name in %r' % versionPredicateStr)
self.name, paren = match.groups()
paren = paren.strip()
if paren:
match = re_paren.match(paren)
if not match:
raise ValueError('expected parenthesized list: %r' % paren)
str = match.groups()[0]
self.pred = [ splitUp(aPred) for aPred in str.split(',') ]
if not self.pred:
raise ValueError('empty parenthesized list in %r' % versionPredicateStr)
else:
self.pred = []
def __str__(self):
if self.pred:
seq = [ cond + ' ' + str(ver) for cond, ver in self.pred ]
return self.name + ' (' + ', '.join(seq) + ')'
else:
return self.name
def satisfied_by(self, version):
for cond, ver in self.pred:
if not compmap[cond](version, ver):
return False
return True
_provision_rx = None
def split_provision(value):
global _provision_rx
if _provision_rx is None:
_provision_rx = re.compile('([a-zA-Z_]\\w*(?:\\.[a-zA-Z_]\\w*)*)(?:\\s*\\(\\s*([^)\\s]+)\\s*\\))?$')
value = value.strip()
m = _provision_rx.match(value)
if not m:
raise ValueError('illegal provides specification: %r' % value)
ver = m.group(2) or None
if ver:
ver = distutils.version.StrictVersion(ver)
return (m.group(1), ver)
|
[
"[email protected]"
] | |
f20f8c824979e86de13da915aadb5f80de1ba16f
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_157/750.py
|
40dcc08356dad6634828062c24ac8c82edb056f4
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 888 |
py
|
#!/usr/bin/python
import sys
char2q={'i':0, 'j':1, 'k':2}
def solve_case():
(L,X)=[int(n) for n in sys.stdin.readline().split(" ")]
string=sys.stdin.readline()[:-1]
string=X*string
#print string
letter=0
Q=3
minus=False
for c in string:
#print c,
q=char2q[c]
if q==Q:
Q=3
minus=not minus
elif Q==3:
Q=q
else:
diff=(3+q-Q)%3
if diff==1:
Q=(Q+2)%3
else:
Q=(Q+1)%3
minus=not minus
if not minus and Q==letter and letter!=3:
letter+=1
Q=3
#print
if letter==3 and not minus and Q==3:
return "YES"
else:
return "NO"
cases_count=int(sys.stdin.readline())
for i in xrange(cases_count):
print "Case #"+`i+1`+": "+solve_case()
|
[
"[email protected]"
] | |
0b9b9d7c58619def344ec1bb8490039754f21dff
|
42c48f3178a48b4a2a0aded547770027bf976350
|
/google/ads/google_ads/v4/services/reach_plan_service_client_config.py
|
7da209ded44302ee1db8c586bc0d53ba3cb561df
|
[
"Apache-2.0"
] |
permissive
|
fiboknacky/google-ads-python
|
e989464a85f28baca1f28d133994c73759e8b4d6
|
a5b6cede64f4d9912ae6ad26927a54e40448c9fe
|
refs/heads/master
| 2021-08-07T20:18:48.618563 | 2020-12-11T09:21:29 | 2020-12-11T09:21:29 | 229,712,514 | 0 | 0 |
Apache-2.0
| 2019-12-23T08:44:49 | 2019-12-23T08:44:49 | null |
UTF-8
|
Python
| false | false | 1,331 |
py
|
config = {
"interfaces": {
"google.ads.googleads.v4.services.ReachPlanService": {
"retry_codes": {
"idempotent": [
"DEADLINE_EXCEEDED",
"UNAVAILABLE"
],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 5000,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 3600000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 3600000,
"total_timeout_millis": 3600000
}
},
"methods": {
"ListPlannableLocations": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"ListPlannableProducts": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"GenerateProductMixIdeas": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"GenerateReachForecast": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
}
}
}
}
}
|
[
"[email protected]"
] | |
487e993470611bf28744dc80b3488e471f00e3a1
|
23db36a9e19a48aa660444dfc32d50ebc6c88a42
|
/doc/_code/inline_keyboard.py
|
2d29d42301312f031228dbcdbd429134c5d299c8
|
[
"MIT"
] |
permissive
|
cpcchengt/telepot
|
fab206ce6ea4149e0dce9619adc9075b59c06a66
|
eb2050fbb36b142a9746533e738322d0cc0d4631
|
refs/heads/master
| 2023-06-20T02:35:36.644708 | 2021-07-21T02:55:44 | 2021-07-21T02:55:44 | 384,059,417 | 2 | 0 |
MIT
| 2021-07-08T08:44:57 | 2021-07-08T08:44:57 | null |
UTF-8
|
Python
| false | false | 936 |
py
|
import sys
import time
import telepot
from telepot.loop import MessageLoop
from telepot.namedtuple import InlineKeyboardMarkup, InlineKeyboardButton
def on_chat_message(msg):
    content_type, chat_type, chat_id = telepot.glance(msg)
    keyboard = InlineKeyboardMarkup(inline_keyboard=[
        [InlineKeyboardButton(text='Press me', callback_data='press')],
    ])
    bot.sendMessage(chat_id, 'Use inline keyboard', reply_markup=keyboard)
def on_callback_query(msg):
    query_id, from_id, query_data = telepot.glance(msg, flavor='callback_query')
    print('Callback Query:', query_id, from_id, query_data)
    bot.answerCallbackQuery(query_id, text='Got it')
TOKEN = sys.argv[1] # get token from command-line
bot = telepot.Bot(TOKEN)
MessageLoop(bot, {'chat': on_chat_message,
                  'callback_query': on_callback_query}).run_as_thread()
print('Listening ...')
while 1:
    time.sleep(10)
|
[
"[email protected]"
] | |
0d444903596cbcb7e38c014cceae5caaa94d4835
|
cb30d1a3a4fa6c8f7a6f89a671fbdb4a808e19e3
|
/c2/proverb.py
|
e2d2db78798fd7415e07ee03feb45940ff7b3deb
|
[] |
no_license
|
systemchip/python-for-everyone
|
0b45172ca5b41c3b5fc1a835fbccf4a479c282ea
|
9fb7f751a97fb6a110079e1e3e1dd9601fb24374
|
refs/heads/master
| 2021-09-02T09:18:22.013704 | 2017-07-17T07:46:19 | 2017-07-17T07:46:19 | 115,913,547 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 86 |
py
|
# Display a proverb
print("두드려라.")
print("그러면 열릴 것이다.")
|
[
"[email protected]"
] | |
69f414c19e8cafff59fec7dc9c7e05ad0644f490
|
b3c47795e8b6d95ae5521dcbbb920ab71851a92f
|
/Leetcode/Algorithm/python/2000/01865-Finding Pairs With a Certain Sum.py
|
66b88abfc548b4814351a7770db1ce304c3a3b3d
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
Wizmann/ACM-ICPC
|
6afecd0fd09918c53a2a84c4d22c244de0065710
|
7c30454c49485a794dcc4d1c09daf2f755f9ecc1
|
refs/heads/master
| 2023-07-15T02:46:21.372860 | 2023-07-09T15:30:27 | 2023-07-09T15:30:27 | 3,009,276 | 51 | 23 | null | null | null | null |
UTF-8
|
Python
| false | false | 724 |
py
|
from collections import defaultdict
class FindSumPairs(object):
    def __init__(self, nums1, nums2):
        self.d = defaultdict(int)
        self.nums1 = nums1
        self.nums2 = nums2
        for item in nums2:
            self.d[item] += 1
    def add(self, index, val):
        pre = self.nums2[index]
        cur = pre + val
        self.d[pre] -= 1
        self.d[cur] += 1
        self.nums2[index] = cur
    def count(self, tot):
        res = 0
        for a in self.nums1:
            res += self.d[tot - a]
        return res
# Your FindSumPairs object will be instantiated and called as such:
# obj = FindSumPairs(nums1, nums2)
# obj.add(index,val)
# param_2 = obj.count(tot)
|
[
"[email protected]"
] | |
6022f801423acba4dc6fba931bf79e8128f0fd72
|
e30a578e2467b67d82dc8529c2e8107579496d01
|
/ML/3.py
|
3cdaebea2b295d3905e602dbc0e4bad537e6ab9e
|
[
"MIT"
] |
permissive
|
rednithin/7thSemLabs
|
7636ad55e02d7f4fbf56e2b4fbc73ff79868006e
|
5bd4102627aa068afd16c55c02b18e51ec5cba4c
|
refs/heads/master
| 2020-04-06T13:01:15.576852 | 2018-12-09T06:49:12 | 2018-12-09T06:49:12 | 157,480,151 | 3 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,187 |
py
|
import numpy as np
from csv import reader
from math import log2
from collections import Counter
from pprint import pprint
YES, NO = "Y", "N"
class Node:
def __init__(self, label):
self.label = label
self.branches = {}
def entropy(data):
total, positive, negative = len(
data), (data[:, -1] == YES).sum(), (data[:, -1] == NO).sum()
entropy = 0
if positive:
entropy -= positive / total * log2(positive / total)
if negative:
entropy -= negative / total * log2(negative / total)
return entropy
def gain(s, data, column):
values = set(data[:, column])
gain = s
for value in values:
sub = data[data[:, column] == value]
gain -= len(sub) / len(data) * entropy(sub)
return gain
def bestAttribute(data):
s = entropy(data)
g = [gain(s, data, column) for column in range(len(data[0]) - 1)]
return g.index(max(g))
def id3(data, labels):
root = Node('Null')
if entropy(data) == 0:
root.label = data[0, -1]
elif len(data[0]) == 1:
root.label = Counter(data[:, -1]).most_common()[0][0]
else:
column = bestAttribute(data)
root.label = labels[column]
values = set(data[:, column])
for value in values:
nData = np.delete(
data[data[:, column] == value], column, axis=1)
nLabels = np.delete(labels, column)
root.branches[value] = id3(nData, nLabels)
return root
def getRules(root, rule, rules):
if not root.branches:
rules.append(rule[:-2] + "=> " + root.label)
for value, nRoot in root.branches.items():
getRules(nRoot, rule + root.label + "=" + value + " ^ ", rules)
def predict(tree, tup):
if not tree.branches:
return tree.label
return predict(tree.branches[tup[tree.label]], tup)
labels = np.array(['Outlook', 'Temperature', 'Humidity', 'Wind', 'PlayTennis'])
with open('3-dataset.csv') as f:
data = np.array(list(reader(f)))
tree = id3(data, labels)
rules = []
getRules(tree, "", rules)
pprint(sorted(rules))
tup = {}
for label in labels[:-1]:
tup[label] = input(label + ": ")
print(predict(tree, tup))
|
[
"[email protected]"
] | |
0440b1fff9d5435ce1e25dee4e860b2f50ea83b1
|
06569ec06be697beffdc5eeff1e51ca0e69ee388
|
/naengpa/migrations/0001_initial.py
|
1b2e597cd43aee0be84f29f42d34ba72acf44faf
|
[] |
no_license
|
Areum0921/naengpa
|
fbb88f962c03a0a6222942e81726fb883986861a
|
ebf6f27aaa78c8ea06d269410bfc9a179c3f4bcc
|
refs/heads/master
| 2023-07-14T15:55:36.524558 | 2021-08-19T07:50:26 | 2021-08-19T07:50:26 | 388,108,445 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 806 |
py
|
# Generated by Django 3.2.5 on 2021-07-22 08:11
from django.db import migrations, models
class Migration(migrations.Migration):
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Recipe',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('time', models.IntegerField()),
                ('difficulty', models.IntegerField()),
                ('need_ingredient', models.CharField(max_length=200)),
                ('content', models.TextField(default='')),
                ('create_date', models.DateTimeField()),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
a5a6566ee6f951719d7bae58963b4f8df750761f
|
ca2782824a16b7b9dc1284ff058776ca62809787
|
/exercise_library/basic_navigation/api.py
|
6c2a9ddcf12418c07ad9055ba07a07cfb90228b0
|
[] |
no_license
|
captbaritone/exercise-library
|
9ddc838f4ffe24c65fd381be6d421f5084e48392
|
c01274c05a35e47b140846ffa06f3a42cc4dc291
|
refs/heads/master
| 2020-12-24T12:47:23.645268 | 2015-02-09T08:05:34 | 2015-02-09T08:05:34 | 30,525,565 | 1 | 0 | null | 2015-02-09T08:21:29 | 2015-02-09T08:21:28 | null |
UTF-8
|
Python
| false | false | 555 |
py
|
import json
from django.http import Http404
from django.http import HttpResponse
def render_to_json(response_obj, context={}, content_type="application/json", status=200):
    json_str = json.dumps(response_obj, indent=4)
    return HttpResponse(json_str, content_type=content_type, status=status)
def requires_post(fn):
    def inner(request, *args, **kwargs):
        if request.method != "POST":
            return Http404
        # post_data = request.POST or json.loads(request.body)
        return fn(request, *args, **kwargs)
    return inner
|
[
"[email protected]"
] | |
5b6670e8acd44e1c3c70765eccf7f95c6d7a4463
|
ee89c84c5b2f48d447b7005299b409d61cc4d807
|
/venv/Scripts/rst2html.py
|
d82a8d31a3a9267b435b9d556c0d9e90b2f8e873
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
songweiwei/rasa_run
|
342075cc645725a042acf273ab6508c5da55cbee
|
0cfc0a280b9efea344bacf5f2df5800c32d0b3a8
|
refs/heads/master
| 2023-05-31T03:23:26.490925 | 2020-04-22T07:56:07 | 2020-04-22T07:56:07 | 257,218,895 | 2 | 2 | null | 2023-05-22T23:23:45 | 2020-04-20T08:31:42 |
Python
|
UTF-8
|
Python
| false | false | 641 |
py
|
#!c:\users\sonny\pycharmprojects\rasa_run\venv\scripts\python.exe
# $Id: rst2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML.
"""
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)
publish_cmdline(writer_name='html', description=description)
|
[
"[email protected]"
] | |
2f53fe70840c17f5e6eb67b3f761b26aa5105eb1
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/hdinsight/azure-mgmt-hdinsight/generated_samples/get_linux_hadoop_script_action.py
|
c384d8c553f822eb33b468a8920289e2e0df61e7
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 |
MIT
| 2023-09-08T08:38:48 | 2019-11-18T07:09:24 |
Python
|
UTF-8
|
Python
| false | false | 1,580 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.hdinsight import HDInsightManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-hdinsight
# USAGE
python get_linux_hadoop_script_action.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    client = HDInsightManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="subid",
    )
    response = client.script_actions.list_by_cluster(
        resource_group_name="rg1",
        cluster_name="cluster1",
    )
    for item in response:
        print(item)
# x-ms-original-file: specification/hdinsight/resource-manager/Microsoft.HDInsight/stable/2021-06-01/examples/GetLinuxHadoopScriptAction.json
if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
4390260f8e58d5e4672f6e75b6cd7aa1bc72ce21
|
e40bf504eda3413074cc719a421cdab222ece729
|
/chip.py
|
4bb26d0260a51c141f095838551ef442146d101b
|
[] |
no_license
|
bcrafton/noc
|
93b20725b53890d0ee405ee51a53fa04cf448e3d
|
4191eca66a4498eb5f3e4069b92a3e7c006e868b
|
refs/heads/master
| 2023-02-16T12:46:22.729027 | 2021-01-11T17:09:29 | 2021-01-11T17:09:29 | 328,193,001 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,619 |
py
|
import numpy as np
from modules import *
from transform_weights import transform_weights
from transform_inputs import transform_inputs
############################
def bits(model, inputs):
xh, xw, xc, xn = np.shape(inputs)
size = xh
ws = []
xs = []
for layer in model.keys():
w = model[layer]['f']
k, _, c, n = np.shape(w)
p1 = model[layer]['p1']
p2 = model[layer]['p2']
s = model[layer]['s']
w = np.reshape(w, (k * k * c, n)).astype(int)
ws.append(transform_weights(w))
x = (size, size, c)
xs.append(x)
size = (size - k + s + p1 + p2) // s
return ws, xs
############################
def malloc(grid, w, x):
nwl, _, nbl, _ = np.shape(w)
nd = 8 // nbl
# allocate [nwl] grid cells.
col = np.floor(np.sqrt(nwl))
row = nwl // col
rem = nwl % col
# nwl=3 -> [col=1, row=3, rem=0]
# pick a start address
# this should be an optimization problem
# create clusters, then figure out how to route them.
#
############################
class chip:
def __init__(self):
self.grid = [ [None for _ in range(4)] for _ in range(4) ]
# generate all [SRAM, PCRAM] pairs
for i in range(4):
for j in range(4):
self.grid[i][j] = {'PCM': PCM(), 'SRAM': SRAM()}
def step(self):
pass
'''
def map(self, model, inputs):
# set PCM and SRAM to hold specific parts of our model and activations.
# this is probably a whole field we dont know about.
#
ws, xs = bits(model, inputs)
grid = np.zeros(shape=(4, 4))
for (w, x) in zip(ws, xs):
# malloc - where to place w and x ?
malloc(grid, w, x)
# what about when we cant store the whole input in the SRAM ?
# need to "orchestrate" the transfer to all adjacent nodes.
#
# allocate
# placement / mapping
# routing
#
# think problem is small enough such that we can find optimal solution
#
# we already did allocation with breaking barriers.
# but now we have to do the other parts.
#
#
'''
def map(self, model, inputs):
ws, xs = bits(model, inputs)
alloc = malloc(ws, xs) # breaking barriers
place = placement(alloc)
route = routing(place)
|
[
"[email protected]"
] | |
e0e0877c3ae3262b43646cf6272ffe758b1fc454
|
c17133ea0f7bbad98ef17ce2c0213b8c95a4630f
|
/Design_serach_autocomplete_system.py
|
8971f354fa6ab79c1247ea332d87cc11c02a0754
|
[] |
no_license
|
Mohan110594/Design-6
|
e8be1935bfa9a95537846face73f5f7d8943bf3f
|
36faa1203276e58f8b52470e1e09fa60f388f1e3
|
refs/heads/master
| 2022-04-16T09:25:59.967115 | 2020-04-11T16:06:36 | 2020-04-11T16:06:36 | 254,902,314 | 0 | 0 | null | 2020-04-11T15:55:00 | 2020-04-11T15:54:59 | null |
UTF-8
|
Python
| false | false | 3,575 |
py
|
# Did this code successfully run on Leetcode : Yes
# Any problem you faced while coding this : None
# Your code here along with comments explaining your approach
# In this problem we create a trie with all the given sentences, and at every letter we maintain a frequency map of the sentences.
# When input arrives we check the trie; if the sentence is not present we add it to the trie and update the frequency map accordingly.
# Then we fetch the frequency map, extract the 3 most frequent sentences (breaking ties lexicographically), and send them as the output for each input.
# Time complexity --> O(l*n) where n is the number of sentences and l is the length of each sentence
import heapq
from collections import deque
# creating a custom comparator if frequencies of the sentences are of same value we go with higher lexiographic sentence else we go with the sentence which has higher frequency
class lexico:
def __init__(self, key, value):
self.key = key
self.value = value
def __lt__(self, d2):
if self.value == d2.value:
return self.key> d2.key
else:
return self.value<d2.value
# creating a trie which store the sentence character and the word with its frequencies
class TrieNode:
def __init__(self):
self.isEnd = False
self.children = dict()
self.freqmap = dict()
class AutocompleteSystem(object):
def __init__(self, sentences, times):
"""
:type sentences: List[str]
:type times: List[int]
"""
# self.sentences=sentences
# self.times=times
self.str1 = ''
self.root = TrieNode()
self.cursor = self.root
# max length of the heap to be maintained
self.k = 3
# inserting all the sentences into the trie
for i in range(len(sentences)):
sent = sentences[i]
freq = times[i]
self.insert(sent, freq)
# logic for trie insertion
def insert(self, word, freq):
root1=self.root
for i in range(len(word)):
if word[i] not in root1.children:
root1.children[word[i]] = TrieNode()
root1 = root1.children[word[i]]
root1.freqmap[word] = freq
root1.isEnd = True
def input(self, c):
"""
:type c: str
:rtype: List[str]
"""
# if the input is # we have to insert all the previous sentence into the trie else if present we have to increment its frequency
if c == '#':
if self.str1 not in self.cursor.freqmap:
self.insert(self.str1, 1)
else:
self.insert(self.str1,self.cursor.freqmap[self.str1]+1)
self.str1 = ''
self.cursor = self.root
return
self.str1 = self.str1 + c
if c not in self.cursor.children:
self.cursor.children[c] = TrieNode()
# storing the frequency map for that character
freqcursor=self.cursor.children[c].freqmap
self.cursor = self.cursor.children[c]
pq = []
# min heap with custom comparator
for key,value in freqcursor.items():
val1=lexico(key,value)
heapq.heappush(pq, val1)
if len(pq) > self.k:
heapq.heappop(pq)
# storing the values based on the frequency order and lexicographic order
out=deque()
for i in range(len(pq)):
ele=heapq.heappop(pq)
out.appendleft(ele.key)
return out
|
[
"[email protected]"
] | |
1c01d6b6bfd004dd2a32aa4a0929d97689dc22af
|
625108dc5a9b90d0f22609788ff52aff155a2c99
|
/selection/randomized/bootstrap/bayes_boot_randomX_gn.py
|
0c2f94b79a6c8c437a2055d00767c26536c46cd3
|
[] |
no_license
|
guhjy/Python-software
|
0169f6618a570bb5a5e3aaf29e895d2251ca791c
|
061a050bd17ca6f276296dbfa51573f001c320b1
|
refs/heads/master
| 2021-01-09T20:55:01.417131 | 2016-09-12T20:43:43 | 2016-09-12T20:43:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,078 |
py
|
import numpy as np
from scipy.stats import laplace, probplot, uniform
#from selection.algorithms.lasso import instance
from instances import instance, bootstrap_covariance
import selection.sampling.randomized.api as randomized
#from pvalues_bayes_randomX import pval
from pvalues_bayes_ranX_gn import pval
from matplotlib import pyplot as plt
import regreg.api as rr
import selection.sampling.randomized.losses.lasso_randomX as lasso_randomX
import statsmodels.api as sm
def test_lasso(s=0, n=100, p=20, weights = "neutral",
randomization_dist = "logistic", randomization_scale = 1,
Langevin_steps = 10000, burning = 2000, X_scaled = True,
covariance_estimate = "nonparametric", noise = "uniform"):
""" weights: exponential, gamma, normal, gumbel
randomization_dist: logistic, laplace """
step_size = 1./p
X, y, true_beta, nonzero, sigma = instance(n=n, p=p, random_signs=True, s=s, sigma=1.,rho=0, scale=X_scaled, noise=noise)
print 'true beta', true_beta
lam_frac = 1.
if randomization_dist == "laplace":
randomization = laplace(loc=0, scale=1.)
random_Z = randomization.rvs(p)
if randomization_dist == "logistic":
random_Z = np.random.logistic(loc=0, scale = 1, size = p)
if randomization_dist== "normal":
random_Z = np.random.standard_normal(p)
print 'randomization', random_Z*randomization_scale
loss = lasso_randomX.lasso_randomX(X, y)
epsilon = 1./np.sqrt(n)
#epsilon = 1.
lam = sigma * lam_frac * np.mean(np.fabs(np.dot(X.T, np.random.standard_normal((n, 10000)))+randomization_scale*np.random.logistic(size=(p,10000))).max(0))
lam_scaled = lam.copy()
random_Z_scaled = random_Z.copy()
epsilon_scaled = epsilon
if (X_scaled == False):
random_Z_scaled *= np.sqrt(n)
lam_scaled *= np.sqrt(n)
epsilon_scaled *= np.sqrt(n)
penalty = randomized.selective_l1norm_lan(p, lagrange=lam_scaled)
# initial solution
problem = rr.simple_problem(loss, penalty)
random_term = rr.identity_quadratic(epsilon_scaled, 0, -randomization_scale*random_Z_scaled, 0)
solve_args = {'tol': 1.e-10, 'min_its': 100, 'max_its': 500}
initial_soln = problem.solve(random_term, **solve_args)
print 'initial solution', initial_soln
active = (initial_soln != 0)
if np.sum(active)==0:
return [-1], [-1]
inactive = ~active
betaE = initial_soln[active]
signs = np.sign(betaE)
initial_grad = -np.dot(X.T, y - np.dot(X, initial_soln))
if (X_scaled==False):
initial_grad /= np.sqrt(n)
print 'initial_gradient', initial_grad
subgradient = random_Z - initial_grad - epsilon * initial_soln
cube = np.divide(subgradient[inactive], lam)
nactive = betaE.shape[0]
ninactive = cube.shape[0]
beta_unpenalized = np.linalg.lstsq(X[:, active], y)[0]
print 'beta_OLS onto E', beta_unpenalized
obs_residuals = y - np.dot(X[:, active], beta_unpenalized) # y-X_E\bar{\beta}^E
N = np.dot(X[:, inactive].T, obs_residuals) # X_{-E}^T(y-X_E\bar{\beta}_E), null statistic
full_null = np.zeros(p)
full_null[nactive:] = N
# parametric coveriance estimate
if covariance_estimate == "parametric":
XE_pinv = np.linalg.pinv(X[:, active])
mat = np.zeros((nactive+ninactive, n))
mat[:nactive,:] = XE_pinv
mat[nactive:,:] = X[:, inactive].T.dot(np.identity(n)-X[:, active].dot(XE_pinv))
Sigma_full = mat.dot(mat.T)
else:
Sigma_full = bootstrap_covariance(X,y,active, beta_unpenalized)
init_vec_state = np.zeros(n+nactive+ninactive)
if weights =="exponential":
init_vec_state[:n] = np.ones(n)
else:
init_vec_state[:n] = np.zeros(n)
#init_vec_state[:n] = np.random.standard_normal(n)
#init_vec_state[:n] = np.ones(n)
init_vec_state[n:(n+nactive)] = betaE
init_vec_state[(n+nactive):] = cube
def full_projection(vec_state, signs = signs,
nactive=nactive, ninactive = ninactive):
alpha = vec_state[:n].copy()
betaE = vec_state[n:(n+nactive)].copy()
cube = vec_state[(n+nactive):].copy()
projected_alpha = alpha.copy()
projected_betaE = betaE.copy()
projected_cube = np.zeros_like(cube)
if weights == "exponential":
projected_alpha = np.clip(alpha, 0, np.inf)
if weights == "gamma":
projected_alpha = np.clip(alpha, -2+1./n, np.inf)
for i in range(nactive):
if (projected_betaE[i] * signs[i] < 0):
projected_betaE[i] = 0
projected_cube = np.clip(cube, -1, 1)
return np.concatenate((projected_alpha, projected_betaE, projected_cube), 0)
Sigma = np.linalg.inv(np.dot(X[:, active].T, X[:, active]))
null, alt = pval(init_vec_state, full_projection, X, obs_residuals, beta_unpenalized, full_null,
signs, lam, epsilon,
nonzero, active, Sigma,
weights, randomization_dist, randomization_scale,
Langevin_steps, step_size, burning,
X_scaled)
# Sigma_full[:nactive, :nactive])
return null, alt
if __name__ == "__main__":
np.random.seed(1)
plt.figure()
plt.ion()
P0, PA = [], []
for i in range(50):
print "iteration", i
p0, pA = test_lasso()
if np.sum(p0)>-1:
P0.extend(p0); PA.extend(pA)
plt.clf()
plt.xlim([0, 1])
plt.ylim([0, 1])
ecdf = sm.distributions.ECDF(P0)
x = np.linspace(min(P0), max(P0))
y = ecdf(x)
plt.plot(x, y, lw=2)
plt.plot([0, 1], [0, 1], 'k-', lw=1)
#probplot(P0, dist=uniform, sparams=(0, 1), plot=plt,fit=False)
#plt.plot([0, 1], color='k', linestyle='-', linewidth=2)
plt.pause(0.01)
print "done! mean: ", np.mean(P0), "std: ", np.std(P0)
while True:
plt.pause(0.05)
plt.savefig('bayes.pdf')
|
[
"[email protected]"
] | |
7fdcfd43aa5a87d2ab8a4c5a2f506ea8afb52965
|
ec6a55345b2e3358a99113369c62ab4622ab527c
|
/src/Utilities/date_helper.py
|
ed424b9f133177c5b881b0a20ea2e7243aef4c7c
|
[] |
no_license
|
cloew/PersonalAccountingSoftware
|
566a92e127bc060cd18470e35e2f6decf94f1aa5
|
57c909c8581bef3b66388038a1cf5edda426ecf9
|
refs/heads/master
| 2016-08-11T16:16:30.179032 | 2015-05-12T20:09:19 | 2015-05-12T20:09:19 | 8,558,121 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 256 |
py
|
from dateutil import parser
def DateToString(date):
""" Returns a string format of the given date """
return "{0:%m/%d/%Y}".format(date)
def StringToDate(dateString):
""" Converts a String to a date """
return parser.parse(dateString)
|
[
"[email protected]"
] | |
c88654782e0687cc493dc2eb6dbdfc7506ce426d
|
cf025ea3bf079748472557304a290593c753b884
|
/Algorithm/SWEA/연습문제_큐_마이쮸(list).py
|
05a9a3af9874fc2a67cb7097febeae47d7ab0ed7
|
[] |
no_license
|
Silentsoul04/my_software_study
|
7dbb035ceea74f42c7ce2051b2320f6cae75ed88
|
c27d33c57f59fe5244a1041c11bbd826dd481546
|
refs/heads/master
| 2023-03-29T02:43:40.861045 | 2019-07-10T08:09:55 | 2019-07-10T08:09:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 289 |
py
|
front=-1
rear=-1
total=20
Q=[0]*100000
idx=0
while True:
    new_sn=[idx+1,0]
    rear+=1
    Q[rear]=new_sn
    front+=1
    total-=Q[front][1]+1
    if total<=0:
        print(Q[front][0])
        break
    else:
        rear+=1
        Q[rear]=[Q[front][0],Q[front][1]+1]
    idx+=1
|
[
"[email protected]"
] | |
a5214df67c7e06950cf5a6f59334fcf6b5e51b8b
|
c4764283f6d3eb9ee77c05d489ec0763a40c9925
|
/Plot_Graphs.py
|
c125326400c76ff415a368500231cf4e8cfc89c6
|
[] |
no_license
|
Arseni1919/simulator_dcop_mst
|
09475b6369a28120efabf4cfa29a973c80846f9d
|
7f2c8a3066e1760df773ed9a92fdaab67942b20c
|
refs/heads/master
| 2020-08-06T20:32:12.091581 | 2020-05-11T09:02:06 | 2020-05-11T09:02:06 | 213,143,452 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,135 |
py
|
from main_help_functions import *
# ---------------------------
# ------INPUT SETTINGS-------
# ---------------------------
file_name = "08.01.2020-22_54_47__DSA__Max_sum_MST_vanilla__DSA_HPA__Max_sum_HPA__Max_sum_TAC_DSA_vs_MS_file.data"
need_to_plot_variance = False
need_to_plot_min_max = False
# ---------------------------
file_name = 'data/%s' % file_name
file_name = file_name[:-5] + '.info'
with open(file_name, 'rb') as fileObject:
    # load the object from the file into var b
    info = pickle.load(fileObject)
for k, v in info['collisions'].items():
    print(k, 'collisions mean: ', (statistics.mean(v)/2), 'std:', (statistics.stdev(v)/2))
pprint(info)
file_name = file_name[:-5] + '.data'
with open(file_name, 'rb') as fileObject:
    # load the object from the file into var b
    graphs = pickle.load(fileObject)
algorithms = list(graphs.keys())
plot_results_if(True, need_to_plot_variance, need_to_plot_min_max, graphs, algorithms, alpha=0.025)
# '.'
# ','
# 'o'
# 'v'
# '^'
# '<'
# '>'
# '1'
# '2'
# '3'
# '4'
# 's'
# 'p'
# '*'
# 'h'
# 'H'
# '+'
# 'x'
# 'D'
# 'd'
# '|'
# '_'
|
[
"[email protected]"
] | |
5283b261f46dde76d6167da8cb2c5dd383eb6a7e
|
ea5762e8754d6b039963b0125822afb261844cc8
|
/docs/_examples/drx_numpy-numba.py
|
210d21040bafbc04be9cf0bee76fef45d132e451
|
[
"MIT"
] |
permissive
|
gonzalocasas/compas
|
787977a4712fbfb9e230c4f433b6e2be509e4855
|
2fabc7e5c966a02d823fa453564151e1a1e7e3c6
|
refs/heads/master
| 2020-03-23T20:17:55.126856 | 2018-07-24T22:30:08 | 2018-07-24T22:30:08 | 142,033,431 | 0 | 0 |
MIT
| 2018-07-31T14:54:52 | 2018-07-23T15:27:19 |
Python
|
UTF-8
|
Python
| false | false | 1,838 |
py
|
"""A dynamic relaxation example comparing NumPy with Numba."""
from compas_blender.geometry import BlenderMesh
from compas_blender.helpers import network_from_bmesh
from compas_blender.utilities import clear_layer
from compas_blender.utilities import draw_plane
from compas.numerical import drx_numpy
from compas.hpc import drx_numba
from time import time
from matplotlib import pyplot as plt
__author__ = ['Andrew Liew <[email protected]>']
__copyright__ = 'Copyright 2017, BLOCK Research Group - ETH Zurich'
__license__ = 'MIT License'
__email__ = '[email protected]'
data = {'numpy': [], 'numba': [], 'nodes': []}
for m in range(10, 71, 5):
    clear_layer(layer=0)
    # Set-up Network
    bmesh = draw_plane(dx=1/m, dy=1/m)
    blendermesh = BlenderMesh(object=bmesh)
    network = network_from_bmesh(bmesh=bmesh)
    Pz = 100 / network.number_of_vertices()
    network.update_default_vertex_attributes({'B': [0, 0, 1], 'P': [0, 0, Pz]})
    network.update_default_edge_attributes({'E': 10, 'A': 1, 'ct': 't', 'l0': 1/m})
    corners = [key for key in network.vertices() if network.vertex_degree(key) == 2]
    network.set_vertices_attributes(corners, {'B': [0, 0, 0]})
    data['nodes'].append(network.number_of_vertices())
    # Numpy-SciPy
    tic = time()
    X, f, l = drx_numpy(network=network, tol=0.01)
    data['numpy'].append(time() - tic)
    blendermesh.update_vertices(X)
    # Numba
    tic = time()
    X, f, l = drx_numba(network=network, tol=0.01)
    data['numba'].append(time() - tic)
    blendermesh.update_vertices(X)
# Plot data
plt.plot(data['nodes'], data['numpy'])
plt.plot(data['nodes'], data['numba'])
plt.ylabel('Analysis time [s]')
plt.xlabel('No. nodes')
plt.legend(['NumPy-SciPy', 'Numba'])
plt.xlim([0, data['nodes'][-1]])
plt.ylim([0, max(data['numpy'])])
plt.show()
|
[
"[email protected]"
] | |
759e6bda7235ac11b4f4d331bff779a33d0164b5
|
875c597ab0bb0af1fe3a78944b1424f0defd164b
|
/flask/flaskr/blog/blog.py
|
424621b8a1a12abb1458adcc59a6d6d2988a7dc9
|
[] |
no_license
|
alls77/lectures
|
20dcbb0e69a7a2db0eaed2f8c858e8c1d1483c77
|
4695c35b811176a2ad66c9417708158bf5052d23
|
refs/heads/master
| 2022-05-22T12:35:04.131269 | 2020-01-27T16:31:15 | 2020-01-27T16:31:15 | 221,741,618 | 0 | 0 | null | 2022-04-22T23:01:54 | 2019-11-14T16:33:28 |
Python
|
UTF-8
|
Python
| false | false | 3,102 |
py
|
from flask import Blueprint
from flask import flash
from flask import g
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
from werkzeug.exceptions import abort
from flaskr.auth.auth import login_required
from flaskr.db import get_db
from flaskr.blog.queries import (
create_post, delete_post, get_post, update_post, post_list
)
bp = Blueprint("blog", __name__)
@bp.route("/")
def index():
"""Show all the posts, most recent first."""
db = get_db()
posts = post_list(db)
return render_template("blog/index.html", posts=posts)
def check_post(id, check_author=True):
"""Get a post and its author by id.
Checks that the id exists and optionally that the current user is
the author.
:param id: id of post to get
:param check_author: require the current user to be the author
:return: the post with author information
:raise 404: if a post with the given id doesn't exist
:raise 403: if the current user isn't the author
"""
post = get_post(get_db(), id)
if post is None:
abort(404, "Post id {0} doesn't exist.".format(id))
if check_author and post["author_id"] != g.user["id"]:
abort(403)
return post
@bp.route("/create", methods=("GET", "POST"))
@login_required
def create():
"""Create a new post for the current user."""
if request.method == "POST":
error = None
# TODO: get title and body from the form
title = request.form['title']
body = request.form['body']
# TODO: title is a required field. If it is missing, record an error
if title is None:
error = 'Title is required'
if error is not None:
flash(error)
else:
db = get_db()
create_post(db, title, body, g.user["id"])
return redirect(url_for("blog.index"))
return render_template("blog/create.html")
@bp.route("/<int:id>/update", methods=("GET", "POST"))
@login_required
def update(id):
"""Update a post if the current user is the author."""
post = check_post(id)
if request.method == "POST":
error = None
# TODO: get title and body from the form
title = request.form['title']
body = request.form['body']
# TODO: title is a required field. If it is missing, record an error
if title is None:
error = 'Title is None'
if error is not None:
flash(error)
else:
db = get_db()
update_post(db, title, body, id)
return redirect(url_for("blog.index"))
return render_template("blog/update.html", post=post)
@bp.route("/<int:id>/delete", methods=("POST",))
@login_required
def delete(id):
"""Delete a post.
Ensures that the post exists and that the logged in user is the
author of the post.
"""
check_post(id)
db = get_db()
delete_post(db, id)
return redirect(url_for("blog.index"))
|
[
"[email protected]"
] | |
41006f95fe2e28137f3887419d27eca666de4098
|
83de24182a7af33c43ee340b57755e73275149ae
|
/aliyun-python-sdk-ccc/aliyunsdkccc/request/v20200701/ListHistoricalAgentReportRequest.py
|
4772cc3adbe8a9211facc64e47f783fba1e7272c
|
[
"Apache-2.0"
] |
permissive
|
aliyun/aliyun-openapi-python-sdk
|
4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f
|
83fd547946fd6772cf26f338d9653f4316c81d3c
|
refs/heads/master
| 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 |
NOASSERTION
| 2023-09-14T08:51:06 | 2015-07-23T09:39:45 |
Python
|
UTF-8
|
Python
| false | false | 2,409 |
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkccc.endpoint import endpoint_data
class ListHistoricalAgentReportRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'CCC', '2020-07-01', 'ListHistoricalAgentReport','CCC')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_StartTime(self): # Long
return self.get_query_params().get('StartTime')
def set_StartTime(self, StartTime): # Long
self.add_query_param('StartTime', StartTime)
def get_StopTime(self): # Long
return self.get_query_params().get('StopTime')
def set_StopTime(self, StopTime): # Long
self.add_query_param('StopTime', StopTime)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_AgentIdList(self): # String
return self.get_body_params().get('AgentIdList')
def set_AgentIdList(self, AgentIdList): # String
self.add_body_params('AgentIdList', AgentIdList)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
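# Illustrative usage sketch (not part of the generated file). AcsClient comes from
# aliyunsdkcore; the credentials, region and instance id below are placeholders.
if __name__ == '__main__':
    from aliyunsdkcore.client import AcsClient

    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-shanghai')
    request = ListHistoricalAgentReportRequest()
    request.set_InstanceId('<ccc-instance-id>')
    request.set_StartTime(1672531200000)   # epoch milliseconds
    request.set_StopTime(1672617600000)
    request.set_PageNumber(1)
    request.set_PageSize(10)
    # do_action_with_exception sends the request and returns the raw response body
    response = client.do_action_with_exception(request)
    print(response)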
|
[
"[email protected]"
] | |
daa4b586d66b0d3733b9473f4fd62ba0f8d27fca
|
f28b67f23b0e5aa6d008adbd6132ce1d97550ff2
|
/nuitka/build/inline_copy/lib/scons-3.0.4/SCons/Node/__init__.py
|
b6f3383b6151c479eb324c6a4282287ec6436b6f
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
prasunka/Nuitka
|
64580670425ed589f4a08fdb9dad3083eb155b13
|
92d90b55769ae884c568f046bbb066453eda666d
|
refs/heads/master
| 2020-06-19T18:07:57.141227 | 2019-06-07T21:32:37 | 2019-06-07T21:32:37 | 196,814,152 | 1 | 0 |
Apache-2.0
| 2019-07-14T08:50:32 | 2019-07-14T08:50:32 | null |
UTF-8
|
Python
| false | false | 62,831 |
py
|
"""SCons.Node
The Node package for the SCons software construction utility.
This is, in many ways, the heart of SCons.
A Node is where we encapsulate all of the dependency information about
any thing that SCons can build, or about any thing which SCons can use
to build some other thing. The canonical "thing," of course, is a file,
but a Node can also represent something remote (like a web page) or
something completely abstract (like an Alias).
Each specific type of "thing" is specifically represented by a subclass
of the Node base class: Node.FS.File for files, Node.Alias for aliases,
etc. Dependency information is kept here in the base class, and
information specific to files/aliases/etc. is in the subclass. The
goal, if we've done this correctly, is that any type of "thing" should
be able to depend on any other type of "thing."
"""
from __future__ import print_function
#
# Copyright (c) 2001 - 2019 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Node/__init__.py 3a41ed6b288cee8d085373ad7fa02894e1903864 2019-01-23 17:30:35 bdeegan"
import os
import collections
import copy
from itertools import chain
import SCons.Debug
from SCons.Debug import logInstanceCreation
import SCons.Executor
import SCons.Memoize
import SCons.Util
from SCons.Debug import Trace
from SCons.compat import with_metaclass, NoSlotsPyPy
print_duplicate = 0
def classname(obj):
return str(obj.__class__).split('.')[-1]
# Set to false if we're doing a dry run. There's more than one of these
# little treats
do_store_info = True
# Node states
#
# These are in "priority" order, so that the maximum value for any
# child/dependency of a node represents the state of that node if
# it has no builder of its own. The canonical example is a file
# system directory, which is only up to date if all of its children
# were up to date.
no_state = 0
pending = 1
executing = 2
up_to_date = 3
executed = 4
failed = 5
StateString = {
0 : "no_state",
1 : "pending",
2 : "executing",
3 : "up_to_date",
4 : "executed",
5 : "failed",
}
# controls whether implicit dependencies are cached:
implicit_cache = 0
# controls whether implicit dep changes are ignored:
implicit_deps_unchanged = 0
# controls whether the cached implicit deps are ignored:
implicit_deps_changed = 0
# A variable that can be set to an interface-specific function be called
# to annotate a Node with information about its creation.
def do_nothing(node): pass
Annotate = do_nothing
# Gets set to 'True' if we're running in interactive mode. Is
# currently used to release parts of a target's info during
# clean builds and update runs (see release_target_info).
interactive = False
def is_derived_none(node):
raise NotImplementedError
def is_derived_node(node):
"""
Returns true if this node is derived (i.e. built).
"""
return node.has_builder() or node.side_effect
_is_derived_map = {0 : is_derived_none,
1 : is_derived_node}
def exists_none(node):
raise NotImplementedError
def exists_always(node):
return 1
def exists_base(node):
return node.stat() is not None
def exists_entry(node):
"""Return if the Entry exists. Check the file system to see
what we should turn into first. Assume a file if there's no
directory."""
node.disambiguate()
return _exists_map[node._func_exists](node)
def exists_file(node):
# Duplicate from source path if we are set up to do this.
if node.duplicate and not node.is_derived() and not node.linked:
src = node.srcnode()
if src is not node:
# At this point, src is meant to be copied in a variant directory.
src = src.rfile()
if src.get_abspath() != node.get_abspath():
if src.exists():
node.do_duplicate(src)
# Can't return 1 here because the duplication might
# not actually occur if the -n option is being used.
else:
# The source file does not exist. Make sure no old
# copy remains in the variant directory.
if print_duplicate:
print("dup: no src for %s, unlinking old variant copy" % node)
if exists_base(node) or node.islink():
node.fs.unlink(node.get_internal_path())
# Return None explicitly because the Base.exists() call
# above will have cached its value if the file existed.
return None
return exists_base(node)
_exists_map = {0 : exists_none,
1 : exists_always,
2 : exists_base,
3 : exists_entry,
4 : exists_file}
def rexists_none(node):
raise NotImplementedError
def rexists_node(node):
return node.exists()
def rexists_base(node):
return node.rfile().exists()
_rexists_map = {0 : rexists_none,
1 : rexists_node,
2 : rexists_base}
def get_contents_none(node):
raise NotImplementedError
def get_contents_entry(node):
"""Fetch the contents of the entry. Returns the exact binary
contents of the file."""
try:
node = node.disambiguate(must_exist=1)
except SCons.Errors.UserError:
# There was nothing on disk with which to disambiguate
# this entry. Leave it as an Entry, but return a null
# string so calls to get_contents() in emitters and the
# like (e.g. in qt.py) don't have to disambiguate by hand
# or catch the exception.
return ''
else:
return _get_contents_map[node._func_get_contents](node)
def get_contents_dir(node):
"""Return content signatures and names of all our children
separated by new-lines. Ensure that the nodes are sorted."""
contents = []
for n in sorted(node.children(), key=lambda t: t.name):
contents.append('%s %s\n' % (n.get_csig(), n.name))
return ''.join(contents)
def get_contents_file(node):
if not node.rexists():
return b''
fname = node.rfile().get_abspath()
try:
with open(fname, "rb") as fp:
contents = fp.read()
except EnvironmentError as e:
if not e.filename:
e.filename = fname
raise
return contents
_get_contents_map = {0 : get_contents_none,
1 : get_contents_entry,
2 : get_contents_dir,
3 : get_contents_file}
def target_from_source_none(node, prefix, suffix, splitext):
raise NotImplementedError
def target_from_source_base(node, prefix, suffix, splitext):
return node.dir.Entry(prefix + splitext(node.name)[0] + suffix)
_target_from_source_map = {0 : target_from_source_none,
1 : target_from_source_base}
#
# The new decider subsystem for Nodes
#
# We would set and overwrite the changed_since_last_build function
# before, but for being able to use slots (less memory!) we now have
# a dictionary of the different decider functions. Then in the Node
# subclasses we simply store the index to the decider that should be
# used by it.
#
class DeciderNeedsNode(Exception):
"""
Indicate that the decider needs the node as well as the target and the dependency.
    Normally the node and the target are the same, but in the case of a repository
    they may be different. Also, the NodeInfo is retrieved from the node.
"""
def __init__(self, call_this_decider):
"""
:param call_this_decider: to return the decider to call directly since deciders
are called through several levels of indirection
"""
self.decider = call_this_decider
#
# First, the single decider functions
#
def changed_since_last_build_node(node, target, prev_ni):
"""
Must be overridden in a specific subclass to return True if this
Node (a dependency) has changed since the last time it was used
to build the specified target. prev_ni is this Node's state (for
example, its file timestamp, length, maybe content signature)
as of the last time the target was built.
Note that this method is called through the dependency, not the
target, because a dependency Node must be able to use its own
logic to decide if it changed. For example, File Nodes need to
obey if we're configured to use timestamps, but Python Value Nodes
never use timestamps and always use the content. If this method
were called through the target, then each Node's implementation
of this method would have to have more complicated logic to
handle all the different Node types on which it might depend.
"""
raise NotImplementedError
def changed_since_last_build_alias(node, target, prev_ni):
cur_csig = node.get_csig()
try:
return cur_csig != prev_ni.csig
except AttributeError:
return 1
def changed_since_last_build_entry(node, target, prev_ni):
node.disambiguate()
return _decider_map[node.changed_since_last_build](node, target, prev_ni)
def changed_since_last_build_state_changed(node, target, prev_ni):
return node.state != SCons.Node.up_to_date
def decide_source(node, target, prev_ni):
return target.get_build_env().decide_source(node, target, prev_ni)
def decide_target(node, target, prev_ni):
return target.get_build_env().decide_target(node, target, prev_ni)
def changed_since_last_build_python(node, target, prev_ni):
cur_csig = node.get_csig()
try:
return cur_csig != prev_ni.csig
except AttributeError:
return 1
#
# Now, the mapping from indices to decider functions
#
_decider_map = {0 : changed_since_last_build_node,
1 : changed_since_last_build_alias,
2 : changed_since_last_build_entry,
3 : changed_since_last_build_state_changed,
4 : decide_source,
5 : decide_target,
6 : changed_since_last_build_python}
do_store_info = True
#
# The new store_info subsystem for Nodes
#
# We would set and overwrite the store_info function
# before, but for being able to use slots (less memory!) we now have
# a dictionary of the different functions. Then in the Node
# subclasses we simply store the index to the info method that should be
# used by it.
#
#
# First, the single info functions
#
def store_info_pass(node):
pass
def store_info_file(node):
# Merge our build information into the already-stored entry.
# This accommodates "chained builds" where a file that's a target
# in one build (SConstruct file) is a source in a different build.
# See test/chained-build.py for the use case.
if do_store_info:
node.dir.sconsign().store_info(node.name, node)
store_info_map = {0 : store_info_pass,
1 : store_info_file}
# Classes for signature info for Nodes.
class NodeInfoBase(object):
"""
The generic base class for signature information for a Node.
Node subclasses should subclass NodeInfoBase to provide their own
logic for dealing with their own Node-specific signature information.
"""
__slots__ = ('__weakref__',)
current_version_id = 2
def update(self, node):
try:
field_list = self.field_list
except AttributeError:
return
for f in field_list:
try:
delattr(self, f)
except AttributeError:
pass
try:
func = getattr(node, 'get_' + f)
except AttributeError:
pass
else:
setattr(self, f, func())
def convert(self, node, val):
pass
def merge(self, other):
"""
Merge the fields of another object into this object. Already existing
information is overwritten by the other instance's data.
WARNING: If a '__dict__' slot is added, it should be updated instead of
replaced.
"""
state = other.__getstate__()
self.__setstate__(state)
def format(self, field_list=None, names=0):
if field_list is None:
try:
field_list = self.field_list
except AttributeError:
field_list = list(getattr(self, '__dict__', {}).keys())
for obj in type(self).mro():
for slot in getattr(obj, '__slots__', ()):
if slot not in ('__weakref__', '__dict__'):
field_list.append(slot)
field_list.sort()
fields = []
for field in field_list:
try:
f = getattr(self, field)
except AttributeError:
f = None
f = str(f)
if names:
f = field + ': ' + f
fields.append(f)
return fields
def __getstate__(self):
"""
Return all fields that shall be pickled. Walk the slots in the class
hierarchy and add those to the state dictionary. If a '__dict__' slot is
available, copy all entries to the dictionary. Also include the version
id, which is fixed for all instances of a class.
"""
state = getattr(self, '__dict__', {}).copy()
for obj in type(self).mro():
for name in getattr(obj,'__slots__',()):
if hasattr(self, name):
state[name] = getattr(self, name)
state['_version_id'] = self.current_version_id
try:
del state['__weakref__']
except KeyError:
pass
return state
def __setstate__(self, state):
"""
Restore the attributes from a pickled state. The version is discarded.
"""
# TODO check or discard version
del state['_version_id']
for key, value in state.items():
if key not in ('__weakref__',):
setattr(self, key, value)
class BuildInfoBase(object):
"""
The generic base class for build information for a Node.
This is what gets stored in a .sconsign file for each target file.
It contains a NodeInfo instance for this node (signature information
that's specific to the type of Node) and direct attributes for the
generic build stuff we have to track: sources, explicit dependencies,
implicit dependencies, and action information.
"""
__slots__ = ("bsourcesigs", "bdependsigs", "bimplicitsigs", "bactsig",
"bsources", "bdepends", "bact", "bimplicit", "__weakref__")
current_version_id = 2
def __init__(self):
# Create an object attribute from the class attribute so it ends up
# in the pickled data in the .sconsign file.
self.bsourcesigs = []
self.bdependsigs = []
self.bimplicitsigs = []
self.bactsig = None
def merge(self, other):
"""
Merge the fields of another object into this object. Already existing
information is overwritten by the other instance's data.
WARNING: If a '__dict__' slot is added, it should be updated instead of
replaced.
"""
state = other.__getstate__()
self.__setstate__(state)
def __getstate__(self):
"""
Return all fields that shall be pickled. Walk the slots in the class
hierarchy and add those to the state dictionary. If a '__dict__' slot is
available, copy all entries to the dictionary. Also include the version
id, which is fixed for all instances of a class.
"""
state = getattr(self, '__dict__', {}).copy()
for obj in type(self).mro():
for name in getattr(obj,'__slots__',()):
if hasattr(self, name):
state[name] = getattr(self, name)
state['_version_id'] = self.current_version_id
try:
del state['__weakref__']
except KeyError:
pass
return state
def __setstate__(self, state):
"""
Restore the attributes from a pickled state.
"""
# TODO check or discard version
del state['_version_id']
for key, value in state.items():
if key not in ('__weakref__',):
setattr(self, key, value)
class Node(object, with_metaclass(NoSlotsPyPy)):
"""The base Node class, for entities that we know how to
build, or use to build other Nodes.
"""
__slots__ = ['sources',
'sources_set',
'_specific_sources',
'depends',
'depends_set',
'ignore',
'ignore_set',
'prerequisites',
'implicit',
'waiting_parents',
'waiting_s_e',
'ref_count',
'wkids',
'env',
'state',
'precious',
'noclean',
'nocache',
'cached',
'always_build',
'includes',
'attributes',
'side_effect',
'side_effects',
'linked',
'_memo',
'executor',
'binfo',
'ninfo',
'builder',
'is_explicit',
'implicit_set',
'changed_since_last_build',
'store_info',
'pseudo',
'_tags',
'_func_is_derived',
'_func_exists',
'_func_rexists',
'_func_get_contents',
'_func_target_from_source']
class Attrs(object):
__slots__ = ('shared', '__dict__')
def __init__(self):
if SCons.Debug.track_instances: logInstanceCreation(self, 'Node.Node')
# Note that we no longer explicitly initialize a self.builder
# attribute to None here. That's because the self.builder
# attribute may be created on-the-fly later by a subclass (the
# canonical example being a builder to fetch a file from a
# source code system like CVS or Subversion).
# Each list of children that we maintain is accompanied by a
# dictionary used to look up quickly whether a node is already
# present in the list. Empirical tests showed that it was
# fastest to maintain them as side-by-side Node attributes in
# this way, instead of wrapping up each list+dictionary pair in
# a class. (Of course, we could always still do that in the
# future if we had a good reason to...).
self.sources = [] # source files used to build node
self.sources_set = set()
self._specific_sources = False
self.depends = [] # explicit dependencies (from Depends)
self.depends_set = set()
self.ignore = [] # dependencies to ignore
self.ignore_set = set()
self.prerequisites = None
self.implicit = None # implicit (scanned) dependencies (None means not scanned yet)
self.waiting_parents = set()
self.waiting_s_e = set()
self.ref_count = 0
self.wkids = None # Kids yet to walk, when it's an array
self.env = None
self.state = no_state
self.precious = None
self.pseudo = False
self.noclean = 0
self.nocache = 0
self.cached = 0 # is this node pulled from cache?
self.always_build = None
self.includes = None
self.attributes = self.Attrs() # Generic place to stick information about the Node.
self.side_effect = 0 # true iff this node is a side effect
self.side_effects = [] # the side effects of building this target
self.linked = 0 # is this node linked to the variant directory?
self.changed_since_last_build = 0
self.store_info = 0
self._tags = None
self._func_is_derived = 1
self._func_exists = 1
self._func_rexists = 1
self._func_get_contents = 0
self._func_target_from_source = 0
self.clear_memoized_values()
# Let the interface in which the build engine is embedded
# annotate this Node with its own info (like a description of
# what line in what file created the node, for example).
Annotate(self)
def disambiguate(self, must_exist=None):
return self
def get_suffix(self):
return ''
@SCons.Memoize.CountMethodCall
def get_build_env(self):
"""Fetch the appropriate Environment to build this node.
"""
try:
return self._memo['get_build_env']
except KeyError:
pass
result = self.get_executor().get_build_env()
self._memo['get_build_env'] = result
return result
def get_build_scanner_path(self, scanner):
"""Fetch the appropriate scanner path for this node."""
return self.get_executor().get_build_scanner_path(scanner)
def set_executor(self, executor):
"""Set the action executor for this node."""
self.executor = executor
def get_executor(self, create=1):
"""Fetch the action executor for this node. Create one if
there isn't already one, and requested to do so."""
try:
executor = self.executor
except AttributeError:
if not create:
raise
try:
act = self.builder.action
except AttributeError:
executor = SCons.Executor.Null(targets=[self])
else:
executor = SCons.Executor.Executor(act,
self.env or self.builder.env,
[self.builder.overrides],
[self],
self.sources)
self.executor = executor
return executor
def executor_cleanup(self):
"""Let the executor clean up any cached information."""
try:
executor = self.get_executor(create=None)
except AttributeError:
pass
else:
if executor is not None:
executor.cleanup()
def reset_executor(self):
"""Remove cached executor; forces recompute when needed."""
try:
delattr(self, 'executor')
except AttributeError:
pass
def push_to_cache(self):
"""Try to push a node into a cache
"""
pass
def retrieve_from_cache(self):
"""Try to retrieve the node's content from a cache
This method is called from multiple threads in a parallel build,
so only do thread safe stuff here. Do thread unsafe stuff in
built().
Returns true if the node was successfully retrieved.
"""
return 0
#
# Taskmaster interface subsystem
#
def make_ready(self):
"""Get a Node ready for evaluation.
This is called before the Taskmaster decides if the Node is
up-to-date or not. Overriding this method allows for a Node
subclass to be disambiguated if necessary, or for an implicit
source builder to be attached.
"""
pass
def prepare(self):
"""Prepare for this Node to be built.
This is called after the Taskmaster has decided that the Node
is out-of-date and must be rebuilt, but before actually calling
the method to build the Node.
This default implementation checks that explicit or implicit
dependencies either exist or are derived, and initializes the
BuildInfo structure that will hold the information about how
this node is, uh, built.
(The existence of source files is checked separately by the
Executor, which aggregates checks for all of the targets built
by a specific action.)
        Overriding this method allows a Node subclass to remove
the underlying file from the file system. Note that subclass
methods should call this base class method to get the child
check and the BuildInfo structure.
"""
if self.depends is not None:
for d in self.depends:
if d.missing():
msg = "Explicit dependency `%s' not found, needed by target `%s'."
raise SCons.Errors.StopError(msg % (d, self))
if self.implicit is not None:
for i in self.implicit:
if i.missing():
msg = "Implicit dependency `%s' not found, needed by target `%s'."
raise SCons.Errors.StopError(msg % (i, self))
self.binfo = self.get_binfo()
def build(self, **kw):
"""Actually build the node.
This is called by the Taskmaster after it's decided that the
Node is out-of-date and must be rebuilt, and after the prepare()
method has gotten everything, uh, prepared.
This method is called from multiple threads in a parallel build,
so only do thread safe stuff here. Do thread unsafe stuff
in built().
"""
try:
self.get_executor()(self, **kw)
except SCons.Errors.BuildError as e:
e.node = self
raise
def built(self):
"""Called just after this node is successfully built."""
# Clear the implicit dependency caches of any Nodes
# waiting for this Node to be built.
for parent in self.waiting_parents:
parent.implicit = None
self.clear()
if self.pseudo:
if self.exists():
raise SCons.Errors.UserError("Pseudo target " + str(self) + " must not exist")
else:
if not self.exists() and do_store_info:
SCons.Warnings.warn(SCons.Warnings.TargetNotBuiltWarning,
"Cannot find target " + str(self) + " after building")
self.ninfo.update(self)
def visited(self):
"""Called just after this node has been visited (with or
without a build)."""
try:
binfo = self.binfo
except AttributeError:
# Apparently this node doesn't need build info, so
# don't bother calculating or storing it.
pass
else:
self.ninfo.update(self)
SCons.Node.store_info_map[self.store_info](self)
def release_target_info(self):
"""Called just after this node has been marked
up-to-date or was built completely.
This is where we try to release as many target node infos
as possible for clean builds and update runs, in order
to minimize the overall memory consumption.
By purging attributes that aren't needed any longer after
a Node (=File) got built, we don't have to care that much how
many KBytes a Node actually requires...as long as we free
the memory shortly afterwards.
@see: built() and File.release_target_info()
"""
pass
#
#
#
def add_to_waiting_s_e(self, node):
self.waiting_s_e.add(node)
def add_to_waiting_parents(self, node):
"""
Returns the number of nodes added to our waiting parents list:
1 if we add a unique waiting parent, 0 if not. (Note that the
returned values are intended to be used to increment a reference
count, so don't think you can "clean up" this function by using
True and False instead...)
"""
wp = self.waiting_parents
if node in wp:
return 0
wp.add(node)
return 1
def postprocess(self):
"""Clean up anything we don't need to hang onto after we've
been built."""
self.executor_cleanup()
self.waiting_parents = set()
def clear(self):
"""Completely clear a Node of all its cached state (so that it
can be re-evaluated by interfaces that do continuous integration
builds).
"""
# The del_binfo() call here isn't necessary for normal execution,
# but is for interactive mode, where we might rebuild the same
# target and need to start from scratch.
self.del_binfo()
self.clear_memoized_values()
self.ninfo = self.new_ninfo()
self.executor_cleanup()
try:
delattr(self, '_calculated_sig')
except AttributeError:
pass
self.includes = None
def clear_memoized_values(self):
self._memo = {}
def builder_set(self, builder):
self.builder = builder
try:
del self.executor
except AttributeError:
pass
def has_builder(self):
"""Return whether this Node has a builder or not.
In Boolean tests, this turns out to be a *lot* more efficient
than simply examining the builder attribute directly ("if
node.builder: ..."). When the builder attribute is examined
directly, it ends up calling __getattr__ for both the __len__
and __nonzero__ attributes on instances of our Builder Proxy
class(es), generating a bazillion extra calls and slowing
things down immensely.
"""
try:
b = self.builder
except AttributeError:
# There was no explicit builder for this Node, so initialize
# the self.builder attribute to None now.
b = self.builder = None
return b is not None
def set_explicit(self, is_explicit):
self.is_explicit = is_explicit
def has_explicit_builder(self):
"""Return whether this Node has an explicit builder
This allows an internal Builder created by SCons to be marked
non-explicit, so that it can be overridden by an explicit
builder that the user supplies (the canonical example being
directories)."""
try:
return self.is_explicit
except AttributeError:
self.is_explicit = None
return self.is_explicit
def get_builder(self, default_builder=None):
"""Return the set builder, or a specified default value"""
try:
return self.builder
except AttributeError:
return default_builder
multiple_side_effect_has_builder = has_builder
def is_derived(self):
"""
Returns true if this node is derived (i.e. built).
This should return true only for nodes whose path should be in
the variant directory when duplicate=0 and should contribute their build
signatures when they are used as source files to other derived files. For
example: source with source builders are not derived in this sense,
and hence should not return true.
"""
return _is_derived_map[self._func_is_derived](self)
def alter_targets(self):
"""Return a list of alternate targets for this Node.
"""
return [], None
def get_found_includes(self, env, scanner, path):
"""Return the scanned include lines (implicit dependencies)
found in this node.
The default is no implicit dependencies. We expect this method
to be overridden by any subclass that can be scanned for
implicit dependencies.
"""
return []
def get_implicit_deps(self, env, initial_scanner, path_func, kw = {}):
"""Return a list of implicit dependencies for this node.
This method exists to handle recursive invocation of the scanner
on the implicit dependencies returned by the scanner, if the
scanner's recursive flag says that we should.
"""
nodes = [self]
seen = set(nodes)
dependencies = []
path_memo = {}
root_node_scanner = self._get_scanner(env, initial_scanner, None, kw)
while nodes:
node = nodes.pop(0)
scanner = node._get_scanner(env, initial_scanner, root_node_scanner, kw)
if not scanner:
continue
try:
path = path_memo[scanner]
except KeyError:
path = path_func(scanner)
path_memo[scanner] = path
included_deps = [x for x in node.get_found_includes(env, scanner, path) if x not in seen]
if included_deps:
dependencies.extend(included_deps)
seen.update(included_deps)
nodes.extend(scanner.recurse_nodes(included_deps))
return dependencies
def _get_scanner(self, env, initial_scanner, root_node_scanner, kw):
if initial_scanner:
# handle explicit scanner case
scanner = initial_scanner.select(self)
else:
# handle implicit scanner case
scanner = self.get_env_scanner(env, kw)
if scanner:
scanner = scanner.select(self)
if not scanner:
# no scanner could be found for the given node's scanner key;
# thus, make an attempt at using a default.
scanner = root_node_scanner
return scanner
def get_env_scanner(self, env, kw={}):
return env.get_scanner(self.scanner_key())
def get_target_scanner(self):
return self.builder.target_scanner
def get_source_scanner(self, node):
"""Fetch the source scanner for the specified node
NOTE: "self" is the target being built, "node" is
the source file for which we want to fetch the scanner.
Implies self.has_builder() is true; again, expect to only be
called from locations where this is already verified.
This function may be called very often; it attempts to cache
the scanner found to improve performance.
"""
scanner = None
try:
scanner = self.builder.source_scanner
except AttributeError:
pass
if not scanner:
# The builder didn't have an explicit scanner, so go look up
# a scanner from env['SCANNERS'] based on the node's scanner
# key (usually the file extension).
scanner = self.get_env_scanner(self.get_build_env())
if scanner:
scanner = scanner.select(node)
return scanner
def add_to_implicit(self, deps):
if not hasattr(self, 'implicit') or self.implicit is None:
self.implicit = []
self.implicit_set = set()
self._children_reset()
self._add_child(self.implicit, self.implicit_set, deps)
def scan(self):
"""Scan this node's dependents for implicit dependencies."""
# Don't bother scanning non-derived files, because we don't
# care what their dependencies are.
# Don't scan again, if we already have scanned.
if self.implicit is not None:
return
self.implicit = []
self.implicit_set = set()
self._children_reset()
if not self.has_builder():
return
build_env = self.get_build_env()
executor = self.get_executor()
# Here's where we implement --implicit-cache.
if implicit_cache and not implicit_deps_changed:
implicit = self.get_stored_implicit()
if implicit is not None:
# We now add the implicit dependencies returned from the
# stored .sconsign entry to have already been converted
# to Nodes for us. (We used to run them through a
# source_factory function here.)
# Update all of the targets with them. This
# essentially short-circuits an N*M scan of the
# sources for each individual target, which is a hell
# of a lot more efficient.
for tgt in executor.get_all_targets():
tgt.add_to_implicit(implicit)
if implicit_deps_unchanged or self.is_up_to_date():
return
# one of this node's sources has changed,
# so we must recalculate the implicit deps for all targets
for tgt in executor.get_all_targets():
tgt.implicit = []
tgt.implicit_set = set()
# Have the executor scan the sources.
executor.scan_sources(self.builder.source_scanner)
# If there's a target scanner, have the executor scan the target
# node itself and associated targets that might be built.
scanner = self.get_target_scanner()
if scanner:
executor.scan_targets(scanner)
def scanner_key(self):
return None
def select_scanner(self, scanner):
"""Selects a scanner for this Node.
This is a separate method so it can be overridden by Node
subclasses (specifically, Node.FS.Dir) that *must* use their
own Scanner and don't select one the Scanner.Selector that's
configured for the target.
"""
return scanner.select(self)
def env_set(self, env, safe=0):
if safe and self.env:
return
self.env = env
#
# SIGNATURE SUBSYSTEM
#
NodeInfo = NodeInfoBase
BuildInfo = BuildInfoBase
def new_ninfo(self):
ninfo = self.NodeInfo()
return ninfo
def get_ninfo(self):
try:
return self.ninfo
except AttributeError:
self.ninfo = self.new_ninfo()
return self.ninfo
def new_binfo(self):
binfo = self.BuildInfo()
return binfo
def get_binfo(self):
"""
Fetch a node's build information.
node - the node whose sources will be collected
cache - alternate node to use for the signature cache
returns - the build signature
This no longer handles the recursive descent of the
node's children's signatures. We expect that they're
already built and updated by someone else, if that's
what's wanted.
"""
try:
return self.binfo
except AttributeError:
pass
binfo = self.new_binfo()
self.binfo = binfo
executor = self.get_executor()
ignore_set = self.ignore_set
if self.has_builder():
binfo.bact = str(executor)
binfo.bactsig = SCons.Util.MD5signature(executor.get_contents())
if self._specific_sources:
sources = [s for s in self.sources if not s in ignore_set]
else:
sources = executor.get_unignored_sources(self, self.ignore)
seen = set()
binfo.bsources = [s for s in sources if s not in seen and not seen.add(s)]
binfo.bsourcesigs = [s.get_ninfo() for s in binfo.bsources]
binfo.bdepends = [d for d in self.depends if d not in ignore_set]
binfo.bdependsigs = [d.get_ninfo() for d in self.depends]
# Because self.implicit is initialized to None (and not empty list [])
# we have to handle this case
if not self.implicit:
binfo.bimplicit = []
binfo.bimplicitsigs = []
else:
binfo.bimplicit = [i for i in self.implicit if i not in ignore_set]
binfo.bimplicitsigs = [i.get_ninfo() for i in binfo.bimplicit]
return binfo
def del_binfo(self):
"""Delete the build info from this node."""
try:
delattr(self, 'binfo')
except AttributeError:
pass
def get_csig(self):
try:
return self.ninfo.csig
except AttributeError:
ninfo = self.get_ninfo()
ninfo.csig = SCons.Util.MD5signature(self.get_contents())
return self.ninfo.csig
def get_cachedir_csig(self):
return self.get_csig()
def get_stored_info(self):
return None
def get_stored_implicit(self):
"""Fetch the stored implicit dependencies"""
return None
#
#
#
def set_precious(self, precious = 1):
"""Set the Node's precious value."""
self.precious = precious
def set_pseudo(self, pseudo = True):
"""Set the Node's precious value."""
self.pseudo = pseudo
def set_noclean(self, noclean = 1):
"""Set the Node's noclean value."""
# Make sure noclean is an integer so the --debug=stree
# output in Util.py can use it as an index.
self.noclean = noclean and 1 or 0
def set_nocache(self, nocache = 1):
"""Set the Node's nocache value."""
# Make sure nocache is an integer so the --debug=stree
# output in Util.py can use it as an index.
self.nocache = nocache and 1 or 0
def set_always_build(self, always_build = 1):
"""Set the Node's always_build value."""
self.always_build = always_build
def exists(self):
"""Does this node exists?"""
return _exists_map[self._func_exists](self)
def rexists(self):
"""Does this node exist locally or in a repository?"""
# There are no repositories by default:
return _rexists_map[self._func_rexists](self)
def get_contents(self):
"""Fetch the contents of the entry."""
return _get_contents_map[self._func_get_contents](self)
def missing(self):
return not self.is_derived() and \
not self.linked and \
not self.rexists()
def remove(self):
"""Remove this Node: no-op by default."""
return None
def add_dependency(self, depend):
"""Adds dependencies."""
try:
self._add_child(self.depends, self.depends_set, depend)
except TypeError as e:
e = e.args[0]
if SCons.Util.is_List(e):
s = list(map(str, e))
else:
s = str(e)
raise SCons.Errors.UserError("attempted to add a non-Node dependency to %s:\n\t%s is a %s, not a Node" % (str(self), s, type(e)))
def add_prerequisite(self, prerequisite):
"""Adds prerequisites"""
if self.prerequisites is None:
self.prerequisites = SCons.Util.UniqueList()
self.prerequisites.extend(prerequisite)
self._children_reset()
def add_ignore(self, depend):
"""Adds dependencies to ignore."""
try:
self._add_child(self.ignore, self.ignore_set, depend)
except TypeError as e:
e = e.args[0]
if SCons.Util.is_List(e):
s = list(map(str, e))
else:
s = str(e)
raise SCons.Errors.UserError("attempted to ignore a non-Node dependency of %s:\n\t%s is a %s, not a Node" % (str(self), s, type(e)))
def add_source(self, source):
"""Adds sources."""
if self._specific_sources:
return
try:
self._add_child(self.sources, self.sources_set, source)
except TypeError as e:
e = e.args[0]
if SCons.Util.is_List(e):
s = list(map(str, e))
else:
s = str(e)
raise SCons.Errors.UserError("attempted to add a non-Node as source of %s:\n\t%s is a %s, not a Node" % (str(self), s, type(e)))
def _add_child(self, collection, set, child):
"""Adds 'child' to 'collection', first checking 'set' to see if it's
already present."""
added = None
for c in child:
if c not in set:
set.add(c)
collection.append(c)
added = 1
if added:
self._children_reset()
def set_specific_source(self, source):
self.add_source(source)
self._specific_sources = True
def add_wkid(self, wkid):
"""Add a node to the list of kids waiting to be evaluated"""
if self.wkids is not None:
self.wkids.append(wkid)
def _children_reset(self):
self.clear_memoized_values()
# We need to let the Executor clear out any calculated
# build info that it's cached so we can re-calculate it.
self.executor_cleanup()
@SCons.Memoize.CountMethodCall
def _children_get(self):
try:
return self._memo['_children_get']
except KeyError:
pass
# The return list may contain duplicate Nodes, especially in
# source trees where there are a lot of repeated #includes
# of a tangle of .h files. Profiling shows, however, that
# eliminating the duplicates with a brute-force approach that
# preserves the order (that is, something like:
#
# u = []
# for n in list:
# if n not in u:
# u.append(n)"
#
# takes more cycles than just letting the underlying methods
# hand back cached values if a Node's information is requested
# multiple times. (Other methods of removing duplicates, like
# using dictionary keys, lose the order, and the only ordered
# dictionary patterns I found all ended up using "not in"
# internally anyway...)
if self.ignore_set:
iter = chain.from_iterable([_f for _f in [self.sources, self.depends, self.implicit] if _f])
children = []
for i in iter:
if i not in self.ignore_set:
children.append(i)
else:
children = self.all_children(scan=0)
self._memo['_children_get'] = children
return children
def all_children(self, scan=1):
"""Return a list of all the node's direct children."""
if scan:
self.scan()
# The return list may contain duplicate Nodes, especially in
# source trees where there are a lot of repeated #includes
# of a tangle of .h files. Profiling shows, however, that
# eliminating the duplicates with a brute-force approach that
# preserves the order (that is, something like:
#
# u = []
# for n in list:
# if n not in u:
# u.append(n)"
#
# takes more cycles than just letting the underlying methods
# hand back cached values if a Node's information is requested
# multiple times. (Other methods of removing duplicates, like
# using dictionary keys, lose the order, and the only ordered
# dictionary patterns I found all ended up using "not in"
# internally anyway...)
return list(chain.from_iterable([_f for _f in [self.sources, self.depends, self.implicit] if _f]))
def children(self, scan=1):
"""Return a list of the node's direct children, minus those
that are ignored by this node."""
if scan:
self.scan()
return self._children_get()
def set_state(self, state):
self.state = state
def get_state(self):
return self.state
def get_env(self):
env = self.env
if not env:
import SCons.Defaults
env = SCons.Defaults.DefaultEnvironment()
return env
def Decider(self, function):
foundkey = None
for k, v in _decider_map.items():
if v == function:
foundkey = k
break
if not foundkey:
foundkey = len(_decider_map)
_decider_map[foundkey] = function
self.changed_since_last_build = foundkey
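    # Illustrative note (not in the original source): a custom decider can be
    # attached to a single node; the callable receives (dependency, target,
    # prev_ni) and should return True when the dependency must be treated as
    # changed. For example:
    #
    #     def content_changed(dependency, target, prev_ni):
    #         return dependency.get_csig() != getattr(prev_ni, 'csig', None)
    #
    #     node.Decider(content_changed)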
def Tag(self, key, value):
""" Add a user-defined tag. """
if not self._tags:
self._tags = {}
self._tags[key] = value
def GetTag(self, key):
""" Return a user-defined tag. """
if not self._tags:
return None
return self._tags.get(key, None)
def changed(self, node=None, allowcache=False):
"""
Returns if the node is up-to-date with respect to the BuildInfo
stored last time it was built. The default behavior is to compare
it against our own previously stored BuildInfo, but the stored
BuildInfo from another Node (typically one in a Repository)
can be used instead.
Note that we now *always* check every dependency. We used to
short-circuit the check by returning as soon as we detected
any difference, but we now rely on checking every dependency
to make sure that any necessary Node information (for example,
the content signature of an #included .h file) is updated.
The allowcache option was added for supporting the early
release of the executor/builder structures, right after
a File target was built. When set to true, the return
value of this changed method gets cached for File nodes.
Like this, the executor isn't needed any longer for subsequent
calls to changed().
@see: FS.File.changed(), FS.File.release_target_info()
"""
t = 0
if t: Trace('changed(%s [%s], %s)' % (self, classname(self), node))
if node is None:
node = self
result = False
bi = node.get_stored_info().binfo
then = bi.bsourcesigs + bi.bdependsigs + bi.bimplicitsigs
children = self.children()
diff = len(children) - len(then)
if diff:
# The old and new dependency lists are different lengths.
# This always indicates that the Node must be rebuilt.
# We also extend the old dependency list with enough None
# entries to equal the new dependency list, for the benefit
# of the loop below that updates node information.
then.extend([None] * diff)
if t: Trace(': old %s new %s' % (len(then), len(children)))
result = True
for child, prev_ni in zip(children, then):
try:
if _decider_map[child.changed_since_last_build](child, self, prev_ni):
if t: Trace(': %s changed' % child)
result = True
except DeciderNeedsNode as e:
if e.decider(self, prev_ni, node=node):
if t: Trace(': %s changed' % child)
result = True
if self.has_builder():
import SCons.Util
contents = self.get_executor().get_contents()
newsig = SCons.Util.MD5signature(contents)
if bi.bactsig != newsig:
if t: Trace(': bactsig %s != newsig %s' % (bi.bactsig, newsig))
result = True
if not result:
if t: Trace(': up to date')
if t: Trace('\n')
return result
def is_up_to_date(self):
"""Default check for whether the Node is current: unknown Node
subtypes are always out of date, so they will always get built."""
return None
def children_are_up_to_date(self):
"""Alternate check for whether the Node is current: If all of
our children were up-to-date, then this Node was up-to-date, too.
The SCons.Node.Alias and SCons.Node.Python.Value subclasses
rebind their current() method to this method."""
# Allow the children to calculate their signatures.
self.binfo = self.get_binfo()
if self.always_build:
return None
state = 0
for kid in self.children(None):
s = kid.get_state()
if s and (not state or s > state):
state = s
return (state == 0 or state == SCons.Node.up_to_date)
def is_literal(self):
"""Always pass the string representation of a Node to
the command interpreter literally."""
return 1
def render_include_tree(self):
"""
Return a text representation, suitable for displaying to the
user, of the include tree for the sources of this node.
"""
if self.is_derived():
env = self.get_build_env()
if env:
for s in self.sources:
scanner = self.get_source_scanner(s)
if scanner:
path = self.get_build_scanner_path(scanner)
else:
path = None
def f(node, env=env, scanner=scanner, path=path):
return node.get_found_includes(env, scanner, path)
return SCons.Util.render_tree(s, f, 1)
else:
return None
def get_abspath(self):
"""
Return an absolute path to the Node. This will return simply
str(Node) by default, but for Node types that have a concept of
relative path, this might return something different.
"""
return str(self)
def for_signature(self):
"""
Return a string representation of the Node that will always
be the same for this particular Node, no matter what. This
is by contrast to the __str__() method, which might, for
instance, return a relative path for a file Node. The purpose
of this method is to generate a value to be used in signature
calculation for the command line used to build a target, and
we use this method instead of str() to avoid unnecessary
rebuilds. This method does not need to return something that
would actually work in a command line; it can return any kind of
nonsense, so long as it does not change.
"""
return str(self)
def get_string(self, for_signature):
"""This is a convenience function designed primarily to be
used in command generators (i.e., CommandGeneratorActions or
Environment variables that are callable), which are called
with a for_signature argument that is nonzero if the command
generator is being called to generate a signature for the
command line, which determines if we should rebuild or not.
Such command generators should use this method in preference
to str(Node) when converting a Node to a string, passing
in the for_signature parameter, such that we will call
Node.for_signature() or str(Node) properly, depending on whether
we are calculating a signature or actually constructing a
command line."""
if for_signature:
return self.for_signature()
return str(self)
def get_subst_proxy(self):
"""
This method is expected to return an object that will function
exactly like this Node, except that it implements any additional
special features that we would like to be in effect for
Environment variable substitution. The principle use is that
some Nodes would like to implement a __getattr__() method,
but putting that in the Node type itself has a tendency to kill
performance. We instead put it in a proxy and return it from
this method. It is legal for this method to return self
if no new functionality is needed for Environment substitution.
"""
return self
def explain(self):
if not self.exists():
return "building `%s' because it doesn't exist\n" % self
if self.always_build:
return "rebuilding `%s' because AlwaysBuild() is specified\n" % self
old = self.get_stored_info()
if old is None:
return None
old = old.binfo
old.prepare_dependencies()
try:
old_bkids = old.bsources + old.bdepends + old.bimplicit
old_bkidsigs = old.bsourcesigs + old.bdependsigs + old.bimplicitsigs
except AttributeError:
return "Cannot explain why `%s' is being rebuilt: No previous build information found\n" % self
new = self.get_binfo()
new_bkids = new.bsources + new.bdepends + new.bimplicit
new_bkidsigs = new.bsourcesigs + new.bdependsigs + new.bimplicitsigs
osig = dict(list(zip(old_bkids, old_bkidsigs)))
nsig = dict(list(zip(new_bkids, new_bkidsigs)))
# The sources and dependencies we'll want to report are all stored
# as relative paths to this target's directory, but we want to
# report them relative to the top-level SConstruct directory,
# so we only print them after running them through this lambda
# to turn them into the right relative Node and then return
# its string.
def stringify( s, E=self.dir.Entry):
if hasattr( s, 'dir' ) :
return str(E(s))
return str(s)
lines = []
removed = [x for x in old_bkids if not x in new_bkids]
if removed:
removed = [stringify(r) for r in removed]
fmt = "`%s' is no longer a dependency\n"
lines.extend([fmt % s for s in removed])
for k in new_bkids:
if not k in old_bkids:
lines.append("`%s' is a new dependency\n" % stringify(k))
else:
try:
changed = _decider_map[k.changed_since_last_build](k, self, osig[k])
except DeciderNeedsNode as e:
changed = e.decider(self, osig[k], node=self)
if changed:
lines.append("`%s' changed\n" % stringify(k))
if len(lines) == 0 and old_bkids != new_bkids:
lines.append("the dependency order changed:\n" +
"%sold: %s\n" % (' '*15, list(map(stringify, old_bkids))) +
"%snew: %s\n" % (' '*15, list(map(stringify, new_bkids))))
if len(lines) == 0:
def fmt_with_title(title, strlines):
lines = strlines.split('\n')
sep = '\n' + ' '*(15 + len(title))
return ' '*15 + title + sep.join(lines) + '\n'
if old.bactsig != new.bactsig:
if old.bact == new.bact:
lines.append("the contents of the build action changed\n" +
fmt_with_title('action: ', new.bact))
# lines.append("the contents of the build action changed [%s] [%s]\n"%(old.bactsig,new.bactsig) +
# fmt_with_title('action: ', new.bact))
else:
lines.append("the build action changed:\n" +
fmt_with_title('old: ', old.bact) +
fmt_with_title('new: ', new.bact))
if len(lines) == 0:
return "rebuilding `%s' for unknown reasons\n" % self
preamble = "rebuilding `%s' because" % self
if len(lines) == 1:
return "%s %s" % (preamble, lines[0])
else:
lines = ["%s:\n" % preamble] + lines
return ( ' '*11).join(lines)
class NodeList(collections.UserList):
def __str__(self):
return str(list(map(str, self.data)))
def get_children(node, parent): return node.children()
def ignore_cycle(node, stack): pass
def do_nothing(node, parent): pass
class Walker(object):
"""An iterator for walking a Node tree.
This is depth-first, children are visited before the parent.
The Walker object can be initialized with any node, and
returns the next node on the descent with each get_next() call.
'kids_func' is an optional function that will be called to
get the children of a node instead of calling 'children'.
'cycle_func' is an optional function that will be called
when a cycle is detected.
This class does not get caught in node cycles caused, for example,
by C header file include loops.
"""
def __init__(self, node, kids_func=get_children,
cycle_func=ignore_cycle,
eval_func=do_nothing):
self.kids_func = kids_func
self.cycle_func = cycle_func
self.eval_func = eval_func
node.wkids = copy.copy(kids_func(node, None))
self.stack = [node]
self.history = {} # used to efficiently detect and avoid cycles
self.history[node] = None
def get_next(self):
"""Return the next node for this walk of the tree.
This function is intentionally iterative, not recursive,
to sidestep any issues of stack size limitations.
"""
while self.stack:
if self.stack[-1].wkids:
node = self.stack[-1].wkids.pop(0)
if not self.stack[-1].wkids:
self.stack[-1].wkids = None
if node in self.history:
self.cycle_func(node, self.stack)
else:
node.wkids = copy.copy(self.kids_func(node, self.stack[-1]))
self.stack.append(node)
self.history[node] = None
else:
node = self.stack.pop()
del self.history[node]
if node:
if self.stack:
parent = self.stack[-1]
else:
parent = None
self.eval_func(node, parent)
return node
return None
def is_done(self):
return not self.stack
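# Illustrative usage sketch (not in the original source): walking a dependency
# tree bottom-up, where `root` stands for any SCons Node instance.
#
#     walker = Walker(root)
#     node = walker.get_next()
#     while node is not None:
#         ...  # children are always returned before their parent
#         node = walker.get_next()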
arg2nodes_lookups = []
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
[
"[email protected]"
] | |
c600cab231ae189afad7e9c793c31698f1d1dda0
|
acd41dc7e684eb2e58b6bef2b3e86950b8064945
|
/res/packages/scripts/scripts/client/gui/Scaleform/locale/COMMON.py
|
6af510f834e815784c03e95821358733067dd94e
|
[] |
no_license
|
webiumsk/WoT-0.9.18.0
|
e07acd08b33bfe7c73c910f5cb2a054a58a9beea
|
89979c1ad547f1a1bbb2189f5ee3b10685e9a216
|
refs/heads/master
| 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null |
WINDOWS-1250
|
Python
| false | false | 637 |
py
|
# 2017.05.04 15:24:58 Central European Summer Time
# Embedded file name: scripts/client/gui/Scaleform/locale/COMMON.py
"""
This file was generated using the wgpygen.
Please, don't edit this file manually.
"""
class COMMON(object):
COMMON_COLON = '#common:common/colon'
COMMON_PERCENT = '#common:common/percent'
COMMON_DASH = '#common:common/dash'
COMMON_SLASH = '#common:common/slash'
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\locale\COMMON.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:24:58 Central European Summer Time
|
[
"[email protected]"
] | |
19f41e85f4206a6192a3e3a9be678142773af90e
|
0f4e69f6620ce3bd35b0090ed97b0d520dac2775
|
/build/kobuki_safety_controller/catkin_generated/pkg.develspace.context.pc.py
|
e8c8e32c2906ad95b607e2e0dba31bcfe5203745
|
[] |
no_license
|
yosoy2/turtlebot2
|
277c44fe63bb808ac3ff1b050388f35e7e9aca5d
|
d3052cc648b617c43b6190cbfc8d08addbb8f9de
|
refs/heads/master
| 2021-03-16T12:40:54.660254 | 2020-03-12T18:52:15 | 2020-03-12T18:52:15 | 246,908,194 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 654 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/edu/turtlebot2/src/kobuki_safety_controller/include".split(';') if "/home/edu/turtlebot2/src/kobuki_safety_controller/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;nodelet;pluginlib;std_msgs;geometry_msgs;kobuki_msgs;yocs_controllers;ecl_threads".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lkobuki_safety_controller_nodelet".split(';') if "-lkobuki_safety_controller_nodelet" != "" else []
PROJECT_NAME = "kobuki_safety_controller"
PROJECT_SPACE_DIR = "/home/edu/turtlebot2/devel"
PROJECT_VERSION = "0.7.6"
|
[
"[email protected]"
] | |
cb4095ce914cad8e2421522ea8914167096ef584
|
219d7cf7cf00b778ff1a5709406c144fcf2132f3
|
/exam prep/04. Food for Pets.py
|
16055a3550f1f098f577347be2a5aa08d4590c62
|
[] |
no_license
|
SilviaKoynova/Softuni-Programming-Basics-Python
|
e8e175419383815c65c4e110fdb2b752d940e887
|
0dfef0850f2cb8471dfee1af89f137be4e887cb8
|
refs/heads/main
| 2023-07-13T00:35:09.389302 | 2021-08-27T07:43:45 | 2021-08-27T07:43:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 823 |
py
|
from math import floor
days = int(input())
bought_food = float(input())
total_food_dog = 0
total_eaten_cat = 0
biscuits = 0
total_food = 0
final = 0
dog = 0
cat = 0
for all_days in range(1, days + 1):
dog_eaten = int(input())
cat_eaten = int(input())
total_food_dog += dog_eaten
total_eaten_cat += cat_eaten
if all_days % 3 == 0:
current_biscuits = (dog_eaten + cat_eaten) * 0.1
biscuits += current_biscuits
total_food = total_food_dog + total_eaten_cat
final = total_food / bought_food * 100
dog = total_food_dog / total_food * 100
cat = total_eaten_cat / total_food * 100
print(f'Total eaten biscuits: {floor(biscuits)}gr.')
print(f'{final:.2f}% of the food has been eaten.')
print(f'{dog:.2f}% eaten from the dog.')
print(f'{cat:.2f}% eaten from the cat.')
|
[
"[email protected]"
] | |
ace63eee1b093c6fba610d2765a7a7c30f7d777f
|
57391fbdde43c3d2e8628613d9003c65ff8abf9d
|
/Exercicios/ex025.py
|
b91b51ff7bd5bcc5298a5bf359fef4ec0215b878
|
[] |
no_license
|
JoaolSoares/CursoEmVideo_python
|
082a6aff52414cdcc7ee94d76c3af0ac2cb2aaf5
|
aa9d6553ca890a6d9369e60504290193d1c0fb54
|
refs/heads/main
| 2023-07-15T07:39:57.299061 | 2021-08-26T20:04:22 | 2021-08-26T20:04:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 119 |
py
|
# Reads a name and reports whether it contains "Silva" (prompt and output are in Portuguese).
n1 = str(input('Diga o seu nome: ')).strip().lower()
print('O seu nome possui "Silva": {}'.format('silva' in n1))
|
[
"[email protected]"
] | |
16fdd87db79816926ec1b2ace8eb8c4e396aa4a0
|
dd31b966b1fbbd129b8657aa35b1dd460cbd1675
|
/sandbox/python/wcpan.ftp/wcpan/ftp/network.py
|
2af7106f582b54be4a85b0f0c0a63dae00995630
|
[] |
no_license
|
legnaleurc/junkcode
|
9e48608d3ecafef0d7103240458f622aee87e077
|
f63b04d04853fb7d3ae0002b24657a9fd781b648
|
refs/heads/master
| 2023-07-08T04:27:01.934100 | 2023-06-26T05:14:18 | 2023-06-26T05:14:39 | 27,518 | 5 | 5 | null | 2019-05-12T17:22:15 | 2008-06-22T19:17:04 |
C++
|
UTF-8
|
Python
| false | false | 5,345 |
py
|
import socket
import subprocess
import os.path
import tornado.tcpserver as tts
import tornado.ioloop
import tornado.iostream
import tornado.locks as tl
import tornado.netutil as tn
class FTPServer(tts.TCPServer):
def __init__(self, path):
super().__init__()
self.path = path
self._loop = tornado.ioloop.IOLoop.current()
async def handle_stream(self, stream, address):
session = ControlChannel(self, stream, address)
self._loop.add_callback(session.start)
class ControlChannel(object):
def __init__(self, server, stream, address):
self.server = server
self.stream = stream
self.address = address
self.encoding = "utf-8"
self._cwd = '/'
self._transfer_mode = 'binary'
self.start_position = 0
async def writeline(self, value):
value += "\r\n"
await self.stream.write(value.encode(self.encoding))
print('->', value)
async def readline(self):
value = await self.stream.read_until(b"\r\n")
value = value.decode(self.encoding)
value = value.rstrip("\r\n")
return value
async def start(self):
print("Incoming connection from {}".format(self.address))
await self.writeline("220 Service ready for new user.")
self.running = True
while self.running:
try:
await self.handle_command()
except tornado.iostream.StreamClosedError:
self.stop()
def stop(self):
print("Closing connection from {}".format(self.address))
self.running = False
self.stream.close()
async def handle_command(self):
command = await self.readline()
print("Received command: " + command)
command = command.split(" ", 1)
if len(command) == 1:
command = command[0]
parameters = ""
else:
command, parameters = command
if command == "USER":
await self.writeline("230 User logged in, proceed.")
elif command == "SYST":
await self.writeline("215 UNIX Type: L8")
elif command == "FEAT":
await self.writeline("211-")
await self.writeline(" PASV")
await self.writeline(" REST")
await self.writeline("211 ")
elif command == "PWD":
await self.writeline('257 "{}"'.format(self._cwd))
elif command == "CWD":
self._cwd = parameters
await self.writeline('250 Requested file action okay, completed.')
elif command == "TYPE":
await self.writeline('200 __TYPE_OK__')
elif command == "PASV":
self.data_connection = PassiveListener(self.address[0])
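            # reply with the listener's host/port encoded as h1,h2,h3,h4,p1,p2, then wait for the client to connect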
await self.writeline("227 Entering Passive Mode " + self.data_connection.format_host() + ".")
await self.data_connection.wait_for_ready()
elif command == "LIST":
await self.writeline("150 File status okay; about to open data connection.")
await self.data_connection.send(
subprocess.check_output(["ls", "-l", self.server.path]))
await self.writeline("226 Closing data connection.")
elif command == "RETR":
await self.writeline("150")
filename = os.path.basename(parameters)
# Wait for opened data connection ?
fh = open(os.path.join(self.server.path, filename), "rb")
fh.seek(self.start_position)
await self.data_connection.send(fh.read())
self.start_position = 0
await self.writeline("226")
elif command == "REST":
self.start_position = int(parameters)
await self.writeline("350")
elif command == "QUIT":
await self.writeline("221 Service closing control connection.")
            self.stop()
else:
await self.writeline("502 Command not implemented.")
class ChannelHandler(object):
def __init__(self, host):
self._host = host
def create_passive_listener(self):
return PassiveListener(self._host)
async def send_passive_port(self, response):
await self._control.send_line(response)
class PassiveListener(tts.TCPServer):
def __init__(self, host):
super().__init__()
self._host = host
self._stream = None
self._ready_lock = tl.Condition()
        self._loop = tornado.ioloop.IOLoop.current()
# TODO support IPv6?
socket_list = tn.bind_sockets(0, address=self._host,
family=socket.AF_INET)
self.add_sockets(socket_list)
self.start()
def get_socket(self):
return list(self._sockets.values())[0]
def get_address(self):
addr = socket.gethostbyname(socket.gethostname())
port = self.get_socket().getsockname()[1]
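        # FTP passive-mode replies encode the port as two bytes: port = p1 * 256 + p2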
result = addr.replace(".", ",")
result += "," + str(port // 256)
result += "," + str(port % 256)
return result
async def handle_stream(self, stream, addr):
self._stream = stream
self._ready_lock.notify()
self._ready_lock = None
self._loop.add_callback(self.stop)
async def wait_for_ready(self):
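        # blocks until handle_stream has accepted the data connection and notified the condition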
await self._ready_lock.wait()
return self._stream
|
[
"[email protected]"
] | |
e59c90e5865491891f0f6300d06a803ab6505488
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03646/s038304175.py
|
851d87f751819207824900501d7ddeaf0e2ff6a4
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 136 |
py
|
k = int(input())
rest = k % 50
print(50)
lst = [50 + k // 50 if i < rest else 50 + (k // 50 - 1) - rest for i in range(50)]
print(*lst)
|
[
"[email protected]"
] | |
c052aa6a6a116e3f4cf56a227c2d8fe9cfdc2b9e
|
1419418226b6ba0f510649daaf62b71554cc2284
|
/clawtools/plot_coast_chiapas.py
|
e3af2a89b43214556f155501d3fdea2e3cbbc522
|
[] |
no_license
|
shineusn/mylife
|
2ef48a777e39be2ef746c3dad16ea963d5b23e5e
|
61dfa72d9047551746d26b7fe01fb5c2f1f0657a
|
refs/heads/master
| 2020-03-22T13:44:42.422127 | 2018-02-13T18:09:43 | 2018-02-13T18:09:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,637 |
py
|
# -*- coding: utf-8 -*-
from matplotlib import pyplot as plt
from numpy import genfromtxt,unique,zeros,where,array,nan,c_,r_,argmin,squeeze,isnan
from obspy import read
from matplotlib import rcParams
etaclip=50
rcParams.update({'font.size': 22})
fgmax_file=u'/Users/dmelgar/Tsunamis/tehuantepec/_output/fort.FG1.valuemax'
aux_file='/Users/dmelgar/Tsunamis/tehuantepec/_output/fort.FG1.aux1'
wet_tol=0.001
etaclip=4
minamr=3
#Get maximum amplitude first
lon=genfromtxt(fgmax_file,usecols=0)
lat=genfromtxt(fgmax_file,usecols=1)
amr=genfromtxt(fgmax_file,usecols=2)
H_in=genfromtxt(fgmax_file,usecols=3)
b_in=genfromtxt(aux_file)
unique_amr=unique(amr)
#i=where(unique_amr>0)[0]
#unique_amr=unique_amr[i]
eta=zeros(len(H_in))
H=zeros(len(H_in))
b=zeros(len(H_in))
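# reconstruct sea-surface height eta = H + bathymetry, but only for points resolved at AMR level >= minamr (coarser points are zeroed)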
for k in range(len(unique_amr)):
i=where(amr==unique_amr[k])[0]
if unique_amr[k]<minamr:
eta[i]=0
else:
eta[i]=H_in[i]+b_in[i,int(unique_amr[k]+1)]
H[i]=H_in[i]
b[i]=b_in[i,int(unique_amr[k]+1)]
#i=where(b<0)[0]
#lon=lon[i]
#lat=lat[i]
#eta=eta[i]
#H=H[i]
#b=b[i]
#
#i=where(H<10)[0]
#lon=lon[i]
#lat=lat[i]
#eta=eta[i]
#H=H[i]
#b=b[i]
#remove onshore points
#Wphase and slip inversion clipping
#iclip=where(lat>-25)[0]
#i=where(eta[iclip]>etaclip)[0]
#eta[iclip[i]]=nan
#i=where(isnan(eta)==False)[0]
#eta=eta[i]
#lat=lat[i]
#PGD clipping
#etaclip=0.6
#iclip=where(lat>-28.6)[0]
#i=where(eta[iclip]>etaclip)[0]
#eta[iclip[i]]=nan
#i=where(isnan(eta)==False)[0]
#eta=eta[i]
#lat=lat[i]
#iclip=where(lat<-34.7)[0]
#i=where(eta[iclip]>etaclip)[0]
#eta[iclip[i]]=nan
#i=where(isnan(eta)==False)[0]
#eta=eta[i]
#lat=lat[i]
|
[
"[email protected]"
] | |
bb9a45c25e772efc4b39731431232d2b38348672
|
6b6e20004b46165595f35b5789e7426d5289ea48
|
/util/tufmetadata/api.py
|
9c039477a91604a4027b703270be9f31092dad3c
|
[
"Apache-2.0"
] |
permissive
|
anwarchk/quay
|
2a83d0ab65aff6a1120fbf3a45dd72f42211633b
|
23c5120790c619174e7d36784ca5aab7f4eece5c
|
refs/heads/master
| 2020-09-12T18:53:21.093606 | 2019-11-15T19:29:02 | 2019-11-15T19:29:02 | 222,517,145 | 0 | 0 |
Apache-2.0
| 2019-11-18T18:32:35 | 2019-11-18T18:32:35 | null |
UTF-8
|
Python
| false | false | 10,315 |
py
|
import logging
from urlparse import urljoin
from posixpath import join
from abc import ABCMeta, abstractmethod
from six import add_metaclass
import requests
from data.database import CloseForLongOperation
from util.abchelpers import nooper
from util.failover import failover, FailoverException
from util.security.instancekeys import InstanceKeys
from util.security.registry_jwt import (build_context_and_subject, generate_bearer_token,
SIGNER_TUF_ROOT)
DEFAULT_HTTP_HEADERS = {'Connection': 'close'}
MITM_CERT_PATH = '/conf/mitm.cert'
TOKEN_VALIDITY_LIFETIME_S = 60 * 60 # 1 hour
logger = logging.getLogger(__name__)
class InvalidMetadataException(Exception):
""" Exception raised when the upstream API metadata that doesn't parse correctly. """
pass
class Non200ResponseException(Exception):
""" Exception raised when the upstream API returns a non-200 HTTP status code. """
def __init__(self, response):
super(Non200ResponseException, self).__init__()
self.response = response
class TUFMetadataAPI(object):
""" Helper class for talking to the TUF Metadata service (Apostille). """
def __init__(self, app, config, client=None):
feature_enabled = config.get('FEATURE_SIGNING', False)
if feature_enabled:
self.state = ImplementedTUFMetadataAPI(app, config, client=client)
else:
self.state = NoopTUFMetadataAPI()
def __getattr__(self, name):
return getattr(self.state, name, None)
@add_metaclass(ABCMeta)
class TUFMetadataAPIInterface(object):
""" Helper class for talking to the TUF Metadata service (Apostille). """
@abstractmethod
def get_default_tags_with_expiration(self, namespace, repository, targets_file=None):
"""
Gets the tag -> sha mappings for a repo, as well as the expiration of the signatures.
Does not verify the metadata, this is purely for display purposes.
Args:
namespace: namespace containing the repository
repository: the repo to get tags for
targets_file: the specific delegation to read from. Default: targets/releases.json
Returns:
targets, expiration or None, None
"""
pass
@abstractmethod
def get_all_tags_with_expiration(self, namespace, repository, targets_file=None, targets_map=None):
"""
Gets the tag -> sha mappings of all delegations for a repo, as well as the expiration of the signatures.
Does not verify the metadata, this is purely for display purposes.
Args:
namespace: namespace containing the repository
repository: the repo to get tags for
targets_file: the specific target or delegation to read from. Default: targets.json
Returns:
targets
"""
pass
@abstractmethod
def delete_metadata(self, namespace, repository):
"""
Deletes the TUF metadata for a repo
Args:
namespace: namespace containing the repository
repository: the repo to delete metadata for
Returns:
True if successful, False otherwise
"""
pass
@nooper
class NoopTUFMetadataAPI(TUFMetadataAPIInterface):
""" No-op version of the TUF API. """
pass
class ImplementedTUFMetadataAPI(TUFMetadataAPIInterface):
def __init__(self, app, config, client=None):
self._app = app
self._instance_keys = InstanceKeys(app)
self._config = config
self._client = client or config['HTTPCLIENT']
self._gun_prefix = config['TUF_GUN_PREFIX'] or config['SERVER_HOSTNAME']
def get_default_tags_with_expiration(self, namespace, repository, targets_file=None):
"""
Gets the tag -> sha mappings for a repo, as well as the expiration of the signatures.
Does not verify the metadata, this is purely for display purposes.
Args:
namespace: namespace containing the repository
repository: the repo to get tags for
targets_file: the specific delegation to read from. Default: targets/releases.json
Returns:
targets, expiration or None, None
"""
if not targets_file:
targets_file = 'targets/releases.json'
signed = self._get_signed(namespace, repository, targets_file)
if not signed:
return None, None
return signed.get('targets'), signed.get('expires')
def get_all_tags_with_expiration(self, namespace, repository, targets_file=None, targets_map=None):
"""
Gets the tag -> sha mappings of all delegations for a repo, as well as the expiration of the signatures.
Does not verify the metadata, this is purely for display purposes.
Args:
namespace: namespace containing the repository
repository: the repo to get tags for
targets_file: the specific target or delegation to read from. Default: targets.json
Returns:
targets
"""
if not targets_file:
targets_file = 'targets.json'
targets_name = targets_file
if targets_name.endswith('.json'):
targets_name = targets_name[:-5]
if not targets_map:
targets_map = {}
signed = self._get_signed(namespace, repository, targets_file)
if not signed:
targets_map[targets_name] = None
return targets_map
if signed.get('targets'):
targets_map[targets_name] = {
'targets': signed.get('targets'),
'expiration': signed.get('expires'),
}
delegation_names = [role.get('name') for role in signed.get('delegations').get('roles')]
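        # recurse into every delegation so nested targets files are folded into the same map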
for delegation in delegation_names:
targets_map = self.get_all_tags_with_expiration(namespace, repository, targets_file=delegation + '.json', targets_map=targets_map)
return targets_map
def delete_metadata(self, namespace, repository):
"""
Deletes the TUF metadata for a repo
Args:
namespace: namespace containing the repository
repository: the repo to delete metadata for
Returns:
True if successful, False otherwise
"""
gun = self._gun(namespace, repository)
try:
self._delete(gun)
except requests.exceptions.Timeout:
logger.exception('Timeout when trying to delete metadata for %s', gun)
return False
except requests.exceptions.ConnectionError:
logger.exception('Connection error when trying to delete metadata for %s', gun)
return False
except (requests.exceptions.RequestException, ValueError):
logger.exception('Failed to delete metadata for %s', gun)
return False
except Non200ResponseException as ex:
logger.exception('Failed request for %s: %s %s', gun, ex.response, str(ex))
return False
return True
def _gun(self, namespace, repository):
return join(self._gun_prefix, namespace, repository)
def _get_signed(self, namespace, repository, targets_file):
gun = self._gun(namespace, repository)
try:
response = self._get(gun, targets_file)
signed = self._parse_signed(response.json())
return signed
except requests.exceptions.Timeout:
logger.exception('Timeout when trying to get metadata for %s', gun)
except requests.exceptions.ConnectionError:
logger.exception('Connection error when trying to get metadata for %s', gun)
except (requests.exceptions.RequestException, ValueError):
logger.exception('Failed to get metadata for %s', gun)
except Non200ResponseException as ex:
logger.exception('Failed request for %s: %s %s', gun, ex.response, str(ex))
except InvalidMetadataException as ex:
logger.exception('Failed to parse targets from metadata: %s', str(ex))
return None
def _parse_signed(self, json_response):
""" Attempts to parse the targets from a metadata response """
signed = json_response.get('signed')
if not signed:
raise InvalidMetadataException("Could not find `signed` in metadata: %s" % json_response)
return signed
def _auth_header(self, gun, actions):
""" Generate a registry auth token for apostille"""
access = [{
'type': 'repository',
'name': gun,
'actions': actions,
}]
context, subject = build_context_and_subject(auth_context=None, tuf_roots={gun: SIGNER_TUF_ROOT})
token = generate_bearer_token(self._config["SERVER_HOSTNAME"], subject, context, access,
TOKEN_VALIDITY_LIFETIME_S, self._instance_keys)
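        # the resulting bearer token authorizes the requested actions on this GUN for TOKEN_VALIDITY_LIFETIME_S seconds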
return {'Authorization': 'Bearer %s' % token}
def _get(self, gun, metadata_file):
return self._call('GET', '/v2/%s/_trust/tuf/%s' % (gun, metadata_file), headers=self._auth_header(gun, ['pull']))
def _delete(self, gun):
return self._call('DELETE', '/v2/%s/_trust/tuf/' % (gun), headers=self._auth_header(gun, ['*']))
def _request(self, method, endpoint, path, body, headers, params, timeout):
""" Issues an HTTP request to the signing endpoint. """
url = urljoin(endpoint, path)
logger.debug('%sing signing URL %s', method.upper(), url)
headers.update(DEFAULT_HTTP_HEADERS)
resp = self._client.request(method, url, json=body, params=params, timeout=timeout,
verify=True, headers=headers)
if resp.status_code // 100 != 2:
raise Non200ResponseException(resp)
return resp
def _call(self, method, path, params=None, body=None, headers=None):
""" Issues an HTTP request to signing service and handles failover for GET requests.
"""
timeout = self._config.get('TUF_API_TIMEOUT_SECONDS', 1)
endpoint = self._config['TUF_SERVER']
with CloseForLongOperation(self._config):
# If the request isn't a read do not fail over.
if method != 'GET':
return self._request(method, endpoint, path, body, headers, params, timeout)
# The request is read-only and can failover.
all_endpoints = [endpoint] + self._config.get('TUF_READONLY_FAILOVER_ENDPOINTS', [])
return _failover_read_request(*[((self._request, endpoint, path, body, headers, params, timeout), {})
for endpoint in all_endpoints])
@failover
def _failover_read_request(request_fn, endpoint, path, body, headers, params, timeout):
""" This function auto-retries read-only requests until they return a 2xx status code. """
try:
return request_fn('GET', endpoint, path, body, headers, params, timeout)
except (requests.exceptions.RequestException, Non200ResponseException) as ex:
raise FailoverException(ex)
|
[
"[email protected]"
] | |
8db418acd2d08188e41e2f04b93c36f5e140c62c
|
c701dbdd743aa807d772bad99a4d903088282fb3
|
/Proj_Centroid_Loss_LeNet/LeNet_plus_centerloss/network.py
|
56b351535335b2b388cb9fcf18aa0451c726b7d0
|
[] |
no_license
|
Beerkay/deep_learning_notes
|
6d5230c95cf67e6330f0d5ff87186515b346d157
|
7f8c7312ddf8ed1e46bf0e6971565b911eb8bc92
|
refs/heads/master
| 2021-05-04T05:54:42.628731 | 2016-10-13T20:27:55 | 2016-10-13T20:27:55 | 71,084,243 | 5 | 6 | null | 2016-10-16T23:43:51 | 2016-10-16T23:43:51 | null |
UTF-8
|
Python
| false | false | 7,285 |
py
|
import math
import tensorflow as tf
from termcolor import colored as c, cprint
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
import helpers
### helper functions
from functools import reduce
def fc_layer(x, weight_shape, bias_shape, layer_name):
with tf.name_scope(layer_name):
# initializing at 0 is no-good.
norm = math.sqrt(float(
reduce(lambda v, e: v * e, weight_shape)
))
weight = tf.Variable(
tf.truncated_normal(weight_shape,
mean=0.5,
stddev=1.0 / norm),
name='weight')
bias = tf.Variable(tf.zeros(bias_shape), name='bias')
activation = tf.matmul(x, weight) + bias
return weight, bias, activation
# main network build stages
def inference():
x = tf.placeholder(tf.float32, shape=[None, 784], name='input')
image = tf.reshape(x, [-1, 28, 28, 1])
with tf.name_scope('conv_layer_1'):
W_conv1 = helpers.weight_variable([5, 5, 1, 32], 'W_conv1')
b_conv1 = helpers.bias_variable([32], 'bias_conv1')
alphas_conv1 = helpers.bias_variable([32], 'alpha_conv1')
layer_conv_1 = helpers.prelu(helpers.conv2d(image, W_conv1) + b_conv1, alphas_conv1)
W_conv1_b = helpers.weight_variable([5, 5, 32, 32], 'W_conv1_b')
b_conv1_b = helpers.bias_variable([32], 'bias_conv1_b')
alphas_conv1_b = helpers.bias_variable([32], 'alpha_conv1_b')
layer_conv_1_b = helpers.prelu(helpers.conv2d(layer_conv_1, W_conv1_b) + b_conv1_b, alphas_conv1_b)
stage_1_pool = helpers.max_pool_2x2(layer_conv_1_b)
with tf.name_scope('conv_layer_2'):
W_conv2 = helpers.weight_variable([5, 5, 32, 64], "W_conv2")
b_conv2 = helpers.bias_variable([64], 'bias_conv2')
alphas_conv2 = helpers.bias_variable([64], 'alpha_conv2')
layer_conv_2 = helpers.prelu(helpers.conv2d(stage_1_pool, W_conv2) + b_conv2, alphas_conv2)
W_conv2_b = helpers.weight_variable([5, 5, 64, 64], "W_conv2_b")
b_conv2_b = helpers.bias_variable([64], 'bias_conv2_b')
alphas_conv2_b = helpers.bias_variable([64], 'alpha_conv2_b')
layer_conv_2_b = helpers.prelu(helpers.conv2d(layer_conv_2, W_conv2_b) + b_conv2_b, alphas_conv2_b)
stage_2_pool = helpers.max_pool_2x2(layer_conv_2_b)
# stage_2_pool_flat = tf.reshape(stage_2_pool, [-1, 7 * 7 * 64])
with tf.name_scope('conv_layer_3'):
W_conv3 = helpers.weight_variable([5, 5, 64, 128], "W_conv3")
b_conv3 = helpers.bias_variable([128], 'bias_conv3')
alphas_conv3 = helpers.bias_variable([128], 'alpha_conv3')
layer_conv_3 = helpers.prelu(helpers.conv2d(stage_2_pool, W_conv3) + b_conv3, alphas_conv3)
# stage_3_pool = helpers.max_pool_2x2(layer_conv_3)
# stage_3_pool_flat = tf.reshape(stage_3_pool, [-1, 4 * 4 * 256])
W_conv3_b = helpers.weight_variable([5, 5, 128, 128], "W_conv3_b")
b_conv3_b = helpers.bias_variable([128], 'bias_conv3_b')
alphas_conv3_b = helpers.bias_variable([128], 'alpha_conv3_b')
layer_conv_3_b = helpers.prelu(helpers.conv2d(layer_conv_3, W_conv3_b) + b_conv3_b, alphas_conv3_b)
stage_3_pool = helpers.max_pool_2x2(layer_conv_3_b)
stage_3_pool_flat = tf.reshape(stage_3_pool, [-1, 4 * 4 * 128])
with tf.name_scope('fc_layer_1'):
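        # final fully-connected layer maps the flattened conv features to a 2-D embedding (the deep features consumed by the softmax and center losses)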
W_fc1 = helpers.weight_variable([4 * 4 * 128, 2], "W_fc1")
# W_fc1 = helpers.weight_variable([7 * 7 * 64, 2], "W_fc1")
b_fc1 = helpers.bias_variable([2], 'bias_fc1')
        alphas_fc1 = helpers.bias_variable([2], 'alpha_fc1')
output = helpers.prelu(tf.matmul(stage_3_pool_flat, W_fc1) + b_fc1, alphas_fc1)
# with tf.name_scope('fc_output'):
# W_output = helpers.weight_variable([500, 10], "W_putput")
# b_output = helpers.bias_variable([10], 'bias_output')
# output = tf.nn.relu(tf.matmul(h_fc1, W_output) + b_output)
# with tf.name_scope('output'):
# W_output = helpers.weight_variable([2, 10], "W_output")
# b_output = helpers.bias_variable([10])
# output = tf.nn.relu(tf.matmul(h_fc2, W_output) + b_output)
return x, output
def loss(deep_features):
with tf.name_scope('softmax_loss'):
batch_labels = tf.placeholder(tf.float32, name='labels')
W_loss = helpers.weight_variable([2, 10], "W_loss")
bias_loss = tf.Variable(
            tf.truncated_normal(shape=[10], stddev=1e-4, mean=1e-1), name='bias_loss')
# Note: we don't use the bias here because it does not affect things. removing the
# bias also makes the analysis simpler.
logits = tf.matmul(deep_features, W_loss) + bias_loss
cross_entropy = - tf.reduce_mean(
tf.mul(batch_labels, tf.nn.log_softmax(logits)),
reduction_indices=[1]
)
xentropy_mean = tf.reduce_mean(cross_entropy, name="xentropy_mean")
tf.scalar_summary(xentropy_mean.op.name, xentropy_mean)
return batch_labels, logits, xentropy_mean
def center_loss(deep_features, labels):
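    # penalize the squared distance between each sample's 2-D deep feature and the centroid of its class, with centroids computed from the current batch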
with tf.name_scope('center_loss'):
features_expanded = tf.reshape(deep_features, shape=[-1, 2, 1])
labels_expanded = tf.reshape(labels, shape=[-1, 1, 10])
samples_per_label = tf.reduce_sum(
labels_expanded,
reduction_indices=[0]
)
centroids = \
tf.reduce_sum(
tf.reshape(deep_features, shape=[-1, 2, 1]) * \
labels_expanded,
reduction_indices=[0]
) / samples_per_label
centroids_expanded = tf.reshape(centroids, shape=[1, 2, 10]) * labels_expanded
spread = \
tf.reduce_mean(
tf.reduce_sum(
tf.square(
features_expanded * labels_expanded - centroids_expanded
),
reduction_indices=[1, 2]
)
) / 2.0
tf.scalar_summary(spread.op.name, spread)
return spread, centroids, spread
def training(loss):
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
with tf.name_scope('training'):
global_step = tf.Variable(0, name='global_step', trainable=False)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
# optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=global_step)
# optimizer = tf.train.GradientDescentOptimizer(learning_rate)
# grads_and_vars = optimizer.compute_gradients(loss, tf.trainable_variables())
# capped_grads_and_vars = [(tf.clip_by_value(grads, 1e-10, 1e10), vars) for grads, vars in grads_and_vars]
# train_op = optimizer.apply_gradients(capped_grads_and_vars)
return learning_rate, train_op, global_step
def evaluation(logits, labels):
correct = tf.nn.in_top_k(logits, tf.cast(tf.argmax(labels, dimension=1), dtype=tf.int32), 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float64), name='accuracy')
tf.scalar_summary(accuracy.op.name, accuracy)
    # Return the mean accuracy over the batch.
return accuracy
|
[
"[email protected]"
] | |
553634b3245a6931df483918cc4d6c8d3a077d20
|
67b7e6d2c08f08403ec086c510622be48b8d26d8
|
/src/test/tinc/tincrepo/mpp/models/mpp_tc.py
|
460846034b9e46458571cf6a337a685f498d2627
|
[
"Apache-2.0",
"PostgreSQL",
"LicenseRef-scancode-rsa-md4",
"OLDAP-2.8",
"HPND-sell-variant",
"BSD-4-Clause-UC",
"BSD-3-Clause",
"Zlib",
"LicenseRef-scancode-zeusbench",
"LicenseRef-scancode-mit-modification-obligations",
"OpenSSL",
"MIT",
"LicenseRef-scancode-other-copyleft",
"bzip2-1.0.6",
"NTP",
"W3C",
"metamail",
"Beerware",
"RSA-MD",
"LicenseRef-scancode-rsa-1990",
"LicenseRef-scancode-stream-benchmark",
"LicenseRef-scancode-openssl",
"X11-distribute-modifications-variant",
"LicenseRef-scancode-pcre",
"LicenseRef-scancode-ssleay-windows",
"Spencer-94",
"ISC",
"LicenseRef-scancode-other-permissive",
"BSD-2-Clause",
"Python-2.0",
"curl",
"LicenseRef-scancode-sun-bcl-sdk-5.0",
"MIT-CMU",
"W3C-19980720"
] |
permissive
|
sshyran/gpdb
|
41012411d22b0294204dfb0fe67a1f4c8d1ecaf6
|
2d065ecdd2b5535cb42474f17a0ee6592b4e6837
|
refs/heads/master
| 2023-04-09T14:05:44.030212 | 2016-11-12T08:33:33 | 2016-11-12T08:34:36 | 73,544,159 | 0 | 0 |
Apache-2.0
| 2023-04-04T00:30:10 | 2016-11-12T09:43:54 |
PLpgSQL
|
UTF-8
|
Python
| false | false | 25,408 |
py
|
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import inspect
import os
import re
import sys
import time
import tinctest
from tinctest.runner import TINCTextTestResult
from tinctest.lib.system import TINCSystem
from gppylib.commands.base import Command
from mpp.lib.datagen.databases import __databases__, TINCTestDatabase, TINCDatagenException
from mpp.lib.gplog import GpLog
from mpp.lib.gpstop import GpStop
from mpp.lib.PSQL import PSQL
import unittest2 as unittest
class MPPTestCaseException(Exception):
"""
The exception that will be thrown for any errors or failures in MPPTestCase
"""
pass
class MPPDUT(object):
"""
This class is used to find the Device Under Test.
It provides instance variables for product name and version_string.
It will only be used by MPPMetaClassType to dynamically change a class's MRO.
It also provides a product_environment dictionary to store gpopt version if found.
"""
def __init__(self, product = None, version_string = None):
# Valid products as of 11/25/13: gpdb, hawq
self.product = product
# version_string has this format: major#.minor#.service_pack#.version_number<hotfix_alphanumeral>
# It can be incomplete: 4.3 or 4.2.1
self.version_string = version_string
self.product_environment = {}
# First, get the product version
if (self.product is None) or (self.version_string is None):
self._get_product_version()
# Next, get gpopt (GP Optimizer Mode) version
gpopt_version = self._get_gpopt_version()
if gpopt_version:
self.product_environment['gpopt'] = gpopt_version
def _get_version_string_output(self):
# Version string is the output of postgres --gp-version or postgress --version
# Output in gpdb: "postgres (Greenplum Database) 4.3_PARISTX_ORCA build 43249"
# Output in hawq: "postgres (HAWQ) 4.2.0 build 1"
# Output in postgres: "postgres (PostgreSQL) 9.2.4"
# The following command will fail if the DUT is postgres
version_command = Command(name = 'get gp-version', cmdStr = 'postgres --gp-version')
try:
version_command.run(validateAfter = True)
except Exception, e:
tinctest.logger.debug("Failed while running get gp-version: %s" %e)
version_command = Command(name = 'get version', cmdStr = 'postgres --version')
version_command.run(validateAfter = True)
return version_command.get_results().stdout
def _get_product_version(self):
version_string_information = ''
try:
version_string_information = self._get_version_string_output()
except Exception, e:
tinctest.logger.exception("Failure while getting version information: %s" %e)
tinctest.logger.critical("Could not detect one of the supported products (gpdb, hawq or postgres) in your environment. Make sure your environment is set correctly.")
raise MPPTestCaseException("Could not detect one of the supported products (gpdb, hawq or postgres) in your environment. Make sure your environment is set correctly.")
match_object = re.search("\((.+)\)", version_string_information)
database_match = match_object.group(0)
if "HAWQ" in database_match:
self.product = 'hawq'
# Replace version_string_information to point to hawq-version
version_command = Command(name = 'get hawq-version', cmdStr = 'postgres --hawq-version')
version_command.run(validateAfter = True)
version_string_information = version_command.get_results().stdout
tinctest.logger.info("DUT is detected to be hawq. Version string: %s" %version_string_information)
elif "Greenplum Database" in database_match:
tinctest.logger.info("DUT is detected to be gpdb. Version string: %s" %version_string_information)
self.product = 'gpdb'
elif "PostgreSQL" in database_match:
tinctest.logger.info("DUT is detected to be postgres. Version string: %s" %version_string_information)
self.product = 'postgres'
else:
tinctest.logger.critical("Unexpected version string obtained: %s." %version_string_information)
tinctest.logger.critical("Could not detect one of the supported products (gpdb, hawq or postgres) in your environment. Make sure your environment is set correctly.")
raise MPPTestCaseException("Unexpected version string obtained: %s" %version_string_information)
# At this point, version_string_information can be extracted to get the exact version
# version_string_information for gpdb (--gp_version): "postgres (Greenplum Database) 4.3_PARISTX_ORCA build 43249"
# version_string_information for hawq (--hawq_version): "postgres (HAWQ) 1.1.4.0 build dev"
# version_string_information for postgres (--version): "postgres (PostgreSQL) 9.2.4"
version_string_information_match_list = re.findall("\)\s(.*)", version_string_information)
if version_string_information_match_list:
# Remove everything after space and underscore
version_part = re.sub(r'\s.*$', r'', version_string_information_match_list[0])
version_part = re.sub(r'_.*$', r'', version_part)
# At this point, we have a version
self.version_string = version_part
else:
tinctest.logger.critical("Unexpected version string obtained: %s." %version_string_information)
tinctest.logger.critical("Could not detect one of the supported products (gpdb, hawq or postgres) in your environment. Make sure your environment is set correctly.")
raise MPPTestCaseException("Unexpected version string obtained: %s" %version_string_information)
def _get_gpopt_version(self):
# Return gpopt_version. Return empty, if not found.
gp_opt_version = ""
try:
# The following command will fail if the DUT doesn't have optimizer
gp_opt_version_cmd_results = {}
psql_stdout = PSQL.run_sql_command("select gp_opt_version()", flags = "-t -q", results=gp_opt_version_cmd_results).strip()
if gp_opt_version_cmd_results['rc'] or gp_opt_version_cmd_results['stderr'] != "":
# received an error
return gp_opt_version
# Output is in the format of: GPOPT version: 1.241, GPOS version: 1.90, Xerces version: 3.1.1-p1
# We want 1.241 from the above
gp_opt_version = psql_stdout.split()[2].strip(",")
except Exception, e:
tinctest.logger.debug("Failed while running select gp_opt_version: %s" %e)
return gp_opt_version
def __str__(self):
return "DUT: product: %s ; version: %s" % (self.product, self.version_string)
class _MPPMetaClassType(type):
"""
MPPMetaClassType class overrides new and init methods of metaclass type.
It is used to dynamically change a class's MRO for a DUT.
It does this by iterating through the base classes and checking
if there are any product-specific hidden models of those base classes.
MPPTestCase and all of its derived classes are of type MPPMetaClassType.
Product-specific hidden models have to follow these rules:
- They have to reside in the same module as the base class.
- They have to be prefixed and suffixed with two underscores (__)
- They have to have the lower-case product name in the class name, following the prefix of __
- The product name has to be same as the one provided by DUT class.
An example of product-specific hidden model: __gpdbSQLTestCase__ in the same module as SQLTestCase for gpdb DUT.
"""
# Class variable to keep track of DUT
DUT = MPPDUT()
tinctest.logger.info(DUT)
def __new__(metaclass, clsname, bases, dct):
# Add DUT to class's built-in dictionary
dct['__product__'] = _MPPMetaClassType.DUT.product
dct['__version_string__'] = _MPPMetaClassType.DUT.version_string
dct['__product_environment__'] = _MPPMetaClassType.DUT.product_environment
dct['change_mro'] = False
dct['make_me_product_agnostic'] = classmethod(metaclass.make_me_product_agnostic)
new_bases = ()
        if (clsname.startswith('__') and clsname.endswith('__')) or (clsname == 'MPPTestCase'):
# If here, our clsname is one of the product-specific hidden models or MPPTestCase
# No need to check bases
new_bases += bases
else:
# If here, we need to check each of our clsname's bases
# and see if each has product-specific class
for base in bases:
new_base_name = '__' + _MPPMetaClassType.DUT.product + base.__name__ + '__'
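                # e.g. SQLTestCase becomes __gpdbSQLTestCase__ when the detected product is gpdb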
# Variable to track whether we found a match for the base
try:
""" Product-specific hidden models should always reside in the same module as the base class """
exec ('from ' + base.__module__ + ' import ' + new_base_name)
new_bases += (eval(new_base_name),)
except:
new_bases += (base,)
return super(_MPPMetaClassType, metaclass).__new__(metaclass, clsname, new_bases, dct)
def __init__(cls, clsname, bases, dct):
super(_MPPMetaClassType, cls).__init__(clsname, bases, dct)
@staticmethod
def make_me_product_agnostic(cls):
# Change the class variable change_mro to let mro() method know that this class needs to prepend product specific model
cls.change_mro = True
# The line below (fakingly changing the cls' bases) retriggers mro() method
cls.__bases__ = cls.__bases__ + tuple()
def mro(cls):
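        # if make_me_product_agnostic() was called, prepend the product-specific hidden class (when importable) to the default MRO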
default_mro = super(_MPPMetaClassType, cls).mro()
if hasattr(cls, "change_mro") and cls.change_mro:
new_class_name = '__' + _MPPMetaClassType.DUT.product + cls.__name__ + '__'
try:
exec ('from ' + cls.__module__ + ' import ' + new_class_name)
new_class_object = eval(new_class_name)
default_mro.insert(0, new_class_object)
return default_mro
except:
# No hidden class defined. Nothing to do
pass
return default_mro
@tinctest.skipLoading("Test model. No tests loaded.")
class MPPTestCase(tinctest.TINCTestCase):
"""
MPPTestCase model is a top-level executor for all MPP test cases. All MPP test cases (HAWQ, GPDB, etc.)
should either directly or indirectly inherit from MPPTestCase. It inherits from TINCTestCase,
and is a parent of SQLTestCase.
When a test of this type fails, we do the following:
-> if restart_on_fatal_failure is set to True, inspect logs for errors and restart the cluster.
-> if gather_logs_on_failure is set to True, gather master and segment logs for the duration of the test case when this test case fails.
@metadata: host: Host where the MPP database resides. Defaults to localhost.
@metadata: db_name: Database where the test case will be executed. Defaults to system environment variable DBNAME.
@metadata: username: Username to use to login to the database. Defaults to system environment variable USER.
@metadata: password: Password to use to login to the database. If not given, it assumes that user has trust authentication.
    @metadata: gather_logs_on_failure: Gather master and segment logs in case of a fatal failure.
@metadata: restart_on_fatal_failure: Boolean to determine if the cluster should be restarted on failure. If the metadata doesn't exist, it won't be restarted.
@undocumented: defaultTestResult
@undocumented: __metaclass__
"""
# MPPTestCase class is of type MPPMetaClassType
# MPPMetaClassType will take of reconfiguring the bases of all the derived classes that have product-specific hidden models
__metaclass__ = _MPPMetaClassType
#: Directory relative to the test module where all the output artifacts will be collected. Defaults to 'output/'
out_dir = 'output/'
#: Database name to be used for any connection to the test cluster. Defaults to None. This database will also be configured in setUpClass on MPPTestCase
db_name = None
def __init__(self, methodName, baseline_result = None):
#: boolean that determines whether or not to restart the cluster on a fatal failure. Defaults to False.
self.restart_on_fatal_failure = False
#: boolean that determines whether or not to gather logs on failure. Defaults to False
self.gather_logs_on_failure = False
super(MPPTestCase, self).__init__(methodName, baseline_result)
@classmethod
def setUpClass(cls):
"""
setUpClass of MPPTestCase does the following:
-> Create out directory for the class if it does not exist.
This is thread safe in case an MPPTestCase is used concurrently
within a ScenarioTestCase or ConcurrencyTestCase
-> Configures the database specified at the class level variable 'db_name'
"""
tinctest.logger.trace_in()
#QAINF-760 - we need to treat db_name in the class level doc string as a class level variable
#rather than an instance level variable
ds = cls.__doc__
if ds:
lines = ds.splitlines()
for line in lines:
line = line.strip()
if line.find('@db_name') != 0:
continue
line = line[1:]
if len(line.split()) <= 1:
break
(key, cls.db_name) = line.split(' ', 1)
break
super(MPPTestCase, cls).setUpClass()
if not os.path.exists(cls.get_out_dir()):
TINCSystem.make_dirs(cls.get_out_dir(), ignore_exists_error = True)
if cls.db_name:
tinctest.logger.debug("Configure database %s from MPPTestCase setUpClass." % cls.db_name)
cls.configure_database(cls.db_name)
tinctest.logger.trace_out()
@classmethod
def get_out_dir(cls):
"""
Returns the absolute output directory for this test class.
Joins cls.out_dir with the location where the test module exists.
"""
source_file = sys.modules[cls.__module__].__file__
source_dir = os.path.dirname(source_file)
abs_out_dir = os.path.join(source_dir, cls.out_dir)
return abs_out_dir
@classmethod
def get_source_dir(cls):
"""
Returns the directory at which this test class exists.
"""
source_file = sys.modules[cls.__module__].__file__
source_dir = os.path.dirname(source_file)
return source_dir
@classmethod
def configure_database(cls,db_name):
"""
Configures the given database using datagen libraries.
@param db_name: Name of the database to be configured. If there is no specific datagen available for this database,
this will just create an empty database with the given name.
@type db_name: string
"""
tinctest.logger.trace_in(db_name)
if not __databases__.has_key(db_name):
tinctest.logger.info("db_name %s is not defined in __databases__ dictionary." % db_name)
__databases__[db_name] = TINCTestDatabase(database_name=db_name)
py_mod = sys.modules[cls.__module__]
TINCTestCustomDatabase = None
for obj in inspect.getmembers(py_mod, lambda member: inspect.isclass(member)
and issubclass(member, TINCTestDatabase)):
if obj[1]._infer_metadata().get('db_name', None) == db_name:
TINCTestCustomDatabase = obj[1]
break
if TINCTestCustomDatabase:
__databases__[db_name] = TINCTestCustomDatabase(database_name=db_name)
else:
tinctest.logger.warning("No CustomDatabase class provided for %s." %db_name)
if __databases__[db_name]:
tinctest.logger.info("Running setup of database %s." % db_name)
try:
__databases__[db_name].setUp()
except Exception, exp:
# if we are here, setup failed. handle errors
# accordingly.
__databases__[db_name].tearDown()
raise TINCDatagenException(exp)
tinctest.logger.trace_out()
def setUp(self):
"""
setUp method in MPPTestCase does the following:
-> Configures the database specified through the metadat 'db_name'.
This will configure the database only if it was not already done in setUpClass.
"""
tinctest.logger.trace_in()
super(MPPTestCase, self).setUp()
# Create the database if db_name metadata is specified and if it doesn't exists
# TODO: Change TINCTestDatabase to take-in PSQL options (part of story QAINF-191)
if self.db_name and self.__class__.db_name and self.db_name == self.__class__.db_name:
tinctest.logger.debug("No need to configure database %s in setUp, since it would have already been configured via setUpClass." % self.db_name)
elif self.db_name:
tinctest.logger.debug("Configure database %s from MPPTestCase setUp." % self.db_name)
self.configure_database(self.db_name)
tinctest.logger.trace_out()
def defaultTestResult(self, stream=None, descriptions=None, verbosity=None):
"""
TODO: This method should not be exposed as a public method. All result objects
will be internal. Should find out if epydocs allows some language to ignore
certain methods even if it does not start with an '_'.
Return a custom result object for MPPTestCase. We need a handle on
whether the test errored out / failed to honor metadata like 'restart'
"""
if stream and descriptions and verbosity:
return _MPPTestCaseResult(stream, descriptions, verbosity)
else:
return unittest.TestResult()
def get_product_version(self):
"""
This function is used by TINCTestCase to determine the current DUT version.
It uses this information, along with @product_version, to determine if a test case
should run in this particular DUT.
@return: A two-tuple containing name and version of the product where test is executed
@rtype: (string, string)
"""
return (self.__class__.__product__, self.__class__.__version_string__)
def _infer_metadata(self):
"""
Read all the metadata and store them as instance variables.
"""
super(MPPTestCase, self)._infer_metadata()
self.host = self._metadata.get('host', 'localhost')
self.db_name = self._metadata.get('db_name', self.__class__.db_name)
self.username = self._metadata.get('username', None)
self.password = self._metadata.get('password', None)
if self._metadata.get('gather_logs_on_failure') and self._metadata.get('gather_logs_on_failure').lower() == 'true':
self.gather_logs_on_failure = True
if self._metadata.get('restart_on_fatal_failure') and self._metadata.get('restart_on_fatal_failure').lower() == 'true':
self.restart_on_fatal_failure = True
self.gpopt = self._metadata.get('gpopt', None)
if self.gpopt:
if 'gpopt' not in self.__class__.__product_environment__:
self.skip = 'Test does not apply to the deployed system. Test Case GPOPT version - %s , Deployed system has no GPOPT' % self.gpopt
elif tuple(self.gpopt.split('.')) > tuple(self.__class__.__product_environment__['gpopt'].split('.')):
self.skip = 'Test does not apply to the deployed GPOPT version. Test Case GPOPT version - %s , Deployed version - %s' % (self.gpopt, self.__class__.__product_environment__['gpopt'])
def install_cluster(self):
"""
This function will install the cluster
"""
pass
def initialize_cluster(self):
"""
This function will initialize the cluster
"""
pass
def configure_cluster(self):
"""
This function will configure the cluster
"""
pass
def inspect_cluster(self):
"""
This function will inspect the cluster from the start time of this test till now.
Returns true if there are no errors in logs, False if there are errors in logs.
@return: Returns True / False depending on whether errors were found in the log
@rtype: boolean
"""
tinctest.logger.trace_in()
start_time = self.start_time
if start_time == 0 or not start_time:
return True
end_time = self.end_time
if end_time == 0 or not end_time:
end_time = time.time()
return_status = not GpLog.check_log_for_errors(start_time, end_time)
tinctest.logger.trace_out(str(return_status))
return return_status
def gather_log(self):
"""
This method will gather logs from all segments between start_time and end_time
of the test and write it to an out file in the output directory. The default name
of the log file will be <testmethodname>.logs
"""
tinctest.logger.trace_in()
start_time = self.start_time
if start_time == 0 or not start_time:
return
end_time = self.end_time
if end_time == 0 or not end_time:
end_time = time.time()
out_file = os.path.join(self.get_out_dir(), self._testMethodName + '.logs')
GpLog.gather_log(start_time, end_time, out_file)
tinctest.logger.trace_out()
def delete_cluster(self):
"""
This function will delete the cluster
"""
pass
def start_cluster(self):
"""
This function will start the cluster
"""
pass
def stop_cluster(self):
"""
This function will stop the cluster
"""
pass
def restart_cluster(self):
"""
This function will restart the cluster
"""
pass
class _MPPTestCaseResult(TINCTextTestResult):
"""
A custom listener class for MPPTestCase. This is responsible for
reacting appropriately to failures and errors of type MPPTestCase.
Following is what this class does on failure:
-> If restart_on_fatal_failure is set for the test , inspects the logs for
fatal failure and restarts the cluster if there are any errors found.
-> If gather_logs_on_failure is set for the test, gathers segment and master
logs to the output directory.
"""
def addFailure(self, test, err):
try:
# restart the cluster if restart_on_failure is set to True and inspect cluster returns False
if test.gather_logs_on_failure:
test.gather_log()
if test.restart_on_fatal_failure:
if not test.inspect_cluster():
tinctest.logger.warning("Errors found in the logs for this test case. Restarting the cluster")
test.restart_cluster()
except Exception, e:
tinctest.logger.exception("Re-starting cluster failed - %s" %e)
super(_MPPTestCaseResult, self).addFailure(test, err)
class __gpdbMPPTestCase__(MPPTestCase):
"""
__gpdbMPPTestCase__ is a hidden class that overrides GPDB specific methods of MPPTestCase.
This class should never be used as a parent or as an executor for any test cases.
Presently, this class doesn't override any methods. It is here only for reference.
"""
pass
class __hawqMPPTestCase__(MPPTestCase):
"""
__hawqMPPTestCase__ is a hidden class that overrides HAWQ specific methods of MPPTestCase.
This class should never be used as a parent or as an executor for any test cases.
Presently, this class doesn't override any methods. It is here only for reference.
"""
pass
class __postgresMPPTestCase__(MPPTestCase):
"""
__postgresMPPTestCase__ is a hidden class that overrides postgres specific methods of MPPTestCase.
This class should never be used as a parent or as an executor for any test cases.
Presently, this class doesn't override any methods. It is here only for reference.
"""
pass
|
[
"[email protected]"
] | |
0692a79c84ee0e748f2693731a6624ae00bcf533
|
41586d36dd07c06860b9808c760e2b0212ed846b
|
/network/dns/openresolv/actions.py
|
67d9d61544066f741f397767005544aa40d70f26
|
[] |
no_license
|
SulinOS/SulinRepository
|
4d5551861f57bc1f4bec6879dfe28ce68c7c125d
|
9686811a1e06080f63199233561a922fe1f78d67
|
refs/heads/master
| 2021-06-15T21:34:25.039979 | 2021-06-05T13:43:34 | 2021-06-05T13:43:34 | 207,672,864 | 6 | 3 | null | 2019-12-06T08:11:22 | 2019-09-10T22:16:17 |
Python
|
UTF-8
|
Python
| false | false | 455 |
py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) YEAR, YOUR NAME
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from inary.actionsapi import autotools
from inary.actionsapi import inarytools
from inary.actionsapi import get
def setup():
autotools.configure()
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR={}".format(get.installDIR()))
|
[
"[email protected]"
] | |
8d44807bd948a1f0469bd8781eb899137a1b9b58
|
1d2300dc07489c311b52066cf87d926d0386eb3b
|
/app/__init__.py
|
290be65b440a3056fa3e39a46db00909d7ca4c32
|
[
"MIT"
] |
permissive
|
MaxKusnadi/stackoverflow-scrapper
|
92749900b787f7dceacf8fab3b0426af67af68e4
|
da9834ae374fb598bf8810be5a26abacfc958267
|
refs/heads/master
| 2021-01-11T15:43:23.823619 | 2017-01-26T18:32:28 | 2017-01-26T18:32:28 | 79,912,183 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 125 |
py
|
import logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.DEBUG)
|
[
"[email protected]"
] | |
ef210239883c7e07621deacc3740c78187b1cc96
|
897914e8acf6e14f9dc42f43f342e42e38bcf5c5
|
/pigeon_app/asgi.py
|
4957b9f14d49662efc818c175dfdf0e5f585e8ab
|
[] |
no_license
|
ShazeRx/pigeon_app
|
aa1f5694419db723ba39d7eb0ef1a01cdcd22464
|
70d0ba1f019ebd322695a7b322af85554118a51e
|
refs/heads/master
| 2023-08-17T02:01:36.796184 | 2021-06-18T19:52:32 | 2021-06-18T19:52:32 | 412,397,560 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 397 |
py
|
"""
ASGI config for pigeon_app project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pigeon_app.settings')
application = get_asgi_application()
|
[
"[email protected]"
] | |
fdbbf3441857f1b3067480de3b89e390ccfce659
|
0b93015540df1aa52435d4a7d24a6f7ddf69e60f
|
/libreria.py
|
32e5073ac1f6afe6322c9b43441da8fda0017d40
|
[] |
no_license
|
smith-sanchez/t10_viilavicencio.carrion
|
f1c9a54838594c4e0a2aa6c2f16c2bec5576b857
|
d761db7e6685a99ef3312aad2817ecf0d23cecfb
|
refs/heads/master
| 2020-12-08T21:04:52.484495 | 2020-01-10T17:33:40 | 2020-01-10T17:33:40 | 233,095,554 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,559 |
py
|
# libreria
# funcion validar talla
def validar_talla(num):
# tiene que ser un entero
if(isinstance(num,int)):
return True
# fin if
else:
return False
# funcion validar rango
def validar_rango(num,ri,rf):
if(validar_talla(num)==True):
if(num>=ri and num<=rf):
return True
# fin_if
else:
return False
else:
return False
# funcion pedir rango
def pedir_rango(msg,ri,rf):
n=-1
while(validar_rango(n,ri,rf)==False):
n=input(msg)
if(n.isdigit()==True):
n=int(n)
# fin_ if
#fin_while
return n
# funcion pedir edad
def pedir_edad(msg,ri,rf):
n=-1
while(validar_rango(n,ri,rf)==False):
n=input(msg)
if(n.isdigit()==True):
n=int(n)
return n
# funcion pedir nombre
def pedir_nombre(msg):
m=""
while(validar_color(m)==False):
m=input(msg)
return m
# funcion validar color
def validar_color(color):
# es una cadena
if(isinstance(color,str)):
# longitud >3
if(len(color)>=3):
return True
else:
return False
else:
return False
# fin_if
# funcion pedir solo dos colores
# solo puede elejir una--> blanco / negro
def pedir_color(msg):
m=""
while(validar_color(m)==False):
m=input(msg)
while (m!="blanco" and m!="negro"):
m = input(msg)
# fin_while
return m
# fumcion pedir colores
# solo se puede ingresar colores seleccionadas segun el programador
def pedir_colores(msg):
m=""
while(validar_color(m)==False):
m=input(msg)
while (m!="blanco" and m!="negro" and m!="amarillo" and m!="rojo" and m!="rosado"
and m != "dorado" and m!="verde" and m!="celeste" and m!="azul"):
m = input(msg)
# fin_while
return m
# funciom validar marca
def validar_marca(sapatilla):
# tiene que ser cadena
if(isinstance(sapatilla,str)):
# su longitud es mayo que dos
if(len(sapatilla)>2):
return True
else:
return False
else:
return False
# fin_if
# fin_def
# funcion pedir marca
def pedir_marca(marca):
m=""
while(validar_marca(m)==False):
m=input(marca)
# fin_while
return m
# fin_def
# funcion pedir postre
# condicion ==> ( gelatina / mazamorra )
def pedir_marcas(marca):
m=""
while(validar_marca(m)==False):
while(m!="gelatina" and m!="mazamorra"):
m=input(marca)
# fin_while
return m
# fin_def
# funcion pedir elejir menu con la condicion
# si solo es ( desayuno / almuerzo / cena )
def pedir_marcas_condic(marca):
m=""
while(validar_marca(m)==False):
while(m!="desayuno" and m!="almuerzo" and m!="cena"):
m=input(marca)
# fin_while
return m
# fin_def
# funcion validar entero
def validar_entero(numero):
# en entero-->int
if(isinstance(numero,int)):
# entero positivo
if(numero>=0):
return True
else:
return False
else:
return False
# funcion pedir entero
def pedir_entero(msg):
n=-1
while(validar_entero(n)==False):
n=input(msg)
if(n.isdigit()==True):
n=int(n)
# fin_if
# fin:while
return n
# funcion validar tamaño
def validar_tamano(tamano):
# tiene queser una cadena
if(isinstance(tamano,str)):
# su longitud >3
if(len(tamano)>3):
return True
else:
return False
else:
return False
def pedir_tamano(msg):
n=""
while(validar_tamano(n)==False):
n=input(msg)
while(n!="grande" and n!="pequeño"):
n=input(msg)
return n
# funcion validar mes
def validar_mes(mes):
# tiene que ser cadena
if(isinstance(mes,str)):
# su longitud nmayor que cuatro
if(len(mes)>4):
return True
else:
return False
else:
return False
def pedir_peli(msg):
n=""
while(validar_mes(n)==False):
# solo se podra ingresar una pelicula ==>
while(n!="star wars" and n!="stand de besos" and n!="naruto" and
n!="dragon ball" and n!="el barco" and n!="la casa de papel"):
n=input(msg)
return n
def pedir_mes(msg):
n=""
while(validar_mes(n)==False):
n=input(msg)
while(n!="enero" and n!="febrero" and n!="marzo" and n!="abril" and n!="mayo" and n!="junio"
and n!="julio" and n!="agosto" and n!="septiembre" and n!="octubre" and n!="noviembre"
and n!="diciembre"):
n=input(msg)
return n
# funcion pedir escuela
def pedir_escuela(msg):
n=""
while(validar_mes(n)==False):
n=input(msg)
while(n!="ing. electronica" and n!="matematica" and n!="ing. informatica" and
n!="estadistica" and n!="fisica" ):
n=input(msg)
return n
def pedir_dia(msg):
n=""
while(validar_mes(n)==False):
n=input(msg)
# tiene que ingresar un dia : lunes/martes/miercoles/jueves/viernes/sabado/dommingo:
while(n!="lunes" and n!="martes" and n!="miercoles" and n!="jueves" and n!="viernes" and
n!="sabado" and n!="domingo" ):
n=input(msg)
return n
def pedir_curso(msg):
n=""
while(validar_mes(n)==False):
n=input(msg)
while(n!="matematica" and n!="programacion" and n!="analisis"):
n=input(msg)
return n
# funcion validar año
def validar_ano(ano):
# tiene que ser un entero
if(isinstance(ano,int)):
# rango entre 1950 y 2020
if(ano>=1950 and ano <=2020):
return True
else:
return False
else:
return False
# funcion pedir año
def pedir_ano(msg):
n=0
while(validar_ano(n)==False):
n=input(msg)
if(n.isdigit()==True):
n=int(n)
return n
# funcion validar telefono
def validar_telefono(telefono):
# sus digitos tienen que ser enteros
if(isinstance(telefono,int)):
# el telefono tiene que tener la forma(925780779)
if(telefono>=900000000 and telefono<=999999999):
return True
else:
return False
else:
return False
# funcion pedir telefono
def pedir_telefono(msg):
n=-1
while(validar_telefono(n)==False):
n=input(msg)
if(n.isdigit()==True):
n=int(n)
return n
# funcion validar dni
def validar_dni(dni):
if(isinstance(dni,int)):
# sus digitos son enteros
# el dni tiene que tener la forma( 74286646 )
if(dni>=10000000 and dni<=99999999):
return True
else:
return False
else:
return False
# funcion pedir dni
def pedir_dni(msg):
n=-1
while(validar_dni(n)==False):
n=input(msg)
if(n.isdigit()==True):
n=int(n)
return n
def guardar_d(nombre_archivo,contenido,modo):
archivo=open(nombre_archivo,modo)
archivo.write(contenido)
archivo.close()
def obtener_datos(nombre_archivos):
archivo = open(nombre_archivos, "r")
datos = archivo.read()
archivo.close()
return datos
|
[
"[email protected]"
] | |
ee47f2e5c8e8db84a4778c986ab56b7a70348e2f
|
7c1df6de8b6eb64f941a189d6015161713efd194
|
/weather/forms.py
|
7ffd4874167a717459eef52aadb9df4cbddc8336
|
[] |
no_license
|
phuclhv/weather_app_django
|
ad3cbd1d6d46a5bc41cd1c1f57e998e456faaa9f
|
01587369538c873ab7d3a3550dc8ca2093ea5236
|
refs/heads/master
| 2020-09-06T21:36:56.957805 | 2019-11-09T01:13:37 | 2019-11-09T01:13:37 | 220,561,400 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 238 |
py
|
from django.forms import ModelForm, TextInput
from .models import City
class Cityform(ModelForm):
class Meta:
model = City
fields = ['name']
widget = {'name': TextInput(attrs={'class': 'input', 'placeholder': 'City name'})}
|
[
"[email protected]"
] | |
e290a946b03dfac33f19285e2b51bec5e6bd5377
|
f719dc32c437a15c0eb7a229adc2848e4646a172
|
/billy/tests/functional/test_company.py
|
2a6ab678f297833c0c7ecc07b9f20901429996db
|
[
"MIT",
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
grang5/billy
|
db3a88b650962f25b8bdea80a81c5efa5d80dec0
|
a723c3aca18f817829ae088f469fabc5bea9d538
|
refs/heads/master
| 2021-04-18T19:36:05.586549 | 2014-06-16T21:47:37 | 2014-06-16T21:47:37 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,232 |
py
|
from __future__ import unicode_literals
import json
import mock
from freezegun import freeze_time
from billy.utils.generic import utc_now
from billy.tests.functional.helper import ViewTestCase
@freeze_time('2013-08-16')
class TestCompanyViews(ViewTestCase):
@mock.patch('billy.tests.fixtures.processor.DummyProcessor.register_callback')
def test_create_company(self, register_callback_method):
processor_key = 'MOCK_PROCESSOR_KEY'
now = utc_now()
now_iso = now.isoformat()
res = self.testapp.post(
'/v1/companies',
dict(processor_key=processor_key),
status=200
)
self.failUnless('processor_key' not in res.json)
self.failUnless('guid' in res.json)
self.failUnless('api_key' in res.json)
self.assertEqual(res.json['created_at'], now_iso)
self.assertEqual(res.json['updated_at'], now_iso)
company = self.company_model.get(res.json['guid'])
expected_url = 'http://localhost/v1/companies/{}/callbacks/{}/'.format(
company.guid, company.callback_key,
)
register_callback_method.assert_called_once_with(company, expected_url)
def test_create_company_with_random_callback_keys(self):
times = 100
callback_keys = set()
for _ in range(times):
res = self.testapp.post(
'/v1/companies',
dict(processor_key='MOCK_PROCESSOR_KEY'),
status=200
)
company = self.company_model.get(res.json['guid'])
callback_keys.add(company.callback_key)
# ensure callback keys won't repeat
self.assertEqual(len(callback_keys), times)
@mock.patch('billy.tests.fixtures.processor.DummyProcessor.callback')
def test_callback(self, callback_method, slash=False):
res = self.testapp.post(
'/v1/companies',
dict(processor_key='MOCK_PROCESSOR_KEY'),
)
guid = res.json['guid']
payload = dict(foo='bar')
company = self.company_model.get(guid)
url = '/v1/companies/{}/callbacks/{}'.format(guid, company.callback_key)
if slash:
url = url + '/'
res = self.testapp.post(
url,
json.dumps(payload),
headers=[(b'content-type', b'application/json')],
)
self.assertEqual(res.json['code'], 'ok')
callback_method.assert_called_once_with(company, payload)
@mock.patch('billy.tests.fixtures.processor.DummyProcessor.callback')
def test_callback_with_slash_ending(self, callback_method):
self.test_callback(slash=True)
def test_create_company_with_bad_parameters(self):
self.testapp.post(
'/v1/companies',
status=400,
)
def test_get_company(self):
processor_key = 'MOCK_PROCESSOR_KEY'
res = self.testapp.post(
'/v1/companies',
dict(processor_key=processor_key),
status=200
)
created_company = res.json
guid = created_company['guid']
api_key = str(created_company['api_key'])
res = self.testapp.get(
'/v1/companies/{}'.format(guid),
extra_environ=dict(REMOTE_USER=api_key),
status=200,
)
self.assertEqual(res.json, created_company)
def test_get_company_with_bad_api_key(self):
processor_key = 'MOCK_PROCESSOR_KEY'
res = self.testapp.post(
'/v1/companies',
dict(processor_key=processor_key),
status=200
)
created_company = res.json
guid = created_company['guid']
self.testapp.get(
'/v1/companies/{}'.format(guid),
extra_environ=dict(REMOTE_USER=b'BAD_API_KEY'),
status=403,
)
self.testapp.get(
'/v1/companies/{}'.format(guid),
status=403,
)
def test_get_non_existing_company(self):
processor_key = 'MOCK_PROCESSOR_KEY'
res = self.testapp.post(
'/v1/companies',
dict(processor_key=processor_key),
status=200
)
api_key = str(res.json['api_key'])
self.testapp.get(
'/v1/companies/NON_EXIST',
extra_environ=dict(REMOTE_USER=api_key),
status=404
)
def test_get_other_company(self):
processor_key = 'MOCK_PROCESSOR_KEY'
res = self.testapp.post(
'/v1/companies',
dict(processor_key=processor_key),
status=200
)
api_key1 = str(res.json['api_key'])
guid1 = res.json['guid']
res = self.testapp.post(
'/v1/companies',
dict(processor_key=processor_key),
status=200
)
api_key2 = str(res.json['api_key'])
guid2 = res.json['guid']
self.testapp.get(
'/v1/companies/{}'.format(guid2),
extra_environ=dict(REMOTE_USER=api_key1),
status=403,
)
self.testapp.get(
'/v1/companies/{}'.format(guid1),
extra_environ=dict(REMOTE_USER=api_key2),
status=403,
)
|
[
"[email protected]"
] | |
c7875cc5e8a302db04c8bdc305771a81583e3d0e
|
bf426f52cf7462ba4b8b583f0fbd3f5585a73491
|
/Internet/Web/cgi-bin/tutor3.py
|
8f7c848d5df8a49b7532ae06c74fe9e851df00a5
|
[] |
no_license
|
CodedQuen/Programming-Python-by-Mark-Lutz
|
337b309f8ba98be3ac7585d0de0fc8d7ee4697f5
|
0397b29973ab24d476308b1f4f3c9befb3169a25
|
refs/heads/master
| 2022-06-09T21:19:01.891651 | 2020-05-01T01:57:13 | 2020-05-01T01:57:13 | 260,358,442 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 465 |
py
|
#!/usr/bin/python
"""
runs on the server, reads form input, prints HTML;
url=http://server-name/cgi-bin/tutor3.py
"""
import cgi
form = cgi.FieldStorage() # parse form data
print('Content-type: text/html\n') # header plus the blank line that ends the CGI headers
html = """
<TITLE>tutor3.py</TITLE>
<H1>Greetings</H1>
<HR>
<P>%s</P>
<HR>"""
if not 'user' in form:
print(html % 'Who are you?')
else:
print(html % ('Hello, %s.' % form['user'].value))
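# Example requests (server name is illustrative; the 'user' field name comes from the check above):
#     http://server-name/cgi-bin/tutor3.py            -> page says "Who are you?"
#     http://server-name/cgi-bin/tutor3.py?user=Sue   -> page says "Hello, Sue."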
|
[
"[email protected]"
] | |
40ecff4fd8323752bb84797d6c98d85d52bd3e40
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02269/s412170301.py
|
7790eefd3630490518a1dc914ece731d74e805f4
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 235 |
py
|
se = set([])
n = int(raw_input())
for i in range(n):
s = raw_input().split()
if s[0] == 'insert':
se.add(s[1])
elif s[0] == 'find':
if s[1] in se:
print 'yes'
else:
print 'no'
|
[
"[email protected]"
] | |
ae2861b0b2ff85c3be0f336934ba4427b074f31d
|
711a99404fe7e540f2c23f3b28b894921ec8a679
|
/System_Test/Test_scripts/Click_on_Clusters.py
|
6ab19409f34b459f3d2df36c0c7523d4c9ca09ac
|
[] |
no_license
|
chetandg123/cQube_Testing
|
14a8683799f65b1ad45ff768efb7101cb0be6389
|
f4fa01111e7958740c73b3bea6dc54b2241c83d9
|
refs/heads/master
| 2022-10-02T06:21:38.563225 | 2020-06-04T11:42:18 | 2020-06-04T11:42:18 | 268,982,907 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 901 |
py
|
import time
import unittest
from selenium import webdriver
from Data.parameters import Data
from TS.reuse_func import cqube
from get_dir import pwd
class test_cluster(unittest.TestCase):
def setUp(self):
driver_path = pwd()
self.driver = webdriver.Chrome(executable_path=driver_path.get_driver_path())
driver = cqube(self.driver)
driver.open_cqube_appln()
driver = cqube(self.driver)
driver.login_cqube()
driver.navigate_to_student_report()
def test_url(self):
time.sleep(5)
# self.driver.find_element_by_xpath(Data.Clusters).click()
# time.sleep(15)
dots = self.driver.find_elements_by_xpath(Data.dots)
count = len(dots)
self.assertEqual(0,count,msg="Failed ")
def tearDown(self):
time.sleep(5)
self.driver.close()
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
60ca89aa36d05ec224b9ef141ca560d9eb666111
|
3c62516d2afa561dc53e09d718a32bffef08b497
|
/defense/resnet_xception_vgg19_dual/common/nips_util.py
|
29ec5b1e8f6955e752791e0a8956faf80b516367
|
[
"Apache-2.0"
] |
permissive
|
ckomaki/kaggle-nips-2017
|
fec1516b7ed36cc7fd5eafb209719c8cf0655a2b
|
6ecea74307b68ca033b6388a61b57fc96d3da124
|
refs/heads/master
| 2021-07-07T13:11:56.767439 | 2017-10-04T12:41:48 | 2017-10-04T12:41:48 | 105,617,104 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,754 |
py
|
import numpy as np
import os
import pandas as pd
from argparse import ArgumentParser
from numpy import random
from scipy.misc import imsave
class NipsUtil:
def write(self, predicted_labels):
"""
@type predicted_labels: dict[str, int]
"""
with open(self.__args.output_file, 'w') as fp:
for image_name, label in predicted_labels.items():
fp.write('%s,%d\n' % (image_name, label + 1))
def __init__(self):
parser = ArgumentParser()
parser.add_argument('--input-dir', dest='input_dir', type=str)
parser.add_argument('--output-file', dest='output_file', type=str)
parser.add_argument('--output-dir', dest='output_dir', type=str)
parser.add_argument('--max-epsilon', dest='max_epsilon', type=int)
parser.add_argument('--fall11', dest='fall11_path', type=str)
self.__args = parser.parse_args()
def get_image_names(self):
names = filter(lambda path: path.endswith('.png'), os.listdir(self.__args.input_dir))
# names = filter(lambda _: random.randint(20) == 0, names)
return names
def read_target_classes(self):
path = os.path.join(self.__args.input_dir, 'target_class.csv')
df = pd.read_csv(path, header=None, names=['name', 'target'])
df['target'] = df['target'].map(lambda target_: int(target_) - 1)
df['target'] = df['target'].map(lambda target_: target_ if 0 <= target_ < 1000 else 0)
return df.set_index('name')['target'].to_dict()
def get_defense_output_path(self):
return self.__args.output_file
def get_attack_output_path(self, image_name):
return os.path.join(self.__args.output_dir, image_name)
def get_max_epsilon(self):
return self.__args.max_epsilon
def read_image(self, path):
from keras.preprocessing import image
return image.img_to_array(image.load_img(path))
def read_images(self, image_names):
return np.array([
self.read_image(os.path.join(self.__args.input_dir, image_name))
for image_name
in image_names
]).astype(np.float64)
def clip(self, images, base_images):
images = np.clip(images, base_images - self.__args.max_epsilon, base_images + self.__args.max_epsilon)
images = np.clip(images, 0, 255)
return images
def write_attack(self, images, image_names):
for image, image_name in zip(images, image_names):
imsave(os.path.join(self.__args.output_dir, image_name), image)
def write_defense(self, predicted_labels):
"""
@type predicted_labels: dict[str, int]
"""
with open(self.__args.output_file, 'w') as fp:
for image_name, label in predicted_labels.items():
fp.write('%s,%d\n' % (image_name, label + 1))
def simple_attacker(self, epoch, compute_gradient_list, apply_gradient):
all_image_names = self.get_image_names()
batch_size = 10
for begin in range(0, len(all_image_names), batch_size):
image_names = all_image_names[begin: begin + batch_size]
base_images = self.read_images(image_names)
images = base_images.copy()
for i in range(epoch):
gradients = compute_gradient_list[i % len(compute_gradient_list)](images, image_names)
images = apply_gradient(images, gradients, i)
images = self.clip(images, base_images)
self.write_attack(images, image_names)
def simple_target_attacker(self, epoch, compute_gradient_list, apply_gradient):
all_target_classes = self.read_target_classes()
all_image_names = self.get_image_names()
self.println("image names: %d" % len(all_image_names))
batch_size = 10
for begin in range(0, len(all_image_names), batch_size):
            if begin % 100 == 0:  # log progress periodically (batch_size is constant, so testing it never fired)
self.println("begin: %d" % begin)
image_names = all_image_names[begin: begin + batch_size]
target_classes = map(lambda image_name_: all_target_classes[image_name_], image_names)
base_images = self.read_images(image_names)
images = base_images.copy()
for i in range(epoch):
gradients = compute_gradient_list[i % len(compute_gradient_list)](images, target_classes)
images = apply_gradient(images, gradients, i)
images = self.clip(images, base_images)
self.write_attack(images, image_names)
def simple_defenser_predict_labels(self, predicts):
if not isinstance(predicts, list):
predicts = [predicts]
all_image_names = self.get_image_names()
self.println("image names: %d" % len(all_image_names))
predicted_labels = {}
batch_size = 10
for begin in range(0, len(all_image_names), batch_size):
            if begin % 100 == 0:  # log progress periodically
self.println("begin: %d" % begin)
image_names = all_image_names[begin: begin + batch_size]
images = self.read_images(image_names)
predictions = np.sum([predict(images) for predict in predicts], axis=0)
for image_name, prediction in zip(image_names, predictions):
predicted_labels[image_name] = np.argmax(prediction)
return predicted_labels
def simple_defenser_predict_labels_score(self, predicts):
if not isinstance(predicts, list):
predicts = [predicts]
all_image_names = self.get_image_names()
self.println("image names: %d" % len(all_image_names))
predicted_labels = {}
batch_size = 10
for begin in range(0, len(all_image_names), batch_size):
            if begin % 100 == 0:  # log progress periodically
self.println("begin: %d" % begin)
image_names = all_image_names[begin: begin + batch_size]
images = self.read_images(image_names)
predictions = np.sum([predict(images) for predict in predicts], axis=0)
for image_name, prediction in zip(image_names, predictions):
predicted_labels[image_name] = prediction
return predicted_labels
def println(self, message):
import sys
print(message)
sys.stdout.flush()
def simple_trainer_train(self, trainer):
image_to_label = pd.read_csv(self.__args.fall11_path).set_index('name')['imagenet_index'].to_dict()
# print image_to_label
paths = {}
for attack_type in ['attacks_output', 'targeted_attacks_output']:
self.println(os.path.join(self.__args.input_dir, attack_type))
for attacker_name in os.listdir(os.path.join(self.__args.input_dir, attack_type)):
print attacker_name
found = 0
attacker_path = os.path.join(self.__args.input_dir, attack_type, attacker_name)
if not os.path.isdir(attacker_path):
continue
for image_name in os.listdir(attacker_path):
label = image_to_label.get(image_name[:-4], None)
if label is None:
continue
image_path = os.path.join(attacker_path, image_name)
if label in [134, 517]:
paths.setdefault(134, []).append(image_path)
paths.setdefault(517, []).append(image_path)
else:
paths.setdefault(label, []).append(image_path)
found += 1
self.println("attack %s: %d" % (attacker_name, found))
self.println("label num: %d" % len(paths.keys()))
for i in range(1000):
if i not in paths:
self.println(i)
def get_random(l_):
return l_[random.randint(len(l_))]
batch_size = 12
for step in range(40):
score = 0
for _ in range(100):
labels = [get_random(list(paths.keys())) for _ in range(batch_size)]
label_onehots = np.zeros((batch_size, 1000))
for label_i, label in enumerate(labels):
label_onehots[label_i, label] = 1
# print get_random(paths[labels[0]])
images = np.array([self.read_image(get_random(paths[label])) for label in labels])
predictions = trainer.predict(images)
trainer.train(images, label_onehots)
score += np.mean((predictions - label_onehots) ** 2)
self.println("%4d: %f" % (step, score))
self.println("saving to: %s" % self.__args.output_file)
trainer.save_weights(self.__args.output_file)
|
[
"[email protected]"
] | |
328911b18176b0e78a1e7b37cb8d91b1fe1a12fd
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03496/s065615411.py
|
36ffdb7558e56bc715947a4d7a8023899a98c2e3
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 387 |
py
|
from operator import itemgetter
N = int(input())
A = list(map(int, input().split()))
i = max(enumerate(map(abs, A)), key=itemgetter(1))[0]
print(2*N)
if A[i]<0:
print(i+1, N)
print(i+1, N)
for j in range(N, 1, -1) :
print(j, j-1)
print(j, j-1)
else :
print(i+1, 1)
print(i+1, 1)
for j in range(1, N) :
print(j, j+1)
print(j, j+1)
|
[
"[email protected]"
] | |
d18f189a6e54f4324082f1387a36081bbc37aee3
|
5b71e2952f34dd3bb20148874d952fee06d31857
|
/app/mf/crud/migrations/0114_auto_20210213_1157.py
|
a34fbcd3016d1fb31dcd178b570e5cf365728f97
|
[] |
no_license
|
isela1998/facebook
|
a937917cddb9ef043dd6014efc44d59d034102b1
|
a0f2f146eb602b45c951995a5cb44409426250c5
|
refs/heads/master
| 2023-07-18T02:14:50.293774 | 2021-08-28T03:26:06 | 2021-08-28T03:26:06 | 400,613,743 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 663 |
py
|
# Generated by Django 3.1.1 on 2021-02-13 16:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crud', '0113_auto_20210213_1006'),
]
operations = [
migrations.AddField(
model_name='deliveryorder',
name='date_end',
field=models.DateField(default='2021-02-13', max_length=10, verbose_name='Fecha|Vencimiento'),
),
migrations.AlterField(
model_name='deliveryorder',
name='datejoined',
field=models.DateField(default='2021-02-13', max_length=10, verbose_name='Fecha|Traslado'),
),
]
|
[
"[email protected]"
] | |
babd3c72d5adbec375eaa01f34dc17838cfd6c77
|
a2e638cd0c124254e67963bda62c21351881ee75
|
/Extensions/_export_base_py/FPythonCode/FSyncronizeBPWithTransHist.py
|
a9b4f05832e933b3ee49300ee4de1f7dedafa1aa
|
[] |
no_license
|
webclinic017/fa-absa-py3
|
1ffa98f2bd72d541166fdaac421d3c84147a4e01
|
5e7cc7de3495145501ca53deb9efee2233ab7e1c
|
refs/heads/main
| 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,725 |
py
|
""" Compiled: 2020-09-18 10:38:52 """
#__src_file__ = "extensions/export/./etc/FSyncronizeBPWithTransHist.py"
"""-------------------------------------------------------------------------------------------------------
MODULE
FSyncronizeBPWithTransHist -
This module takes care of creating and updating BusinessProcesses as defined
by an FIntegration instance
(c) Copyright 2012 SunGard FRONT ARENA. All rights reserved.
DESCRIPTION
See ExportBaseReadMe.py for more information about this module
-------------------------------------------------------------------------------------------------------"""
import acm
import FAssetManagementUtils
import FBusinessProcessUtils
import FExportUtils
import FIntegration
import FTransactionHistoryReader
logger = FAssetManagementUtils.GetLogger()
class FSyncronizeBPWithTransHist():
def __init__(self, integrationInstance, ACMTradeQueryIdList=None, additionalQueryDictionary=dict()):
assert(integrationInstance)
self._integrationInstance = integrationInstance
self._stateChart = self._integrationInstance.StateChart()
self._ACMTradeQueryIdList = ACMTradeQueryIdList
self._additionalQueryDictionary = additionalQueryDictionary
def _Filter(self, trade):
for storedQuery in FExportUtils.TradeFilterQueriesForIntegration(self._integrationInstance.TradeACMQueryPrefix()):
if storedQuery.Query().IsSatisfiedBy(trade):
return True
return False
def TradeQueryIdList(self):
return self._ACMTradeQueryIdList
def AdditionalQueryDictionary(self):
return self._additionalQueryDictionary
def _CreateBusinessProcessToTrade(self, trade, transition, businessProcesses):
businessProcess = FBusinessProcessUtils.GetBusinessProcessWithCache(trade, self._stateChart.Name())
if not businessProcess:
# Create a BP if there is a create event for the transition, and the trade is still in this state
if transition.EventId() == FIntegration.FTransition.CREATE_EVENT_ID and trade.Status() == transition.ToStatus():
businessProcess = FBusinessProcessUtils.CreateBusinessProcess(trade, self._stateChart.Name())
businessProcess.Commit()
logger.debug("Created business process %i for %s %i" % (businessProcess.Oid(), trade.Class().Name(), trade.Oid()))
if businessProcess:
businessProcesses.append(businessProcess)
return bool(businessProcess)
def _CreateBusinessProcessToLinkedObject(self, trade, businessProcesses):
if self._integrationInstance.LinkedExportObjects():
for (linkedObjectFunc, stateChartId, exportObjectId) in self._integrationInstance.LinkedExportObjects():
linkedObject = linkedObjectFunc(trade)
if linkedObject:
logger.debug("Found linked object %s %i from trade %i using %s function" % (linkedObject.Class().Name(), linkedObject.Oid(), trade.Oid(), str(linkedObjectFunc)))
businessProcess = FBusinessProcessUtils.GetBusinessProcessWithCache(linkedObject, stateChartId)
if not businessProcess:
errStr = "The linked export objects with id '%s' has no matching query list. Check the initialization of FExportProcess." % exportObjectId
assert self.AdditionalQueryDictionary().has_key(exportObjectId), errStr
linkQueries = self.AdditionalQueryDictionary()[exportObjectId]
if FExportUtils.FindMatchingQueryId(linkedObject, linkQueries):
businessProcess = FBusinessProcessUtils.GetOrCreateBusinessProcess(linkedObject, stateChartId)
errStr = "Could not create business process for %s %i on state chart %s" % (linkedObject.Class().Name(), linkedObject.Oid(), stateChartId)
assert(businessProcess), errStr
businessProcess.Commit()
logger.debug("Created business process %i for %s %i" % (businessProcess.Oid(), linkedObject.Class().Name(), linkedObject.Oid()))
if businessProcess:
businessProcesses.append(businessProcess)
@staticmethod
def _TransactionPushBusinessProcess(transaction, businessProcess):
transition = transaction.Transition()
subject = transaction.Current()
# There is a BP already created, and we want to push it further
# The matched trades must have BPs and it will also be moved according to trade events that has occurred
if (FBusinessProcessUtils.IsValidEvent(businessProcess, transition.EventId())):
try:
businessProcess.HandleEvent(transition.EventId())
businessProcess.Commit()
currentStep = businessProcess.CurrentStep()
assert(currentStep), "A business process must have a current step"
currentStep.SubjectVersion(transaction.Version())
currentStep.Commit()
subject = businessProcess.Subject()
assert(subject == transaction.Current()), 'Subject and transactionEvent objects are not the same'
logger.info("Business process %i for trade %i is now in state %s", businessProcess.Oid(), subject.Oid(), transition.EventId())
except StandardError as error:
errStr = 'Could not invoke %s on business process %i: %s' % (transition.EventId(), businessProcess.Oid(), error)
logger.error(errStr)
FBusinessProcessUtils.SetBusinessProcessToError(businessProcess, errStr)
raise StandardError(errStr)
def _TransactionCreateAndPushBusinessProcess(self, transaction):
businessProcesses = list()
transition = transaction.Transition()
trade = transaction.Current()
if self._CreateBusinessProcessToTrade(trade, transition, businessProcesses):
# Linked object business processes are only created if a business process on the trade exists
self._CreateBusinessProcessToLinkedObject(trade, businessProcesses)
for businessProcess in businessProcesses:
self._TransactionPushBusinessProcess(transaction, businessProcess)
def Execute(self):
"""
This synchronises the relevant Business Processes with events that has occurred
in the ADS since last run.
"""
subscriptionId = self._integrationInstance.Id()
transitionsSpecification = self._integrationInstance.TradeTransitions()
reader = FTransactionHistoryReader.FPastTradeStatusTransitions(subscriptionId, transitionsSpecification, self._Filter)
try:
readerItems = reader.Read()
except StandardError as error:
logger.error('Past transactions could not be read. %s', error)
else:
for tradeOid, transactions in readerItems:
trade = acm.FTrade[tradeOid]
assert(trade)
transactions.sort()
n1=len(transactions)
bp=acm.FTrade[tradeOid].BusinessProcesses()
ready_state=False
if bp:
bp=bp[0]
ready_state=bp.CurrentStep().State().Name()=='Ready'
				p1=transactions[n1-1].Transition().ToStatus()=='FO Amend'  # last transition moved the trade to 'FO Amend'
				p2=ready_state and transactions[n1-1].Transition().ToStatus()=='Void'  # trade voided while its business process is still in 'Ready'
#See that this trade matches the selected queries
if self.TradeQueryIdList() == None or FExportUtils.FindMatchingQueryId(trade, self.TradeQueryIdList()):
for transaction in transactions:
try:
if not p1:
if not p2:
self._TransactionCreateAndPushBusinessProcess(transaction)
else:
if bp:
bp.ForceToState('Cancel Sent', 'Forced to the state because the trade was not exported before it entered into Void state')
bp.Commit()
#if transaction.Transition().ToStatus()=='Void':
# self._TransactionCreateAndPushBusinessProcess(transaction)
else:
if ready_state:
if bp:
bp.ForceToState('Awaiting Confirmation', 'Forced to the state because trade was not exported before it entered into FO Amend state')
bp.Commit()
else:
if not transaction.Transition().ToStatus()=='BO Confirmed':
self._TransactionCreateAndPushBusinessProcess(transaction)
except StandardError as error:
logger.error('Error in _TransactionCreateAndPushBusinessProcess: %s' % error)
try:
reader.Accept()
except StandardError as error:
logger.error('Could not recover all transactions. Please try again. %s', error)
def InitialiseIntegration(self):
"""
Initialises all exportable trades (and linked objects) with business processes, without
considering transaction history state.
"""
logger.info('Initialising all exportable trades for integration "%s"', self._integrationInstance.Id())
creationTransitions = [t for t in self._integrationInstance.TradeTransitions() if t.EventId() == FIntegration.FTransition.CREATE_EVENT_ID]
if not creationTransitions:
raise RuntimeError('No export creation transitions found for integration')
businessProcesses = list()
for storedQuery in FExportUtils.TradeFilterQueriesForIntegration(self._integrationInstance.TradeACMQueryPrefix()):
trades = storedQuery.Query().Select()
logger.debug('Processing %d trades for query "%s"', trades.Size(), storedQuery.Name())
for trade in trades:
for transition in creationTransitions:
if self._CreateBusinessProcessToTrade(trade, transition, businessProcesses):
self._CreateBusinessProcessToLinkedObject(trade, businessProcesses)
break
logger.info('Processed %d export business processes initialising integration.', len(businessProcesses))
|
[
"[email protected]"
] | |
fd01db6f4def3cbe494858b10ab39bf6cedb772e
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/4/jf0.py
|
23fe4da1b12d8d8afd5dea9d2a49f103757bc42d
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 486 |
py
|
import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'jF0':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1])
|
[
"[email protected]"
] | |
200c2e434c8baba1cca508ed6aeeac33b6aa710d
|
b0f1acbe5cd30c2ade801465924c12403ab7e585
|
/Corda_Api_Library/openapi_client/model/net_corda_core_contracts_state_ref.py
|
190e98ef92804385fb048410022c9a0d8a6a81e0
|
[] |
no_license
|
TanzimAzadNishan/Blockchain-Based-Online-Ticketing-Platform
|
94ea0f06a7761f9033f7a1dc61548ade6f6ff499
|
d04a2696cab4c41743c7c5999c623002d0e57f80
|
refs/heads/main
| 2023-03-09T14:34:27.148340 | 2021-02-24T11:49:26 | 2021-02-24T11:49:26 | 338,845,282 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,711 |
py
|
"""
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class NetCordaCoreContractsStateRef(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'txhash': (str,), # noqa: E501
'index': (int,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'txhash': 'txhash', # noqa: E501
'index': 'index', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, txhash, index, *args, **kwargs): # noqa: E501
"""NetCordaCoreContractsStateRef - a model defined in OpenAPI
Args:
txhash (str): Base 58 Encoded Secure Hash
index (int):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.txhash = txhash
self.index = index
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
|
[
"[email protected]"
] | |
8ba3542efe1c21d14f7e692d5f0fd03380f2f97c
|
4bb1a23a62bf6dc83a107d4da8daefd9b383fc99
|
/contests/agc032/b.py
|
c8b8c5aeb0753d30b8b877cc4b14c6817e83b4a7
|
[] |
no_license
|
takushi-m/atcoder-work
|
0aeea397c85173318497e08cb849efd459a9f6b6
|
f6769f0be9c085bde88129a1e9205fb817bb556a
|
refs/heads/master
| 2021-09-24T16:52:58.752112 | 2021-09-11T14:17:10 | 2021-09-11T14:17:10 | 144,509,843 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 530 |
py
|
# -*- coding: utf-8 -*-
from itertools import permutations
n = int(input())
res = []
for r in permutations(range(1,n+1)):
c = r[0]+r[2]
flag = True
for i in range(1,n-1):
if r[i-1]+r[i+1]!=c:
flag = False
break
if r[1]==r[n-2]==c:
pass
elif r[1]+r[n-1]==c:
res.append((r[1],r[n-1]))
else:
flag = False
if flag:
for i in range(n-1):
res.append((r[i],r[i+1]))
break
print(len(res))
for r in res:
print(r[0],r[1])
|
[
"[email protected]"
] | |
b9a24063b9bce5ff5aa13bc48e98eeb55726458b
|
a140fe192fd643ce556fa34bf2f84ddbdb97f091
|
/.history/모듈과 패키지/내장함수_20200711173115.py
|
c38f61cd8193f05c0ab2b92568285151c96acb56
|
[] |
no_license
|
sangha0719/py-practice
|
826f13cb422ef43992a69f822b9f04c2cb6d4815
|
6d71ce64bf91cc3bccee81378577d84ba9d9c121
|
refs/heads/master
| 2023-03-13T04:40:55.883279 | 2021-02-25T12:02:04 | 2021-02-25T12:02:04 | 342,230,484 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 150 |
py
|
# input : built-in function that reads a line of input from the user
language = input("무슨 언어를 좋아하세요?")
print("{0}은 아주 좋은 언어입니다".format(language))
|
[
"[email protected]"
] | |
7dc8bd4ab8584672a430541be313b7e69a40dc53
|
59e97ee94a024e24ca475b9c036fc960e1a2e7e4
|
/itfsite/models.py
|
5e01313fdff72b4a0509bcab6471abb5a25ca612
|
[] |
no_license
|
if-then-fund/if.then.fund
|
0553e585945052fcc51f81c7674316541e0e1582
|
8bc2bbf0da62ee797285e925d738adb48e03cd22
|
refs/heads/master
| 2021-01-17T00:24:31.979000 | 2017-09-05T18:51:05 | 2017-09-05T18:53:07 | 25,827,685 | 11 | 5 | null | 2017-04-03T11:26:39 | 2014-10-27T16:14:00 |
Python
|
UTF-8
|
Python
| false | false | 15,551 |
py
|
import enum
from django.db import models, transaction
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.utils import timezone
from django.template import Template, Context
from django.conf import settings
from django.http import Http404
from enumfields import EnumIntegerField as EnumField
from itfsite.accounts import User, NotificationsFrequency, AnonymousUser
from itfsite.utils import JSONField, TextFormat
#####################################################################
#
# Organizations and Campaigns
#
#####################################################################
from .middleware import load_brandings, get_branding
load_brandings()
class OrganizationType(enum.Enum):
User = 1 # any user can create an 'organization'
C4 = 2 # a 501c4
Company = 3 # a corporation/LLC
ItfBrand = 4 # us under one of our brands besides if.then.fund
def slug(self):
if self == OrganizationType.User: return "user"
if self == OrganizationType.C4: return "org"
if self == OrganizationType.Company: return "org"
if self == OrganizationType.ItfBrand: return "org"
raise ValueError()
def display(self):
if self == OrganizationType.User: return "User"
if self == OrganizationType.C4: return "Nonprofit Organization"
if self == OrganizationType.Company: return "Company"
if self == OrganizationType.ItfBrand: return "Company"
raise ValueError()
class Organization(models.Model):
"""An organization can be the owner of Triggers and TriggerCustomizations."""
name = models.CharField(max_length=200, help_text="The name of the Organization.")
slug = models.SlugField(max_length=200, help_text="The unique URL slug for this Organization.")
orgtype = EnumField(OrganizationType, help_text="The type of the organization.")
created = models.DateTimeField(auto_now_add=True, db_index=True)
updated = models.DateTimeField(auto_now=True, db_index=True)
description = models.TextField(help_text="Description text in the format given by description_format.")
description_format = EnumField(TextFormat, default=TextFormat.Markdown, help_text="The format of the description text.")
profile_image = models.ImageField(blank=True, null=True, upload_to="campaign-media", help_text="The logo or headshot to display as the profile picture on the organization's page, and the default og:image (for Facebook and Twitter posts) if og_image is not provided. At least 120px x 120px and must be square.")
og_image = models.ImageField(blank=True, null=True, upload_to="campaign-media", help_text="The og:image (for Facebook and Twitter posts) for the organization's profile page and the default og:image for the organization's campaigns. At least 120px x 120px and must be square.")
banner_image = models.ImageField(upload_to='org-banner-image', blank=True, null=True, help_text="This organization's banner image. Should be about 1300px wide and at least 500px tall, but the image will be resized and cropped as necessary.")
website_url = models.URLField(max_length=256, blank=True, null=True, help_text="The URL to this organization's website.")
facebook_url = models.URLField(max_length=256, blank=True, null=True, help_text="The URL to this organization's Facebook Page.")
twitter_handle = models.CharField(max_length=64, blank=True, null=True, help_text="The organization's Twitter handle (omit the @-sign).")
de_recip_id = models.CharField(max_length=64, blank=True, null=True, help_text="The recipient ID on Democracy Engine for taking tips.")
extra = JSONField(blank=True, help_text="Additional information stored with this object.")
def __str__(self):
return "%s [%d]" % (self.name, self.id)
@property
def is_real(self):
return self.orgtype != OrganizationType.ItfBrand
def get_absolute_url(self):
return "/%s/%d/%s" % (self.orgtype.slug(), self.id, self.slug)
def open_campaigns(self):
return self.campaigns.filter(status=CampaignStatus.Open)
class CampaignStatus(enum.Enum):
Draft = 0
Open = 1
Paused = 2
Closed = 3
class Campaign(models.Model):
"""A call to action."""
# Metadata
brand = models.IntegerField(default=1, choices=settings.BRAND_CHOICES, help_text="Which multi-brand site does this campaign appear on.")
title = models.CharField(max_length=200, help_text="The title for the campaign.")
slug = models.SlugField(max_length=200, help_text="The URL slug for this campaign.")
subhead = models.TextField(help_text="Short sub-heading text for use in list pages and the meta description tag, in the format given by subhead_format.")
subhead_format = EnumField(TextFormat, default=TextFormat.Markdown, help_text="The format of the subhead and image_credit text.")
status = EnumField(CampaignStatus, default=CampaignStatus.Draft, help_text="The current status of the campaign.")
owner = models.ForeignKey(Organization, blank=True, null=True, on_delete=models.PROTECT, related_name="campaigns", help_text="The user/organization which owns the campaign. Null if the campaign is created by us.")
# Content
headline = models.CharField(max_length=256, help_text="Headline text for the page.")
og_image = models.ImageField(blank=True, null=True, upload_to="campaign-media", help_text="The og:image (for Facebook and Twitter posts) for the campaign. At least 120px x 120px and must be square. If not set and the campaign has an owner, then the owner's og:image is used.")
splash_image = models.ImageField(blank=True, null=True, upload_to="campaign-media", help_text="The big image to display behind the main call to action. Should be about 1300px wide and at least 500px tall, but the image will be resized and cropped as necessary.")
image_credit = models.TextField(blank=True, null=True, help_text="Image credit, in the same format as the subhead.")
body_text = models.TextField(help_text="Body text, in the format given by body_format.")
body_format = EnumField(TextFormat, default=TextFormat.Markdown, help_text="The format of the body_text field.")
# Actions.
contrib_triggers = models.ManyToManyField('contrib.Trigger', blank=True, related_name="campaigns", help_text="Triggers to offer the user to take action on (or to show past actions).")
# Additional data.
extra = JSONField(blank=True, help_text="Additional information stored with this object.")
created = models.DateTimeField(auto_now_add=True, db_index=True)
updated = models.DateTimeField(auto_now=True, db_index=True)
# METHODS
def __str__(self):
return "Campaign(%d, %s)" % (self.id, repr(self.title))
def get_absolute_url(self):
# Because Campaigns appear on only one brand, this function
# must only be called for views for URLs that are definitely
# on the same brand as this Campaign displays on.
return "/a/%d/%s%s" % (self.id, (self.owner.slug + "-") if (self.owner and self.owner.is_real) else "", self.slug)
def get_short_url(self):
# Returns an absolute URL for this Campaign, which can be used
# in place of get_absolute_url when we're not sure the Campaign
# is for the same brand as the page we're looking at, or if this
# is for a public link.
return get_branding(self.brand)['ROOT_URL'] + ("/a/%d" % self.id)
#
def get_active_trigger(self):
# What trigger should the user take action on? It's the most recently created
# trigger that is open or executed (since users can still take action on
# executed triggers). During drafting, also allow the user editing the page to
# see a draft trigger.
from contrib.models import Trigger, TriggerStatus, TriggerCustomization
trigger_must_have_status = [TriggerStatus.Open, TriggerStatus.Executed]
if self.status == CampaignStatus.Draft:
trigger_must_have_status.append(TriggerStatus.Draft)
trigger = self.contrib_triggers\
.filter(status__in=trigger_must_have_status)\
.order_by('-created')\
.first()
# Show customized trigger options when the campaign has an owner and that owner
# has a TriggerCustomization for the trigger.
tcust = None
if trigger and self.owner:
tcust = TriggerCustomization.objects.filter(trigger=trigger, owner=self.owner).first()
return (trigger, tcust)
def contrib_triggers_with_tcust(self):
return [
(t, t.customizations.filter(owner=self.owner).first())
for t in self.contrib_triggers.all()
]
def is_sole_trigger(self, trigger):
return self.contrib_triggers.count() == 1 and self.contrib_triggers.filter(id=trigger.id).exists()
def get_contrib_totals(self):
# Get all of the displayable totals for this campaign.
ret = { }
from django.db.models import Sum, Count
from contrib.models import TriggerStatus, TriggerCustomization, Pledge, PledgeExecution, PledgeExecutionProblem
# What pledges should we show? For consistency across stats, filter out unconfirmed
# pledges and pledges made after the trigger was executed, which shouldn't be shown
# as a "pledge" per se --- those will be executed soon.
pledges_base = Pledge.objects.exclude(user=None).filter(made_after_trigger_execution=False)
if self.owner:
# When we're showing a campaign owned by an organization, then we
# only count pledges to this very campaign.
pledges = pledges_base.filter(via_campaign=self)
else:
# Otherwise, we can count any campaign but only to triggers in this
# campaign.
pledges = pledges_base.filter(trigger__in=self.contrib_triggers.all())
		# If no trigger customization has a fixed outcome, then we can show
		# pledge totals. (We can't show pledge totals when there is a fixed
# outcome.) In no case do we break this down by desired outcome.
# There are never TriggerCustomizations when this campaign has no
# owner.
tcusts = TriggerCustomization.objects.filter(owner=self.owner, trigger__campaigns=self)
if not tcusts.exclude(outcome=None).exists():
ret["pledged_total"] = pledges.aggregate(sum=Sum('amount'))["sum"] or 0
ret["pledged_user_count"] = pledges.values("user").distinct().aggregate(count=Count('user'))["count"] or 0
else:
ret["pledged_site_wide"] = pledges_base.filter(trigger__in=self.contrib_triggers.all()).aggregate(sum=Sum('amount'))["sum"] or 0
# If any trigger has been executed, then we can show executed totals.
# In all cases we can show the total amount of contributions across all triggers.
# Of course, this could be on all sides of an issue, so this isn't usually
# interesting.
ret["contrib_total"] = 0
ret["contrib_user_count"] = 0 # not distinct across triggers
# Assume all fixed-outcome triggers are about the same issue. Compute the totals
# across those triggers. Otherwise, we don't know whether outcome X in any trigger
# corresponds to the same real-world issue as outcome X in any other trigger.
ret["contrib_fixed_outcome_total"] = 0
# Report outcomes by trigger, with a breakdown by outcome, and sum across triggers.
from contrib.views import report_fetch_data
ret["by_trigger"] = []
for trigger in self.contrib_triggers.filter(status=TriggerStatus.Executed).order_by('-created'):
try:
# Get this trigger's totals.
agg = report_fetch_data(trigger, via_campaign=self if self.owner else None)
ret["by_trigger"].append({
"trigger": trigger,
"aggregates": agg
})
# We sum here and not in an aggregate SQL statement for two reasons:
# Triggers that haven't had all of their pledges executed should not
# reveal grossly incomplete information. And our templates assume that
# if contrib_total > 0, then there is by_trigger information. So we
# do these two parts together for consistency.
ret["contrib_total"] += agg["total"]["total"]
ret["contrib_user_count"] += agg["users"] # not distinct
# If this trigger has a TriggerCustomization with a fixed outcome,
# sum the total contributions for that outcome only.
tcust = TriggerCustomization.objects.filter(owner=self.owner, trigger=trigger).first()
if tcust and tcust.outcome is not None:
for outcome in agg["outcomes"]:
if outcome["outcome"] == tcust.outcome:
# No easy way to get the total number of unique users.
ret["contrib_fixed_outcome_total"] += outcome["total"]
except Http404:
# This is how report_fetch_data indicates that data
# is not available. That could be because we haven't
# yet executed enough pledges to report contribution
# totals.
continue
return ret
#####################################################################
#
# Notifications
#
# Alerts for users.
#
#####################################################################
class NotificationType(enum.Enum):
TriggerRecommendation = 1
class Notification(models.Model):
"""A notification that we want to show to a user."""
user = models.ForeignKey(User, db_index=True, on_delete=models.CASCADE, help_text="The user the notification is sent to.")
	notif_type = EnumField(NotificationType, help_text="The type of the notification.")
	source_content_type = models.ForeignKey(ContentType, help_text="The content type of the object generating the notification.")
	source_object_id = models.PositiveIntegerField(help_text="The primary key of the object generating the notification.")
source = GenericForeignKey('source_content_type', 'source_object_id')
created = models.DateTimeField(auto_now_add=True, db_index=True)
updated = models.DateTimeField(auto_now=True, db_index=True)
	dismissed_at = models.DateTimeField(blank=True, null=True, help_text="Whether and when the notification was dismissed by the user.")
mailed_at = models.DateTimeField(blank=True, null=True, help_text="Whether and when the notification was sent to the user by email.")
clicked_at = models.DateTimeField(blank=True, null=True, help_text="Whether and when the notification was clicked on by the user to see more information.")
extra = JSONField(blank=True, help_text="Additional information stored with this object.")
class Meta:
unique_together = [('user', 'notif_type', 'source_content_type', 'source_object_id')]
index_together = [('user', 'created')]
def __str__(self):
return ", ".join([self.created.isoformat(), str(self.user), self.notif_type.name, str(self.source)])
def dismiss(self):
self.dismissed_at = timezone.now()
self.save()
@staticmethod
def render(qs, for_client=True):
# Get JSON-able data so the client can render the user's notifications.
notifications = list(qs)
# Get a unique set of classes that manage these notifications.
classes = set(n.source_content_type.model_class() for n in notifications)
# Ask each class to render the notifications it is responsible for
# into one or more alerts.
alerts = sum([c.render_notifications(set(n for n in notifications if n.source_content_type.model_class() == c))
for c in classes], [])
for alert in alerts:
# Render the alert content.
alert["body_html"] = Template(alert["body_html"]).render(Context(alert["body_context"]))
alert["body_text"] = Template(alert["body_text"]).render(Context(alert["body_context"]))
# Add common properties derived from the notifications that underlie the alerts.
alert["date"] = max(n.created for n in alert['notifications']) # most recent notification
if for_client: alert["date"] = alert["date"].isoformat()
alert["ids"] = [n.id for n in alert['notifications']]
alert["new"] = any(n.dismissed_at is None for n in alert['notifications'])
if for_client: del alert["notifications"] # not JSON-serializable
# Sort the alerts.
alerts.sort(key = lambda a : a['date'], reverse=True)
# Return.
return alerts
|
[
"[email protected]"
] | |
b0e268051ad088f2df2f790ecafedb64cb8bf533
|
eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7
|
/google/cloud/oslogin/v1beta/oslogin-v1beta-py/google/cloud/oslogin_v1beta/types/__init__.py
|
88e7d276b11dd4afd082e065d3155f5738b6dc58
|
[
"Apache-2.0"
] |
permissive
|
Tryweirder/googleapis-gen
|
2e5daf46574c3af3d448f1177eaebe809100c346
|
45d8e9377379f9d1d4e166e80415a8c1737f284d
|
refs/heads/master
| 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,118 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .oslogin import (
DeletePosixAccountRequest,
DeleteSshPublicKeyRequest,
GetLoginProfileRequest,
GetSshPublicKeyRequest,
ImportSshPublicKeyRequest,
ImportSshPublicKeyResponse,
LoginProfile,
UpdateSshPublicKeyRequest,
)
__all__ = (
'DeletePosixAccountRequest',
'DeleteSshPublicKeyRequest',
'GetLoginProfileRequest',
'GetSshPublicKeyRequest',
'ImportSshPublicKeyRequest',
'ImportSshPublicKeyResponse',
'LoginProfile',
'UpdateSshPublicKeyRequest',
)
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
8517363ceefdfaab31040b419d1493f2f053b671
|
1c58aef845d5dc1398249d784450c3825a1a75a5
|
/LeetCode/Easy/Strings/383_ransome_note.py
|
38aeefec3e3d6f05551f4931a49b5b9643b88e90
|
[] |
no_license
|
AmitKulkarni23/Leet_HackerRank
|
b1c1d7e5915397fd971d777baf75bb0f6fd27c78
|
047b167311d2fb93a53998a20d73533a4cae2ab8
|
refs/heads/master
| 2021-06-01T20:24:40.659530 | 2020-02-06T22:10:14 | 2020-02-06T22:10:14 | 123,007,444 | 0 | 0 | null | 2018-07-12T18:42:40 | 2018-02-26T17:58:28 |
Python
|
UTF-8
|
Python
| false | false | 1,159 |
py
|
# Given an arbitrary ransom note string and another string containing letters from all the magazines,
# write a function that will return true if the ransom note can be constructed from the magazines ; otherwise, it will return false.
#
# Each letter in the magazine string can only be used once in your ransom note.
#
# Note:
# You may assume that both strings contain only lowercase letters.
#
# canConstruct("a", "b") -> false
# canConstruct("aa", "ab") -> false
# canConstruct("aa", "aab") -> true
def canConstruct(ransomNote, magazine):
"""
:type ransomNote: str
:type magazine: str
:rtype: bool
"""
mag = list(magazine)
for ch in ransomNote:
if ch in mag:
mag.remove(ch)
else:
return False
return True
def best_leetcode_sol(ransomNote, magazine):
"""
:type ransomNote: str
:type magazine: str
:rtype: bool
"""
for i in set(ransomNote):
if ransomNote.count(i) > magazine.count(i):
return False
return True
# Examples:
print(best_leetcode_sol("a", "b"))
print(best_leetcode_sol("aa", "ab"))
print(best_leetcode_sol("aa", "aab"))
|
[
"[email protected]"
] | |
05af7a6876005f0fdc7f380a78ae17d751910559
|
e47b0a17197fa2045d5b51e4385f85b0bdfe1e58
|
/venv/bin/pip
|
09e97c32100c9540940194241f7cd792d7933139
|
[] |
no_license
|
Waweru007/WEB
|
29c62a4b094f1d8adf3687f9f3dd77c33c0aaae9
|
9cd0bb6ee2c5301b854adc460b6a465d984e4cfb
|
refs/heads/master
| 2020-04-15T05:52:36.234218 | 2019-01-23T16:26:57 | 2019-01-23T16:26:57 | 164,440,120 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 398 |
#!/root/PycharmProjects/ForecastWeb/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
|
[
"[email protected]"
] | ||
8907112478d9caeabd453553b72d4e927ce3d745
|
31780af7a5558523def1aae5f25df3e0b084be9b
|
/Ex67.py
|
ffc1f975db4f03a3ed514384bdd5a1865506d385
|
[] |
no_license
|
sevilaybayatli/PYTHS19
|
1796615ff939f2e98ce657feeaa3efd47a2e66c6
|
ae0607e215a0d8205475d124c0362c39881e5eda
|
refs/heads/master
| 2020-07-23T16:12:17.922548 | 2020-03-23T22:03:00 | 2020-03-23T22:03:00 | 207,624,169 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,164 |
py
|
import sys
import math
import matplotlib.pyplot as plt
##def histogram(items):
# for n in items:
# output=''
# times=n
# while times>0:
# output+='@'
# times-=1
# print(output)
#histogram([9,2,4,5,3])
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Write a Python program to concatenate all elements in a list into a string and return it
#def concatenate(lisst):
# stringg=''
# for n in lisst:
# stringg+=n
#print(stringg)
#ll=input("enter the list of letters: ")
#lisst=ll.split(",")
#concatenate(lisst)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Write a Python program to print all even numbers from a given numbers list in the same order and stop the printing if any numbers that come after 237 in the sequence.
#numbers = [
# 386, 462, 47, 418, 907, 344, 236, 375, 823, 566, 597, 978, 328, 615, 953, 345,
# 399, 162, 758, 219, 918, 237, 412, 566, 826, 248, 866, 950, 626, 949, 687, 217,
# 815, 67, 104, 58, 512, 24, 892, 894, 767, 553, 81, 379, 843, 831, 445, 742, 717,
# 958,743, 527
# ]
#def printing(numbers):
# for n in numbers:
# if n==237:
# print(n)
# break;
# elif (n%2==0):
# print(n)
#numbers = [386, 462, 47, 418, 907, 344, 236, 375, 823, 566, 597, 978, 328, 615, 953, 345, 399, 162, 758, 219, 918, 237, 412, 566, 826, 248, 866, 950, 626, 949, 687, 217, 815, 67, 104, 58, 512, 24, 892, 894, 767, 553, 81, 379, 843, 831, 445, 742, 717, 958,743, 527]
#printing(numbers)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Write a Python program to print out a set containing all the colors from color_list_1 which are not present in color_list_2.
#def colorlist(tset):
#color_list_1 = set(["White", "Black", "Red"])
#color_list_2 = set(["Red", "Green"])
# for n in color_list_1:
# if n not in color_list_2:
# tset.append(n)
#tset=set()
#print(color_list_1.difference(color_list_2))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Write a Python program to compute the greatest common divisor (GCD) of two positive integers.
#def gcd(x, y):
# gcd = 1
# if x % y == 0:
# return y
# for k in range(int(y / 2), 0, -1):
# if x % k == 0 and y % k == 0:
# gcd = k
# break
# return gcd
#print(gcd(12, 17))
#print(gcd(4, 6))
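# A shorter sketch of the same computation using Euclid's algorithm (illustrative name only):
#     def gcd_euclid(x, y):
#         while y:
#             x, y = y, x % y
#         return x
#     print(gcd_euclid(12, 17))   # 1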
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Write a Python program to get the least common multiple (LCM) of two positive integers.
#def lcd(x,y):
# if x>y:
# z=x
# else:
# z=y
# while(True):
# if (z%x==0 and z%y==0):
# lcd=z
# return lcd
#z+=1
#print(lcd(4,6))
#print(lcd(15,17))
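# A shorter sketch using the identity lcm(x, y) = x * y // gcd(x, y)
# (gcd as in the previous exercise; the name is illustrative):
#     def lcm(x, y):
#         return x * y // gcd(x, y)
#     print(lcm(4, 6))   # 12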
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Write a Python program to sum of three given integers. However, if two values are equal sum will be zero
#x=int(input("enter a n1"))
#y=int(input("enter a n2"))
#z=int(input("enter a n3"))
#k=0
#if (x==y or x==z or y==z):
# k=x+y+z
# k=0
# print(k)
#else:
# print(x+y+z)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Write a Python program to sum of two given integers. However, if the sum is between 15 to 20 it will return 20.
#def summ(x,y):
# z=x+y
# if z in range (15,20):
# sum=20
# return sum
# else:
# return z
#x=int(input("enter a number: "))
#y=int(input("enter a number: "))
#print(summ(x,y))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Write a Python program that will return true if the two given integer values are equal or their sum or difference is 5
def values(x,y):
    # True if the two values are equal, or their sum is 5, or their (absolute) difference is 5
    return x==y or x+y==5 or abs(x-y)==5
x=int(input("enter a number: "))
y=int(input("enter a number: "))
print(values(x,y))
|
[
"[email protected]"
] | |
896ef150b0486cb58818cfdfc7767977826fb19d
|
435360144d233c85ea23e50e05ee6edbfb4e4dd2
|
/python网络数据采集/图像识别与文字处理/clean-image.py
|
6308ecf105a1f49e07a86cb81607bf69597f5bb4
|
[] |
no_license
|
liazylee/python
|
5d56e1a60b6472660c30a3e7548da873f5ee3ebd
|
f90b5d18ad8375b2b0c951fa68e7e0041b016cc6
|
refs/heads/master
| 2020-04-03T18:24:39.434080 | 2018-10-30T13:00:27 | 2018-10-30T13:00:27 | 155,482,955 | 1 | 0 | null | 2018-10-31T01:56:47 | 2018-10-31T01:56:47 | null |
UTF-8
|
Python
| false | false | 715 |
py
|
'''
Using the Pillow library we can build a threshold filter that strips the gradient
background colour and keeps only the text, making the image much clearer for OCR.
'''
from PIL import Image
import subprocess
def cleanFile(filePath, newFilePath):
image = Image.open(filePath)
#Set a threshold value for the image, and save
image = image.point(lambda x: 0 if x<143 else 255)
image.save(newFilePath)
    # Run tesseract in a subprocess; Tesseract's main weakness is handling gradient background colours
subprocess.call(["tesseract", newFilePath, "test"])
#Open and read the resulting data file
outputFile = open("test.txt", 'r')
print(outputFile.read())
outputFile.close()
cleanFile("text.png", "text_clean.png")
|
[
"[email protected]"
] | |
85952d0e2cac31f79d9c8daae2b41a3d0e24e218
|
194313096f9b7a520a3ce21a5778b4b49b384932
|
/src/idleobject.py
|
0ce1c6e0e416fd8563c7e49f71c2dc5997fb01c5
|
[] |
no_license
|
atareao/imagedownloader
|
4af9fe1f78c09000c844dcfebd4a8dfdd434876e
|
d5f6c8b5240d606626dead2bc00ad29d953f335d
|
refs/heads/master
| 2021-01-17T06:49:29.134980 | 2016-07-08T18:46:41 | 2016-07-08T18:46:41 | 51,201,959 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,193 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of PushBullet-Commons
#
# Copyright (C) 2014-2016
# Lorenzo Carbonell Cerezo <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import GObject
from gi.repository import GLib
class IdleObject(GObject.GObject):
"""
Override GObject.GObject to always emit signals in the main thread
by emmitting on an idle handler
"""
def __init__(self):
GObject.GObject.__init__(self)
def emit(self, *args):
GLib.idle_add(GObject.GObject.emit, self, *args)
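# Usage sketch (hypothetical, not part of the original module): because emit() defers to
# GLib.idle_add, a worker thread can emit and the signal handlers still run in the main loop.
#
#   class Downloader(IdleObject):
#       __gsignals__ = {'finished': (GObject.SignalFlags.RUN_LAST, None, (str,))}
#
#   downloader = Downloader()
#   downloader.connect('finished', lambda obj, url: print('downloaded', url))
#   downloader.emit('finished', 'http://example.com')   # safe to call from any thread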
|
[
"[email protected]"
] | |
39001bd1a26f2714673d019cbd303670f8a82b2f
|
f03bd5bd7873c5cc33b4ef5199f219539f3a340e
|
/CAAPR/CAAPR_AstroMagic/PTS/pts/do/core/plotprogress.py
|
30874beb519285c495bb670c954f6c5d3f02651c
|
[
"GPL-1.0-or-later",
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-philippe-de-muyter",
"MIT"
] |
permissive
|
Stargrazer82301/CAAPR
|
5f8a7033b16792f23abd5d07021b53b9228a5db4
|
62b2339beb2eb956565e1605d44d92f934361ad7
|
refs/heads/master
| 2022-08-29T02:53:33.658022 | 2022-08-05T19:06:46 | 2022-08-05T19:06:46 | 49,977,601 | 8 | 1 |
MIT
| 2022-08-05T19:06:47 | 2016-01-19T19:32:42 |
Python
|
UTF-8
|
Python
| false | false | 3,694 |
py
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.do.core.plotprogress Plot progress for the various phases of a SKIRT simulation.
#
# This script plots the progress as a function of time for certain phases of a SKIRT simulation, based on the log
# messages. A separate PDF plot is created for each of the following phases, if present in the simulation:
# - shooting photons for stellar emission ("prefix_progress_stellar_photons.pdf");
# - calculating dust emission spectra ("prefix_progress_dust_spectra.pdf");
# - shooting photons for dust emission ("prefix_progress_dust_photons.pdf");
#
# The dust self-absorption phase, if present, is ignored in the current implementation of the script.
#
# For multi-process (MPI) simulations with verbose logging (i.e. with a separate log file per process),
# the progress for all processes is displayed on the same plot.
#
# The script expects the complete output of a SKIRT simulation to be present (including log file etc.).
# If there are no arguments, the script processes all simulation output sets residing in the current directory.
# If the first argument contains a slash, the script processes all simulation output sets in the indicated directory.
# If the first argument does not contain a slash, the script processes just the simulation in the current directory
# with the indicated prefix.
#
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from pts.core.simulation.simulation import createsimulations
from pts.core.extract.progress import ProgressExtractor, ProgressTable
from pts.core.plot.progress import ProgressPlotter
from pts.core.tools import filesystem as fs
from pts.core.basics.configuration import ConfigurationDefinition, ConfigurationReader
# -----------------------------------------------------------------
# Create the configuration definition
definition = ConfigurationDefinition()
# Add flags
definition.add_flag("table", "save the extracted progress table")
# Get configuration
reader = ConfigurationReader("plotprogress")
config = reader.read(definition)
# -----------------------------------------------------------------
# Look for a file in the current working directory that contains extracted progress information
progress_table_path = fs.join(fs.cwd(), "progress.dat")
if fs.is_file(progress_table_path): table = ProgressTable.from_file(progress_table_path)
# If extracted progress information is not present, first perform the extraction
else:
# Create a SkirtSimulation object based on a log file present in the current working directory
simulation = createsimulations(single=True)
# Create a new ProgressExtractor instance
extractor = ProgressExtractor()
# Run the extractor and get the table
table = extractor.run(simulation)
# -----------------------------------------------------------------
if config.table and not fs.is_file(progress_table_path): table.saveto(progress_table_path)
# -----------------------------------------------------------------
# Determine the path to the plotting directory
plot_path = fs.join(fs.cwd())
# Create a ProgressPlotter instance
plotter = ProgressPlotter()
# Run the progress plotter
plotter.run(table, plot_path)
# -----------------------------------------------------------------
|
[
"[email protected]"
] | |
b6c95a0746610ebaa31ebac4b23e51316764c3eb
|
1c0158145cbc7afa9b969b739f7e3507b73276a4
|
/pyScript/custom_src/Flow.py
|
2684fab3671245ac61bd4ee8a12676c8be6c9ed4
|
[
"MIT"
] |
permissive
|
farukdemirbas/pyScript
|
cc3726d0de730234d4f36ba535532b9306e3c971
|
89139615f95c86178cfdb072945942de3be405b7
|
refs/heads/master
| 2023-08-03T12:26:58.328450 | 2020-06-04T09:26:04 | 2020-06-04T09:26:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 59,744 |
py
|
from PySide2.QtWidgets import QGraphicsView, QGraphicsScene, QListWidgetItem, QShortcut, QMenu, QGraphicsItem, QScrollBar, QUndoStack, QUndoCommand
from PySide2.QtGui import QPainter, QPainterPath, QPen, QColor, QBrush, QRadialGradient, QKeySequence, QTabletEvent, QImage, QGuiApplication
from PySide2.QtCore import Qt, QPointF, QPoint, QRectF, QSizeF
import math, json, inspect
from custom_src.GlobalAccess import GlobalStorage
from custom_src.FlowProxyWidget import FlowProxyWidget
from custom_src.Node import Node, SetVariable_Node, GetVariable_Node
from custom_src.NodeInstance import NodeInstance
from custom_src.PortInstance import PortInstance, PortInstanceGate
from custom_src.custom_nodes.SetVar_NodeInstance import SetVar_NodeInstance
from custom_src.custom_nodes.GetVar_NodeInstance import GetVar_NodeInstance
from custom_src.NodeChoiceWidget.NodeChoiceWidget import NodeChoiceWidget
from custom_src.DrawingObject import DrawingObject
from custom_src.FlowStylusModesWidget import FlowStylusModesWidget
from custom_src.FlowZoomWidget import FlowZoomWidget
from custom_src.RenderView import RenderView
class Flow(QGraphicsView):
def __init__(self, main_window, parent_script, config=None):
super(Flow, self).__init__()
# shortcuts
place_new_node_shortcut = QShortcut(QKeySequence('Shift+P'), self)
place_new_node_shortcut.activated.connect(self.place_new_node_by_shortcut)
move_selected_nodes_left_shortcut = QShortcut(QKeySequence('Shift+Left'), self)
move_selected_nodes_left_shortcut.activated.connect(self.move_selected_nodes_left)
move_selected_nodes_up_shortcut = QShortcut(QKeySequence('Shift+Up'), self)
move_selected_nodes_up_shortcut.activated.connect(self.move_selected_nodes_up)
move_selected_nodes_right_shortcut = QShortcut(QKeySequence('Shift+Right'), self)
move_selected_nodes_right_shortcut.activated.connect(self.move_selected_nodes_right)
move_selected_nodes_down_shortcut = QShortcut(QKeySequence('Shift+Down'), self)
move_selected_nodes_down_shortcut.activated.connect(self.move_selected_nodes_down)
select_all_shortcut = QShortcut(QKeySequence('Ctrl+A'), self)
select_all_shortcut.activated.connect(self.select_all)
copy_shortcut = QShortcut(QKeySequence.Copy, self)
copy_shortcut.activated.connect(self.copy)
cut_shortcut = QShortcut(QKeySequence.Cut, self)
cut_shortcut.activated.connect(self.cut)
paste_shortcut = QShortcut(QKeySequence.Paste, self)
paste_shortcut.activated.connect(self.paste)
# undo redo
self.undo_stack = QUndoStack(self)
self.undo_action = self.undo_stack.createUndoAction(self, 'undo')
self.undo_action.setShortcuts(QKeySequence.Undo)
self.redo_action = self.undo_stack.createRedoAction(self, 'redo')
self.redo_action.setShortcuts(QKeySequence.Redo)
undo_shortcut = QShortcut(QKeySequence.Undo, self)
undo_shortcut.activated.connect(self.undo_activated)
redo_shortcut = QShortcut(QKeySequence.Redo, self)
redo_shortcut.activated.connect(self.redo_activated)
# general attributes
self.parent_script = parent_script
self.all_node_instances: [NodeInstance] = []
self.all_node_instance_classes = main_window.all_node_instance_classes # reference!!!
self.all_nodes = main_window.all_nodes # reference!!!
#self.selected_node_instance: NodeInstance = None
self.selected_node_instances = []
self.dragging_node_instance_or_drawing = False
self.gate_selected: PortInstanceGate = None
self.dragging_connection = False
self.ignore_mouse_event = False
self.ignore_key_event = False
self.last_mouse_move_pos: QPointF = None
self.node_place_pos = QPointF()
self.left_mouse_pressed_in_flow = False
self.mouse_moved_while_pressed = False
self.mouse_press_pos: QPointF = None
self.moving_scene = False # with Pen
self.tablet_press_pos: QPointF = None
self.last_tablet_move_pos: QPointF = None
self.selection_rect: QRectF = None
self.auto_connection_gate = None # stores the gate that we may try to auto connect to newly placed NI
# self.design_style = 'dark std'
self.current_scale = 1
self.total_scale_div = 1
# create UI
scene = QGraphicsScene(self)
scene.setItemIndexMethod(QGraphicsScene.NoIndex)
scene.setSceneRect(0, 0, 10*self.width(), 10*self.height())
self.setScene(scene)
self.setCacheMode(QGraphicsView.CacheBackground)
self.setViewportUpdateMode(QGraphicsView.BoundingRectViewportUpdate)
self.setRenderHint(QPainter.Antialiasing)
self.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)
self.setAcceptDrops(True)
self.centerOn(QPointF(self.viewport().width()/2, self.viewport().height()/2))
self.node_choice_proxy = FlowProxyWidget(self)
self.node_choice_widget = NodeChoiceWidget(self, main_window.all_nodes) # , main_window.node_images)
self.node_choice_proxy.setWidget(self.node_choice_widget)
self.scene().addItem(self.node_choice_proxy)
self.node_choice_proxy.setZValue(1000)
self.hide_node_choice_widget()
# zoom widget
self.zoom_proxy = FlowProxyWidget(self)
self.zoom_proxy.setFlag(QGraphicsItem.ItemIgnoresTransformations, True)
self.zoom_widget = FlowZoomWidget(self)
self.zoom_proxy.setWidget(self.zoom_widget)
self.scene().addItem(self.zoom_proxy)
self.set_zoom_proxy_pos()
# ------------
# self.setHorizontalScrollBar(FlowScrollBar(self, Qt.Horizontal)) # to enable custom blocking
# self.setVerticalScrollBar(FlowScrollBar(self, Qt.Vertical))
# stylus stuff
self.stylus_mode = ''
self.current_drawing = None
self.drawing = None
self.drawings = []
self.selected_drawings = []
self.stylus_modes_proxy = FlowProxyWidget(self)
self.stylus_modes_proxy.setFlag(QGraphicsItem.ItemIgnoresTransformations, True)
self.stylus_modes_widget = FlowStylusModesWidget(self)
self.stylus_modes_proxy.setWidget(self.stylus_modes_widget)
self.scene().addItem(self.stylus_modes_proxy)
self.set_stylus_proxy_pos()
# ------------
if config:
node_instances = self.place_nodes_from_config(config['nodes'])
self.connect_nodes_from_config(node_instances, config['connections'])
if list(config.keys()).__contains__('drawings'):
                # the if is here just because drawings are a very new feature and not all project files have a drawings array yet
self.place_drawings_from_config(config['drawings'])
self.undo_stack.clear()
# def set_design_style(self, new_style):
# self.design_style = new_style
def design_style_changed(self):
self.viewport().update()
def contextMenuEvent(self, event):
QGraphicsView.contextMenuEvent(self, event)
# in the case of the menu already being shown by a widget under the mouse, the event is accepted here
if event.isAccepted():
return
for i in self.items(event.pos()):
if self.find_type_in_object(i, NodeInstance):
ni: NodeInstance = i
menu: QMenu = ni.get_context_menu()
menu.exec_(event.globalPos())
event.accept()
def undo_activated(self):
self.undo_stack.undo()
self.viewport().update()
def redo_activated(self):
self.undo_stack.redo()
self.viewport().update()
def mousePressEvent(self, event):
GlobalStorage.debug('mouse press event received, point:', event.pos())
# there might be a proxy widget meant to receive the event instead of the flow
QGraphicsView.mousePressEvent(self, event)
if self.ignore_mouse_event:
self.ignore_mouse_event = False
return
# GlobalStorage.debug('mouse press event in flow')
if event.button() == Qt.LeftButton:
if self.node_choice_proxy.isVisible():
self.hide_node_choice_widget()
if not self.itemAt(event.pos()):
GlobalStorage.debug('clearing selection')
self.clear_selection()
else:
for i in self.items(event.pos()):
if event.modifiers() == Qt.CTRL: # CTRL
if self.find_type_in_object(i, NodeInstance):
if self.selected_node_instances.__contains__(i):
self.selected_node_instances.remove(i)
else:
self.selected_node_instances.append(i)
self.viewport().update()
break
elif self.find_type_in_object(i, DrawingObject):
if self.selected_drawings.__contains__(i):
self.selected_drawings.remove(i)
else:
self.selected_drawings.append(i)
self.viewport().update()
break
else: # NOT CTRL
if self.find_type_in_object(i, PortInstanceGate):
self.gate_selected = i
self.dragging_connection = True
break
elif self.find_type_in_object(i, NodeInstance):
if i not in self.selected_node_instances:
self.clear_selection()
self.selected_node_instances = [i]
self.viewport().update()
self.dragging_node_instance_or_drawing = True
break
elif self.find_type_in_object(i, DrawingObject):
if i not in self.selected_drawings:
self.clear_selection()
self.selected_drawings = [i]
self.viewport().update()
self.dragging_node_instance_or_drawing = True
break
self.left_mouse_pressed_in_flow = True
self.mouse_press_pos = self.mapToScene(event.pos())
def mouseMoveEvent(self, event):
QGraphicsView.mouseMoveEvent(self, event)
if self.dragging_node_instance_or_drawing:
self.move_selected_components_from_drag(event.pos())
self.viewport().repaint()
else:
if self.mouse_moved_while_pressed and not self.dragging_connection: # selecting multiple nodes
self.selection_rect = QRectF(self.mouse_press_pos, self.mapToScene(event.pos()))
self.viewport().repaint()
self.last_mouse_move_pos = self.mapToScene(event.pos())
if self.dragging_connection:
self.viewport().repaint()
if self.left_mouse_pressed_in_flow and event.buttons() == Qt.LeftButton:
self.mouse_moved_while_pressed = True
def mouseReleaseEvent(self, event):
# GlobalStorage.debug('ignore mouse event is:', self.ignore_mouse_event)
# there might be a proxy widget meant to receive the event instead of the flow
QGraphicsView.mouseReleaseEvent(self, event)
if event.button() == Qt.RightButton:
return
if self.ignore_mouse_event or not self.left_mouse_pressed_in_flow:
self.ignore_mouse_event = False
return
# GlobalStorage.debug('mouse release in flow')
if self.dragging_node_instance_or_drawing and self.mouse_moved_while_pressed:
for i in self.get_selected_components(): # undo moving cuz it will finally be performed in MoveCommand
i.setPos(i.pos()-(self.last_mouse_move_pos-self.mouse_press_pos))
self.undo_stack.push(MoveComponents_Command(self,
self.get_abs_indices_of_components(self.get_selected_components()),
self.last_mouse_move_pos - self.mouse_press_pos))
# connection dropped over specific gate
if self.dragging_connection and self.itemAt(event.pos()) and type(self.itemAt(event.pos())) == PortInstanceGate:
self.connect_gates__cmd(self.gate_selected, self.itemAt(event.pos()))
# connection dropped over NodeInstance
elif self.dragging_connection and self.find_type_in_objects(self.items(event.pos()), NodeInstance):
# find node instance
ni_under_drop = None
for item in self.items(event.pos()):
if self.find_type_in_object(item, NodeInstance):
ni_under_drop = item
break
# connect
self.try_conn_gate_and_ni(self.gate_selected, ni_under_drop)
# connection dropped somewhere else
elif self.dragging_connection:
self.auto_connection_gate = self.gate_selected
self.show_node_choice_widget(event.pos())
if self.mouse_moved_while_pressed:
if not self.dragging_connection and not self.dragging_node_instance_or_drawing:
self.select_area(QRectF(self.mouse_press_pos, self.mapToScene(event.pos())))
else:
if len(self.selected_node_instances) == 0 and len(self.selected_drawings) == 0:
self.node_choice_widget.reset_list()
self.show_node_choice_widget(event.pos())
self.left_mouse_pressed_in_flow = False
self.dragging_node_instance_or_drawing = False
self.dragging_connection = False
self.gate_selected = None
self.mouse_moved_while_pressed = False
self.selection_rect = None
self.viewport().repaint()
def keyPressEvent(self, event):
# GlobalStorage.debug('key press event in flow')
QGraphicsView.keyPressEvent(self, event)
if self.ignore_key_event:
self.ignore_key_event = False
return
if event.key() == Qt.Key_Escape and (len(self.selected_node_instances) > 0 or len(self.drawings) > 0):
self.clear_selection()
self.clearFocus()
self.setFocus()
return True
elif event.key() == Qt.Key_Delete:
if len(self.selected_node_instances) > 0 or len(self.selected_drawings) > 0:
self.remove_selected_components()
def wheelEvent(self, event):
if event.modifiers() == Qt.CTRL and event.angleDelta().x() == 0:
self.zoom(event.pos(), self.mapToScene(event.pos()), event.angleDelta().y())
event.accept()
return True
QGraphicsView.wheelEvent(self, event)
def tabletEvent(self, event): # TODO: I think, I actually should use self.last_mouse_move_pos etc. instead of these custom tablet position variables (see undomove etc.)
if event.type() == QTabletEvent.TabletPress:
self.tablet_press_pos = event.pos()
if event.buttons() == Qt.LeftButton and event.pointerType() == QTabletEvent.Eraser:
# GlobalStorage.debug('eraser!')
pass
elif event.buttons() == Qt.LeftButton:
# GlobalStorage.debug('left button press!')
if self.stylus_mode == 'comment':
new_drawing = self.create_and_place_drawing__cmd(self.mapToScene(self.tablet_press_pos))
self.current_drawing = new_drawing
self.drawing = True
elif event.buttons() == Qt.RightButton:
# GlobalStorage.debug('right button press!')
self.moving_scene = True
self.last_tablet_move_pos = self.mapToScene(event.pos())
elif event.type() == QTabletEvent.TabletMove:
if event.pointerType() == QTabletEvent.Eraser:
if self.stylus_mode == 'comment':
for i in self.items(event.pos()):
if self.find_type_in_object(i, DrawingObject):
self.remove_drawing(i)
break
elif self.stylus_mode == 'comment' and self.drawing:
# GlobalStorage.debug('adding new point to paint object')
self.current_drawing.try_to_append_point(self.mapToScene(event.pos()) - self.current_drawing.pos())
self.current_drawing.stroke_weights.append(event.pressure())
self.current_drawing.update()
self.viewport().update()
elif self.stylus_mode == 'comment' and self.moving_scene and self.last_tablet_move_pos:
x_diff = self.mapToScene(event.pos()).x()-self.last_tablet_move_pos.x()
y_diff = self.mapToScene(event.pos()).y()-self.last_tablet_move_pos.y()
current_center_x = self.mapToScene(self.viewport().pos()).x() + (self.viewport().width() * self.total_scale_div) / 2
current_center_y = self.mapToScene(self.viewport().pos()).y() + (self.viewport().height() * self.total_scale_div) / 2
new_center = QPoint(current_center_x - x_diff,
current_center_y - y_diff)
self.centerOn(new_center)
self.last_tablet_move_pos = self.mapToScene(event.pos())
elif event.type() == QTabletEvent.TabletRelease:
            # GlobalStorage.debug('tablet release!')
if self.stylus_mode == 'comment' and self.drawing:
GlobalStorage.debug('paint object finished!')
self.current_drawing.finished()
self.current_drawing = None
self.drawing = False
self.dragging_node_instance_or_drawing = False # may be true
event.accept()
if not self.stylus_mode == 'edit':
self.ignore_mouse_event = True # accepting the event is not enough even though the docs say it would be...
return True
def dragEnterEvent(self, event):
#GlobalStorage.debug('drag entered!')
#GlobalStorage.debug(event.mimeData().formats())
if event.mimeData().hasFormat('text/plain'):
event.acceptProposedAction()
def dragMoveEvent(self, event):
if event.mimeData().hasFormat('text/plain'):
event.acceptProposedAction()
def dropEvent(self, event):
text = event.mimeData().text()
item: QListWidgetItem = event.mimeData()
GlobalStorage.debug('received in Flow:', text)
j_obj = json.loads(text)
# if j_obj['type'] == 'variable':
# GlobalStorage.debug('placing variable!')
# var = self.parent_function.parent_scope_object.vy_variables[int(j_obj['index'])]
# # give the node choice widget only the variable now
# self.node_choice_widget.update_list([], [], [], [var])
# self.show_node_choice_widget(event.pos())
def drawBackground(self, painter, rect):
painter.fillRect(rect.intersected(self.sceneRect()), QColor('#333333'))
painter.setPen(Qt.NoPen)
painter.drawRect(self.sceneRect())
self.set_stylus_proxy_pos() # has to be here to prevent lagging
self.set_zoom_proxy_pos()
def drawForeground(self, painter, rect):
pen = QPen()
if GlobalStorage.storage['design style'] == 'dark std':
# pen.setColor('#BCBBF2')
pen.setWidth(5)
pen.setCapStyle(Qt.RoundCap)
elif GlobalStorage.storage['design style'] == 'dark tron':
# pen.setColor('#452666')
pen.setWidth(4)
pen.setCapStyle(Qt.RoundCap)
for ni in self.all_node_instances:
for o in ni.outputs:
for cpi in o.connected_port_instances:
if o.type_ == 'data':
pen.setStyle(Qt.DashLine)
elif o.type_ == 'exec':
pen.setStyle(Qt.SolidLine)
path = self.connection_path(ni.pos()+o.gate.pos(), cpi.parent_node_instance.pos()+cpi.gate.pos())
w = path.boundingRect().width()
h = path.boundingRect().height()
gradient = QRadialGradient(path.boundingRect().center(),
self.pythagoras(w, h)/2)
r = 0
g = 0
b = 0
if GlobalStorage.storage['design style'] == 'dark std':
r = 188
g = 187
b = 242
elif GlobalStorage.storage['design style'] == 'dark tron':
r = 0
g = 120
b = 180
gradient.setColorAt(0.0, QColor(r, g, b, 255))
gradient.setColorAt(0.75, QColor(r, g, b, 200))
gradient.setColorAt(0.95, QColor(r, g, b, 0))
gradient.setColorAt(1.0, QColor(r, g, b, 0))
pen.setBrush(gradient)
painter.setPen(pen)
painter.drawPath(path)
if self.dragging_connection:
pen = QPen('#101520')
pen.setWidth(3)
pen.setStyle(Qt.DotLine)
painter.setPen(pen)
gate_pos = self.gate_selected.parent_node_instance.pos()+self.gate_selected.pos()
if self.gate_selected.parent_port_instance.direction == 'output':
painter.drawPath(
self.connection_path(gate_pos,
self.last_mouse_move_pos)
)
else:
painter.drawPath(
self.connection_path(self.last_mouse_move_pos, gate_pos)
)
if self.selection_rect:
brush = QBrush(QColor(188, 187, 242, 100))
painter.setBrush(brush)
painter.setPen(Qt.NoPen)
painter.drawRect(self.selection_rect)
for ni in self.selected_node_instances:
pen = QPen(QColor('#245d75'))
pen.setWidth(3)
painter.setPen(pen)
painter.setBrush(Qt.NoBrush)
size_factor = 1.2
x = ni.pos().x() - ni.width/2*size_factor
y = ni.pos().y() - ni.height/2*size_factor
w = ni.width * size_factor
h = ni.height * size_factor
painter.drawRoundedRect(x, y, w, h, 10, 10)
for p_o in self.selected_drawings:
pen = QPen(QColor('#a3cc3b'))
pen.setWidth(2)
painter.setPen(pen)
painter.setBrush(Qt.NoBrush)
size_factor = 1.05
x = p_o.pos().x() - p_o.width/2*size_factor
y = p_o.pos().y() - p_o.height/2*size_factor
w = p_o.width * size_factor
h = p_o.height * size_factor
painter.drawRoundedRect(x, y, w, h, 6, 6)
painter.drawEllipse(p_o.pos().x(), p_o.pos().y(), 2, 2)
def get_viewport_img(self):
self.hide_proxies()
img = QImage(self.viewport().rect().width(), self.viewport().height(), QImage.Format_ARGB32)
img.fill(Qt.transparent)
painter = QPainter(img)
painter.setRenderHint(QPainter.Antialiasing)
self.render(painter, self.viewport().rect(), self.viewport().rect())
self.show_proxies()
return img
def get_whole_scene_img(self):
self.hide_proxies()
img = QImage(self.sceneRect().width()/self.total_scale_div, self.sceneRect().height()/self.total_scale_div, QImage.Format_RGB32)
img.fill(Qt.transparent)
painter = QPainter(img)
painter.setRenderHint(QPainter.Antialiasing)
rect = QRectF()
rect.setLeft(-self.viewport().pos().x())
rect.setTop(-self.viewport().pos().y())
rect.setWidth(img.rect().width())
rect.setHeight(img.rect().height())
        # rect is right... but it only renders from the viewport's position down- and rightwards, not from the top left (0, 0) ...
self.render(painter, rect, rect.toRect())
self.show_proxies()
return img
# PROXY POSITIONS
def set_zoom_proxy_pos(self):
self.zoom_proxy.setPos(self.mapToScene(self.viewport().width() - self.zoom_widget.width(), 0))
def set_stylus_proxy_pos(self):
self.stylus_modes_proxy.setPos(self.mapToScene(self.viewport().width() - self.stylus_modes_widget.width() - self.zoom_widget.width(), 0))
def hide_proxies(self):
self.stylus_modes_proxy.hide()
self.zoom_proxy.hide()
def show_proxies(self):
self.stylus_modes_proxy.show()
self.zoom_proxy.show()
# NODE CHOICE WIDGET
def show_node_choice_widget(self, pos): # just opens the choice dialog
# calculating position
self.node_place_pos = self.mapToScene(pos)
dialog_pos = QPoint(pos.x()+1, pos.y()+1)
# ensure that the node_choice_widget stays in the viewport
if dialog_pos.x()+self.node_choice_widget.width()/self.total_scale_div > self.viewport().width():
dialog_pos.setX(dialog_pos.x() - (dialog_pos.x() + self.node_choice_widget.width() / self.total_scale_div - self.viewport().width()))
if dialog_pos.y()+self.node_choice_widget.height()/self.total_scale_div > self.viewport().height():
dialog_pos.setY(dialog_pos.y() - (dialog_pos.y() + self.node_choice_widget.height() / self.total_scale_div - self.viewport().height()))
dialog_pos = self.mapToScene(dialog_pos)
# open nodes dialog
# the dialog emits 'node_chosen' which is connected to self.place_node,
# so this all continues at self.place_node below
self.node_choice_widget.update_view()
self.node_choice_proxy.setPos(dialog_pos)
self.node_choice_proxy.show()
self.node_choice_widget.refocus()
def hide_node_choice_widget(self):
self.node_choice_proxy.hide()
self.node_choice_widget.clearFocus()
self.auto_connection_gate = None
def find_type_in_objects(self, objects, base):
for o in objects:
found = self.find_type_in_object(o, base)
if found:
return True
return False
def find_type_in_object(self, obj, base):
return base in inspect.getmro(type(obj))
# ZOOM
def zoom_in(self, amount):
local_viewport_center = QPoint(self.viewport().width()/2, self.viewport().height()/2)
self.zoom(local_viewport_center, self.mapToScene(local_viewport_center), amount)
def zoom_out(self, amount):
local_viewport_center = QPoint(self.viewport().width()/2, self.viewport().height()/2)
self.zoom(local_viewport_center, self.mapToScene(local_viewport_center), -amount)
def zoom(self, p_abs, p_mapped, angle):
by = 0
velocity = 2*(1/self.current_scale)+0.5
if velocity > 3:
velocity = 3
direction = ''
if angle > 0:
by = 1 + (angle / 360 * 0.1 * velocity)
direction = 'in'
elif angle < 0:
by = 1 - (-angle / 360 * 0.1 * velocity)
direction = 'out'
else:
by = 1
scene_rect_width = self.mapFromScene(self.sceneRect()).boundingRect().width()
scene_rect_height = self.mapFromScene(self.sceneRect()).boundingRect().height()
if direction == 'in':
if self.current_scale*by < 3:
self.scale(by, by)
self.current_scale *= by
elif direction == 'out':
if scene_rect_width*by >= self.viewport().size().width() and scene_rect_height*by >= self.viewport().size().height():
self.scale(by, by)
self.current_scale *= by
w = self.viewport().width()
h = self.viewport().height()
wf = self.mapToScene(QPoint(w-1, 0)).x() - self.mapToScene(QPoint(0, 0)).x()
hf = self.mapToScene(QPoint(0, h-1)).y() - self.mapToScene(QPoint(0, 0)).y()
lf = p_mapped.x() - p_abs.x() * wf / w
tf = p_mapped.y() - p_abs.y() * hf / h
self.ensureVisible(lf, tf, wf, hf, 0, 0)
target_rect = QRectF(QPointF(lf, tf),
QSizeF(wf, hf))
self.total_scale_div = target_rect.width() / self.viewport().width()
self.ensureVisible(target_rect, 0, 0)
# NODE PLACING: -----
def place_new_node_by_shortcut(self): # gets called by shortcut Shift+P
point_in_viewport = None
if len(self.selected_node_instances) > 0:
x = self.selected_node_instances[-1].pos().x() + 150
y = self.selected_node_instances[-1].pos().y()
self.node_place_pos = QPointF(x, y)
point_in_viewport = self.mapFromScene(QPoint(x, y))
else:
viewport_x = self.viewport().width()/2
viewport_y = self.viewport().height()/2
point_in_viewport = QPointF(viewport_x, viewport_y).toPoint()
self.node_place_pos = self.mapToScene(point_in_viewport)
self.node_choice_widget.reset_list()
self.show_node_choice_widget(point_in_viewport)
def place_nodes_from_config(self, nodes_config, offset_pos: QPoint = QPoint(0, 0)):
new_node_instances = []
for n_c in nodes_config:
# find parent node by title, type, package name and description as identifiers
parent_node_title = n_c['parent node title']
# parent_node_type = n['parent node type']
parent_node_package_name = n_c['parent node package']
# parent_node_description = n['parent node description']
parent_node = None
for pn in self.all_nodes:
pn: Node = pn
if pn.title == parent_node_title and \
pn.package == parent_node_package_name:
parent_node = pn
break
new_node_instances.append(self.place_node(parent_node,
QPoint(n_c['position x'], n_c['position y']) + offset_pos,
n_c))
self.selected_node_instances = new_node_instances
return new_node_instances
def place_node__cmd(self, node: Node, config=None):
# IMPORTANT EXPLANATION:
# Placing and removing NIs is a kind of special action because it edits the Flow object itself. To enable undo/
# redo actions, this has to happen through a command. But creating NIs can happen through different commands
        # (placing redo() and deleting undo()), so I decided to do all this still in the Flow (place_node())
        # So this function call here ultimately results in self.place_node(node, pos, config)
place_command = PlaceNodeInstance_Command(self, node,
self.node_place_pos.toPoint() if
type(self.node_place_pos) == QPointF else self.node_place_pos,
config)
self.undo_stack.push(place_command)
# GlobalStorage.debug('finished placing node instance')
return self.get_all_components()[place_command.new_node_instance_component_index]
def place_node(self, node: Node, pos, config=None): # called from commands
GlobalStorage.debug(type(node))
node_instance = self.get_node_instance_class_from_node(node)(node, self, config)
self.scene().addItem(node_instance)
        node_instance.setPos(pos)  # mapping is already done here because sometimes (copy/paste etc.) it shouldn't be mapped
node_instance.add_content_to_scene_and_compute_shape()
self.all_node_instances.append(node_instance)
self.selected_node_instances = [node_instance]
if self.auto_connection_gate:
self.try_conn_gate_and_ni(self.auto_connection_gate, node_instance)
self.viewport().update()
return node_instance
def get_node_instance_class_from_node(self, node):
return self.all_node_instance_classes[node]
def get_custom_input_widget_classes(self):
return self.parent_script.main_window.custom_node_input_widget_classes
def connect_nodes_from_config(self, node_instances, connections_config):
for c in connections_config:
c_parent_node_instance_index = c['parent node instance index']
c_output_port_index = c['output port index']
c_connected_node_instance = c['connected node instance']
c_connected_input_port_index = c['connected input port index']
if c_connected_node_instance is not None: # which can be the case when pasting
parent_node_instance = node_instances[c_parent_node_instance_index]
connected_node_instance = node_instances[c_connected_node_instance]
self.connect_gates(parent_node_instance.outputs[c_output_port_index].gate,
connected_node_instance.inputs[c_connected_input_port_index].gate)
def place_drawings_from_config(self, drawings, offset_pos=QPoint(0, 0)):
new_drawings = []
for d in drawings:
x = d['pos x']
y = d['pos y']
new_drawing = self.create_and_place_drawing(QPoint(x, y)+offset_pos, d['points'])
new_drawings.append(new_drawing)
return new_drawings
def create_and_place_drawing__cmd(self, pos, config=None):
# IMPORTANT EXPLANATION: see place_node__cmd() -- same thing here
place_command = PlaceDrawingObject_Command(self, pos, config)
self.undo_stack.push(place_command)
return self.get_all_components()[place_command.drawing_obj_component_index]
def create_and_place_drawing(self, pos, config=None):
new_drawing = DrawingObject(config)
self.scene().addItem(new_drawing)
new_drawing.setPos(pos)
self.drawings.append(new_drawing)
return new_drawing
def place_existing_drawing(self, drawing_obj):
self.scene().addItem(drawing_obj)
self.drawings.append(drawing_obj)
return drawing_obj
def get_selected_components(self):
return self.selected_node_instances+self.selected_drawings
def get_all_components(self):
return self.all_node_instances+self.drawings
def inset_component(self, index, component):
if self.find_type_in_object(component, NodeInstance):
self.all_node_instances.insert(index, component)
elif self.find_type_in_object(component, DrawingObject):
self.drawings.insert(index-len(self.all_node_instances), component)
def get_abs_indices_of_components(self, components):
all_components = self.get_all_components()
selected_components_indices = [all_components.index(e) for e in components]
return selected_components_indices
def move_selected_copmonents__cmd(self, x, y):
new_rel_pos = QPointF(x, y)
# if one node instance would leave the scene (f.ex. pos.x < 0), stop
left = False
for ni in self.get_selected_components():
new_pos = ni.pos() + new_rel_pos
if new_pos.x() - ni.width / 2 < 0 or \
new_pos.x() + ni.width / 2 > self.scene().width() or \
new_pos.y() - ni.height / 2 < 0 or \
new_pos.y() + ni.height / 2 > self.scene().height():
left = True
break
if not left:
self.undo_stack.push(MoveComponents_Command(self, self.get_abs_indices_of_components(self.get_selected_components()), new_rel_pos))
# for e in self.get_selected_elements():
# e.setPos(e.pos() + new_rel_pos)
self.viewport().repaint()
def move_selected_nodes_left(self):
self.move_selected_copmonents__cmd(-40, 0)
def move_selected_nodes_up(self):
self.move_selected_copmonents__cmd(0, -40)
def move_selected_nodes_right(self):
self.move_selected_copmonents__cmd(+40, 0)
def move_selected_nodes_down(self):
self.move_selected_copmonents__cmd(0, +40)
def move_selected_components_from_drag(self, event_pos):
# moving selected nodes
mouse_distance_x = self.mapToScene(event_pos).x() - self.last_mouse_move_pos.x()
mouse_distance_y = self.mapToScene(event_pos).y() - self.last_mouse_move_pos.y()
# ni = self.selected_node_instance
for ni in self.selected_node_instances:
ni.setPos(QPointF(ni.pos().x() + mouse_distance_x, ni.pos().y() + mouse_distance_y))
for p_o in self.selected_drawings:
p_o.setPos(QPointF(p_o.pos().x() + mouse_distance_x, p_o.pos().y() + mouse_distance_y))
def select_all(self):
self.selected_node_instances = self.all_node_instances.copy()
self.selected_drawings = self.drawings.copy()
self.viewport().repaint()
def copy(self): # called from shortcut ctrl+c
data = {'nodes': self.get_node_instances_json_data(self.selected_node_instances),
'connections': self.get_connections_json_data(self.selected_node_instances),
'drawings': self.get_drawings_json_data(self.selected_drawings)}
QGuiApplication.clipboard().setText(json.dumps(data))
def cut(self): # called from shortcut ctrl+x
data = {'nodes': self.get_node_instances_json_data(self.selected_node_instances),
'connections': self.get_connections_json_data(self.selected_node_instances),
'drawings': self.get_drawings_json_data(self.selected_drawings)}
QGuiApplication.clipboard().setText(json.dumps(data))
self.remove_selected_components()
def paste(self):
data = {}
try:
data = json.loads(QGuiApplication.clipboard().text())
except Exception as e:
return
self.clear_selection()
# calculate offset
positions = []
for d in data['drawings']:
positions.append({'x': d['pos x'],
'y': d['pos y']})
for n in data['nodes']:
positions.append({'x': n['position x'],
'y': n['position y']})
offset_for_middle_pos = QPointF(0, 0)
if len(positions) > 0:
rect = QRectF(positions[0]['x'], positions[0]['y'], 0, 0)
for p in positions:
x = p['x']
y = p['y']
if x < rect.left():
rect.setLeft(x)
if x > rect.right():
rect.setRight(x)
if y < rect.top():
rect.setTop(y)
if y > rect.bottom():
rect.setBottom(y)
offset_for_middle_pos = self.last_mouse_move_pos - rect.center()
self.undo_stack.push(Paste_Command(self, data, offset_for_middle_pos))
def remove_selected_components(self):
self.undo_stack.push(RemoveComponents_Command(self, self.get_abs_indices_of_components(self.get_selected_components())))
self.viewport().update()
def remove_node_instance_triggered(self, node_instance): # called from context menu from NodeInstance
if node_instance in self.selected_node_instances:
self.remove_selected_components()
else:
self.remove_node_instance(node_instance)
def remove_component(self, e):
if self.find_type_in_object(e, NodeInstance):
self.remove_node_instance(e)
elif self.find_type_in_object(e, DrawingObject):
self.remove_drawing(e)
def remove_node_instance(self, ni):
ni.about_to_remove_from_flow()
ni.del_and_remove_content_from_scene() # removes all connections too
self.scene().removeItem(ni)
GlobalStorage.debug('calling ni removed')
self.all_node_instances.remove(ni)
if self.selected_node_instances.__contains__(ni):
self.selected_node_instances.remove(ni)
def remove_drawing(self, drawing):
self.scene().removeItem(drawing)
self.drawings.remove(drawing)
if self.selected_drawings.__contains__(drawing):
self.selected_drawings.remove(drawing)
def pythagoras(self, a, b):
return math.sqrt(a**2 + b**2)
# NODE SELECTION: ----
def select_area(self, rect: QRectF):
# GlobalStorage.debug('selecting area')
node_instances_in_area = []
for n in self.all_node_instances:
if rect.contains(n.pos()):
node_instances_in_area.append(n)
paint_objects_in_area = []
for p_o in self.drawings:
if rect.contains(p_o.pos()):
paint_objects_in_area.append(p_o)
# GlobalStorage.debug(node_instances_in_area)
self.selected_node_instances = node_instances_in_area
self.selected_drawings = paint_objects_in_area
def clear_selection(self):
self.selected_node_instances.clear()
self.selected_drawings.clear()
# CONNECTIONS: ----
def connect_gates__cmd(self, parent_gate: PortInstanceGate, child_gate: PortInstanceGate):
self.undo_stack.push(ConnectGates_Command(self, parent_gate, child_gate))
def connect_gates(self, parent_gate: PortInstanceGate, child_gate: PortInstanceGate):
parent_port_instance: PortInstance = parent_gate.parent_port_instance
child_port_instance: PortInstance = child_gate.parent_port_instance
# if they, their directions and their parent node instances are not equal and if their types are equal
if parent_port_instance.direction != child_port_instance.direction and \
parent_port_instance.parent_node_instance != child_port_instance.parent_node_instance and \
parent_port_instance.type_ == child_port_instance.type_:
try: # remove connection if port instances are already connected
index = parent_port_instance.connected_port_instances.index(child_port_instance)
parent_port_instance.connected_port_instances.remove(child_port_instance)
parent_port_instance.disconnected()
child_port_instance.connected_port_instances.remove(parent_port_instance)
child_port_instance.disconnected()
except ValueError: # connect port instances
# remove all connections from parent port instance if it's a data input
if parent_port_instance.direction == 'input' and parent_port_instance.type_ == 'data':
for cpi in parent_port_instance.connected_port_instances:
self.connect_gates__cmd(parent_gate, cpi.gate) # actually disconnects the gates
                # remove all connections from child port instance if it's a data input
if child_port_instance.direction == 'input' and child_port_instance.type_ == 'data':
for cpi in child_port_instance.connected_port_instances:
self.connect_gates__cmd(child_gate, cpi.gate) # actually disconnects the gates
parent_port_instance.connected_port_instances.append(child_port_instance)
child_port_instance.connected_port_instances.append(parent_port_instance)
parent_port_instance.connected()
child_port_instance.connected()
self.viewport().repaint()
def try_conn_gate_and_ni(self, parent_gate: PortInstanceGate, child_ni: NodeInstance):
parent_port_instance: PortInstance = parent_gate.parent_port_instance
if parent_port_instance.direction == 'output':
for inp in child_ni.inputs:
if parent_port_instance.type_ == inp.type_:
self.connect_gates__cmd(parent_gate, inp.gate)
return
elif parent_port_instance.direction == 'input':
for out in child_ni.outputs:
if parent_port_instance.type_ == out.type_:
self.connect_gates__cmd(parent_gate, out.gate)
return
@staticmethod
def connection_path(p1: QPointF, p2: QPointF):
path = QPainterPath()
path.moveTo(p1)
distance_x = abs(p1.x()) - abs(p2.x())
distance_y = abs(p1.y()) - abs(p2.y())
if ((p1.x() < p2.x() - 30) or math.sqrt( (distance_x**2) + (distance_y**2) ) < 100) and (p1.x() < p2.x()):
path.cubicTo( p1.x() + (( p2.x() - p1.x() )/2), p1.y(),
p1.x() + (( p2.x() - p1.x() )/2), p2.y(),
p2.x(), p2.y())
elif p2.x() < p1.x() - 100 and abs(distance_x)/2 > abs(distance_y):
path.cubicTo( p1.x() + 100 + (p1.x() - p2.x())/10, p1.y(),
p1.x() + 100 + (p1.x() - p2.x())/10, p1.y() - (distance_y/2),
p1.x() - (distance_x/2), p1.y() - (distance_y/2))
path.cubicTo( p2.x() - 100 - (p1.x() - p2.x())/10, p2.y() + (distance_y/2),
p2.x() - 100 - (p1.x() - p2.x())/10, p2.y(),
p2.x(), p2.y())
else:
path.cubicTo( p1.x() + 100 + (p1.x() - p2.x())/3, p1.y(),
p2.x() - 100 - (p1.x() - p2.x())/3, p2.y(),
p2.x(), p2.y())
return path
# GET JSON DATA
def get_json_data(self):
flow_dict = {}
flow_dict['nodes'] = self.get_node_instances_json_data(self.all_node_instances)
flow_dict['connections'] = self.get_connections_json_data(self.all_node_instances)
flow_dict['drawings'] = self.get_drawings_json_data(self.drawings)
return flow_dict
def get_node_instances_json_data(self, node_instances):
# NODE INSTANCES
script_node_instances_list = []
for ni in node_instances:
node_instance_dict = ni.get_json_data()
script_node_instances_list.append(node_instance_dict)
return script_node_instances_list
def get_connections_json_data(self, node_instances, only_with_connections_to=None):
# CONNECTIONS (not decentralized so far, probably also nicer this way)
script_ni_connections_list = []
for ni in node_instances:
for out in ni.outputs:
if len(out.connected_port_instances) > 0:
for connected_port in out.connected_port_instances:
# this only applies when saving config data through deleting node instances:
if only_with_connections_to is not None and \
connected_port.parent_node_instance not in only_with_connections_to and \
ni not in only_with_connections_to:
continue
# because I am not allowed to save connections between nodes connected to each other and both
# connected to the deleted node, only the connections to the deleted node shall be saved
connection_dict = {'parent node instance index': node_instances.index(ni),
'output port index': ni.outputs.index(out)}
# yes, very important: when copying components, there might be connections going outside the
# selected lists, these should be ignored. When saving a project, all components are considered,
# so then the index values will never be none
connected_ni_index = node_instances.index(connected_port.parent_node_instance) if \
node_instances.__contains__(connected_port.parent_node_instance) else \
None
connection_dict['connected node instance'] = connected_ni_index
connected_ip_index = connected_port.parent_node_instance.inputs.index(connected_port) if \
connected_ni_index is not None else None
connection_dict['connected input port index'] = connected_ip_index
script_ni_connections_list.append(connection_dict)
return script_ni_connections_list
def get_drawings_json_data(self, drawings):
# DRAWINGS
drawings_list = []
for drawing in drawings:
drawing_dict = {'pos x': drawing.pos().x(),
'pos y': drawing.pos().y()}
points_list = []
for i in range(len(drawing.points)):
p = drawing.points[i]
points_list.append({'x': p.x(),
'y': p.y(),
'w': drawing.stroke_weights[i]})
drawing_dict['points'] = points_list
drawings_list.append(drawing_dict)
return drawings_list
class MoveComponents_Command(QUndoCommand):
def __init__(self, flow, indices, pos_diff):
super(MoveComponents_Command, self).__init__()
self.flow = flow
self.indices = indices
self.pos_diff = pos_diff
def undo(self):
components = self.flow.get_all_components()
for index in self.indices:
c = components[index]
c.setPos(c.pos()-self.pos_diff)
def redo(self):
components = self.flow.get_all_components()
for index in self.indices:
c = components[index]
c.setPos(c.pos()+self.pos_diff)
class PlaceNodeInstance_Command(QUndoCommand):
def __init__(self, flow, node, pos, config):
super(PlaceNodeInstance_Command, self).__init__()
self.flow = flow
self.node = node
self.new_node_instance_component_index = -1
self.node_place_pos = pos
self.config = config
def undo(self):
self.flow.remove_node_instance(self.flow.get_all_components()[self.new_node_instance_component_index])
def redo(self):
new_node_instance = self.flow.place_node(self.node, self.node_place_pos, self.config)
self.new_node_instance_component_index = self.flow.get_all_components().index(new_node_instance)
class PlaceDrawingObject_Command(QUndoCommand):
def __init__(self, flow, pos, config):
super(PlaceDrawingObject_Command, self).__init__()
self.flow = flow
self.drawing_obj_component_index = -1
self.drawing_obj_place_pos = pos
self.config = config
def undo(self):
self.flow.remove_component(self.flow.get_all_components()[self.drawing_obj_component_index])
def redo(self):
new_drawing_object = self.flow.create_and_place_drawing(self.drawing_obj_place_pos, self.config)
self.drawing_obj_component_index = self.flow.get_all_components().index(new_drawing_object)
class RemoveComponents_Command(QUndoCommand):
def __init__(self, flow, indices):
super(RemoveComponents_Command, self).__init__()
self.flow = flow
self.indices = sorted(indices)
self.config_of_deleted_content = {}
self.drawings_copy = []
all_components = self.flow.get_all_components()
self.deleted_components = [all_components[index] for index in self.indices]
self.node_instances = []
for e in self.deleted_components:
if self.flow.find_type_in_object(e, NodeInstance):
self.node_instances.append(e)
self.connected_node_instances_indices_not_in_del_selection = []
for n in self.node_instances:
for i in n.inputs:
for cpi in i.connected_port_instances:
cpn = cpi.parent_node_instance
index = self.flow.get_all_components().index(cpn)
if cpn not in self.node_instances and index not in self.connected_node_instances_indices_not_in_del_selection:
self.connected_node_instances_indices_not_in_del_selection.append(index)
for o in n.outputs:
for cpi in o.connected_port_instances:
cpn = cpi.parent_node_instance
index = self.flow.get_all_components().index(cpn)
if cpn not in self.node_instances and index not in self.connected_node_instances_indices_not_in_del_selection:
self.connected_node_instances_indices_not_in_del_selection.append(index)
def undo(self):
self.node_instances.clear()
        new_deleted_components = []  # actually only POTENTIALLY (when redo() has been used) deleted components
for i in range(len(self.indices)):
index = self.indices[i]
old_component = self.deleted_components[i] # the one that gets recreated
if self.flow.find_type_in_object(old_component, NodeInstance):
new_node_instance = self.flow.place_node(old_component.parent_node, old_component.pos(),
self.config_of_deleted_content['components'][i])
self.flow.all_node_instances.remove(self.flow.all_node_instances[-1])
self.flow.inset_component(index, new_node_instance)
self.node_instances.append(new_node_instance)
new_deleted_components.append(new_node_instance)
elif self.flow.find_type_in_object(old_component, DrawingObject):
new_drawing = self.flow.place_existing_drawing(old_component)
new_deleted_components.append(new_drawing)
self.flow.drawings.remove(self.flow.drawings[-1])
self.flow.inset_component(index, new_drawing)
self.deleted_components = new_deleted_components
self.flow.connect_nodes_from_config(self.node_instances + self.get_connected_node_instances(),
self.config_of_deleted_content['connections'])
self.flow.selected_node_instances = self.node_instances
def redo(self):
self.drawings_copy = self.flow.drawings.copy()
components = self.flow.get_all_components()
self.config_of_deleted_content.clear()
components_configs = []
connections_config = self.flow.get_connections_json_data(self.node_instances + self.get_connected_node_instances(),
only_with_connections_to=self.node_instances)
index_decrease = 0
for index in self.indices:
e = self.flow.get_all_components()[index - index_decrease]
components_configs.append(e.get_json_data())
self.flow.remove_component(e)
index_decrease += 1
self.config_of_deleted_content['components'] = components_configs
self.config_of_deleted_content['connections'] = connections_config
def get_connected_node_instances(self):
all_components = self.flow.get_all_components()
connected_node_instances = [all_components[index] for index in self.connected_node_instances_indices_not_in_del_selection]
return connected_node_instances
class ConnectGates_Command(QUndoCommand):
def __init__(self, flow, parent_gate, child_gate):
super(ConnectGates_Command, self).__init__()
self.flow = flow
self.parent_port_index = -1
self.parent_port_direction = parent_gate.parent_port_instance.direction
if self.parent_port_direction == 'input':
self.parent_port_index = parent_gate.parent_port_instance.parent_node_instance.inputs.index(
parent_gate.parent_port_instance)
elif self.parent_port_direction == 'output':
self.parent_port_index = parent_gate.parent_port_instance.parent_node_instance.outputs.index(
parent_gate.parent_port_instance)
self.parent_port_node_instance_index = self.flow.get_all_components().index(parent_gate.parent_port_instance.parent_node_instance)
self.child_port_index = -1
self.child_port_direction = child_gate.parent_port_instance.direction
if self.child_port_direction == 'input':
self.child_port_index = child_gate.parent_port_instance.parent_node_instance.inputs.index(
child_gate.parent_port_instance)
elif self.child_port_direction == 'output':
self.child_port_index = child_gate.parent_port_instance.parent_node_instance.outputs.index(
child_gate.parent_port_instance)
self.child_port_node_instance_index = self.flow.get_all_components().index(child_gate.parent_port_instance.parent_node_instance)
def undo(self):
parent_port, child_port = self.get_ports()
self.flow.connect_gates(parent_port, child_port)
def redo(self):
parent_port, child_port = self.get_ports()
self.flow.connect_gates(parent_port, child_port)
def get_ports(self):
parent_node_instance = self.flow.get_all_components()[self.parent_port_node_instance_index]
parent_port = parent_node_instance.inputs[self.parent_port_index].gate if self.parent_port_direction == 'input' \
else parent_node_instance.outputs[self.parent_port_index].gate
child_node_instance = self.flow.get_all_components()[self.child_port_node_instance_index]
child_port = child_node_instance.inputs[self.child_port_index].gate if self.child_port_direction == 'input' \
else child_node_instance.outputs[self.child_port_index].gate
return parent_port, child_port
class Paste_Command(QUndoCommand):
def __init__(self, flow, data, offset_for_middle_pos):
super(Paste_Command, self).__init__()
self.flow = flow
self.data = data
self.offset_for_middle_pos = offset_for_middle_pos
self.pasted_components_indices = []
def undo(self):
component_index_decrease = 0
all_components = self.flow.get_all_components()
for index in self.pasted_components_indices:
self.flow.remove_component(self.flow.get_all_components()[index - component_index_decrease])
component_index_decrease += 1
def redo(self):
self.pasted_components_indices.clear()
new_node_instances = self.flow.place_nodes_from_config(self.data['nodes'], offset_pos=self.offset_for_middle_pos.toPoint())
self.flow.selected_node_instances = new_node_instances
all_components = self.flow.get_all_components()
self.pasted_components_indices += [all_components.index(ni) for ni in new_node_instances]
self.flow.connect_nodes_from_config(new_node_instances, self.data['connections'])
new_drawing_objects = self.flow.place_drawings_from_config(self.data['drawings'], offset_pos=self.offset_for_middle_pos.toPoint())
self.flow.selected_drawings = new_drawing_objects
all_components = self.flow.get_all_components()
self.pasted_components_indices += [all_components.index(d_o) for d_o in new_drawing_objects]
|
[
"[email protected]"
] | |
6dd21581be8808e6af84d54ea4876e29955d7868
|
97e557d328b89adbd1459f8988a12ec3a9f4adc7
|
/trino/datadog_checks/trino/config_models/defaults.py
|
dd72d075a339c504cf5a54565c63a0a9ed0e45d6
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
DataDog/integrations-extras
|
1b9c9928af4e5a146b9611aed29db206f420710c
|
1d20f8d945ef802fa6e01b41b6ba99148ac508a4
|
refs/heads/master
| 2023-08-31T04:01:33.555722 | 2023-08-30T18:51:09 | 2023-08-30T18:51:09 | 51,574,196 | 221 | 667 |
BSD-3-Clause
| 2023-09-14T16:07:56 | 2016-02-12T07:55:28 |
Python
|
UTF-8
|
Python
| false | false | 2,456 |
py
|
# This file is autogenerated.
# To change this file you should edit assets/configuration/spec.yaml and then run the following commands:
# ddev -x validate config -s <INTEGRATION_NAME>
# ddev -x validate models -s <INTEGRATION_NAME>
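# For this check the placeholder resolves to its integration name, e.g. (hypothetical invocation):
#     ddev -x validate config -s trino
#     ddev -x validate models -s trino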
from datadog_checks.base.utils.models.fields import get_default_field_value
def shared_collect_default_metrics(field, value):
return False
def shared_conf(field, value):
return get_default_field_value(field, value)
def shared_new_gc_metrics(field, value):
return False
def shared_service(field, value):
return get_default_field_value(field, value)
def shared_service_check_prefix(field, value):
return get_default_field_value(field, value)
def instance_collect_default_jvm_metrics(field, value):
return True
def instance_empty_default_hostname(field, value):
return False
def instance_is_jmx(field, value):
return False
def instance_java_bin_path(field, value):
return get_default_field_value(field, value)
def instance_java_options(field, value):
return get_default_field_value(field, value)
def instance_jmx_url(field, value):
return get_default_field_value(field, value)
def instance_key_store_password(field, value):
return get_default_field_value(field, value)
def instance_key_store_path(field, value):
return get_default_field_value(field, value)
def instance_min_collection_interval(field, value):
return 15
def instance_name(field, value):
return get_default_field_value(field, value)
def instance_password(field, value):
return get_default_field_value(field, value)
def instance_process_name_regex(field, value):
return get_default_field_value(field, value)
def instance_rmi_client_timeout(field, value):
return 15000
def instance_rmi_connection_timeout(field, value):
return 20000
def instance_rmi_registry_ssl(field, value):
return False
def instance_service(field, value):
return get_default_field_value(field, value)
def instance_tags(field, value):
return get_default_field_value(field, value)
def instance_tools_jar_path(field, value):
return get_default_field_value(field, value)
def instance_trust_store_password(field, value):
return get_default_field_value(field, value)
def instance_trust_store_path(field, value):
return get_default_field_value(field, value)
def instance_user(field, value):
return get_default_field_value(field, value)
|
[
"[email protected]"
] | |
223af6cc25b1fbb1b1897dd9fc56907beca41a5f
|
1d75146a66245dc046dc216bb602129208e00733
|
/closed/Intel/code/rnnt/pytorch-cpu/pytorch/model_separable_rnnt.py
|
f0ef252130cdddba876ef1e6e96538b72431c578
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
georgelyuan/inference_results_v1.1
|
febf287bd5967bf7f087355a81f06a2bd298cbfe
|
3196a5587887c39203ee3ac246fa5dbe789d9085
|
refs/heads/main
| 2023-08-16T08:49:45.274284 | 2021-09-23T20:57:17 | 2021-09-23T20:57:17 | 409,773,141 | 0 | 0 |
NOASSERTION
| 2021-09-23T23:36:37 | 2021-09-23T23:36:37 | null |
UTF-8
|
Python
| false | false | 7,227 |
py
|
from typing import Optional, Tuple
import numpy as np
import torch
from rnn import rnn
from rnn import StackTime
class RNNT(torch.nn.Module):
def __init__(self, rnnt=None, num_classes=1, **kwargs):
super().__init__()
if kwargs.get("no_featurizer", False):
in_features = kwargs.get("in_features")
else:
feat_config = kwargs.get("feature_config")
# This may be useful in the future, for MLPerf
# configuration.
in_features = feat_config['features'] * \
feat_config.get("frame_splicing", 1)
self.encoder = Encoder(in_features,
rnnt["encoder_n_hidden"],
rnnt["encoder_pre_rnn_layers"],
rnnt["encoder_post_rnn_layers"],
rnnt["forget_gate_bias"],
None if "norm" not in rnnt else rnnt["norm"],
rnnt["rnn_type"],
rnnt["encoder_stack_time_factor"],
rnnt["dropout"],
)
self.prediction = Prediction(
num_classes,
rnnt["pred_n_hidden"],
rnnt["pred_rnn_layers"],
rnnt["forget_gate_bias"],
None if "norm" not in rnnt else rnnt["norm"],
rnnt["rnn_type"],
rnnt["dropout"],
-1, #_SOS
)
self.joint = Joint(
num_classes,
rnnt["pred_n_hidden"],
rnnt["encoder_n_hidden"],
rnnt["joint_n_hidden"],
rnnt["dropout"],
)
def forward(self, x_padded: torch.Tensor, x_lens: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return self.encoder(x_padded, x_lens)
class Encoder(torch.nn.Module):
def __init__(self, in_features, encoder_n_hidden,
encoder_pre_rnn_layers, encoder_post_rnn_layers,
forget_gate_bias, norm, rnn_type, encoder_stack_time_factor,
dropout):
super().__init__()
self.pre_rnn = rnn(
rnn=rnn_type,
input_size=in_features,
hidden_size=encoder_n_hidden,
num_layers=encoder_pre_rnn_layers,
norm=norm,
forget_gate_bias=forget_gate_bias,
dropout=dropout,
)
self.stack_time = StackTime(factor=encoder_stack_time_factor)
self.post_rnn = rnn(
rnn=rnn_type,
input_size=encoder_stack_time_factor * encoder_n_hidden,
hidden_size=encoder_n_hidden,
num_layers=encoder_post_rnn_layers,
norm=norm,
forget_gate_bias=forget_gate_bias,
norm_first_rnn=True,
dropout=dropout,
)
def forward(self, x_padded: torch.Tensor, x_lens: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
x_padded, _ = self.pre_rnn(x_padded, None)
x_padded, x_lens = self.stack_time(x_padded, x_lens)
# (T, B, H)
x_padded, _ = self.post_rnn(x_padded, None)
# (B, T, H)
x_padded = x_padded.transpose_(0, 1)
return x_padded, x_lens
class Prediction(torch.nn.Module):
def __init__(self, vocab_size, n_hidden, pred_rnn_layers,
forget_gate_bias, norm, rnn_type, dropout, sos_val):
super().__init__()
self.embed = torch.nn.Embedding(vocab_size - 1, n_hidden)
self.n_hidden = n_hidden
self.dec_rnn = rnn(
rnn=rnn_type,
input_size=n_hidden,
hidden_size=n_hidden,
num_layers=pred_rnn_layers,
norm=norm,
forget_gate_bias=forget_gate_bias,
dropout=dropout,
)
self._SOS = sos_val
def forward(self, y: torch.Tensor,
state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
b: int = 1) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
"""
B - batch size
U - label length
H - Hidden dimension size
L - Number of decoder layers = 2
Args:
y: (B, U)
Returns:
Tuple (g, hid) where:
g: (B, U + 1, H)
hid: (h, c) where h is the final sequence hidden state and c is
the final cell state:
h (tensor), shape (L, B, H)
c (tensor), shape (L, B, H)
"""
        # SOS hack: there is no SOS token in the embedding table, but SOS should
        # behave as if its embedding were all zeros. So identify the SOS positions
        # and zero out the corresponding lookup results. If the embedding table
        # contained an SOS token this workaround would not be needed.
y_mask = y.eq(self._SOS)
y.masked_fill_(y_mask, 0)
y = self.embed(y)
y.masked_fill_(y_mask.unsqueeze(2), 0.0)
# if state is None:
# batch = y.size(0)
# state = [
# (torch.zeros(batch, self.pred_n_hidden, dtype=y.dtype, device=y.device),
# torch.zeros(batch, self.pred_n_hidden, dtype=y.dtype, device=y.device))
# for _ in range(self.pred_rnn_layers)
# ]
y = y.transpose_(0, 1) # .contiguous() # (U + 1, B, H)
g, hid = self.dec_rnn(y, state)
g = g.transpose_(0, 1) # .contiguous() # (B, U + 1, H)
# del y, state
return g, hid
class Joint(torch.nn.Module):
def __init__(self, vocab_size, pred_n_hidden, enc_n_hidden,
joint_n_hidden, dropout):
super().__init__()
layers = [
torch.nn.Linear(pred_n_hidden + enc_n_hidden, joint_n_hidden),
torch.nn.ReLU(),
] + ([torch.nn.Dropout(p=dropout), ] if dropout else []) + [
torch.nn.Linear(joint_n_hidden, vocab_size)
]
self.net = torch.nn.Sequential(
*layers
)
def forward(self, f: torch.Tensor, g: torch.Tensor):
"""
f should be shape (B, T, H)
g should be shape (B, U + 1, H)
returns:
logits of shape (B, T, U, K + 1)
"""
# Combine the input states and the output states
B, T, H = f.shape
B, U_, H2 = g.shape
f = f.unsqueeze(dim=2) # (B, T, 1, H)
f = f.expand((B, T, U_, H))
g = g.unsqueeze(dim=1) # (B, 1, U + 1, H)
g = g.expand((B, T, U_, H2))
inp = torch.cat([f, g], dim=3) # (B, T, U, 2H)
res = self.net(inp)
# del f, g, inp
return res
def label_collate(labels):
"""Collates the label inputs for the rnn-t prediction network.
If `labels` is already in torch.Tensor form this is a no-op.
Args:
labels: A torch.Tensor List of label indexes or a torch.Tensor.
Returns:
A padded torch.Tensor of shape (batch, max_seq_len).
"""
if isinstance(labels, torch.Tensor):
return labels.type(torch.int64)
if not isinstance(labels, (list, tuple)):
raise ValueError(
f"`labels` should be a list or tensor not {type(labels)}"
)
batch_size = len(labels)
max_len = max(len(l) for l in labels)
cat_labels = np.full((batch_size, max_len), fill_value=0.0, dtype=np.int32)
for e, l in enumerate(labels):
cat_labels[e, :len(l)] = l
labels = torch.LongTensor(cat_labels)
return labels
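# Construction sketch (illustrative hyperparameters and shapes, not necessarily the
# exact MLPerf reference configuration):
#
#   rnnt_cfg = dict(encoder_n_hidden=1024, encoder_pre_rnn_layers=2,
#                   encoder_post_rnn_layers=3, forget_gate_bias=1.0,
#                   rnn_type="lstm", encoder_stack_time_factor=2, dropout=0.32,
#                   pred_n_hidden=320, pred_rnn_layers=2, joint_n_hidden=512)
#   model = RNNT(rnnt=rnnt_cfg, num_classes=29,
#                feature_config={"features": 80, "frame_splicing": 3})
#   feats = torch.randn(200, 8, 240)                 # (T, B, in_features)
#   feat_lens = torch.full((8,), 200, dtype=torch.int32)
#   enc_out, enc_lens = model(feats, feat_lens)      # (8, 100, 1024) after time stacking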
|
[
"[email protected]"
] | |
46a7ad367d32d1f25f5e67ee022c7e9c51b182f4
|
9d0195aa83cc594a8c61f334b90375961e62d4fe
|
/JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano1855.py
|
0852a5548985ede083bf928f46ccb8ab96b3311d
|
[] |
no_license
|
rsk146/CMS
|
4e49592fc64f6438051544c5de18598db36ed985
|
5f8dab8c59ae556598b9747b52b88205fffc4dbe
|
refs/heads/master
| 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,292 |
py
|
# Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/60000/E8B8B831-8E38-6A44-A073-695FB36D0F0C.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAOD'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_datatest1855.root'),
outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
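# Typical invocation (illustrative): with a CMSSW environment set up, this
# configuration is run as `cmsRun nano1855.py`.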
|
[
"[email protected]"
] | |
215d4e5fe25b13aa2b68168297d79b1ec68bbcc4
|
95b29e6ce83320a8e2368efd104a5db1f2af697e
|
/vel/openai/baselines/common/vec_env/subproc_vec_env.py
|
ebe55e4a848cab67dc37944e6dd69bc5c04c0a6c
|
[
"MIT"
] |
permissive
|
tigerwlin/vel
|
9b237c0a2ebb0fc6285db13e404c596907eb9107
|
00e4fbb7b612e888e2cbb5d8455146664638cd0b
|
refs/heads/master
| 2020-04-17T07:06:58.759152 | 2019-04-18T01:43:57 | 2019-04-18T01:43:57 | 166,354,546 | 0 | 0 |
MIT
| 2019-05-01T11:17:59 | 2019-01-18T06:20:18 |
Python
|
UTF-8
|
Python
| false | false | 3,363 |
py
|
import numpy as np
from multiprocessing import Process, Pipe
from vel.openai.baselines.common.vec_env import VecEnv, CloudpickleWrapper
from vel.openai.baselines.common.tile_images import tile_images
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'render':
remote.send(env.render(mode='rgb_array'))
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
        env_fns: list of callables, each returning a gym environment to run in its own subprocess
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def render(self, mode='human'):
for pipe in self.remotes:
pipe.send(('render', None))
imgs = [pipe.recv() for pipe in self.remotes]
bigimg = tile_images(imgs)
if mode == 'human':
import cv2
cv2.imshow('vecenv', bigimg[:,:,::-1])
cv2.waitKey(1)
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
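# Minimal usage sketch (illustrative; assumes gym is installed):
#
#   import gym
#   env_fns = [lambda: gym.make("CartPole-v1") for _ in range(4)]
#   vec_env = SubprocVecEnv(env_fns)
#   obs = vec_env.reset()                                   # shape (4, obs_dim)
#   actions = [vec_env.action_space.sample() for _ in range(4)]
#   obs, rews, dones, infos = vec_env.step(actions)         # step() from the VecEnv base class
#   vec_env.close()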
|
[
"[email protected]"
] | |
38e465b29c5974e1c2697b29528910e655c026ee
|
c78bc2364a22e84b8c39f8b1aeb889d2376ea669
|
/tests/test_core/test_compile.py
|
28a111ad89b5ae093c1811f98ef36980fc406b7f
|
[
"BSD-3-Clause"
] |
permissive
|
funkelab/spimagine
|
ee5262f4698eee56bb8bd5640ebae24829337a1c
|
d7fb0aac8986421df339486e1f0d33d0ba1c820c
|
refs/heads/master
| 2020-04-23T17:29:12.209161 | 2019-02-18T18:24:40 | 2019-02-18T18:24:40 | 171,333,799 | 0 | 0 |
BSD-3-Clause
| 2019-02-18T18:21:13 | 2019-02-18T18:21:11 | null |
UTF-8
|
Python
| false | false | 182 |
py
|
"""
[email protected]
"""
from __future__ import print_function, unicode_literals, absolute_import, division
import os
os.environ["PYOPENCL_COMPILER_OUTPUT"]="1"
import spimagine
|
[
"[email protected]"
] | |
6518aa95bae63d388734eea94e2bdfe15ec47178
|
9ca0c6248b3a0904d6af8ea069ff0974a3a36498
|
/production_water_level.py
|
dff59db587958f49b5fdaa968986640f77ab35a8
|
[] |
no_license
|
CUrW-SL/ShapeFile
|
a85f1d9b604348ce9cf3209e1f86163131068697
|
7593d8634d7f59a088fce983bd253d550cc70fb5
|
refs/heads/master
| 2021-07-08T20:06:34.861316 | 2020-07-11T01:30:46 | 2020-07-11T01:30:46 | 149,579,460 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 28,366 |
py
|
#!/usr/bin/python3
import csv
import getopt
import json
import os
import sys
import traceback
import copy
from datetime import datetime, timedelta
from os.path import join as pjoin
from curwmysqladapter import MySQLAdapter
import Constants
from LIBFLO2DWATERLEVELGRID import getWaterLevelOfChannels
from Util.LibForecastTimeseries import extractForecastTimeseries
from Util.LibForecastTimeseries import extractForecastTimeseriesInDays
from Util.Utils import getUTCOffset
def usage():
usageText = """
Usage: ./EXTRACTFLO2DTOWATERLEVEL.py [-d YYYY-MM-DD] [-t HH:MM:SS] [-p -o -h] [-S YYYY-MM-DD] [-T HH:MM:SS]
-h --help Show usage
-f --forceInsert Force Insert into the database. May override existing values.
-F --flo2d_config Configuration for FLO2D model run
-d --date Model State Date in YYYY-MM-DD. Default is current date.
-t --time Model State Time in HH:MM:SS. If -d passed, then default is 00:00:00. Otherwise Default is current time.
-S --start_date Base Date of FLO2D model output in YYYY-MM-DD format. Default is same as -d option value.
-T --start_time Base Time of FLO2D model output in HH:MM:SS format. Default is set to 00:00:00
-p --path FLO2D model path which include HYCHAN.OUT
-o --out Suffix for 'water_level-<SUFFIX>' and 'water_level_grid-<SUFFIX>' output directories.
Default is 'water_level-<YYYY-MM-DD>' and 'water_level_grid-<YYYY-MM-DD>' same as -d option value.
-n --name Name field value of the Run table in Database. Use time format such as 'Cloud-1-<%H:%M:%S>' to replace with time(t).
-u --utc_offset UTC offset of current timestamps. "+05:30" or "-10:00". Default value is "+00:00".
"""
print(usageText)
def get_water_level_of_channels(lines, channels=None):
"""
    Get water levels of a given set of flood-plain cells from one TIMDEP.OUT block.
    :param lines: lines of a single time-step block (the first line is the time stamp)
    :param channels: iterable of cell numbers, as strings, to extract
    :return: dict mapping cell number to water surface elevation (column index 5 of the line)
"""
if channels is None:
channels = []
water_levels = {}
for line in lines[1:]:
if line == '\n':
break
v = line.split()
if v[0] in channels:
# Get flood level (Elevation)
water_levels[v[0]] = v[5]
# Get flood depth (Depth)
# water_levels[int(v[0])] = v[2]
return water_levels
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def save_forecast_timeseries(my_adapter, my_timeseries, my_model_date, my_model_time, my_opts):
# print('EXTRACTFLO2DWATERLEVEL:: save_forecast_timeseries >>', my_opts)
# Convert date time with offset
date_time = datetime.strptime('%s %s' % (my_model_date, my_model_time), Constants.COMMON_DATE_TIME_FORMAT)
if 'utcOffset' in my_opts:
date_time = date_time + my_opts['utcOffset']
my_model_date = date_time.strftime('%Y-%m-%d')
my_model_time = date_time.strftime('%H:%M:%S')
# If there is an offset, shift by offset before proceed
forecast_timeseries = []
if 'utcOffset' in my_opts:
# print('Shit by utcOffset:', my_opts['utcOffset'].resolution)
for item in my_timeseries:
forecast_timeseries.append(
[datetime.strptime(item[0], Constants.COMMON_DATE_TIME_FORMAT) + my_opts['utcOffset'], item[1]])
forecast_timeseries = extractForecastTimeseries(forecast_timeseries, my_model_date, my_model_time, by_day=True)
else:
forecast_timeseries = extractForecastTimeseries(my_timeseries, my_model_date, my_model_time, by_day=True)
# print(forecast_timeseries[:10])
extracted_timeseries = extractForecastTimeseriesInDays(forecast_timeseries)
# for ll in extractedTimeseries :
# print(ll)
# Check whether existing station
force_insert = my_opts.get('forceInsert', False)
station = my_opts.get('station', '')
source = my_opts.get('source', 'FLO2D_250')
is_station_exists = adapter.get_station({'name': station})
if station == 'Parlimant Lake Side':
print('--------------------------------------------------------------------')
print('source : ', source)
print('station : ', station)
# print('forecast_timeseries : ', forecast_timeseries)
print('len(forecast_timeseries) : ', len(forecast_timeseries))
# print('extracted_timeseries : ', extracted_timeseries)
print('len(extracted_timeseries) : ', len(extracted_timeseries))
print('--------------------------------------------------------------------')
else:
print('len(forecast_timeseries) : ', len(forecast_timeseries))
print('len(extracted_timeseries) : ', len(extracted_timeseries))
if is_station_exists is None:
print('WARNING: Station %s does not exists. Continue with others.' % station)
return
# TODO: Create if station does not exists.
run_name = my_opts.get('run_name', 'Cloud-1')
less_char_index = run_name.find('<')
greater_char_index = run_name.find('>')
    if less_char_index > -1 and greater_char_index > -1:
start_str = run_name[:less_char_index]
date_format_str = run_name[less_char_index + 1:greater_char_index]
end_str = run_name[greater_char_index + 1:]
try:
date_str = date_time.strftime(date_format_str)
run_name = start_str + date_str + end_str
except ValueError:
raise ValueError("Incorrect data format " + date_format_str)
types = [
'Forecast-0-d',
'Forecast-1-d-after',
'Forecast-2-d-after',
'Forecast-3-d-after',
'Forecast-4-d-after',
'Forecast-5-d-after',
'Forecast-6-d-after',
'Forecast-7-d-after',
'Forecast-8-d-after',
'Forecast-9-d-after',
'Forecast-10-d-after',
'Forecast-11-d-after',
'Forecast-12-d-after',
'Forecast-13-d-after',
'Forecast-14-d-after'
]
meta_data = {
'station': station,
'variable': 'WaterLevel',
'unit': 'm',
'type': types[0],
'source': source,
'name': run_name
}
for i in range(0, min(len(types), len(extracted_timeseries))):
meta_data_copy = copy.deepcopy(meta_data)
meta_data_copy['type'] = types[i]
event_id = my_adapter.get_event_id(meta_data_copy)
if meta_data_copy['station'] == 'Parlimant Lake Side':
print('meta_data_copy : ', meta_data_copy)
if event_id is None:
event_id = my_adapter.create_event_id(meta_data_copy)
else:
if not force_insert:
                    print('Timeseries already exists. Use --force to update the existing.\n')
continue
if meta_data_copy['station'] == 'Dehiwala Canal':
print('meta_data_copy : ', meta_data_copy)
print('extracted_timeseries[i] : ', extracted_timeseries[i])
if meta_data_copy['station'] == 'Parlimant Lake Side':
print('meta_data_copy : ', meta_data_copy)
print('extracted_timeseries[i] : ', extracted_timeseries[i])
row_count = my_adapter.insert_timeseries(event_id, extracted_timeseries[i], force_insert)
print('row_count : ', row_count)
if meta_data_copy['station'] == 'Parlimant Lake Side':
print('meta_data_copy : ', meta_data_copy)
print('%s rows inserted.\n' % row_count)
# -- END OF SAVE_FORECAST_TIMESERIES
try:
CONFIG = json.loads(open('CONFIG.json').read())
CWD = os.getcwd()
HYCHAN_OUT_FILE = 'HYCHAN.OUT'
TIMEDEP_FILE = 'TIMDEP.OUT'
WATER_LEVEL_FILE = 'water_level.txt'
WATER_LEVEL_DIR = 'water_level'
OUTPUT_DIR = 'OUTPUT'
RUN_FLO2D_FILE = 'RUN_FLO2D.json'
UTC_OFFSET = '+00:00:00'
FLO2D_MODEL = "FLO2D_250" # FLO2D source to CHANNEL_CELL_MAP from DB.
MYSQL_HOST = "localhost"
MYSQL_USER = "root"
MYSQL_DB = "curw"
MYSQL_PASSWORD = ""
if 'HYCHAN_OUT_FILE' in CONFIG:
HYCHAN_OUT_FILE = CONFIG['HYCHAN_OUT_FILE']
if 'TIMEDEP_FILE' in CONFIG:
TIMEDEP_FILE = CONFIG['TIMEDEP_FILE']
if 'WATER_LEVEL_FILE' in CONFIG:
WATER_LEVEL_FILE = CONFIG['WATER_LEVEL_FILE']
if 'OUTPUT_DIR' in CONFIG:
OUTPUT_DIR = CONFIG['OUTPUT_DIR']
if 'FLO2D_MODEL' in CONFIG:
FLO2D_MODEL = CONFIG['FLO2D_MODEL']
if 'MYSQL_HOST' in CONFIG:
MYSQL_HOST = CONFIG['MYSQL_HOST']
if 'MYSQL_USER' in CONFIG:
MYSQL_USER = CONFIG['MYSQL_USER']
if 'MYSQL_DB' in CONFIG:
MYSQL_DB = CONFIG['MYSQL_DB']
if 'MYSQL_PASSWORD' in CONFIG:
MYSQL_PASSWORD = CONFIG['MYSQL_PASSWORD']
adapter = MySQLAdapter(host=MYSQL_HOST, user=MYSQL_USER, password=MYSQL_PASSWORD, db=MYSQL_DB)
    # TODO: Pass source name as a parameter to the script
flo2d_source = adapter.get_source(name=FLO2D_MODEL)
try:
flo2d_source = json.loads(flo2d_source.get('parameters', "{}"))
except Exception as e:
print(e)
traceback.print_exc()
CHANNEL_CELL_MAP = {}
flo2d_source = {"CHANNEL_CELL_MAP": {"179": "Wellawatta Canal-St Peters College", "220": "Dehiwala Canal",
"261": "Mutwal Outfall", "387": "Swarna Rd-Wellawatta",
"388": "Thummodara", "475": "Babapulle", "545": "Ingurukade Jn",
"592": "Torrinton", "616": "Nagalagam Street",
"618": "Nagalagam Street River", "660": "OUSL-Narahenpita Rd",
"684": "Dematagoda Canal-Orugodawatta", "813": "Kirimandala Mw",
"823": "LesliRanagala Mw", "885": "OUSL-Nawala Kirulapana Canal",
"912": "Kittampahuwa", "973": "Near SLLRDC", "991": "Kalupalama",
"1062": "Yakbedda", "1161": "Kittampahuwa River", "1243": "Vivekarama Mw",
"1333": "Wellampitiya", "1420": "Madinnagoda", "1517": "Kotte North Canal",
"1528": "Harwad Band", "1625": "Kotiyagoda", "1959": "Koratuwa Rd",
"2174": "Weliwala Pond", "2371": "JanakalaKendraya",
"2395": "Kelani Mulla Outfall", "2396": "Salalihini-River",
"2597": "Old Awissawella Rd", "2693": "Talatel Culvert",
"2695": "Wennawatta", "3580": "Ambatale Outfull1",
"3673": "Ambatale River", "3919": "Amaragoda", "4192": "Malabe"},
"FLOOD_PLAIN_CELL_MAP": {"24": "Baira Lake Nawam Mw", "153": "Baira Lake Railway",
"1838": "Polduwa-Parlimant Rd", "1842": "Abagaha Jn",
"2669": "Parlimant Lake Side", "2686": "Aggona",
"2866": "Kibulawala 1", "2874": "Rampalawatta"}}
if 'CHANNEL_CELL_MAP' in flo2d_source:
CHANNEL_CELL_MAP = flo2d_source['CHANNEL_CELL_MAP']
FLOOD_PLAIN_CELL_MAP = {}
if 'FLOOD_PLAIN_CELL_MAP' in flo2d_source:
FLOOD_PLAIN_CELL_MAP = flo2d_source['FLOOD_PLAIN_CELL_MAP']
"""
{
"CHANNEL_CELL_MAP": {
"179": "Wellawatta",
"221": "Dehiwala",
"592": "Torington",
"616": "N'Street-Canal",
"618": "N'Street-River",
"684": "Dematagoda-Canal",
"814": "Heen Ela",
"1062": "Kolonnawa-Canal",
"991": "kittampahuwa-Out",
"1161": "Kittampahuwa-River",
"1515": "Parliament Lake Bridge-Kotte Canal",
"2158": "Parliament Lake-Out",
"2396": "Salalihini-River",
"2496": "Salalihini-Canal",
"3580": "Madiwela-Out",
"3673": "Ambathale"
},
"FLOOD_PLAIN_CELL_MAP": {
"2265": "Parliament Lake",
"3559": "Madiwela-US"
}
}
"""
ELEMENT_NUMBERS = CHANNEL_CELL_MAP.keys()
FLOOD_ELEMENT_NUMBERS = FLOOD_PLAIN_CELL_MAP.keys()
SERIES_LENGTH = 0
MISSING_VALUE = -999
date = ''
time = ''
path = ''
output_suffix = ''
start_date = ''
start_time = ''
flo2d_config = ''
run_name_default = 'Cloud-1'
runName = ''
utc_offset = ''
forceInsert = False
try:
opts, args = getopt.getopt(sys.argv[1:], "hF:d:t:p:o:S:T:fn:u:",
["help", "flo2d_config=", "date=", "time=", "path=", "out=", "start_date=",
"start_time=", "name=", "forceInsert", "utc_offset="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-F", "--flo2d_config"):
flo2d_config = arg
elif opt in ("-d", "--date"):
date = arg
elif opt in ("-t", "--time"):
time = arg
elif opt in ("-p", "--path"):
path = arg.strip()
elif opt in ("-o", "--out"):
output_suffix = arg.strip()
elif opt in ("-S", "--start_date"):
start_date = arg.strip()
elif opt in ("-T", "--start_time"):
start_time = arg.strip()
elif opt in ("-n", "--name"):
runName = arg.strip()
elif opt in ("-f", "--forceInsert"):
forceInsert = True
elif opt in ("-u", "--utc_offset"):
utc_offset = arg.strip()
appDir = pjoin(CWD, date + '_Kelani')
if path:
appDir = pjoin(CWD, path)
# Load FLO2D Configuration file for the Model run if available
FLO2D_CONFIG_FILE = pjoin(appDir, RUN_FLO2D_FILE)
if flo2d_config:
FLO2D_CONFIG_FILE = pjoin(CWD, flo2d_config)
FLO2D_CONFIG = json.loads('{}')
# Check FLO2D Config file exists
if os.path.exists(FLO2D_CONFIG_FILE):
FLO2D_CONFIG = json.loads(open(FLO2D_CONFIG_FILE).read())
# Default run for current day
now = datetime.now()
if 'MODEL_STATE_DATE' in FLO2D_CONFIG and len(
FLO2D_CONFIG['MODEL_STATE_DATE']): # Use FLO2D Config file data, if available
now = datetime.strptime(FLO2D_CONFIG['MODEL_STATE_DATE'], '%Y-%m-%d')
if date:
now = datetime.strptime(date, '%Y-%m-%d')
date = now.strftime("%Y-%m-%d")
if 'MODEL_STATE_TIME' in FLO2D_CONFIG and len(
FLO2D_CONFIG['MODEL_STATE_TIME']): # Use FLO2D Config file data, if available
now = datetime.strptime('%s %s' % (date, FLO2D_CONFIG['MODEL_STATE_TIME']), '%Y-%m-%d %H:%M:%S')
if time:
now = datetime.strptime('%s %s' % (date, time), '%Y-%m-%d %H:%M:%S')
time = now.strftime("%H:%M:%S")
if start_date:
start_date = datetime.strptime(start_date, '%Y-%m-%d')
start_date = start_date.strftime("%Y-%m-%d")
elif 'TIMESERIES_START_DATE' in FLO2D_CONFIG and len(
FLO2D_CONFIG['TIMESERIES_START_DATE']): # Use FLO2D Config file data, if available
start_date = datetime.strptime(FLO2D_CONFIG['TIMESERIES_START_DATE'], '%Y-%m-%d')
start_date = start_date.strftime("%Y-%m-%d")
else:
start_date = date
if start_time:
start_time = datetime.strptime('%s %s' % (start_date, start_time), '%Y-%m-%d %H:%M:%S')
start_time = start_time.strftime("%H:%M:%S")
elif 'TIMESERIES_START_TIME' in FLO2D_CONFIG and len(
FLO2D_CONFIG['TIMESERIES_START_TIME']): # Use FLO2D Config file data, if available
start_time = datetime.strptime('%s %s' % (start_date, FLO2D_CONFIG['TIMESERIES_START_TIME']),
'%Y-%m-%d %H:%M:%S')
start_time = start_time.strftime("%H:%M:%S")
else:
start_time = datetime.strptime(start_date, '%Y-%m-%d') # Time is set to 00:00:00
start_time = start_time.strftime("%H:%M:%S")
# Run Name of DB
if 'RUN_NAME' in FLO2D_CONFIG and len(FLO2D_CONFIG['RUN_NAME']): # Use FLO2D Config file data, if available
runName = FLO2D_CONFIG['RUN_NAME']
if not runName:
runName = run_name_default
# UTC Offset
if 'UTC_OFFSET' in FLO2D_CONFIG and len(FLO2D_CONFIG['UTC_OFFSET']): # Use FLO2D Config file data, if available
UTC_OFFSET = FLO2D_CONFIG['UTC_OFFSET']
if utc_offset:
UTC_OFFSET = utc_offset
utcOffset = getUTCOffset(UTC_OFFSET, default=True)
    print('Extract Water Level Result of FLO2D on', date, '@', time, 'with Base time of', start_date, '@', start_time)
print('With UTC Offset of ', str(utcOffset), ' <= ', UTC_OFFSET)
OUTPUT_DIR_PATH = pjoin(CWD, OUTPUT_DIR)
HYCHAN_OUT_FILE_PATH = pjoin(appDir, HYCHAN_OUT_FILE)
WATER_LEVEL_DIR_PATH = pjoin(OUTPUT_DIR_PATH, "%s-%s" % (WATER_LEVEL_DIR, date))
if 'FLO2D_OUTPUT_SUFFIX' in FLO2D_CONFIG and len(
FLO2D_CONFIG['FLO2D_OUTPUT_SUFFIX']): # Use FLO2D Config file data, if available
WATER_LEVEL_DIR_PATH = pjoin(OUTPUT_DIR_PATH, "%s-%s" % (WATER_LEVEL_DIR, FLO2D_CONFIG['FLO2D_OUTPUT_SUFFIX']))
if output_suffix:
WATER_LEVEL_DIR_PATH = pjoin(OUTPUT_DIR_PATH, "%s-%s" % (WATER_LEVEL_DIR, output_suffix))
print('Processing FLO2D model on', appDir)
# Check BASE.OUT file exists
if not os.path.exists(HYCHAN_OUT_FILE_PATH):
print('Unable to find file : ', HYCHAN_OUT_FILE_PATH)
sys.exit()
# Create OUTPUT Directory
if not os.path.exists(OUTPUT_DIR_PATH):
os.makedirs(OUTPUT_DIR_PATH)
# Calculate the size of time series
bufsize = 65536
with open(HYCHAN_OUT_FILE_PATH) as infile:
isWaterLevelLines = False
isCounting = False
countSeriesSize = 0 # HACK: When it comes to the end of file, unable to detect end of time series
while True:
lines = infile.readlines(bufsize)
if not lines or SERIES_LENGTH:
break
for line in lines:
if line.startswith('CHANNEL HYDROGRAPH FOR ELEMENT NO:', 5):
isWaterLevelLines = True
elif isWaterLevelLines:
cols = line.split()
if len(cols) > 0 and cols[0].replace('.', '', 1).isdigit():
countSeriesSize += 1
isCounting = True
elif isWaterLevelLines and isCounting:
SERIES_LENGTH = countSeriesSize
break
print('Series Length is :', SERIES_LENGTH)
bufsize = 65536
#################################################################
# Extract Channel Water Level elevations from HYCHAN.OUT file #
#################################################################
    print('Extract Channel Water Level Result of FLO2D HYCHAN.OUT on', date, '@', time, 'with Base time of', start_date,
'@', start_time)
with open(HYCHAN_OUT_FILE_PATH) as infile:
isWaterLevelLines = False
isSeriesComplete = False
waterLevelLines = []
seriesSize = 0 # HACK: When it comes to the end of file, unable to detect end of time series
while True:
lines = infile.readlines(bufsize)
if not lines:
break
for line in lines:
if line.startswith('CHANNEL HYDROGRAPH FOR ELEMENT NO:', 5):
seriesSize = 0
elementNo = line.split()[5]
if elementNo in ELEMENT_NUMBERS:
isWaterLevelLines = True
waterLevelLines.append(line)
else:
isWaterLevelLines = False
elif isWaterLevelLines:
cols = line.split()
if len(cols) > 0 and isfloat(cols[0]):
seriesSize += 1
waterLevelLines.append(line)
if seriesSize == SERIES_LENGTH:
isSeriesComplete = True
if isSeriesComplete:
baseTime = datetime.strptime('%s %s' % (start_date, start_time), '%Y-%m-%d %H:%M:%S')
timeseries = []
elementNo = waterLevelLines[0].split()[5]
# print('Extracted Cell No', elementNo, CHANNEL_CELL_MAP[elementNo])
for ts in waterLevelLines[1:]:
v = ts.split()
if len(v) < 1:
continue
# Get flood level (Elevation)
value = v[1]
# Get flood depth (Depth)
# value = v[2]
if not isfloat(value):
value = MISSING_VALUE
continue # If value is not present, skip
if value == 'NaN':
continue # If value is NaN, skip
timeStep = float(v[0])
currentStepTime = baseTime + timedelta(hours=timeStep)
dateAndTime = currentStepTime.strftime("%Y-%m-%d %H:%M:%S")
timeseries.append([dateAndTime, value])
# Create Directory
if not os.path.exists(WATER_LEVEL_DIR_PATH):
os.makedirs(WATER_LEVEL_DIR_PATH)
# Get Time stamp Ref:http://stackoverflow.com/a/13685221/1461060
ModelTime = float(waterLevelLines[1].split()[3])
fileModelTime = datetime.strptime(date, '%Y-%m-%d')
fileModelTime = fileModelTime + timedelta(hours=ModelTime)
dateAndTime = fileModelTime.strftime("%Y-%m-%d_%H-%M-%S")
# Create files
fileName = WATER_LEVEL_FILE.rsplit('.', 1)
stationName = CHANNEL_CELL_MAP[elementNo].replace(' ', '_')
fileTimestamp = "%s_%s" % (date, time.replace(':', '-'))
fileName = "%s-%s-%s.%s" % (fileName[0], stationName, fileTimestamp, fileName[1])
WATER_LEVEL_FILE_PATH = pjoin(WATER_LEVEL_DIR_PATH, fileName)
csvWriter = csv.writer(open(WATER_LEVEL_FILE_PATH, 'w'), delimiter=',', quotechar='|')
csvWriter.writerows(timeseries)
# Save Forecast values into Database
opts = {
'forceInsert': forceInsert,
'station': CHANNEL_CELL_MAP[elementNo],
'run_name': runName
}
# print('>>>>>', opts)
if utcOffset != timedelta():
opts['utcOffset'] = utcOffset
save_forecast_timeseries(adapter, timeseries, date, time, opts)
isWaterLevelLines = False
isSeriesComplete = False
waterLevelLines = []
# -- END for loop
# -- END while loop
#################################################################
# Extract Flood Plain water elevations from BASE.OUT file #
#################################################################
print('appDir : ', appDir)
TIMEDEP_FILE_PATH = pjoin(appDir, TIMEDEP_FILE)
print('TIMEDEP_FILE_PATH : ', TIMEDEP_FILE_PATH)
    print('Extract Flood Plain Water Level Result of FLO2D on', date, '@', time, 'with Base time of', start_date, '@',
start_time)
with open(TIMEDEP_FILE_PATH) as infile:
waterLevelLines = []
waterLevelSeriesDict = dict.fromkeys(FLOOD_ELEMENT_NUMBERS, [])
while True:
lines = infile.readlines(bufsize)
if not lines:
break
for line in lines:
if len(line.split()) == 1:
if len(waterLevelLines) > 0:
waterLevels = get_water_level_of_channels(waterLevelLines, FLOOD_ELEMENT_NUMBERS)
# Create Directory
if not os.path.exists(WATER_LEVEL_DIR_PATH):
os.makedirs(WATER_LEVEL_DIR_PATH)
# Get Time stamp Ref:http://stackoverflow.com/a/13685221/1461060
# print(waterLevelLines[0].split())
ModelTime = float(waterLevelLines[0].split()[0])
baseTime = datetime.strptime('%s %s' % (start_date, start_time), '%Y-%m-%d %H:%M:%S')
currentStepTime = baseTime + timedelta(hours=ModelTime)
dateAndTime = currentStepTime.strftime("%Y-%m-%d %H:%M:%S")
for elementNo in FLOOD_ELEMENT_NUMBERS:
tmpTS = waterLevelSeriesDict[elementNo][:]
if elementNo in waterLevels:
tmpTS.append([dateAndTime, waterLevels[elementNo]])
else:
tmpTS.append([dateAndTime, MISSING_VALUE])
waterLevelSeriesDict[elementNo] = tmpTS
isWaterLevelLines = False
# for l in waterLevelLines :
# print(l)
waterLevelLines = []
waterLevelLines.append(line)
if len(waterLevelLines) > 0:
waterLevels = get_water_level_of_channels(waterLevelLines, FLOOD_ELEMENT_NUMBERS)
# Create Directory
if not os.path.exists(WATER_LEVEL_DIR_PATH):
os.makedirs(WATER_LEVEL_DIR_PATH)
# Get Time stamp Ref:http://stackoverflow.com/a/13685221/1461060
# print(waterLevelLines[0].split())
ModelTime = float(waterLevelLines[0].split()[0])
baseTime = datetime.strptime('%s %s' % (start_date, start_time), '%Y-%m-%d %H:%M:%S')
currentStepTime = baseTime + timedelta(hours=ModelTime)
dateAndTime = currentStepTime.strftime("%Y-%m-%d %H:%M:%S")
for elementNo in FLOOD_ELEMENT_NUMBERS:
tmpTS = waterLevelSeriesDict[elementNo][:]
if elementNo in waterLevels:
tmpTS.append([dateAndTime, waterLevels[elementNo]])
else:
tmpTS.append([dateAndTime, MISSING_VALUE])
waterLevelSeriesDict[elementNo] = tmpTS
isWaterLevelLines = False
# for l in waterLevelLines :
# print(l)
waterLevelLines = []
# Create files
print('WATER_LEVEL_DIR_PATH : ', WATER_LEVEL_DIR_PATH)
print('len(FLOOD_ELEMENT_NUMBERS) : ', len(FLOOD_ELEMENT_NUMBERS))
for elementNo in FLOOD_ELEMENT_NUMBERS:
fileName = WATER_LEVEL_FILE.rsplit('.', 1)
stationName = FLOOD_PLAIN_CELL_MAP[elementNo].replace(' ', '_')
fileTimestamp = "%s_%s" % (date, time.replace(':', '-'))
fileName = "%s-%s-%s.%s" % \
(fileName[0], FLOOD_PLAIN_CELL_MAP[elementNo].replace(' ', '_'), fileTimestamp, fileName[1])
WATER_LEVEL_FILE_PATH = pjoin(WATER_LEVEL_DIR_PATH, fileName)
# print('WATER_LEVEL_FILE_PATH : ',WATER_LEVEL_FILE_PATH)
csvWriter = csv.writer(open(WATER_LEVEL_FILE_PATH, 'w'), delimiter=',', quotechar='|')
csvWriter.writerows(waterLevelSeriesDict[elementNo])
# Save Forecast values into Database
opts = {
'forceInsert': forceInsert,
'station': FLOOD_PLAIN_CELL_MAP[elementNo],
'run_name': runName,
'source': FLO2D_MODEL
}
if utcOffset != timedelta():
opts['utcOffset'] = utcOffset
save_forecast_timeseries(adapter, waterLevelSeriesDict[elementNo], date, time, opts)
# print('Extracted Cell No', elementNo, FLOOD_PLAIN_CELL_MAP[elementNo], 'into -> ', fileName)
except Exception as e:
traceback.print_exc()
print(e)
# finally:
# print('Completed processing', HYCHAN_OUT_FILE_PATH, ' to ', WATER_LEVEL_FILE_PATH)
|
[
"[email protected]"
] | |
7d873ca9121b2c3bed3e471ae3fc74443792ccb3
|
0cc4eb3cb54f8394c127ace62d3108fdb5230c85
|
/.spack-env/view/lib/python3.7/unittest/test/test_functiontestcase.py
|
47dd2fab664747f2f99d9af248b81aae5f18cc19
|
[] |
no_license
|
jacobmerson/spack-develop-env
|
5b2d76f58c0b64ae97c64f77a3c4d33a770c71c8
|
5fca20ca343b1a76f05fc635c87f94ed25417d94
|
refs/heads/master
| 2022-07-04T02:22:50.264727 | 2020-05-06T05:13:50 | 2020-05-06T05:13:50 | 261,657,112 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 162 |
py
|
/lore/mersoj/spack/spack/opt/spack/linux-rhel7-x86_64/gcc-7.3.0/python-3.7.7-oihhthdoxtgh4krvzpputn5ozwcnq2by/lib/python3.7/unittest/test/test_functiontestcase.py
|
[
"[email protected]"
] | |
e59d923bae404186dae6b1d5495e1017b6bdcb90
|
f484afd10f622babaa0a38197cb7fa5f4da99630
|
/news/models.py
|
9414cc1ae1f13e9040a4cad961d53ab91f1e34a5
|
[] |
permissive
|
brayomumo/Django-mini-project
|
95bc68ddf41f47bace23a34f6bacbc60cb3ea895
|
47a9731e5f319c7225e1e1451b6fb5b20d7ff752
|
refs/heads/master
| 2021-10-30T10:51:42.807899 | 2021-10-24T19:05:14 | 2021-10-24T19:05:14 | 203,002,911 | 0 | 0 |
MIT
| 2021-09-08T01:13:45 | 2019-08-18T12:54:21 |
Python
|
UTF-8
|
Python
| false | false | 1,761 |
py
|
from django.db import models
import datetime as dt
from django.contrib.auth.models import User
from tinymce.models import HTMLField
# Create your models here.
class Editor(models.Model):
first_name = models.CharField(max_length = 30)
last_name = models.CharField(max_length = 30)
email = models.EmailField()
phone_number = models.CharField(max_length = 10, blank = True)
def __str__(self):
return self.first_name
def save_editor(self):
self.save()
class Meta:
ordering = ['first_name']
class Tags(models.Model):
name = models.CharField(max_length = 30)
def __str__(self):
return self.name
class Article(models.Model):
title = models.CharField(max_length = 60)
post = HTMLField(default="post")
editor = models.ForeignKey(User,on_delete=models.CASCADE)
tags = models.ManyToManyField(Tags)
pub_date = models.DateTimeField(auto_now_add=True)
article_image = models.ImageField(upload_to='articles/',default="")
def __str__(self):
return self.title
def save_article(self):
self.save()
# def delete_editor(self):
# self.delete()
def delete_Article(self):
self.delete()
@classmethod
def todays_news(cls):
today = dt.date.today()
news = cls.objects.filter(pub_date__date = today)
return news
@classmethod
def days_news(cls, date):
news = cls.objects.filter(pub_date__date = date)
return news
@classmethod
def search_by_title(cls,search_term):
news = cls.objects.filter(title__icontains=search_term)
return news
class NewsLetterRecepients(models.Model):
name = models.CharField(max_length = 30)
email = models.EmailField()
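# Usage sketch (illustrative, e.g. from a view or the Django shell):
#
#   from news.models import Article
#   Article.todays_news()                     # articles published today
#   Article.days_news(dt.date(2021, 10, 24))  # articles for a given date
#   Article.search_by_title("python")         # case-insensitive title search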
|
[
"[email protected]"
] | |
5f071a543f2c99f4a357cd5d63caef4e9a02a7fe
|
c1655d6c6c11dafc1c7fa9f771b8e1f99cf7f123
|
/venv/lib/python3.6/site-packages/pyomo/core/base/check.py
|
f09834fb6a0999a7f44f8d6080960c7c6864325d
|
[] |
no_license
|
igorsowa9/vpp
|
a27520f19a54d7490534016ded9cd66f4ef5385b
|
ea91e3b2db921e7b1a450d243f39dbcf61231107
|
refs/heads/master
| 2021-04-30T03:28:56.642244 | 2019-09-16T09:01:49 | 2019-09-16T09:01:49 | 121,514,524 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,735 |
py
|
# ___________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
# Under the terms of Contract DE-NA0003525 with National Technology and
# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
__all__ = ['BuildCheck']
import logging
import types
from pyomo.util.timing import ConstructionTimer
from pyomo.core.base.plugin import register_component
from pyomo.core.base.indexed_component import IndexedComponent
from pyomo.core.base.misc import apply_indexed_rule
logger = logging.getLogger('pyomo.core')
class BuildCheck(IndexedComponent):
"""
A build check, which executes a rule for all valid indices. If
the function returns False an exception is raised.
Constructor arguments:
        rule        The rule that is executed for every index.
Private class attributes:
        _rule       The rule that is executed for every index.
"""
def __init__(self, *args, **kwd):
self._rule = kwd.pop('rule', None)
kwd['ctype'] = BuildCheck
IndexedComponent.__init__(self, *args, **kwd)
#
if not type(self._rule) is types.FunctionType:
raise ValueError("BuildCheck must have an 'rule' option specified whose value is a function")
def _pprint(self):
return ([], None, None, None)
def construct(self, data=None):
""" Apply the rule to construct values in this set """
if __debug__ and logger.isEnabledFor(logging.DEBUG): #pragma:nocover
logger.debug("Constructing Check, name="+self.name)
#
if self._constructed: #pragma:nocover
return
timer = ConstructionTimer(self)
self._constructed=True
#
if not self.is_indexed():
# Scalar component
res = self._rule(self._parent())
if not res:
raise ValueError("BuildCheck %r identified error" % self.name)
else:
# Indexed component
for index in self._index:
res = apply_indexed_rule(self, self._rule, self._parent(), index)
if not res:
raise ValueError("BuildCheck %r identified error with index %r" % (self.name, str(index)))
timer.report()
register_component(BuildCheck, "A component that performs tests during model construction. The action rule is applied to every index value.")
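# Usage sketch (illustrative): a BuildCheck attached to a model runs its rule at
# construction time and raises ValueError if the rule returns False.
#
#   from pyomo.environ import ConcreteModel, Param, BuildCheck, value
#   m = ConcreteModel()
#   m.p = Param(initialize=3)
#   m.check_p = BuildCheck(rule=lambda m: value(m.p) > 0)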
|
[
"[email protected]"
] | |
199d2bf06aabfed4e29852c18c294ea926cd1786
|
78d9827b9a7d8789c2d266adebe3a05fc1ed474b
|
/read_file.py
|
1a696994b66117ceeaf8fb60429cd1f722f9479c
|
[
"MIT"
] |
permissive
|
t4d-classes/python_03012021
|
8a62331d3b742097de56d334e9de9a8f82f5ed9e
|
48b970d58e2e7b08f619be21154e8ec199155a50
|
refs/heads/master
| 2023-03-19T06:42:52.554737 | 2021-03-04T22:08:11 | 2021-03-04T22:08:11 | 341,255,879 | 0 | 1 | null | 2021-03-03T16:36:38 | 2021-02-22T16:01:51 |
Python
|
UTF-8
|
Python
| false | false | 575 |
py
|
# with open("colors.txt", "r") as colors_file:
# colors_file.seek(30)
# colors_data = colors_file.read(30)
# print(colors_data)
# start_line = 3
# stop_line = 6
# with open("colors.txt", "r") as colors_file:
# lines = []
# for line_count, color in enumerate(colors_file):
# if line_count >= start_line and line_count < stop_line:
# lines.append(color.strip())
# if line_count >= stop_line:
# break
# print(lines)
with open("colors.txt", "r") as colors_file:
lines = colors_file.readlines()
print(lines)
|
[
"[email protected]"
] | |
19dd6237396dfe491b5f242c1848a0b5e0599d84
|
c3b3eb44d1f4c6a17f095d46ba12adc7eb535e22
|
/src/cube2stress/protocol/client_write_helper.py
|
260d37fb7f88443f26afa75af28cdb77045d27af
|
[] |
no_license
|
fdChasm/cube2stress
|
52512c3eb7664612ea8b462838c9924f83c9cb4d
|
38b74493716595482708e931a3fbe487b930249b
|
refs/heads/master
| 2020-03-30T06:47:19.134982 | 2013-10-12T07:36:12 | 2013-10-13T16:56:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,206 |
py
|
from cube2common.constants import message_types, DMF, DVELF, empty_material_types, material_types
from cube2common.ivec import ivec
from cube2common.vec import vec
from cube2common.utils.clamp import clamp
from cube2common.utils.vectoyawpitch import vectoyawpitch
def lookupmaterial(feetpos):
#I don't want to implement this right now so just pretend we're in the air
return empty_material_types.MAT_AIR
class cwh(object):
@staticmethod
def put_connect(cds, name, playermodel, pwdhash, authdomain, authname):
cds.putint(message_types.N_CONNECT)
cds.putstring(name)
cds.putint(playermodel)
cds.putstring(pwdhash)
cds.putstring(authdomain)
cds.putstring(authname)
@staticmethod
def put_spawn(cds, lifesequence, gunselect):
cds.putint(message_types.N_SPAWN)
cds.putint(lifesequence)
cds.putint(gunselect)
@staticmethod
def put_pos(cds, cn, physics_state):
d = physics_state
cds.putint(message_types.N_POS)
cds.putuint(cn)
# 3 bits phys state, 1 bit life sequence, 2 bits move, 2 bits strafe
physstate = d.physstate | ((d.lifesequence & 1) << 3) | ((d.move & 3) << 4) | ((d.strafe & 3) << 6)
cds.putbyte(physstate)
o = ivec(vec(d.o.x, d.o.y, d.o.z - d.eyeheight))
vel = min(int(d.vel.magnitude() * DVELF), 0xFFFF)
fall = min(int(d.falling.magnitude() * DVELF), 0xFFFF)
# 3 bits position, 1 bit velocity, 3 bits falling, 1 bit material
flags = 0;
if (o.x < 0 or o.x > 0xFFFF): flags |= 1 << 0
if (o.y < 0 or o.y > 0xFFFF): flags |= 1 << 1
if (o.z < 0 or o.z > 0xFFFF): flags |= 1 << 2
if (vel > 0xFF): flags |= 1 << 3
if fall > 0:
flags |= 1 << 4
if fall > 0xFF:
flags |= 1 << 5
if d.falling.x or d.falling.y or d.falling.z > 0:
flags |= 1 << 6
if lookupmaterial(d.feetpos()) & material_types.MATF_CLIP == empty_material_types.MAT_GAMECLIP:
flags |= 1 << 7
cds.putuint(flags)
for k in xrange(3):
cds.putbyte(o[k] & 0xFF)
cds.putbyte((o[k] >> 8) & 0xFF)
if o[k] < 0 or o[k] > 0xFFFF:
cds.putbyte((o[k] >> 16) & 0xFF)
if d.yaw < 0:
dir = 360 + int(d.yaw) % 360
else:
dir = int(d.yaw) % 360
dir += clamp(int(d.pitch + 90), 0, 180) * 360
cds.putbyte(dir & 0xFF)
cds.putbyte((dir >> 8) & 0xFF)
cds.putbyte(clamp(int(d.roll + 90), 0, 180))
cds.putbyte(vel & 0xFF)
if vel > 0xFF:
cds.putbyte((vel >> 8) & 0xFF)
velyaw, velpitch = vectoyawpitch(d.vel)
if velyaw < 0:
veldir = 360 + int(velyaw) % 360
else:
veldir = int(velyaw) % 360
veldir += clamp(int(velpitch + 90), 0, 180) * 360
cds.putbyte(veldir & 0xFF)
cds.putbyte((veldir >> 8) & 0xFF)
if fall > 0:
cds.putbyte(fall & 0xFF)
if fall > 0xFF:
cds.putbyte((fall >> 8) & 0xFF)
if d.falling.x or d.falling.y or d.falling.z > 0:
fallyaw, fallpitch = vectoyawpitch(d.falling)
if fallyaw < 0:
falldir = 360 + int(fallyaw) % 360
else:
falldir = int(fallyaw) % 360
falldir += clamp(int(fallpitch + 90), 0, 180) * 360
cds.putbyte(falldir & 0xFF)
cds.putbyte((falldir >> 8) & 0xFF)
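    # Packing sketch (illustrative numbers): yaw and pitch share one little-endian
    # 16-bit field, dir = (yaw mod 360) + clamp(pitch + 90, 0, 180) * 360.
    # E.g. yaw=45, pitch=0 gives dir = 45 + 90 * 360 = 32445, sent as bytes
    # 0xBD, 0x7E; a receiver recovers yaw = dir % 360 and pitch = dir // 360 - 90.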
@staticmethod
def put_clientdata(data_stream, client, data):
data_stream.putint(message_types.N_CLIENT)
data_stream.putint(client.cn)
data_stream.putuint(len(data))
data_stream.write(data)
@staticmethod
def put_text(cds, text):
cds.putint(message_types.N_TEXT)
cds.putstring(text)
@staticmethod
def put_switchname(cds, name):
cds.putint(message_types.N_SWITCHNAME)
cds.putstring(name)
@staticmethod
def put_tryspawn(cds):
cds.putint(message_types.N_TRYSPAWN)
|
[
"[email protected]"
] | |
37ca6e320e30a0faf3a9f92bfbe821484272da12
|
3cf1cb73a60e0fcf9be8b70ec9f51ef4325b14c5
|
/tests/providers/aws/services/cloudtrail/cloudtrail_multi_region_enabled/cloudtrail_multi_region_enabled_test.py
|
a05524c3284113adec7d519852ceaf5993b8e574
|
[
"Apache-2.0"
] |
permissive
|
gabrielsoltz/prowler
|
ab791d754fe6cd4939ead9bdcc3e109ccde95278
|
3c2096db68d171ca9f0313ffa48a6fa61d87f6f0
|
refs/heads/master
| 2023-05-24T23:07:20.771523 | 2023-04-25T09:59:30 | 2023-04-25T09:59:30 | 188,259,272 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,693 |
py
|
from re import search
from unittest import mock
from boto3 import client, session
from moto import mock_cloudtrail, mock_s3
from prowler.providers.aws.lib.audit_info.models import AWS_Audit_Info
from prowler.providers.aws.services.cloudtrail.cloudtrail_service import Trail
AWS_ACCOUNT_NUMBER = "123456789012"
class Test_cloudtrail_multi_region_enabled:
def set_mocked_audit_info(self):
audit_info = AWS_Audit_Info(
session_config=None,
original_session=None,
audit_session=session.Session(
profile_name=None,
botocore_session=None,
),
audited_account=AWS_ACCOUNT_NUMBER,
audited_user_id=None,
audited_partition="aws",
audited_identity_arn=None,
profile=None,
profile_region=None,
credentials=None,
assumed_role_info=None,
audited_regions=["us-east-1", "eu-west-1"],
organizations_metadata=None,
audit_resources=None,
)
return audit_info
@mock_cloudtrail
def test_no_trails(self):
from prowler.providers.aws.services.cloudtrail.cloudtrail_service import (
Cloudtrail,
)
current_audit_info = self.set_mocked_audit_info()
with mock.patch(
"prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
new=current_audit_info,
):
with mock.patch(
"prowler.providers.aws.services.cloudtrail.cloudtrail_multi_region_enabled.cloudtrail_multi_region_enabled.cloudtrail_client",
new=Cloudtrail(current_audit_info),
):
# Test Check
from prowler.providers.aws.services.cloudtrail.cloudtrail_multi_region_enabled.cloudtrail_multi_region_enabled import (
cloudtrail_multi_region_enabled,
)
check = cloudtrail_multi_region_enabled()
result = check.execute()
assert len(result) == len(current_audit_info.audited_regions)
for report in result:
assert report.status == "FAIL"
assert search(
"No CloudTrail trails enabled and logging were found",
report.status_extended,
)
assert report.resource_id == "No trails"
assert report.resource_arn == "No trails"
@mock_cloudtrail
@mock_s3
def test_various_trails_no_login(self):
cloudtrail_client_us_east_1 = client("cloudtrail", region_name="us-east-1")
s3_client_us_east_1 = client("s3", region_name="us-east-1")
cloudtrail_client_eu_west_1 = client("cloudtrail", region_name="eu-west-1")
s3_client_eu_west_1 = client("s3", region_name="eu-west-1")
trail_name_us = "trail_test_us"
bucket_name_us = "bucket_test_us"
trail_name_eu = "trail_test_eu"
bucket_name_eu = "bucket_test_eu"
s3_client_us_east_1.create_bucket(Bucket=bucket_name_us)
s3_client_eu_west_1.create_bucket(
Bucket=bucket_name_eu,
CreateBucketConfiguration={"LocationConstraint": "eu-west-1"},
)
_ = cloudtrail_client_us_east_1.create_trail(
Name=trail_name_us, S3BucketName=bucket_name_us, IsMultiRegionTrail=False
)
_ = cloudtrail_client_eu_west_1.create_trail(
Name=trail_name_eu, S3BucketName=bucket_name_eu, IsMultiRegionTrail=False
)
from prowler.providers.aws.services.cloudtrail.cloudtrail_service import (
Cloudtrail,
)
current_audit_info = self.set_mocked_audit_info()
with mock.patch(
"prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
new=current_audit_info,
):
with mock.patch(
"prowler.providers.aws.services.cloudtrail.cloudtrail_multi_region_enabled.cloudtrail_multi_region_enabled.cloudtrail_client",
new=Cloudtrail(current_audit_info),
):
# Test Check
from prowler.providers.aws.services.cloudtrail.cloudtrail_multi_region_enabled.cloudtrail_multi_region_enabled import (
cloudtrail_multi_region_enabled,
)
check = cloudtrail_multi_region_enabled()
result = check.execute()
assert len(result) == len(current_audit_info.audited_regions)
for report in result:
assert report.status == "FAIL"
assert search(
"No CloudTrail trails enabled and logging were found",
report.status_extended,
)
assert report.resource_id == "No trails"
assert report.resource_arn == "No trails"
@mock_cloudtrail
@mock_s3
def test_various_trails_with_and_without_login(self):
cloudtrail_client_us_east_1 = client("cloudtrail", region_name="us-east-1")
s3_client_us_east_1 = client("s3", region_name="us-east-1")
cloudtrail_client_eu_west_1 = client("cloudtrail", region_name="eu-west-1")
s3_client_eu_west_1 = client("s3", region_name="eu-west-1")
trail_name_us = "trail_test_us"
bucket_name_us = "bucket_test_us"
trail_name_eu = "trail_test_eu"
bucket_name_eu = "bucket_test_eu"
s3_client_us_east_1.create_bucket(Bucket=bucket_name_us)
s3_client_eu_west_1.create_bucket(
Bucket=bucket_name_eu,
CreateBucketConfiguration={"LocationConstraint": "eu-west-1"},
)
trail_us = cloudtrail_client_us_east_1.create_trail(
Name=trail_name_us, S3BucketName=bucket_name_us, IsMultiRegionTrail=False
)
cloudtrail_client_eu_west_1.create_trail(
Name=trail_name_eu, S3BucketName=bucket_name_eu, IsMultiRegionTrail=False
)
_ = cloudtrail_client_us_east_1.start_logging(Name=trail_name_us)
_ = cloudtrail_client_us_east_1.get_trail_status(Name=trail_name_us)
from prowler.providers.aws.services.cloudtrail.cloudtrail_service import (
Cloudtrail,
)
current_audit_info = self.set_mocked_audit_info()
with mock.patch(
"prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
new=current_audit_info,
):
with mock.patch(
"prowler.providers.aws.services.cloudtrail.cloudtrail_multi_region_enabled.cloudtrail_multi_region_enabled.cloudtrail_client",
new=Cloudtrail(current_audit_info),
):
# Test Check
from prowler.providers.aws.services.cloudtrail.cloudtrail_multi_region_enabled.cloudtrail_multi_region_enabled import (
cloudtrail_multi_region_enabled,
)
check = cloudtrail_multi_region_enabled()
result = check.execute()
assert len(result) == len(current_audit_info.audited_regions)
for report in result:
if report.resource_id == trail_name_us:
assert report.status == "PASS"
assert search(
"is not multiregion and it is logging",
report.status_extended,
)
assert report.resource_id == trail_name_us
assert report.resource_arn == trail_us["TrailARN"]
else:
assert report.status == "FAIL"
assert search(
"No CloudTrail trails enabled and logging were found",
report.status_extended,
)
assert report.resource_id == "No trails"
assert report.resource_arn == "No trails"
@mock_cloudtrail
@mock_s3
def test_trail_multiregion_logging_and_single_region_not_login(self):
cloudtrail_client_us_east_1 = client("cloudtrail", region_name="us-east-1")
s3_client_us_east_1 = client("s3", region_name="us-east-1")
cloudtrail_client_eu_west_1 = client("cloudtrail", region_name="eu-west-1")
s3_client_eu_west_1 = client("s3", region_name="eu-west-1")
trail_name_us = "trail_test_us"
bucket_name_us = "bucket_test_us"
trail_name_eu = "aaaaa"
bucket_name_eu = "bucket_test_eu"
s3_client_us_east_1.create_bucket(Bucket=bucket_name_us)
s3_client_eu_west_1.create_bucket(
Bucket=bucket_name_eu,
CreateBucketConfiguration={"LocationConstraint": "eu-west-1"},
)
trail_us = cloudtrail_client_us_east_1.create_trail(
Name=trail_name_us, S3BucketName=bucket_name_us, IsMultiRegionTrail=True
)
cloudtrail_client_eu_west_1.create_trail(
Name=trail_name_eu, S3BucketName=bucket_name_eu, IsMultiRegionTrail=False
)
_ = cloudtrail_client_us_east_1.start_logging(Name=trail_name_us)
_ = cloudtrail_client_us_east_1.get_trail_status(Name=trail_name_us)
from prowler.providers.aws.services.cloudtrail.cloudtrail_service import (
Cloudtrail,
)
current_audit_info = self.set_mocked_audit_info()
with mock.patch(
"prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
new=current_audit_info,
):
with mock.patch(
"prowler.providers.aws.services.cloudtrail.cloudtrail_multi_region_enabled.cloudtrail_multi_region_enabled.cloudtrail_client",
new=Cloudtrail(current_audit_info),
) as cloudtrail_client:
# Test Check
from prowler.providers.aws.services.cloudtrail.cloudtrail_multi_region_enabled.cloudtrail_multi_region_enabled import (
cloudtrail_multi_region_enabled,
)
##############################################################################################################
# Only until moto issue is solved (Right now is not getting shadow us-east-1 trail status in eu-west-1 region)
cloudtrail_client.trails = [
Trail(
name=trail_name_us,
is_multiregion=True,
home_region="us-east-1",
arn=trail_us["TrailARN"],
region="us-east-1",
is_logging=True,
),
Trail(
name=trail_name_eu,
is_multiregion=False,
home_region="eu-west-1",
arn="",
region="eu-west-1",
is_logging=False,
),
Trail(
name=trail_name_us,
is_multiregion=True,
home_region="us-east-1",
arn=trail_us["TrailARN"],
region="eu-west-1",
is_logging=True,
),
]
##############################################################################################################
check = cloudtrail_multi_region_enabled()
result = check.execute()
assert len(result) == len(current_audit_info.audited_regions)
for report in result:
if report.region == "us-east-1":
assert report.status == "PASS"
assert search(
f"Trail {trail_name_us} is multiregion and it is logging",
report.status_extended,
)
assert report.resource_id == trail_name_us
assert report.resource_arn == trail_us["TrailARN"]
elif report.region == "eu-west-1":
assert report.status == "PASS"
assert search(
f"Trail {trail_name_us} is multiregion and it is logging",
report.status_extended,
)
assert report.resource_id == trail_name_us
assert report.resource_arn == trail_us["TrailARN"]
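# Run sketch (illustrative): from the repository root,
#   python -m pytest tests/providers/aws/services/cloudtrail/cloudtrail_multi_region_enabled/cloudtrail_multi_region_enabled_test.py
# The moto decorators mock the AWS APIs, so no real AWS credentials are required.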
|
[
"[email protected]"
] | |
aabdc28245a847b6a4e05ae366760caa283b0ed9
|
38bd99c72ca2521489ce1eb02b7604095b02b585
|
/src/2211-CountCollisionsOnARoad.py
|
23452618552f63035a2aab03ebe988c3d8207fa7
|
[
"MIT"
] |
permissive
|
Jiezhi/myleetcode
|
eadbd7d9f1f0ea6a0ee15c2da9040dcfbd28b522
|
4dd1e54d8d08f7e6590bc76abd08ecaacaf775e5
|
refs/heads/master
| 2023-03-16T15:52:21.833622 | 2023-03-09T14:33:03 | 2023-03-09T14:33:03 | 139,965,948 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,328 |
py
|
#!/usr/bin/env python
"""
CREATED AT: 2022/3/20
Des:
https://leetcode.com/problems/count-collisions-on-a-road/
GITHUB: https://github.com/Jiezhi/myleetcode
Difficulty: Medium
Tag: Weekly Contest 285
See:
"""
class Solution:
def countCollisions2(self, s: str) -> int:
"""
See https://leetcode-cn.com/problems/count-collisions-on-a-road/solution/zui-duan-dai-ma-bu-jie-shi-by-freeyourmi-6o0r/
:param s:
:return:
"""
return len(s.lstrip('L').rstrip('R')) - s.count('S')
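    # Why the one-liner works: leading 'L' cars drive off to the left and trailing
    # 'R' cars drive off to the right, so they never collide. Every moving car left
    # in the trimmed middle ends up in exactly one collision, while 'S' cars add
    # nothing on their own, hence collisions = len(trimmed) - count('S').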
def countCollisions(self, directions: str) -> int:
"""
1 <= directions.length <= 10^5
directions[i] is either 'L', 'R', or 'S'.
"""
if not directions:
return 0
start = 0
while start < len(directions) and directions[start] == 'L':
start += 1
end = len(directions) - 1
while end > 0 and directions[end] == 'R':
end -= 1
if start >= end:
return 0
ds = directions[start: end + 1]
if len(ds) == 1:
return 0
pre = ds[0]
ret = 0
i = 1
while i < len(ds):
d = ds[i]
if pre == 'S':
if d == 'L':
ret += 1
elif d == 'R':
pre = 'R'
elif pre == 'R':
if d == 'S':
ret += 1
pre = 'S'
elif d == 'L':
ret += 2
pre = 'S'
elif d == 'R':
pos = 1
while i + pos < len(ds) and ds[i + pos] == 'R':
pos += 1
if ds[i + pos] == 'L':
ret += pos + 2
i += pos + 1
pre = 'S'
continue
elif ds[i + pos] == 'S':
ret += pos + 1
i += pos + 1
pre = 'S'
continue
i += 1
return ret
def test():
assert Solution().countCollisions("RLRSLL") == 5
assert Solution().countCollisions2("RLRSLL") == 5
assert Solution().countCollisions("LLRR") == 0
if __name__ == '__main__':
test()
|
[
"[email protected]"
] | |
76da260cb85dac15d43e6462305ec7633cbaf61f
|
f37d4992d322e703b31c42dbc2a5b5d52cedb80a
|
/Ship-detector/ship_model.py
|
36e04955e6a08af51a2db9ec297f25c493269a76
|
[] |
no_license
|
Jimut123/SATELLITE
|
a9eff4ebcf2890576a857fcb1c5b086e43831c75
|
97d948b823b701831b3e6cf78f0b23274046db9f
|
refs/heads/master
| 2023-04-06T17:47:23.038610 | 2021-04-24T14:37:55 | 2021-04-24T14:37:55 | 281,049,955 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,190 |
py
|
import json, sys, random
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Flatten, Activation
from keras.layers import Dropout
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.utils.np_utils import to_categorical
from keras.utils import np_utils
from keras.optimizers import SGD, Nadam
import keras.callbacks
from PIL import Image, ImageDraw
from matplotlib import pyplot as plt
# download dataset from json object
f = open(r'shipsnet.json')
dataset = json.load(f)
f.close()
input_data = np.array(dataset['data']).astype('uint8')
output_data = np.array(dataset['labels']).astype('uint8')
n_spectrum = 3  # color channels (RGB)
width = 80
height = 80
X = input_data.reshape([-1, n_spectrum, width, height])
X[0].shape
# get one chanel
pic = X[0]
red_spectrum = pic[0]
green_spectrum = pic[1]
blue_spectrum = pic[2]
# output encoding
y = to_categorical(output_data, 2)
# shuffle all indexes
indexes = np.arange(2800)
np.random.shuffle(indexes)
X_train = X[indexes].transpose([0,2,3,1])
y_train = y[indexes]
# normalization
X_train = X_train / 255
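# Shape bookkeeping: X keeps the 2800 samples channels-first as (2800, 3, 80, 80);
# the transpose above gives X_train the channels-last layout (2800, 80, 80, 3) that
# the Conv2D stack below expects, and dividing by 255 rescales pixels to [0, 1].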
np.random.seed(42)
from tensorflow import keras  # rebind keras to tf.keras so keras.Input/keras.Model match the tf.keras layers below
from tensorflow.keras import layers
from tensorflow.keras import initializers
he_initializer = initializers.HeNormal()
inputs = keras.Input(shape=(80, 80, 3), name="img")
x = layers.Conv2D(32, (3, 3), padding='same',activation='relu',kernel_initializer=he_initializer,
bias_initializer="zeros")(inputs)
x = layers.MaxPooling2D(pool_size=(2, 2))(x) #40x40
x = layers.Dropout(0.25)(x)
x = layers.Conv2D(32, (3, 3), padding='same',activation='relu',kernel_initializer=he_initializer,
bias_initializer="zeros")(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x) #20x20
x = layers.Dropout(0.25)(x)
x = layers.Conv2D(64, (3, 3), padding='same',activation='relu',kernel_initializer=he_initializer,
bias_initializer="zeros")(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x) #10x10
x = layers.Dropout(0.25)(x)
x = layers.Conv2D(64, (3, 3), padding='same',activation='relu',kernel_initializer=he_initializer,
bias_initializer="zeros")(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x) #5x5
x = layers.Dropout(0.25)(x)
x = layers.Conv2D(128, (3, 3), padding='same',activation='relu',kernel_initializer=he_initializer,
bias_initializer="zeros")(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x) #5x5
x = layers.Dropout(0.25)(x)
x = layers.Flatten()(x)
x = layers.Dense(512, activation='relu')(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(2, activation='softmax')(x)
model = keras.Model(inputs, outputs, name="My_model")
from tensorflow.keras.utils import plot_model as pm #plotting the model structure
pm(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True,dpi=60)
# augmentation
# example of horizontal shift image augmentation
from numpy import expand_dims
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import ImageDataGenerator
# aug = ImageDataGenerator(
# featurewise_center=True,
# samplewise_center=True,
# featurewise_std_normalization=True,
# samplewise_std_normalization=True,
# #zca_whitening=True,
# #zca_epsilon=1e-06,
# rotation_range=360,
# width_shift_range=0.25,
# height_shift_range=0.25,
# brightness_range=(150,255),
# shear_range=0.45,
# zoom_range=0.35,
# #channel_shift_range=0.35,
# fill_mode="nearest",
# #cval=0.0,
# horizontal_flip=True,
# vertical_flip=True,
# rescale=0.35,
# #preprocessing_function=None,
# #data_format=None,
# validation_split=0.35,
# )
aug = ImageDataGenerator(
rotation_range=360,
#zoom_range=0.2,
width_shift_range=0.10,
height_shift_range=0.10,
#brightness_range=[0.7,1.0],
shear_range=0.10,
horizontal_flip=True,
vertical_flip=True,
fill_mode="nearest")
from tensorflow.keras.callbacks import ModelCheckpoint
from datetime import datetime
# for storing logs into tensorboard
logdir="logs/fit/" + datetime.now().strftime("%Y%m%d-%H%M%S")
callbacks = [
ModelCheckpoint("./model_checkpoint", monitor='val_loss'),
keras.callbacks.TensorBoard(log_dir=logdir)
]
# optimization setup
# sgd = SGD(lr=0.01, momentum=0.9, nesterov=True)
nadam = Nadam(
learning_rate=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-07#, name="Nadam"#, **kwargs
)
model.compile(
loss='categorical_crossentropy',
optimizer=nadam, #sgd,
metrics=['accuracy'])
# # training
# history = model.fit(
# X_train,
# y_train,
# batch_size=32,
# callbacks=callbacks,
# epochs=18,
# #steps_per_epoch=len(X_train) // 32,
# validation_split=0.2,
# shuffle=True,
# verbose=1)
history = model.fit(
x=aug.flow(X_train, y_train, batch_size=64),
validation_data=(X_train, y_train),
steps_per_epoch=len(X_train) // 64,
callbacks=callbacks,
epochs=1000,
verbose=1)
model.save('satseg1000e_nadam.h5')
from tensorflow.keras.models import load_model
model = load_model('satseg1000e_nadam.h5')  # reload the saved model to confirm the file round-trips
with open('history.json', 'w') as f:
json.dump(history.history, f)
with open('history.json') as f:
d = json.load(f)
#print(d)
|
[
"[email protected]"
] | |
b95eda2bd4c2c59b60b57777ab4b0838c5f4d8ea
|
52b89b3a75fe9f9f7bdc86398fd78c77e1fc6a4b
|
/engine/trainer.py
|
d22b75b9fd6154f930ab8616c6b34d4ecc8a3f1b
|
[] |
no_license
|
boxiangliu/cpi_contact
|
dada4f7ecb664eb9480261bd0d1ae65e048dbbd1
|
930c40de9c3c3f43c6e55a637b3eca5532d371e5
|
refs/heads/main
| 2023-07-18T09:20:23.661900 | 2021-09-08T18:36:35 | 2021-09-08T18:36:35 | 360,314,616 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 18,792 |
py
|
import yaml
from easydict import EasyDict as edict
import pickle
import numpy as np
import os
import sys
import torch
from torch.autograd import Variable
import torch.nn as nn
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../")
sys.path.append(os.getcwd() + "/model/")
from model import MSANet, MONN
import math
from torch.utils.data import Dataset, DataLoader
import time
from tensorboardX import SummaryWriter
from torch import optim
from data.dataset import CPIDataset, collate_cpi
import logging
from sklearn.metrics import mean_squared_error, precision_score, roc_auc_score
from scipy import stats
elem_list = ['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na', 'Ca', 'Fe', 'As', 'Al', 'I', 'B', 'V', 'K', 'Tl', 'Yb', 'Sb', 'Sn', 'Ag', 'Pd', 'Co', 'Se', 'Ti', 'Zn', 'H', 'Li', 'Ge', 'Cu', 'Au', 'Ni', 'Cd', 'In', 'Mn', 'Zr', 'Cr', 'Pt', 'Hg', 'Pb', 'W', 'Ru', 'Nb', 'Re', 'Te', 'Rh', 'Tc', 'Ba', 'Bi', 'Hf', 'Mo', 'U', 'Sm', 'Os', 'Ir', 'Ce','Gd','Ga','Cs', 'unknown']
atom_fdim = len(elem_list) + 6 + 6 + 6 + 1
bond_fdim = 6
class Trainer(object):
def __init__(self, args, cfg, train_data, valid_data, test_data):
self.args = args
self.cfg = cfg
self.train_data = train_data
self.valid_data = valid_data
self.test_data = test_data
self.setup()
self.init_model()
self.init_data()
self.init_log()
def setup(self):
self.device = torch.device("cuda")
self.summary_writer = SummaryWriter(self.args.save_path)
if self.args.logtofile is True:
logging.basicConfig(level=logging.INFO,
handlers=[
logging.FileHandler(self.args.save_path + "/log.txt"),
logging.StreamHandler()])
else:
logging.basicConfig(level=logging.INFO)
def init_model(self):
train_cfg = self.cfg.TRAIN
blosum_dict = load_blosum62(train_cfg.BLOSUM62)
init_A, init_B, init_W = loading_emb(
processed_dir=train_cfg.PROCESSED,
measure=train_cfg.MEASURE,
blosum_dict=blosum_dict)
init_A = init_A.to(self.device)
init_B = init_B.to(self.device)
init_W = init_W.to(self.device)
params = [train_cfg.GNN_DEPTH,
train_cfg.CNN_DEPTH,
train_cfg.DMA_DEPTH,
train_cfg.K_HEAD,
train_cfg.KERNEL_SIZE,
train_cfg.HIDDEN_SIZE_1,
train_cfg.HIDDEN_SIZE_2,
train_cfg.HIDDEN_SIZE_3]
if self.cfg.MODEL == "MSANet":
self.net = MSANet(init_A, init_B, init_W, params).to(self.device)
elif self.cfg.MODEL == "MONN":
self.net = MONN(init_A, init_B, init_W, params).to(self.device)
else:
raise ValueError("{} has not been implemented.".format(self.cfg.MODEL))
self.net.apply(weights_init)
total_params = sum(p.numel() for p in self.net.parameters() if p.requires_grad)
sys.stderr.write("Total parameters: {}\n".format(total_params))
self.criterion1 = nn.MSELoss()
self.criterion2 = Masked_BCELoss()
self.optimizer = optim.Adam(filter(lambda p: p.requires_grad, self.net.parameters()),
lr=self.cfg.SOLVER.LR,
weight_decay=self.cfg.SOLVER.WEIGHT_DECAY,
amsgrad=True)
self.scheduler = optim.lr_scheduler.StepLR(self.optimizer,
step_size=self.cfg.SOLVER.LR_STEP_SIZE,
gamma=self.cfg.SOLVER.LR_GAMMA)
def init_data(self):
self.train_dataset = CPIDataset(self.train_data)
self.train_loader = DataLoader(self.train_dataset, batch_size=self.cfg.TRAIN.BATCH_SIZE, collate_fn=collate_cpi)
self.train_iter = iter(self.train_loader)
self.valid_dataset = CPIDataset(self.valid_data)
self.valid_loader = DataLoader(self.valid_dataset, batch_size=self.cfg.TRAIN.BATCH_SIZE, collate_fn=collate_cpi)
def init_log(self):
self.summary = {
"step": 0,
"log_step": 0,
"epoch": 1,
"loss_sum": 0.0,
"loss_pairwise_sum": 0.0,
"loss_aff_sum": 0.0,
"loss_dev": 0.0,
"loss_pairwise_dev": 0.0,
"loss_aff_dev": 0.0,
"rmse_dev": 0.0,
"pearson_dev": 0.0,
"spearman_dev": 0.0,
"average_pairwise_auc": 0.0,
"loss_dev_best": float("inf"),
"loss_pairwise_dev_best": float("inf"),
"loss_aff_dev_best": float("inf")
}
self.time_stamp = time.time()
def reset_log(self):
self.summary["log_step"] = 0
self.summary["loss_sum"] = 0.0
self.summary["loss_pairwise_sum"] = 0.0
self.summary["loss_aff_sum"] = 0.0
def logging(self, mode="Train"):
time_elapsed = time.time() - self.time_stamp
self.time_stamp = time.time()
if mode == "Train":
log_step = self.summary["log_step"]
loss = self.summary["loss_sum"] / log_step
loss_pairwise = self.summary["loss_pairwise_sum"] / log_step
loss_aff = self.summary["loss_aff_sum"] / log_step
logging.info(
"{}, Train, Epoch: {}, Step: {}, Loss: {:.3f}, "
"PairLoss: {:.3f}, AffLoss: {:.3f}, Runtime: {:.2f} s"
.format(time.strftime("%Y-%m-%d %H:%M:%S"),
self.summary["epoch"], self.summary["step"],
loss, loss_pairwise, loss_aff, time_elapsed))
elif mode == "Dev":
logging.info(
"{}, Dev, Epoch: {}, Step: {}, Loss: {:.3f}, "
"PairLoss: {:.3f}, AffLoss: {:.3f}, RMSE: {:.3f}, "
"Pearson: {:3f}, Spearman: {:3f}, AUC: {:.3f}, Runtime: {:.2f} s"
.format(time.strftime("%Y-%m-%d %H:%M:%S"),
self.summary["epoch"], self.summary["step"],
self.summary["loss_dev"], self.summary["loss_pairwise_dev"],
self.summary["loss_aff_dev"], self.summary["rmse_dev"],
self.summary["pearson_dev"], self.summary["spearman_dev"],
self.summary["average_pairwise_auc"], time_elapsed))
def train_step(self):
try:
(vertex_mask, vertex, edge, atom_adj,
bond_adj, nbs_mask, seq_mask, sequence,
msa_feature, affinity_label, pairwise_mask,
pairwise_label) = next(self.train_iter)
except StopIteration:
self.summary['epoch'] += 1
self.train_iter = iter(self.train_loader)
(vertex_mask, vertex, edge, atom_adj,
bond_adj, nbs_mask, seq_mask, sequence,
msa_feature, affinity_label, pairwise_mask,
pairwise_label) = next(self.train_iter)
vertex_mask = vertex_mask.to(self.device)
vertex = vertex.to(self.device)
edge = edge.to(self.device)
atom_adj = atom_adj.to(self.device)
bond_adj = bond_adj.to(self.device)
nbs_mask = nbs_mask.to(self.device)
seq_mask = seq_mask.to(self.device)
sequence = sequence.to(self.device)
msa_feature = msa_feature.to(self.device)
affinity_label = affinity_label.to(self.device)
pairwise_mask = pairwise_mask.to(self.device)
pairwise_label = pairwise_label.to(self.device)
affinity_pred, pairwise_pred = self.net(
vertex_mask, vertex, edge,
atom_adj, bond_adj, nbs_mask,
seq_mask, sequence, msa_feature)
loss_aff = self.criterion1(affinity_pred, affinity_label)
loss_pairwise = self.criterion2(pairwise_pred, pairwise_label, pairwise_mask, vertex_mask, seq_mask)
loss = loss_aff + 0.1*loss_pairwise
self.optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(self.net.parameters(), 5)
self.optimizer.step()
self.summary["loss_sum"] += loss_aff.item()
self.summary["loss_pairwise_sum"] += loss_pairwise.item()
self.summary["loss_aff_sum"] += loss_aff.item()
self.summary["step"] += 1
self.summary["log_step"] += 1
def dev_epoch(self):
self.time_stamp = time.time()
torch.set_grad_enabled(False)
self.net.eval()
steps = len(self.valid_loader)
valid_iter = iter(self.valid_loader)
loss_sum = 0.0
loss_pairwise_sum = 0.0
loss_aff_sum = 0.0
aff_pred_list = []
aff_label_list = []
pairwise_auc_list = []
for step in range(steps):
(vertex_mask, vertex, edge, atom_adj,
bond_adj, nbs_mask, seq_mask, sequence,
msa_feature, affinity_label, pairwise_mask,
pairwise_label) = next(valid_iter)
vertex_mask = vertex_mask.to(self.device)
vertex = vertex.to(self.device)
edge = edge.to(self.device)
atom_adj = atom_adj.to(self.device)
bond_adj = bond_adj.to(self.device)
nbs_mask = nbs_mask.to(self.device)
seq_mask = seq_mask.to(self.device)
sequence = sequence.to(self.device)
msa_feature = msa_feature.to(self.device)
affinity_label = affinity_label.to(self.device)
pairwise_mask = pairwise_mask.to(self.device)
pairwise_label = pairwise_label.to(self.device)
affinity_pred, pairwise_pred = self.net(
vertex_mask, vertex, edge, \
atom_adj, bond_adj, nbs_mask, \
seq_mask, sequence, msa_feature)
loss_aff = self.criterion1(affinity_pred, affinity_label)
loss_pairwise = self.criterion2(pairwise_pred, pairwise_label, pairwise_mask, vertex_mask, seq_mask)
loss = loss_aff + 0.1*loss_pairwise
loss_sum += loss.item()
            loss_pairwise_sum += loss_pairwise.item()
            loss_aff_sum += loss_aff.item()
for j in range(len(pairwise_mask)):
if pairwise_mask[j]:
num_vertex = int(torch.sum(vertex_mask[j,:]))
num_residue = int(torch.sum(seq_mask[j,:]))
pairwise_pred_i = pairwise_pred[j, :num_vertex, :num_residue].cpu().detach().numpy().reshape(-1)
pairwise_label_i = pairwise_label[j].cpu().detach().numpy().reshape(-1)
if pairwise_label_i.shape == pairwise_pred_i.shape: # Boxiang
pairwise_auc_list.append(roc_auc_score(pairwise_label_i, pairwise_pred_i))
aff_pred_list += affinity_pred.cpu().detach().numpy().reshape(-1).tolist()
aff_label_list += affinity_label.cpu().detach().numpy().reshape(-1).tolist()
aff_pred_list = np.array(aff_pred_list)
aff_label_list = np.array(aff_label_list)
rmse_value, pearson_value, spearman_value = reg_scores(aff_label_list, aff_pred_list)
average_pairwise_auc = np.mean(pairwise_auc_list)
dev_performance = [rmse_value, pearson_value, spearman_value, average_pairwise_auc]
self.summary["loss_dev"] = loss_sum / steps
self.summary["loss_pairwise_dev"] = loss_pairwise_sum / steps
self.summary["loss_aff_dev"] = loss_aff_sum / steps
self.summary["rmse_dev"] = rmse_value
self.summary["pearson_dev"] = pearson_value
self.summary["spearman_dev"] = spearman_value
self.summary["average_pairwise_auc"] = average_pairwise_auc
torch.set_grad_enabled(True)
self.net.train()
def write_summary(self, mode="Train"):
if mode == "Train":
self.summary_writer.add_scalar(
"Train/loss",
self.summary["loss_sum"] / self.summary["log_step"],
self.summary["step"])
self.summary_writer.add_scalar(
"Train/loss_pairwise",
self.summary["loss_pairwise_sum"] / self.summary["log_step"],
self.summary["step"])
self.summary_writer.add_scalar(
"Train/loss_aff",
self.summary["loss_aff_sum"] / self.summary["log_step"],
self.summary["step"])
elif mode == "Dev":
self.summary_writer.add_scalar(
"Dev/loss",
self.summary["loss_dev"],
self.summary["step"])
self.summary_writer.add_scalar(
"Dev/loss_pairwise",
self.summary["loss_pairwise_dev"],
self.summary["step"])
self.summary_writer.add_scalar(
"Dev/loss_aff",
self.summary["loss_aff_dev"],
self.summary["step"])
def save_model(self, mode="Train"):
if mode == "Train":
torch.save(
{
"epoch": self.summary["epoch"],
"step": self.summary["step"],
"loss_dev_best": self.summary["loss_dev_best"],
"loss_pairwise_dev_best": self.summary["loss_pairwise_dev_best"],
"loss_aff_dev_best": self.summary["loss_aff_dev_best"]
},
os.path.join(self.args.save_path, "train.ckpt"))
elif mode == "Dev":
save_best = False
if self.summary["loss_dev"] < self.summary["loss_dev_best"]:
self.summary["loss_dev_best"] = self.summary["loss_dev"]
self.summary["loss_pairwise_dev_best"] = self.summary["loss_pairwise_dev"]
self.summary["loss_aff_dev_best"] = self.summary["loss_aff_dev"]
save_best = True
if save_best:
torch.save(
{
"epoch": self.summary["epoch"],
"step": self.summary["step"],
"loss_dev_best": self.summary["loss_dev_best"],
"loss_pairwise_dev_best": self.summary["loss_pairwise_dev_best"],
"loss_aff_dev_best": self.summary["loss_aff_dev_best"]
},
os.path.join(self.args.save_path, "best.ckpt"))
logging.info(
"{}, Best, Epoch: {}, Step: {}, Loss: {:.3f}, "
"PairLoss: {:.3f}, AffLoss: {:.3f}"
.format(time.strftime("%Y-%m-%d %H:%M:%S"),
self.summary["epoch"], self.summary["step"],
self.summary["loss_dev"], self.summary["loss_pairwise_dev"],
self.summary["loss_aff_dev"]))
def close(self):
self.summary_writer.close()
def loading_emb(processed_dir, measure, blosum_dict):
fn = os.path.join(processed_dir, "pdbbind_all_atom_dict_{}".format(measure))
with open(fn, "rb") as f:
atom_dict = pickle.load(f)
fn = os.path.join(processed_dir, "pdbbind_all_bond_dict_{}".format(measure))
with open(fn, "rb") as f:
bond_dict = pickle.load(f)
fn = os.path.join(processed_dir, "pdbbind_all_word_dict_{}".format(measure))
with open(fn, "rb") as f:
word_dict = pickle.load(f)
sys.stderr.write("Atom dict size: {}\n".format(len(atom_dict)))
sys.stderr.write("Bond dict size: {}\n".format(len(bond_dict)))
sys.stderr.write("Word dict size: {}\n".format(len(word_dict)))
init_atom_features = np.zeros((len(atom_dict), atom_fdim))
init_bond_features = np.zeros((len(bond_dict), bond_fdim))
init_word_features = np.zeros((len(word_dict), 20))
for key,value in atom_dict.items():
init_atom_features[value] = np.array(list(map(int, key)))
for key,value in bond_dict.items():
init_bond_features[value] = np.array(list(map(int, key)))
for key, value in word_dict.items():
if key not in blosum_dict:
continue
init_word_features[value] = blosum_dict[key]
init_word_features = torch.cat([torch.zeros(1,20), torch.FloatTensor(init_word_features)], dim=0)
init_atom_features = torch.FloatTensor(init_atom_features)
init_bond_features = torch.FloatTensor(init_bond_features)
init_word_features = torch.FloatTensor(init_word_features)
return init_atom_features, init_bond_features, init_word_features
def load_blosum62(fn):
blosum_dict = {}
with open(fn) as f:
for i, line in enumerate(f):
if i == 0:
continue
split_line = line.strip("\n").split()
blosum_dict[split_line[0]] = np.array(split_line[1:], dtype=float)
return blosum_dict
# Model parameter initializer
def weights_init(m):
if isinstance(m, nn.Conv1d) or isinstance(m,nn.Linear):
nn.init.normal_(m.weight.data, mean=0, std=min(1.0 / math.sqrt(m.weight.data.shape[-1]), 0.1))
nn.init.constant_(m.bias, 0)
class Masked_BCELoss(nn.Module):
def __init__(self):
super(Masked_BCELoss, self).__init__()
        self.criterion = nn.BCELoss(reduction="none")
def forward(self, pred, label, pairwise_mask, vertex_mask, seq_mask):
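        # Shapes inferred from the .view calls below: pred/label are
        # (batch, n_atoms, n_residues) pairwise interaction maps. The BCE is kept
        # element-wise, then masked by the outer product of vertex_mask and seq_mask
        # so only real atom-residue positions of samples that carry pairwise labels
        # (pairwise_mask) contribute, and finally normalised by the number of
        # labelled samples.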
batch_size = pred.size(0)
loss_all = self.criterion(pred, label)
loss_mask = torch.matmul(vertex_mask.view(batch_size,-1,1), seq_mask.view(batch_size,1,-1))*pairwise_mask.view(-1, 1, 1)
loss = torch.sum(loss_all*loss_mask) / torch.sum(pairwise_mask).clamp(min=1e-10)
return loss
def reg_scores(label, pred):
label = label.reshape(-1)
pred = pred.reshape(-1)
return rmse(label, pred), pearson(label, pred), spearman(label, pred)
def rmse(y,f):
"""
Task: To compute root mean squared error (RMSE)
Input: y Vector with original labels (pKd [M])
f Vector with predicted labels (pKd [M])
Output: rmse RSME
"""
rmse = math.sqrt(((y - f)**2).mean(axis=0))
return rmse
def pearson(y,f):
"""
Task: To compute Pearson correlation coefficient
Input: y Vector with original labels (pKd [M])
f Vector with predicted labels (pKd [M])
Output: rp Pearson correlation coefficient
"""
rp = np.corrcoef(y, f)[0,1]
return rp
def spearman(y,f):
"""
Task: To compute Spearman's rank correlation coefficient
Input: y Vector with original labels (pKd [M])
f Vector with predicted labels (pKd [M])
Output: rs Spearman's rank correlation coefficient
"""
rs = stats.spearmanr(y, f)[0]
return rs
|
[
"[email protected]"
] | |
6c68ddaba2e5984f4c18ded79956dfd03edfea34
|
266947fd84eed629ed0c21f6d91134239512afd9
|
/BeginnerContest_B/029.py
|
94cce86a31db05424621737f3905347e8a3c6ed7
|
[] |
no_license
|
SkiMsyk/AtCoder
|
c86adeec4fa470ec14c1be7400c9fc8b3fb301cd
|
8102b99cf0fb6d7fa304edb942d21cf7016cba7d
|
refs/heads/master
| 2022-09-03T01:23:10.748038 | 2022-08-15T01:19:55 | 2022-08-15T01:19:55 | 239,656,752 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 100 |
py
|
res = 0
for _ in range(12):
s = input()
    if 'r' in s:
res += 1
print(res)
|
[
"[email protected]"
] | |
b560cc41d956fc94b579f1df47d17c8cc742c9d5
|
5f9243fc8282cdc15e0aba8c3bb4e4a9ae55d88d
|
/lib/apikey/ncloud_apikey/properties_parser.py
|
a25dae3a68d20aa8211f1dd2820a962e6ad894e6
|
[
"MIT"
] |
permissive
|
KidongSohn/ncloud-sdk-py
|
ab7c743e4628d9498bda79e74bd87a7a4ba11a0d
|
1c62471a9bd320d77164ed3193a0ebb9f64229ff
|
refs/heads/master
| 2020-03-21T21:46:49.895974 | 2018-06-29T03:39:04 | 2018-06-29T03:39:04 | 139,083,793 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 605 |
py
|
# coding: utf-8
class PropertiesParser(object):
def parse(self, filepath):
with open(filepath, 'r') as f:
lines = f.readlines()
prop_list = []
for line in lines:
if line == '' or len(line) < 3 or line[0] == "#" :
continue
split_str = line.strip().split('=')
if len(split_str) < 2 :
continue
prop_list.append((split_str[0].strip(), split_str[1].strip()))
return prop_list
#print(PropertiesParser().parse("/Users/user/.ncloud/configure"))
|
[
"[email protected]"
] | |
c39faacad300340acf5afbc1dc9638d135b33ac7
|
aa5e9defea373d64d75336fc6c5a03124e24abbd
|
/mwel/xml2mwel
|
a38bc73f0f316136adbc60e91e01f311502a0660
|
[
"MIT"
] |
permissive
|
esayui/mworks
|
e8ae5d8b07d36d5bbdec533a932d29641f000eb9
|
0522e5afc1e30fdbf1e67cedd196ee50f7924499
|
refs/heads/master
| 2022-02-18T03:47:49.858282 | 2019-09-04T16:42:52 | 2019-09-05T13:55:06 | 208,943,825 | 0 | 0 |
MIT
| 2019-09-17T02:43:38 | 2019-09-17T02:43:38 | null |
UTF-8
|
Python
| false | false | 167 |
#!/usr/bin/python
from __future__ import division, print_function, unicode_literals
import sys
import mwel
if __name__ == '__main__':
sys.exit(mwel.fromxml())
|
[
"[email protected]"
] | ||
e8ea04934ba2fd9e709df7aacea5088ce1de1a5f
|
4111ca5a73a22174f189361bef654c3f91c3b7ed
|
/Lintcode/Ladder_37_BB/medium/831. 3Sum II.py
|
a2a340f0781320c786d23c4fbcaf343c3acbd7e9
|
[
"MIT"
] |
permissive
|
ctc316/algorithm-python
|
58b541b654509ecf4e9eb8deebfcbdf785699cc4
|
ac4580d55e05e93e407c6156c9bb801808027d60
|
refs/heads/master
| 2020-03-16T06:09:50.130146 | 2019-08-02T02:50:49 | 2019-08-02T02:50:49 | 132,548,222 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 526 |
py
|
class Solution:
"""
@param n: an integer
@return: the number of solutions
"""
def threeSum2(self, n):
res = 0
for z in range(0, int(n ** 0.5) + 1):
x, y = 0, z
target = n - z ** 2
while x <= y:
summ = x ** 2 + y ** 2
if summ > target:
y -= 1
elif summ < target:
x += 1
else:
y -= 1
res += 1
return res
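        # Example: Solution().threeSum2(5) == 1, the single decomposition being
        # 0^2 + 1^2 + 2^2 = 5 with 0 <= x <= y <= z.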
|
[
"[email protected]"
] | |
d676d645eb1adbaf3bf29bfaeda688d91a7d5206
|
50de54517ef5e157b43598e412c477fd66890a3e
|
/Assignment 01/Problem 16.py
|
217e6ac4a6272a81683f430b2354c5f51928e3ef
|
[] |
no_license
|
Shihabsarker93/BRACU-CSE111
|
f530be247bebaaee9cc5e85948dc070adae0c6ae
|
17c95c76f84abffe9d9bdcb5861fbacbc510b5a6
|
refs/heads/main
| 2023-08-13T15:33:57.331850 | 2021-10-07T10:56:09 | 2021-10-07T10:56:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 712 |
py
|
def count_dict(string):
"""
Given a string, this function returns a dictionary containing
the characters of the string as "key" and how many times the
character repeated itself as a value.
"""
dictionary = {}
for char in string:
if char == " ":
continue
dictionary[char] = dictionary.get(char, 0) + 1
return dictionary
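# e.g. count_dict("abca") -> {'a': 2, 'b': 1, 'c': 1}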
user_input_1 = input()
user_input_2 = input()
dict_1 = count_dict(user_input_1)
dict_2 = count_dict(user_input_2)
if user_input_1 != user_input_2:
if dict_1 == dict_2:
print("Those strings are anagrams.")
else:
print("Those strings are not anagrams.")
else:
print("Those strings are not anagrams.")
|
[
"[email protected]"
] | |
a50740f0b46094ea02a198d3e53ffeca7ee7bd49
|
94ff68c2bf2a231584e8434a9d4363c56ea3af46
|
/apps/goods/filter.py
|
578d28d583582dc3f217f989340e04fa74f5301c
|
[] |
no_license
|
Asunqingwen/MxShop
|
c95eae8d36273148220cfe31796e560a43b99c31
|
eb4730f7c6921aa2a9099f210c7c914d8adfc3aa
|
refs/heads/master
| 2022-12-23T19:22:13.226669 | 2019-12-13T08:03:32 | 2019-12-13T08:03:32 | 226,297,474 | 0 | 0 | null | 2022-12-08T03:19:38 | 2019-12-06T09:54:43 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,406 |
py
|
# -*- coding: utf-8 -*-
# @Time : 2019/12/5 0005 16:09
# @Author : 没有蜡笔的小新
# @E-mail : [email protected]
# @FileName: filter.py
# @Software: PyCharm
# @Blog :https://blog.csdn.net/Asunqingwen
# @GitHub :https://github.com/Asunqingwen
# @WebSite : labixiaoxin.me
from django.db.models import Q
from django_filters import rest_framework as filters
from .models import Goods
class GoodsFilter(filters.FilterSet):
"""
    Goods filter class
"""
    # Two arguments: field_name is the model field to filter on, lookup_expr is the comparison to run, e.g. "less/greater than or equal to the shop price"
price_min = filters.NumberFilter(field_name='shop_price', lookup_expr='gte', help_text='大于等于本店价格')
price_max = filters.NumberFilter(field_name='shop_price', lookup_expr='lte', help_text='小于等于本店价格')
    # Behavior: keep goods whose name contains the given substring, case-insensitive
# name = filters.CharFilter(field_name="name" ,lookup_expr="icontains")
top_category = filters.NumberFilter(field_name="category", method='top_category_filter')
def top_category_filter(self, queryset, name, value):
        # Match no matter whether the selected category is a first-, second- or third-level category.
return queryset.filter(Q(category_id=value) | Q(category__parent_category_id=value) | Q(
category__parent_category__parent_category_id=value))
class Meta:
model = Goods
fields = ['price_min', 'price_max', 'name', 'is_hot', 'is_new']
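# Illustrative usage (assuming this FilterSet is wired into a DRF view through
# DjangoFilterBackend, which is not shown in this file): a request such as
#   GET /goods/?price_min=50&price_max=200&top_category=3
# keeps Goods whose shop_price lies in [50, 200] and whose category matches id 3
# at any of the three category levels.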
|
[
"[email protected]"
] | |
7e64c40255aea30895b1a4ba08e789acc97b8949
|
ad4952035a8ea3116a18c346d5d8cbfa26fb8e98
|
/PalindromePartitioning.py
|
823396b73c932b11a64f0eb3fef963e7683f5dd0
|
[] |
no_license
|
AbhiniveshP/Backtracking-2
|
48908072a8f186dfb0bc77d7107e8229172d6a67
|
8ad2faaa93d100a47a37e3d453626acdadf4febe
|
refs/heads/master
| 2021-03-14T06:44:12.538985 | 2020-03-13T11:29:31 | 2020-03-13T11:29:31 | 246,747,056 | 0 | 0 | null | 2020-03-12T04:56:15 | 2020-03-12T04:56:14 | null |
UTF-8
|
Python
| false | false | 1,954 |
py
|
'''
Solution
1. Using Backtracking, we check whether the substring considered is palindrome or not.
2. If palindrome, we check the remaining possible substrings
3. If not palindrome, we backtrack to the previous state and check for other possible substrings from that state.
Time Complexity: O(n * 2^n)
Space Complexity: O(n)
--- Passed all testcases on Leetcode successfully
'''
class PalindromePartitioning(object):
def __init__(self):
self.finalList = []
self.tempList = []
def __isPalindrome(self, s, fromId, toId):
# check for palindrome using 2 pointers
if (toId >= len(s)):
return False
while (fromId <= toId):
if (s[fromId] != s[toId]):
return False
fromId += 1; toId -= 1
return True
def __backtracking(self, s, fromIndex):
# base case
if (fromIndex == len(s)):
self.finalList.append(list(self.tempList))
return
# from current index to total length
for toIndex in range(fromIndex, len(s)):
# only if palindrome, do the following
if self.__isPalindrome(s, fromIndex, toIndex):
# action -- appending the current substring to the list
self.tempList.append(s[fromIndex: toIndex + 1])
# recursion -- just to check whether the partition can be valid or not
self.__backtracking(s, toIndex + 1)
# backtrack -- removing the current substring from the list
self.tempList.pop()
def partition(self, s):
"""
:type s: str
:rtype: List[List[str]]
"""
# edge case check
if (s == None or len(s) == 0):
return self.finalList
# main call to the helper function
self.__backtracking(s, 0)
# return the final list
return self.finalList
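# Minimal usage sketch (illustrative, not part of the original LeetCode driver):
#   PalindromePartitioning().partition("aab") -> [['a', 'a', 'b'], ['aa', 'b']]
# Results accumulate in self.finalList, so create a fresh instance per query.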
|
[
"[email protected]"
] | |
a05788207b0351d1b7a36360cd29cc39e8273709
|
0ca0dfcdbd6e07280f401aa447d659cc04233398
|
/db_migrate.py
|
044f0247f9cfac4c523bcfbe9b55be7dada94a93
|
[] |
no_license
|
pace-noge/apt-assessment
|
775530ca50bb95f3a34892477b0e8e794be9be98
|
113a0d645a69e3c66d7efd1e21e023b19751af38
|
refs/heads/master
| 2021-01-09T20:41:22.144393 | 2016-07-21T10:56:38 | 2016-07-21T10:56:38 | 63,752,001 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 857 |
py
|
import imp
from app import db
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
migration = SQLALCHEMY_MIGRATE_REPO + ('/versions/%03d_migration.py' % (v+1))
tmp_module = imp.new_module('old_model')
old_model = api.create_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
exec(old_model, tmp_module.__dict__)
script = api.make_update_script_for_model(
SQLALCHEMY_DATABASE_URI,
SQLALCHEMY_MIGRATE_REPO,
tmp_module.meta,
db.metadata
)
open(migration, "wt").write(script)
api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print('[+] New migration created as ' + migration)
print('[+] Current db version: ' + str(v))
|
[
"[email protected]"
] | |
03ef89e2525d97d86133e194f46b15b129dd712f
|
316c45c7900c2440d4c72ec96c3e41358611585e
|
/test/CSVSL/MVATrainer_PseudoVertexNoSoftLepton_B_C_cfg.py
|
bc48465f474314f4c2c4a3c0f35adc4168805bf7
|
[] |
no_license
|
cms-btv-pog/RecoBTau-JetTagMVALearning
|
49c52529774762c44a6eb8a9f6e130c4c0f01df3
|
691937f31d7c2f1865c555623ab4027362235d6e
|
refs/heads/master
| 2020-12-24T13:20:49.115617 | 2015-10-07T08:39:14 | 2015-10-07T08:39:14 | 12,036,491 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,451 |
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("IPTrainer")
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(1))
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.combinedSVTrainer = cms.EDAnalyzer("JetTagMVATreeTrainer",
useCategories = cms.bool(False),
calibrationRecord = cms.string("CombinedSVPseudoVertexNoSoftLepton"),
ignoreFlavours = cms.vint32(0, 1, 2, 3, 21),
signalFlavours = cms.vint32(5, 7),
minimumTransverseMomentum = cms.double(15.0),
minimumPseudoRapidity = cms.double(0),
maximumPseudoRapidity = cms.double(2.5),
fileNames = cms.vstring(
"/user/pvmulder/NewEraOfDataAnalysis/BTagServiceWork/DEVELOPMENT/SuperTaggerDev/CMSSW_5_3_14/src/RootFiles/SkimmedRootFiles/skimmed_max20k_eachptetabin_CombinedSVPseudoVertexNoSoftLepton_B.root",
"/user/pvmulder/NewEraOfDataAnalysis/BTagServiceWork/DEVELOPMENT/SuperTaggerDev/CMSSW_5_3_14/src/RootFiles/SkimmedRootFiles/skimmed_max20k_eachptetabin_CombinedSVPseudoVertexNoSoftLepton_C.root"
)
)
process.looper = cms.Looper("JetTagMVATrainerLooper",
trainers = cms.VPSet(
cms.PSet(
calibrationRecord = cms.string("CombinedSVPseudoVertexNoSoftLepton"),
trainDescription = cms.untracked.string("Save_PseudoVertexNoSoftLepton_B_C.xml"),
loadState = cms.untracked.bool(False),
saveState = cms.untracked.bool(False)
)
)
)
process.p = cms.Path(process.combinedSVTrainer)
|
[
"[email protected]"
] | |
fbf7cb2946644bc35830453506338793505f355e
|
453d0e25a142c6ab26e5d1ac02e245b3b572ea91
|
/machin/frame/algorithms/trpo.py
|
5e84094d37f701b0deb13435482076542585c7a6
|
[
"MIT"
] |
permissive
|
felicitylyzhang/machin
|
d92fee12481eb98ebd8ca8f257e70e5a07587187
|
faf7c72f9cb88d028519e5af38ea8f8f3e886c88
|
refs/heads/master
| 2023-06-05T03:07:35.142652 | 2021-06-23T03:12:14 | 2021-06-23T03:12:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 20,386 |
py
|
from .a2c import *
from .utils import safe_return
from machin.utils.logging import default_logger
# Implementation Reference: https://github.com/Khrylx/PyTorch-RL
# Implementation Reference: https://github.com/openai/spinningup
class TRPO(A2C):
"""
TRPO framework.
See `Trust Region Policy Optimization <https://arxiv.org/pdf/1502.05477.pdf>`_.
"""
def __init__(
self,
actor: Union[NeuralNetworkModule, nn.Module],
critic: Union[NeuralNetworkModule, nn.Module],
optimizer: Callable,
criterion: Callable,
*_,
lr_scheduler: Callable = None,
lr_scheduler_args: Tuple[Tuple, Tuple] = (),
lr_scheduler_kwargs: Tuple[Dict, Dict] = (),
batch_size: int = 100,
critic_update_times: int = 10,
actor_learning_rate: float = 0.003,
critic_learning_rate: float = 0.001,
entropy_weight: float = None,
value_weight: float = 0.5,
gradient_max: float = np.inf,
gae_lambda: float = 1.0,
discount: float = 0.99,
normalize_advantage: bool = True,
kl_max_delta: float = 0.01,
damping: float = 0.1,
line_search_backtracks: int = 10,
conjugate_eps: float = 1e-8,
conjugate_iterations: int = 10,
conjugate_res_threshold: float = 1e-10,
hv_mode: str = "fim",
replay_size: int = 500000,
replay_device: Union[str, t.device] = "cpu",
replay_buffer: Buffer = None,
visualize: bool = False,
visualize_dir: str = "",
**__
):
"""
See Also:
:class:`.A2C`
Important:
TRPO requires a slightly different actor model compared to other
stochastic policy algorithms such as A2C, PPO, etc.
            When given a state, and an optional action, the actor must
            at least return two values; these are the same as what A2C and
            PPO require. For more information please refer to
:class:`.A2C`.
**1. Action**
**2. Log likelihood of action (action probability)**
The model must have another three methods:
1. ``get_kl(self, states: Any, ...)``, returns kl divergence of the model
when given a batch of state inputs. kl divergence is computed by:
            :math:`D_{KL}(\\pi, \\pi_k) = \\sum(\\pi_k)\\log\\frac{\\pi_k}{\\pi}`
            Where :math:`\\pi_k = \\pi` is the current policy model at iteration k;
            since parameter :math:`\\theta_k = \\theta`, you should detach
            :math:`\\theta_k` in the computation and keep it fixed.
2. ``compare_kl(self, params: t.Tensor, states: Any, ...)``, returns kl
divergence between model with given params and model with current params,
given params are flat.
3. ``get_fim(self, states: Any, ...)``, returns the Fisher
information matrix on mean parameter :math:`\mu` of the model when
given a batch of state inputs.
You can refer to this `article <https://www.ii.pwr.edu.pl/~tomczak/
PDF/[JMT]Fisher_inf.pdf>` on how to compute this matrix, note we only need fisher
information matrix on the mean parameter.
Since it's a diagonal matrix, we only need to return diagonal elements
to fully represent it.
            Two base classes, one for the discrete case and one for the continuous
            case (which samples from a diagonal normal distribution), with these
            additional methods are provided in :mod:`machin.model.algorithms.trpo`;
            you **must** extend one of these two base classes to create your model.
Args:
actor: Actor network module.
critic: Critic network module.
optimizer: Optimizer used to optimize ``actor`` and ``critic``.
criterion: Criterion used to evaluate the value loss.
lr_scheduler: Learning rate scheduler of ``optimizer``.
lr_scheduler_args: Arguments of the learning rate scheduler.
lr_scheduler_kwargs: Keyword arguments of the learning
rate scheduler.
batch_size: Batch size used only during training of critic. Actor train
on the whole buffer.
critic_update_times: Times to update critic in ``update()``.
actor_learning_rate: Learning rate of the actor optimizer,
not compatible with ``lr_scheduler``.
critic_learning_rate: Learning rate of the critic optimizer,
not compatible with ``lr_scheduler``.
entropy_weight: Weight of entropy in your loss function, a positive
entropy weight will minimize entropy, while a negative one will
maximize entropy.
value_weight: Weight of critic value loss.
gradient_max: Maximum gradient.
gae_lambda: :math:`\\lambda` used in generalized advantage
estimation.
discount: :math:`\\gamma` used in the bellman function.
normalize_advantage: Whether to normalize sampled advantage values in
the batch.
kl_max_delta: Maximum delta allowed of the kl divergence between current
model and the updated model.
damping: Artifact for numerical stability, should be smallish.
Adjusts Hessian-vector product calculation:
:math:`Hv \\rightarrow (\\alpha I + H)v`
where :math:`\\alpha` is the damping coefficient.
Probably don't play with this hyperparameter.
See `Conjugate gradient bundle adjustment <https://www1.maths.lth.se/
matematiklth/vision/publdb/reports/pdf/byrod-eccv-10.pdf>` equation 6.
line_search_backtracks: Maximum number of times to try in the line search.
conjugate_eps: A small constant used to prevent conjugate gradient from
                outputting nan values in the first iteration.
conjugate_iterations: Maximum number of iterations of the conjugate gradient
algorithm.
conjugate_res_threshold: The threshold squared length of the residual
vector.
hv_mode: Which method to use to compute hessian vector product. One of
"fim" or "direct", "fim" is faster and the default method.
replay_size: Replay buffer size. Not compatible with
``replay_buffer``.
replay_device: Device where the replay buffer locates on, Not
compatible with ``replay_buffer``.
replay_buffer: Custom replay buffer.
visualize: Whether visualize the network flow in the first pass.
visualize_dir: Visualized graph save directory.
"""
super().__init__(
actor,
critic,
optimizer,
criterion,
lr_scheduler=lr_scheduler,
lr_scheduler_args=lr_scheduler_args,
lr_scheduler_kwargs=lr_scheduler_kwargs,
batch_size=batch_size,
actor_update_times=1,
critic_update_times=critic_update_times,
actor_learning_rate=actor_learning_rate,
critic_learning_rate=critic_learning_rate,
entropy_weight=entropy_weight,
value_weight=value_weight,
gradient_max=gradient_max,
gae_lambda=gae_lambda,
discount=discount,
normalize_advantage=normalize_advantage,
replay_size=replay_size,
replay_device=replay_device,
replay_buffer=replay_buffer,
visualize=visualize,
visualize_dir=visualize_dir,
)
self.line_search_backtracks = line_search_backtracks
self.kl_max_delta = kl_max_delta
self.damping = damping
self.conjugate_eps = conjugate_eps
self.conjugate_iterations = conjugate_iterations
self.conjugate_res_threshold = conjugate_res_threshold
self.hv_mode = hv_mode
def update(
self, update_value=True, update_policy=True, concatenate_samples=True, **__
):
# DOC INHERITED
sum_value_loss = 0
self.actor.train()
self.critic.train()
# sample a batch for actor training
batch_size, (state, action, advantage) = self.replay_buffer.sample_batch(
-1,
sample_method="all",
concatenate=concatenate_samples,
sample_attrs=["state", "action", "gae"],
additional_concat_attrs=["gae"],
)
# normalize advantage
if self.normalize_advantage:
advantage = (advantage - advantage.mean()) / (advantage.std() + 1e-6)
# Train actor
# define two closures needed by fvp functions
___, fixed_action_log_prob, *_ = self._eval_act(state, action)
fixed_action_log_prob = fixed_action_log_prob.view(batch_size, 1).detach()
fixed_params = self.get_flat_params(self.actor)
def actor_loss_func():
____, action_log_prob, *_ = self._eval_act(state, action)
action_log_prob = action_log_prob.view(batch_size, 1)
action_loss = -advantage.to(action_log_prob.device) * t.exp(
action_log_prob - fixed_action_log_prob
)
return action_loss.mean()
def actor_kl_func():
state["params"] = fixed_params
return safe_return(safe_call(self.actor, state, method="compare_kl"))
act_policy_loss = actor_loss_func()
if self.visualize:
self.visualize_model(act_policy_loss, "actor", self.visualize_dir)
# Update actor network
if update_policy:
def fvp(v):
if self.hv_mode == "fim":
return self._fvp_fim(state, v, self.damping)
else:
return self._fvp_direct(state, v, self.damping)
loss_grad = self.get_flat_grad(
act_policy_loss, list(self.actor.parameters())
).detach()
# usually 1e-15 is low enough
if t.allclose(loss_grad, t.zeros_like(loss_grad), atol=1e-15):
default_logger.warning(
"TRPO detects zero gradient, update step skipped."
)
return 0, 0
step_dir = self._conjugate_gradients(
fvp,
-loss_grad,
eps=self.conjugate_eps,
iterations=self.conjugate_iterations,
res_threshold=self.conjugate_res_threshold,
)
# Maximum step size mentioned in appendix C of the paper.
beta = np.sqrt(2 * self.kl_max_delta / step_dir.dot(fvp(step_dir)).item())
full_step = step_dir * beta
            if not self._line_search(
                self.actor, actor_loss_func, actor_kl_func, full_step,
                self.kl_max_delta, self.line_search_backtracks,
            ):
default_logger.warning(
"Cannot find an update step to satisfy kl_max_delta, "
"consider increase line_search_backtracks"
)
for _ in range(self.critic_update_times):
# sample a batch
batch_size, (state, target_value) = self.replay_buffer.sample_batch(
self.batch_size,
sample_method="random_unique",
concatenate=concatenate_samples,
sample_attrs=["state", "value"],
additional_concat_attrs=["value"],
)
# calculate value loss
value = self._criticize(state)
value_loss = (
self.criterion(target_value.type_as(value), value) * self.value_weight
)
if self.visualize:
self.visualize_model(value_loss, "critic", self.visualize_dir)
# Update critic network
if update_value:
self.critic.zero_grad()
self._backward(value_loss)
nn.utils.clip_grad_norm_(self.critic.parameters(), self.gradient_max)
self.critic_optim.step()
sum_value_loss += value_loss.item()
self.replay_buffer.clear()
self.actor.eval()
self.critic.eval()
return (
act_policy_loss,
sum_value_loss / self.critic_update_times,
)
@staticmethod
def _conjugate_gradients(Avp_f, b, eps, iterations, res_threshold):
"""
The conjugate gradient method, which solves a linear system :math`Ax = b`.
See `Conjugate gradient method \
<https://en.wikipedia.org/wiki/Conjugate_gradient_method>`_.
Args:
Avp_f: A function which takes current basis vector :math`p` and
returns :math`Ap`.
b: RHS of :math`Ax = b`.
iterations: Max number of iterations to run this algorithm
res_threshold: The threshold squared length of the residual vector
:math:`r_k`, where :math`k` is the iteration step.
Returns:
Solution of :math:`x`.
"""
x = t.zeros(b.shape, dtype=b.dtype, device=b.device)
r = b.clone()
p = b.clone()
r_dot_r = t.dot(r, r)
for i in range(iterations):
Avp = Avp_f(p)
alpha = r_dot_r / (t.dot(p, Avp) + eps)
x += alpha * p
r -= alpha * Avp
new_r_dot_r = t.dot(r, r)
beta = new_r_dot_r / (r_dot_r + eps)
p = r + beta * p
r_dot_r = new_r_dot_r
if r_dot_r < res_threshold:
break
return x
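        # Illustrative sanity check (not part of the framework's tests): for the SPD
        # system A = [[4, 1], [1, 3]], b = [1, 2],
        #   TRPO._conjugate_gradients(lambda v: t.tensor([[4.0, 1.0], [1.0, 3.0]]) @ v,
        #                             t.tensor([1.0, 2.0]), 1e-8, 10, 1e-10)
        # converges to roughly [0.0909, 0.6364], the exact solution (1/11, 7/11).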
@staticmethod
def _line_search(
model, loss_func, kl_func, full_step, kl_max_delta, max_backtracks=10
):
flat_params = TRPO.get_flat_params(model)
with t.no_grad():
loss = loss_func().item()
for fraction in [0.5 ** i for i in range(max_backtracks)]:
new_params = flat_params + fraction * full_step
TRPO.set_flat_params(model, new_params)
new_loss = loss_func().item()
improve = loss - new_loss
# Note: some implementations like Khrylx/PyTorch-RL
# use a method which compares delta of loss to:
#
# expected_improve = -loss_grad.dot(full_step)
#
# and then compute a ratio, if ratio > 0.1, break out
# of the iteration:
#
# ratio = actual_improve / expected_improve
#
# since the meaning of this method is not clearly stated
# anywhere, we choose to obey the implementation in the
# paper and openai/spinningup, which checks kl range and
# loss.
if kl_func() <= kl_max_delta and improve > 0:
return True
TRPO.set_flat_params(model, flat_params)
return False
def _fvp_direct(self, state: Dict[str, Any], vector: t.Tensor, damping: float):
"""
The generic way to compute the Fisher-vector product mentioned in Appendix
C.1 of the paper.
Args:
state: State dictionary to be fed to the actor.
vector: The vector to multiply with the Hessian matrix.
damping: Coefficient for numerical stability.
Returns:
Matrix product of :math:`Hv`
"""
kl = safe_return(safe_call(self.actor, state, method="get_kl"))
kl = kl.mean()
grads = t.autograd.grad(kl, list(self.actor.parameters()), create_graph=True)
flat_grad_kl = t.cat([grad.view(-1) for grad in grads])
kl_v = (flat_grad_kl * vector).sum()
grads = t.autograd.grad(kl_v, list(self.actor.parameters()))
flat_grad_grad_kl = t.cat(
[grad.contiguous().view(-1) for grad in grads]
).detach()
return flat_grad_grad_kl + vector * damping
def _fvp_fim(self, state: Dict[str, Any], vector: t.Tensor, damping: float):
"""
The more optimized way to compute the Fisher-vector product mentioned in
Appendix C.1 of the paper.
Please refer to `this blog <https://www.telesens.co/2018/06/09/efficien
tly-computing-the-fisher-vector-product-in-trpo/>`_ for more details
Args:
state: State dictionary to be fed to the actor.
vector: The vector to multiply with the Hessian matrix.
damping: Coefficient for numerical stability.
Returns:
Matrix product of :math:`Hv`
"""
batch_size = next(st.shape[0] for st in state.values() if t.is_tensor(st))
# M is the second derivative of the KL distance w.r.t. network output
# (M*M diagonal matrix compressed into a M*1 vector)
M, act_param = safe_call(self.actor, state, method="get_fim")
# From now on we will use symbol `mu` as the action parameter of the
# distribution, this symbol is used in equation 56. and 57. of the
# paper
mu = act_param.view(-1)
# t is an arbitrary constant vector that does not depend on actor parameter
# theta, we use t_ here since torch is imported as t
t_ = t.ones(mu.shape, requires_grad=True, device=mu.device)
mu_t = (mu * t_).sum()
Jt = self.get_flat_grad(mu_t, list(self.actor.parameters()), create_graph=True)
Jtv = (Jt * vector).sum()
Jv = t.autograd.grad(Jtv, t_)[0]
MJv = M * Jv.detach()
mu_MJv = (MJv * mu).sum()
JTMJv = self.get_flat_grad(mu_MJv, list(self.actor.parameters())).detach()
JTMJv /= batch_size
return JTMJv + vector * damping
@staticmethod
def get_flat_params(model: nn.Module):
"""
Return flattened param tensor of shape [n] of input model.
"""
params = []
for param in model.parameters():
params.append(param.view(-1))
flat_params = t.cat(params)
return flat_params
@staticmethod
def set_flat_params(model: nn.Module, flat_params: t.Tensor):
"""
Set model parameters according to the flattened parameter tensor.
"""
idx = 0
for param in model.parameters():
flat_size = int(np.prod(list(param.shape)))
param.data.copy_(flat_params[idx : idx + flat_size].view(param.shape))
idx += flat_size
@staticmethod
def get_flat_grad(
output: t.Tensor,
parameters: List[nn.Parameter],
retain_graph=False,
create_graph=False,
):
"""
Compute gradient w.r.t. parameters and returns a flattened gradient tensor.
Note: use a list of parameters since it is hard to reset the iterator provided
by calling model.parameters() after t.autograd.grad call.
"""
if create_graph:
retain_graph = True
# allow unused parameters in graph since some parameter (like action log std)
# may not receive gradient in the first or both passes.
grads = t.autograd.grad(
output,
parameters,
retain_graph=retain_graph,
create_graph=create_graph,
allow_unused=True,
)
out_grads = []
for g, p in zip(grads, parameters):
if g is not None:
out_grads.append(g.view(-1))
else:
out_grads.append(t.zeros_like(p).view(-1))
grads = t.cat(out_grads)
for param in parameters:
param.grad = None
return grads
@classmethod
def generate_config(cls, config: Union[Dict[str, Any], Config]):
config = A2C.generate_config(config)
config["frame"] = "TRPO"
config["frame_config"]["kl_max_delta"] = 0.01
config["frame_config"]["damping"] = 0.1
config["frame_config"]["line_search_backtracks"] = 10
config["frame_config"]["conjugate_eps"] = 1e-8
config["frame_config"]["conjugate_iterations"] = 10
config["frame_config"]["conjugate_res_threshold"] = 1e-10
config["frame_config"]["hv_mode"] = "fim"
return config
|
[
"[email protected]"
] | |
b163cc7eb9a6f0762cd988763d79bf757e4f7f35
|
644c10493df293ba492133140ca6b6153802c75b
|
/{{cookiecutter.project_slug}}/{{cookiecutter.main_app}}/migrations/0001_initial.py
|
50dd6e4583cac35236265bdbc4b2a8a3c65b9076
|
[
"BSD-3-Clause"
] |
permissive
|
huogerac/cookiecutter-djangofloppyforms
|
cc27aec961a3d339d390dba6deb791676650aab4
|
0a2c1d7fe506a5df13aaefde0f716373dbb8194e
|
refs/heads/main
| 2023-04-30T21:34:14.822736 | 2021-05-15T18:20:10 | 2021-05-15T18:20:10 | 334,623,255 | 3 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 630 |
py
|
# Generated by Django 3.1.5 on 2021-01-26 18:01
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='{{ cookiecutter.main_model }}',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=264)),
('done', models.BooleanField(default=False)),
('due_to', models.DateTimeField()),
],
),
]
|
[
"[email protected]"
] | |
5e88ca3857afb692a59c3fe77b58248c23b0726a
|
6ae5affdffdf5f18161c90e400623a823976330d
|
/codejam/R1C2020/b.py
|
5fb0bb6da5c871ec28f36f116957b6f4e1818618
|
[] |
no_license
|
organization-lab/codejam
|
01b7544600f421f8878dcc8ca275d521d4db5984
|
92e2b1cc25e1dd31a9cddf3ebb34c0e4e0c38568
|
refs/heads/master
| 2022-08-21T12:28:03.557676 | 2022-08-18T02:21:46 | 2022-08-18T02:21:46 | 121,612,804 | 0 | 2 | null | 2022-08-18T02:21:47 | 2018-02-15T09:48:39 |
Python
|
UTF-8
|
Python
| false | false | 647 |
py
|
# author: mofhu@github
ncase = int(input())
for case in range(1, ncase+1):
u = int(input())
s = {}
z0 = {}
for i in range(10000):
qi, si = input().split(' ')
if si[0] not in s:
s[si[0]] = 1
else:
s[si[0]] += 1
if len(z0) < 10:
if si[-1] not in z0:
z0[si[-1]] = 1
else:
z0[si[-1]] += 1
t = [(s[i], i) for i in s]
t.sort(key=lambda x:x[0], reverse=True)
for i in z0:
if i not in s:
ans = [i]
for i in t:
ans.append(i[1])
print("Case #{}: {}".format(case, ''.join(ans)))
|
[
"[email protected]"
] | |
7ccd37964a1cd6c0813afc434697d4c7f77c866c
|
55ab64b67d8abc02907eb43a54ff6c326ded6b72
|
/scripts/startup/tila_OP_ToggleOverlay.py
|
d3d02affd772876cd82f8dc6e49843aec7341875
|
[
"MIT"
] |
permissive
|
Tilapiatsu/blender-custom_config
|
2f03b0bb234c3b098d2830732296d199c91147d0
|
00e14fc190ebff66cf50ff911f25cf5ad3529f8f
|
refs/heads/master
| 2023-08-16T14:26:39.990840 | 2023-08-16T01:32:41 | 2023-08-16T01:32:41 | 161,249,779 | 6 | 2 |
MIT
| 2023-04-12T05:33:59 | 2018-12-10T23:25:14 |
Python
|
UTF-8
|
Python
| false | false | 2,687 |
py
|
import bpy
bl_info = {
"name": "Tila : Toggle Overlay",
"author": "Tilapiatsu",
"version": (1, 0, 0, 0),
"blender": (2, 80, 0),
"location": "View3D",
"category": "View3D",
}
class TILA_ToggleOverlay(bpy.types.Operator):
bl_idname = "view3d.toggle_overlay"
bl_label = "TILA: Toggle overlay"
bl_options = {'REGISTER', 'UNDO'}
mode : bpy.props.EnumProperty(items=[("TOGGLE", "Toggle", ""), ("SOFT", "Soft", "")])
soft_parameters=['show_annotation',
'show_extras',
'show_bones',
# 'show_relationship_lines',
'show_motion_paths',
'show_outline_selected',
'show_object_origins',
'show_floor',
'show_axis_x',
'show_axis_y',
'show_face_orientation',
'show_faces']
def toggle_state(self, state=None):
if state is None:
self.enabled = not (self.enabled)
else:
self.enabled = state
def toggle_soft(self, state=None):
if state is None:
return
else:
for p in self.soft_parameters:
if p in dir(bpy.context.space_data.overlay):
setattr(bpy.context.space_data.overlay, p, state)
def is_enable(self):
if self.mode == 'TOGGLE':
return bpy.context.space_data.overlay
elif self.mode == 'SOFT':
return self.enabled
def get_state(self):
if self.mode == 'TOGGLE':
self.enabled = self.is_enable()
elif self.mode == 'SOFT':
state = True
for p in self.soft_parameters:
if p in dir(bpy.context.space_data.overlay):
state = state and getattr(bpy.context.space_data.overlay, p)
self.enabled = state
def execute(self, context):
self.get_state()
if self.mode == 'TOGGLE':
bpy.ops.wm.context_toggle(data_path='space_data.overlay.show_overlays')
            if self.is_enable():
self.toggle_state(state=False)
else:
self.toggle_state(state=True)
elif self.mode == 'SOFT':
spaces = context.area.spaces
for s in spaces:
if s.type =='VIEW_3D':
self.toggle_soft(not self.enabled)
self.toggle_state()
return {'FINISHED'}
classes = (TILA_ToggleOverlay,)
register, unregister = bpy.utils.register_classes_factory(classes)
if __name__ == "__main__":
register()
|
[
"[email protected]"
] | |
377290350c8709f3863a64ab8d860ca274944ee4
|
1d0bb94bc33b80b96f47f4b8ff204a6711af7eac
|
/app.py
|
8c33fb7173b9c81cd464c8d6782ef69326458ad7
|
[] |
no_license
|
mfwarren/kpi
|
9ff78de66b240ea41b1b560ea00ef76d1b6a681d
|
239272e06b616873413f4942424c28dc8e7fd4b3
|
refs/heads/master
| 2016-09-05T18:58:57.040531 | 2015-05-08T13:37:27 | 2015-05-08T13:37:27 | 34,973,160 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,553 |
py
|
#!/usr/bin/env python
from gevent import monkey
monkey.patch_all()
import time
import os
import datetime
import pytz
from threading import Thread
import dateutil.parser
from refreshbooks import api as freshbooks_api
from github import Github
import requests
from flask import Flask, render_template
from flask.ext.socketio import SocketIO
TIMEZONE = pytz.timezone('America/Edmonton')
app = Flask(__name__)
app.config['SECRET_KEY'] = 'n7xw34tydr897123gj9s34r76t'
socketio = SocketIO(app)
thread = None
hub = Github(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_PASSWORD'])
freshbooks = freshbooks_api.TokenClient(
os.environ['FRESHBOOKS_DOMAIN'],
os.environ['FRESHBOOKS_API_TOKEN'],
user_agent='KPIDashboard/1.0'
)
def check_commit(commit_url, timeseries):
"""
    Get information about a particular commit
"""
r = requests.get(commit_url)
data = r.json()
print(commit_url)
print(data)
date = dateutil.parser.parse(data['commit']['committer']['date']).date()
stats = data['stats']
timeseries[date.isoformat()].append(stats)
def process_push_event(event, timeseries):
for commit in event.payload['commits']:
# check_commit(commit['url'], timeseries)
local_date = event.created_at.replace(tzinfo=pytz.utc).astimezone(TIMEZONE).date()
timeseries[local_date.isoformat()] += 1
def process_issues_event(event, timeseries):
# need to convert created_at from UTC to MST
local_date = event.created_at.replace(tzinfo=pytz.utc).astimezone(TIMEZONE).date()
timeseries[local_date.isoformat()] += 1
def recent_issues():
date_array = [datetime.date.today() + datetime.timedelta(days=-i) for i in range(7)]
timeseries = {d.isoformat(): 0 for d in date_array}
user = hub.get_user('mfwarren')
events = user.get_events()
for event in events:
try:
if event.type == 'IssuesEvent':
process_issues_event(event, timeseries)
except:
break
return sum(timeseries.values())
def recent_commits():
date_array = [datetime.date.today() + datetime.timedelta(days=-i) for i in range(7)]
timeseries = {d.isoformat(): 0 for d in date_array}
user = hub.get_user('mfwarren')
events = user.get_events()
for event in events:
try:
if event.type == 'PushEvent':
process_push_event(event, timeseries)
except:
break
return timeseries[datetime.date.today().isoformat()], sum(timeseries.values())
def background_thread():
while True:
issues = 0
commits_today = 0
commits_this_week = 0
client_count = 0
try:
issues = recent_issues()
commits_today, commits_this_week = recent_commits()
except Exception as ex:
print("Github crashed")
try:
client_count = freshbooks.client.list().clients.attrib['total']
except:
print("freshbooks crashed")
socketio.emit('response', {'issues': issues,
'commits': commits_this_week,
'commits_today': commits_today,
'critical_number': client_count}, namespace='')
time.sleep(60*30) # 30 minutes
@app.route('/')
def index():
global thread
if thread is None:
thread = Thread(target=background_thread)
thread.start()
return render_template('index.html')
@socketio.on('event')
def message(message):
pass
if __name__ == '__main__':
socketio.run(app)
|
[
"[email protected]"
] | |
11de6c893af7b3b2e87c793fc57f5b71eeb79bf3
|
65bf0113da75390c4cf3960b6a409aca15569a06
|
/orders/models.py
|
ba438c4bcc8db09fb277ffc7b398d222c68cb9a4
|
[] |
no_license
|
wenpengfan/opsadmin
|
e7701538265253653adb1c8ce490e0ce71d3b4f6
|
3d997259353dc2734ad153c137a91f3530e0a8ec
|
refs/heads/master
| 2023-03-29T11:50:10.756596 | 2020-11-16T02:41:18 | 2020-11-16T02:41:18 | 313,171,594 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,142 |
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from appconf.models import Database, Project, AppOwner
# Create your models here.
class Require(models.Model):
title = models.CharField(max_length=255, verbose_name=u"标题", null=True, blank=False)
description = models.CharField(max_length=5000, verbose_name=u"需求描述", null=True, blank=True)
status = models.BooleanField(verbose_name=u"部署状态", default=False)
create_time = models.DateTimeField(verbose_name=u"创建时间", auto_now_add=True)
operating_time = models.DateTimeField(verbose_name=u"预约操作时间", null=True, blank=False)
update_time = models.DateTimeField(verbose_name=u"更新时间", auto_now=True)
order_user = models.CharField(max_length=255, verbose_name=u"提交用户", null=True, blank=True)
owner = models.ForeignKey(AppOwner, verbose_name=u"负责人", on_delete=models.deletion.SET_NULL, null=True, blank=False)
completion_time = models.DateTimeField(verbose_name=u"完成时间", null=True, blank=True)
def __unicode__(self):
return self.title
class Meta:
ordering = ['-completion_time','operating_time']
class DBScript(models.Model):
db_name = models.ForeignKey(Database, verbose_name=u"数据库", on_delete=models.deletion.SET_NULL, null=True, blank=False)
description = models.CharField(max_length=5000, verbose_name=u"更新描述", null=True, blank=True)
script_name = models.CharField(max_length=255, verbose_name=u"脚本名称", null=True, blank=False)
status = models.BooleanField(verbose_name=u"部署状态", default=False)
create_time = models.DateTimeField(verbose_name=u"创建时间", auto_now_add=True)
operating_time = models.DateTimeField(verbose_name=u"预约操作时间", null=True, blank=False)
update_time = models.DateTimeField(verbose_name=u"更新时间", auto_now=True)
order_user = models.CharField(max_length=255, verbose_name=u"提交用户", null=True, blank=True)
env = models.CharField(max_length=10, verbose_name=u"环境", null=True, blank=False)
completion_time = models.DateTimeField(verbose_name=u"完成时间", null=True, blank=True)
def __unicode__(self):
return self.script_name
class Meta:
ordering = ['-completion_time','operating_time']
class Deploy(models.Model):
app_name = models.ForeignKey(Project, verbose_name=u"应用名称", on_delete=models.deletion.SET_NULL, null=True, blank=False)
description = models.CharField(max_length=5000, verbose_name=u"更新描述", null=True, blank=False)
version = models.CharField(max_length=255, verbose_name=u"程序版本号", blank=False, unique=True)
conf_version = models.ForeignKey('Config',to_field="conf_version", verbose_name=u"配置版本号",
on_delete=models.deletion.SET_NULL, null=True, blank=True)
status = models.BooleanField(verbose_name=u"部署状态", default=False)
create_time = models.DateTimeField(verbose_name=u"创建时间", auto_now_add=True)
operating_time = models.DateTimeField(verbose_name=u"预约操作时间", null=True, blank=False)
update_time = models.DateTimeField(verbose_name=u"更新时间", auto_now=True)
order_user = models.CharField(max_length=255, verbose_name=u"提交用户", null=True, blank=True)
order_status = models.BooleanField(verbose_name=u"工单状态", default=False)
dbscript = models.ForeignKey(DBScript, verbose_name=u"数据库工单", on_delete=models.deletion.SET_NULL, null=True, blank=True)
is_new = models.BooleanField(verbose_name=u"是否新应用", default=False)
is_tested = models.IntegerField(u"是否测试通过,0:未确定,1:测试通过,2:测试未通过", default=0, null=True, blank=False)
completion_time = models.DateTimeField(verbose_name=u"完成时间", null=True, blank=True)
def __unicode__(self):
return self.version
class Meta:
unique_together = ('app_name', 'version',)
ordering = ['-completion_time','operating_time']
class Config(models.Model):
app_name = models.ForeignKey(Project, verbose_name=u"应用名称",on_delete=models.deletion.SET_NULL, null=True, blank=False)
env = models.CharField(max_length=255, verbose_name=u"环境", null=True, blank=False)
description = models.CharField(max_length=5000, verbose_name=u"更新描述", null=True, blank=False)
conf_version = models.CharField(max_length=255, verbose_name=u"配置版本号", blank=False, unique=True)
app_version = models.ForeignKey('Deploy',to_field="version", verbose_name=u"程序版本号",
on_delete=models.deletion.SET_NULL, null=True, blank=True)
status = models.BooleanField(verbose_name=u"部署状态", default=False)
create_time = models.DateTimeField(verbose_name=u"创建时间", auto_now_add=True)
operating_time = models.DateTimeField(verbose_name=u"预约操作时间", null=True, blank=False)
update_time = models.DateTimeField(verbose_name=u"更新时间", auto_now=True)
order_user = models.CharField(max_length=255, verbose_name=u"提交用户", null=True, blank=True)
order_status = models.BooleanField(verbose_name=u"工单状态", default=False)
completion_time = models.DateTimeField(verbose_name=u"完成时间", null=True, blank=True)
def __unicode__(self):
return self.conf_version
class Meta:
unique_together = ('app_name', 'conf_version','app_version')
ordering = ['-completion_time','operating_time']
class Document(models.Model):
doc_id = models.IntegerField(u"文档编号", default=0)
name = models.CharField(u"应用名称", max_length=50, default=None, null=False, blank=False)
description = models.CharField(u"应用描述", max_length=5000, null=True, blank=True)
current_ver = models.CharField(u"当前版本", max_length=255, null=True, blank=True)
next_ver = models.CharField(u"后续版本", max_length=255, null=True, blank=True)
language_type = models.CharField(u"语言类型", max_length=30, null=True, blank=False)
app_type = models.CharField(u"程序类型", max_length=30, null=True, blank=False)
app_arch = models.CharField(u"程序框架", max_length=30, null=True, blank=True)
code_address = models.CharField(u"代码库地址", max_length=255, null=True, blank=False)
start_cmd = models.CharField(u"启动命令", max_length=255, null=True, blank=True)
stop_cmd = models.CharField(u"停止命令", max_length=255, null=True, blank=True)
config_detail = models.TextField(u"配置文件说明", max_length=1000, null=True, blank=True)
docker_expose = models.TextField(u"Docker容器说明", max_length=1000, null=True, blank=True)
app_monitor = models.CharField(u"需要的业务监控项", max_length=255, null=True, blank=True)
need_ha = models.CharField(u"高可用说明", default="无需高可用", max_length=30, null=False, blank=False)
need_dn = models.CharField(u"需要新增域名", max_length=255, null=True, blank=True)
need_location = models.CharField(u"需要新增二级目录", max_length=255, null=True, blank=True)
uri_mapping_from = models.CharField(u"URI映射来源", max_length=255, null=True, blank=True)
uri_mapping_to = models.CharField(u"URI映射目标", max_length=255, null=True, blank=True)
need_wan = models.CharField(u"是否需要访问外网", default="否", max_length=2, null=False, blank=False)
requester = models.CharField(u"调用方", max_length=255, null=True, blank=True)
rely_on = models.TextField(u"应用依赖的其他需求", max_length=255, null=True, blank=True)
product_id = models.IntegerField(u"所属产品线", default=0, null=True, blank=True)
dev_id = models.IntegerField(u"开发负责人", default=0, null=True, blank=True)
ops_id = models.IntegerField(u"运维负责人", default=0, null=True, blank=True)
def __unicode__(self):
return self.name
class Meta:
unique_together = ('doc_id', 'current_ver',)
|
[
"[email protected]"
] | |
46f246599c2d98d4fc3accaaaca6b5bb5e65db56
|
957187f350bc0f74ccb99d7b652ee705dd746cea
|
/app_botiquin/migrations/0002_auto_20150801_2326.py
|
4a62f821016102d781b46c7ab7589d090b015d47
|
[] |
no_license
|
dbsiavichay/botiquinmagap
|
8188aa9905300c96ca94c2bc658f58141ea38aef
|
5cc0eda2e89fae90ce6ab7217141b53919aed5b4
|
refs/heads/master
| 2021-01-01T18:48:32.406184 | 2015-08-04T23:46:48 | 2015-08-04T23:46:48 | 32,053,265 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 968 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('app_botiquin', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='grupoproducto',
options={'verbose_name': 'grupo de producto', 'verbose_name_plural': 'grupos de producto'},
),
migrations.AlterModelOptions(
name='medidaproducto',
options={'verbose_name': 'medida de producto', 'verbose_name_plural': 'medidas de producto'},
),
migrations.AlterModelOptions(
name='producto',
options={'verbose_name': 'producto', 'verbose_name_plural': 'productos'},
),
migrations.AlterModelOptions(
name='tipoproducto',
options={'verbose_name': 'tipo de producto', 'verbose_name_plural': 'tipos de producto'},
),
]
|
[
"[email protected]"
] | |
26759fdbcc261d053631ee8955293a036b3a9a3b
|
9d5d057e4077b77980093a22d39af1cdea1040a0
|
/python/paddle/fluid/core.py
|
b904f07b0a7e5eb76d5f66c012be14b22565e0e5
|
[
"Apache-2.0"
] |
permissive
|
heavengate/Paddle
|
16b8d046e9cba818ba36394bec496180b5cd5938
|
f9c801ffa6ac0a8a1a271c09b915d8603aba41ff
|
refs/heads/develop
| 2023-05-23T06:15:22.855477 | 2023-02-08T08:31:23 | 2023-02-08T08:31:23 | 150,394,550 | 1 | 3 |
Apache-2.0
| 2022-04-01T13:19:14 | 2018-09-26T08:33:49 |
Python
|
UTF-8
|
Python
| false | false | 18,876 |
py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import site
import sys
import os
import warnings
import platform
import logging
has_paddle_dy_lib = False
dy_lib_name = 'libpaddle'
dy_lib_suffix = 'so'
if os.name == 'nt':
dy_lib_suffix = 'pyd'
current_path = os.path.abspath(os.path.dirname(__file__))
if os.path.exists(current_path + os.sep + dy_lib_name + '.' + dy_lib_suffix):
has_paddle_dy_lib = True
try:
if os.name == 'nt':
third_lib_path = current_path + os.sep + '..' + os.sep + 'libs'
# Will load shared library from 'path' on windows
os.environ['path'] = (
current_path + ';' + third_lib_path + ';' + os.environ['path']
)
sys.path.insert(0, third_lib_path)
# Note: from python3.8, PATH will not take effect
# https://github.com/python/cpython/pull/12302
# Use add_dll_directory to specify dll resolution path
if sys.version_info[:2] >= (3, 8):
os.add_dll_directory(third_lib_path)
except ImportError as e:
if os.name == 'nt':
executable_path = os.path.abspath(os.path.dirname(sys.executable))
raise ImportError(
"""NOTE: You may need to run \"set PATH=%s;%%PATH%%\"
if you encounter \"DLL load failed\" errors. If you have python
installed in another directory, replace \"%s\" with your own
directory. The original error is: \n %s"""
% (executable_path, executable_path, str(e))
)
else:
raise ImportError(
"""NOTE: You may need to run \"export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH\"
if you encounter \"libmkldnn.so not found\" errors. If you have python
installed in another directory, replace \"/usr/local/lib\" with your own
directory. The original error is: \n"""
+ str(e)
)
except Exception as e:
raise e
def avx_supported():
"""
Whether the current system (Linux, MacOS, Windows) supports AVX.
"""
sysstr = platform.system().lower()
has_avx = False
if sysstr == 'linux':
try:
pipe = os.popen('cat /proc/cpuinfo | grep -i avx')
has_avx = pipe.read() != ''
pipe.close()
except Exception as e:
sys.stderr.write(
'Can not get the AVX flag from /proc/cpuinfo.\n'
'The original error is: %s\n' % str(e)
)
return has_avx
elif sysstr == 'darwin':
try:
pipe = os.popen('sysctl machdep.cpu.features | grep -i avx')
has_avx = pipe.read() != ''
pipe.close()
except Exception as e:
sys.stderr.write(
'Can not get the AVX flag from machdep.cpu.features.\n'
'The original error is: %s\n' % str(e)
)
if not has_avx:
import subprocess
pipe = subprocess.Popen(
'sysctl machdep.cpu.leaf7_features | grep -i avx',
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
_ = pipe.communicate()
has_avx = True if pipe.returncode == 0 else False
return has_avx
elif sysstr == 'windows':
import ctypes
ONE_PAGE = ctypes.c_size_t(0x1000)
def asm_func(code_str, restype=ctypes.c_uint32, argtypes=()):
# Call the code_str as a function
# Alloc 1 page to ensure the protection
pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
pfnVirtualAlloc.restype = ctypes.c_void_p
MEM_COMMIT = ctypes.c_ulong(0x1000)
PAGE_READWRITE = ctypes.c_ulong(0x4)
address = pfnVirtualAlloc(
None, ONE_PAGE, MEM_COMMIT, PAGE_READWRITE
)
if not address:
raise Exception("Failed to VirtualAlloc")
# Copy the code into the memory segment
memmove = ctypes.CFUNCTYPE(
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_size_t,
)(ctypes._memmove_addr)
if memmove(address, code_str, len(code_str)) < 0:
raise Exception("Failed to memmove")
# Enable execute permissions
PAGE_EXECUTE = ctypes.c_ulong(0x10)
pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
res = pfnVirtualProtect(
ctypes.c_void_p(address),
ONE_PAGE,
PAGE_EXECUTE,
ctypes.byref(ctypes.c_ulong(0)),
)
if not res:
raise Exception("Failed VirtualProtect")
# Flush instruction cache
pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
pfnGetCurrentProcess.restype = ctypes.c_void_p
prochandle = ctypes.c_void_p(pfnGetCurrentProcess())
res = ctypes.windll.kernel32.FlushInstructionCache(
prochandle, ctypes.c_void_p(address), ONE_PAGE
)
if not res:
raise Exception("Failed FlushInstructionCache")
# Cast the memory to function
functype = ctypes.CFUNCTYPE(restype, *argtypes)
func = functype(address)
return func, address
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
# mov eax, 0x1; cpuid; mov eax, ecx; ret  -- returns the ECX feature bits of CPUID leaf 1
# (bit 28 of ECX is the AVX flag, hence avx_bit = 28 below)
code_str = b"\xB8\x01\x00\x00\x00\x0f\xa2\x89\xC8\xC3"
avx_bit = 28
retval = 0
try:
# Convert the code_str into a function that returns uint
func, address = asm_func(code_str)
retval = func()
ctypes.windll.kernel32.VirtualFree(
ctypes.c_void_p(address), ctypes.c_size_t(0), ONE_PAGE
)
except Exception as e:
sys.stderr.write(
'Failed getting the AVX flag on Windows.\n'
'The original error is: %s\n' % str(e)
)
return (retval & (1 << avx_bit)) > 0
else:
sys.stderr.write('Could not get the AVX flag on %s\n' % sysstr)
return False
def run_shell_command(cmd):
import subprocess
out, err = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
).communicate()
if err:
return None
else:
return out.decode('utf-8').strip()
def get_dso_path(core_so, dso_name):
if core_so and dso_name:
return run_shell_command(
"ldd %s|grep %s|awk '{print $3}'" % (core_so, dso_name)
)
else:
return None
def load_dso(dso_absolute_path):
if dso_absolute_path:
try:
from ctypes import cdll
cdll.LoadLibrary(dso_absolute_path)
except:
warnings.warn("Load {} failed".format(dso_absolute_path))
def pre_load(dso_name):
if has_paddle_dy_lib:
core_so = current_path + os.sep + dy_lib_name + '.' + dy_lib_suffix
else:
core_so = None
dso_path = get_dso_path(core_so, dso_name)
load_dso(dso_path)
def get_libc_ver():
ldd_glibc = run_shell_command("ldd --version | awk '/ldd/{print $NF}'")
if ldd_glibc is not None:
return ("glibc", ldd_glibc)
ldd_musl = run_shell_command("ldd 2>&1 | awk '/Version/{print $NF}'")
if ldd_musl is not None:
return ("musl", ldd_musl)
return (None, None)
def less_than_ver(a, b):
if a is None or b is None:
return False
import re
import operator
def to_list(s):
s = re.sub(r'(\.0+)+$', '', s)
return [int(x) for x in s.split('.')]
return operator.lt(to_list(a), to_list(b))
# NOTE(zhiqiu): An error may occurs when import paddle in linux platform with glibc < 2.22,
# the error message of which is "dlopen: cannot load any more object with static TLS".
# This happens when:
# (1) the number of dynamic shared libraries (DSOs) loaded > 14,
# (2) after that, load a dynamic shared library (DSO) with static TLS.
# For paddle, the problem is that 'libgomp' is a DSO with static TLS, and it is loaded after 14 DSOs.
# So, here is a tricky way to solve the problem by pre-loading 'libgomp' before 'libpaddle.so'.
# The final solution is to upgrade glibc to > 2.22 on the target system.
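# For example, CentOS 7 ships glibc 2.17 (as reported by `ldd --version`), which is
# below 2.23 and therefore takes the pre-load path below; systems with glibc >= 2.23
# skip it entirely.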
if platform.system().lower() == 'linux':
libc_type, libc_ver = get_libc_ver()
if libc_type == 'glibc' and less_than_ver(libc_ver, '2.23'):
try:
pre_load('libgomp')
except Exception as e:
# NOTE(zhiqiu): do not abort if failed, since it may success when import libpaddle.so
sys.stderr.write('Error: Can not preload libgomp.so')
try:
from . import libpaddle
if avx_supported() and not libpaddle.is_compiled_with_avx():
sys.stderr.write(
"Hint: Your machine support AVX, but the installed paddlepaddle doesn't have avx core. "
"Hence, no-avx core with worse preformance will be imported.\nIf you like, you could "
"reinstall paddlepaddle by 'python -m pip install --force-reinstall paddlepaddle-gpu[==version]' "
"to get better performance.\n"
)
# assign tensor alias
libpaddle.LoDTensor = libpaddle.Tensor
from .libpaddle import *
from .libpaddle import __doc__, __file__, __name__, __package__
from .libpaddle import __unittest_throw_exception__
from .libpaddle import _append_python_callable_object_and_return_id
from .libpaddle import _cleanup, _Scope
from .libpaddle import _get_use_default_grad_op_desc_maker_ops
from .libpaddle import _get_all_register_op_kernels
from .libpaddle import _is_program_version_supported
from .libpaddle import _set_eager_deletion_mode
from .libpaddle import _get_eager_deletion_vars
from .libpaddle import _set_fuse_parameter_group_size
from .libpaddle import _set_fuse_parameter_memory_size
from .libpaddle import _is_dygraph_debug_enabled
from .libpaddle import _dygraph_debug_level
from .libpaddle import _switch_tracer
from .libpaddle import _set_paddle_lib_path
from .libpaddle import _create_loaded_parameter
from .libpaddle import _cuda_synchronize
from .libpaddle import _is_compiled_with_heterps
from .libpaddle import _promote_types_if_complex_exists
from .libpaddle import _set_cached_executor_build_strategy
from .libpaddle import _device_synchronize
from .libpaddle import _xpu_device_synchronize
from .libpaddle import _get_current_stream
from .libpaddle import _Profiler, _ProfilerResult, _RecordEvent
from .libpaddle import _set_current_stream
from .libpaddle import _get_phi_kernel_name
from .libpaddle import _add_skip_comp_ops
from .libpaddle import _remove_skip_comp_ops
# prim controller flags
from .libpaddle import __set_bwd_prim_enabled
from .libpaddle import _is_bwd_prim_enabled
from .libpaddle import __set_fwd_prim_enabled
from .libpaddle import _is_fwd_prim_enabled
from .libpaddle import __set_all_prim_enabled
from .libpaddle import _set_prim_target_grad_name
# custom device
from .libpaddle import _get_current_custom_device_stream
from .libpaddle import _set_current_custom_device_stream
from .libpaddle import _synchronize_custom_device
from .libpaddle import CustomDeviceStream
from .libpaddle import CustomDeviceEvent
if sys.platform != 'win32':
from .libpaddle import _set_process_pids
from .libpaddle import _erase_process_pids
from .libpaddle import _set_process_signal_handler
from .libpaddle import _throw_error_if_process_failed
from .libpaddle import _convert_to_tensor_list
from .libpaddle import _array_to_share_memory_tensor
from .libpaddle import _cleanup_mmap_fds
from .libpaddle import _remove_tensor_list_mmap_fds
from .libpaddle import _set_max_memory_map_allocation_pool_size
except Exception as e:
if has_paddle_dy_lib:
sys.stderr.write(
'Error: Can not import paddle core while this file exists: '
+ current_path
+ os.sep
+ 'libpaddle.'
+ dy_lib_suffix
+ '\n'
)
if not avx_supported() and libpaddle.is_compiled_with_avx():
sys.stderr.write(
"Error: Your machine doesn't support AVX, but the installed PaddlePaddle is avx core, "
"you should reinstall paddlepaddle with no-avx core.\n"
)
raise e
def set_paddle_custom_device_lib_path(lib_path):
if os.environ.get('CUSTOM_DEVICE_ROOT', None) is not None:
# use the environment value that is already set
return
if os.path.exists(lib_path):
# set CUSTOM_DEVICE_ROOT default path
os.environ['CUSTOM_DEVICE_ROOT'] = os.path.normpath(lib_path)
else:
os.environ['CUSTOM_DEVICE_ROOT'] = ''
# set paddle lib path
def set_paddle_lib_path():
site_dirs = (
site.getsitepackages()
if hasattr(site, 'getsitepackages')
else [x for x in sys.path if 'site-packages' in x]
)
for site_dir in site_dirs:
lib_dir = os.path.sep.join([site_dir, 'paddle', 'libs'])
if os.path.exists(lib_dir):
_set_paddle_lib_path(lib_dir)
set_paddle_custom_device_lib_path(
os.path.sep.join([lib_dir, '..', '..', 'paddle-plugins'])
)
return
if hasattr(site, 'USER_SITE'):
lib_dir = os.path.sep.join([site.USER_SITE, 'paddle', 'libs'])
if os.path.exists(lib_dir):
_set_paddle_lib_path(lib_dir)
set_paddle_custom_device_lib_path(
os.path.sep.join([lib_dir, '..', '..', 'paddle-plugins'])
)
set_paddle_lib_path()
# We have 3 FLAGS to judge whether prim is enabled
# FLAGS_prim_forward: Open or close forward prim strategy
# FLAGS_prim_backward: Open or close backward prim strategy
# FLAGS_prim_all: Open or close all prim strategy
#
#
# Priorities:
# if With CINN and Dy2St:
# # # _set_prim_all_enabled > FLAGS_prim_all > check_and_set_prim_all_enabled == _set_prim_forward_enabled == _set_prim_backward_enabled > FLAGS_prim_forward == FLAGS_prim_backward
# else:
# # # _set_prim_all_enabled > FLAGS_prim_all == check_and_set_prim_all_enabled == _set_prim_forward_enabled == _set_prim_backward_enabled > FLAGS_prim_forward == FLAGS_prim_backward
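# Illustrative usage (not part of the original module): the flags are ordinary
# environment variables read via os.getenv, so a caller could, for instance, run
#     os.environ["FLAGS_prim_all"] = "true"
#     check_and_set_prim_all_enabled()
# before building a program to enable both the forward and backward prim strategies.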
def __sync_stat_with_flag(flag):
if flag == "FLAGS_prim_forward":
flag_value = os.getenv("FLAGS_prim_forward")
assert flag_value is not None
flag_value = flag_value.lower()
if flag_value == "false":
__set_fwd_prim_enabled(False)
elif flag_value == "true":
__set_fwd_prim_enabled(True)
else:
raise TypeError(f"flag {flag} should be true or false.")
print("forward prim enabled: ", bool(_is_fwd_prim_enabled()))
elif flag == "FLAGS_prim_backward":
flag_value = os.getenv("FLAGS_prim_backward")
assert flag_value is not None
flag_value = flag_value.lower()
if flag_value == "false":
__set_bwd_prim_enabled(False)
elif flag_value == "true":
__set_bwd_prim_enabled(True)
else:
raise TypeError(f"flag {flag} should be true or false.")
print("backward prim enabled: ", bool(_is_bwd_prim_enabled()))
elif flag == "FLAGS_prim_all":
flag_value = os.getenv("FLAGS_prim_all")
assert flag_value is not None
flag_value = flag_value.lower()
if flag_value == "false":
__set_all_prim_enabled(False)
elif flag_value == "true":
__set_all_prim_enabled(True)
else:
raise TypeError(f"flag {flag} should be true or false.")
print(
"all prim enabled: ",
bool(_is_fwd_prim_enabled() and _is_bwd_prim_enabled()),
)
else:
raise TypeError(
f"We only support FLAGS_prim_forward/FLAGS_prim_backward/FLAGS_prim_all but we got {flag}."
)
# Alert!!! This method is only for test coverage; users should never call it directly, as it may cause serious system errors.
def _test_use_sync(value):
__sync_stat_with_flag(value)
# ops in forward_blacklist will not be replaced by composite ops.
prim_config = {"forward_blacklist": []}
def _set_prim_forward_blacklist(ops=None):
if ops is None:
prim_config["forward_blacklist"] = []
elif isinstance(ops, str):
prim_config["forward_blacklist"].append(ops)
elif isinstance(ops, (list, tuple)):
for item in ops:
if not isinstance(item, str):
raise TypeError(
"ops set in forward_blacklist must belong to [str, str of tuple or list]"
)
else:
prim_config["forward_blacklist"].append(item)
else:
raise TypeError(
"ops set in forward_blacklist must belong to [str, str of tuple or list]"
)
return
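# Illustrative usage (the op names below are placeholders, not taken from the
# original module): either a single name or a sequence of names is accepted, e.g.
#     _set_prim_forward_blacklist("softmax")
#     _set_prim_forward_blacklist(["softmax", "gelu"])
# both of which extend prim_config["forward_blacklist"].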
def _set_prim_backward_enabled(value):
__set_bwd_prim_enabled(bool(value))
print("backward prim enabled: ", bool(_is_bwd_prim_enabled()))
def _set_prim_forward_enabled(value):
__set_fwd_prim_enabled(bool(value))
print("forward prim enabled: ", bool(_is_fwd_prim_enabled()))
def _set_prim_all_enabled(value):
__set_all_prim_enabled(bool(value))
print(
"all prim enabled: ",
bool(_is_fwd_prim_enabled() and _is_bwd_prim_enabled()),
)
def __sync_prim_backward_status():
flag_value = os.getenv("FLAGS_prim_backward")
if flag_value is None:
print("backward prim enabled: ", bool(_is_bwd_prim_enabled()))
else:
__sync_stat_with_flag("FLAGS_prim_backward")
def __sync_prim_forward_status():
flag_value = os.getenv("FLAGS_prim_forward")
if flag_value is None:
print("forward prim enabled: ", bool(_is_fwd_prim_enabled()))
else:
__sync_stat_with_flag("FLAGS_prim_forward")
def check_and_set_prim_all_enabled():
flag_value = os.getenv("FLAGS_prim_all")
if flag_value is None:
__sync_prim_backward_status()
__sync_prim_forward_status()
else:
__sync_stat_with_flag("FLAGS_prim_all")
|
[
"[email protected]"
] | |
42ba1e3c015f20441c374256ce07cf703eadaaad
|
9814fc360414ed900573181485966f63a56b261d
|
/setup.py
|
965ede863e91d9a8138bf2f09c34795175da88de
|
[
"MIT"
] |
permissive
|
krallin/captain-comeback
|
7d38425ec8f3b51626418f6dc6a6abfa8fa7cda3
|
e02e3774eab62d7b8ba454331a785e2ae32c89fc
|
refs/heads/master
| 2023-09-03T19:52:22.155417 | 2016-07-07T14:42:34 | 2016-07-07T16:17:02 | 62,813,918 | 0 | 1 | null | 2016-07-07T14:37:49 | 2016-07-07T14:37:48 | null |
UTF-8
|
Python
| false | false | 1,668 |
py
|
#!/usr/bin/env python
# coding: utf-8
import os
from setuptools import setup
HERE = os.path.dirname(__file__)
with open(os.path.join(HERE, 'README.md')) as readme_file:
readme = readme_file.read()
with open(os.path.join(HERE, 'CHANGELOG.md')) as history_file:
changelog = history_file.read()
requirements = ["linuxfd>=1.0,<2", "psutil>=4.3,<5", "six>=1.0,<2"]
test_requirements = []
setup(
name='captain_comeback',
version='0.1.0',
description="Userland container OOM manager.",
long_description=readme + '\n\n' + changelog,
author="Thomas Orozco",
author_email='[email protected]',
url='https://github.com/krallin/captain_comeback',
packages=[
'captain_comeback',
'captain_comeback.restart',
'captain_comeback.test'
],
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='captain_comeback',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='captain_comeback.test',
tests_require=test_requirements,
entry_points={'console_scripts': [
'captain-comeback = captain_comeback.cli:cli_entrypoint']}
)
|
[
"[email protected]"
] |